diff --git a/.circleci/config.yml b/.circleci/config.yml index 9fbed7bde44d..a646df4c88e3 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,7 +3,7 @@ version: 2.1 parameters: ci_builder_image: type: string - default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:v0.54.0 + default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:v0.55.0 ci_builder_rust_image: type: string default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder-rust:latest @@ -26,6 +26,9 @@ parameters: reproducibility_dispatch: type: boolean default: false + diff_asterisc_bytecode_dispatch: + type: boolean + default: false kontrol_dispatch: type: boolean default: false @@ -41,12 +44,19 @@ parameters: publish_contract_artifacts_dispatch: type: boolean default: false + stale_check_dispatch: + type: boolean + default: false + contracts_coverage_dispatch: + type: boolean + default: false orbs: go: circleci/go@1.8.0 gcp-cli: circleci/gcp-cli@3.0.1 slack: circleci/slack@4.10.1 shellcheck: circleci/shellcheck@3.2.0 + codecov: codecov/codecov@5.0.3 commands: gcp-oidc-authenticate: description: "Authenticate with GCP using a CircleCI OIDC token." @@ -113,7 +123,11 @@ commands: description: "Install the dependencies for the smart contracts" steps: - run: - command: just install + name: Install dependencies + command: | + # Manually craft the submodule update command in order to take advantage + # of the -j parameter, which speeds it up a lot. + git submodule update --init --recursive --force -j 8 working_directory: packages/contracts-bedrock notify-failures-on-develop: @@ -196,10 +210,10 @@ jobs: gotestsum --format=testname --junitfile=../tmp/test-results/cannon-32.xml --jsonfile=../tmp/testlogs/log-32.json \ -- -parallel=$(nproc) -coverpkg=github.com/ethereum-optimism/optimism/cannon/... -coverprofile=coverage-32.out ./... working_directory: cannon - - run: - name: Upload Cannon coverage - command: codecov --verbose --clean --flags cannon-go-tests-32 -f ./coverage-32.out - working_directory: cannon + - codecov/upload: + disable_search: true + files: ./cannon/coverage-32.out + flags: cannon-go-tests-32 - when: condition: equal: [64, <>] @@ -211,10 +225,10 @@ jobs: gotestsum --format=testname --junitfile=../tmp/test-results/cannon-64.xml --jsonfile=../tmp/testlogs/log-64.json \ -- --tags=cannon64 -parallel=$(nproc) -coverpkg=github.com/ethereum-optimism/optimism/cannon/... -coverprofile=coverage-64.out ./... 
working_directory: cannon - - run: - name: Upload Cannon coverage - command: codecov --verbose --clean --flags cannon-go-tests-64 -f ./coverage-64.out - working_directory: cannon + - codecov/upload: + disable_search: true + files: ./cannon/coverage-64.out + flags: cannon-go-tests-64 - store_test_results: path: ./tmp/test-results - store_artifacts: @@ -239,14 +253,58 @@ jobs: command: python3 maketests.py && git diff --exit-code working_directory: cannon/mipsevm/tests/open_mips_tests + diff-asterisc-bytecode: + docker: + - image: <> + resource_class: medium + steps: + - checkout + - run: + name: Check `RISCV.sol` bytecode + working_directory: packages/contracts-bedrock + command: | + # Clone asterisc @ the pinned version to fetch remote `RISCV.sol` + ASTERISC_REV="v$(yq '.tools.asterisc' ../../mise.toml)" + REMOTE_ASTERISC_PATH="./src/vendor/asterisc/RISCV_Remote.sol" + git clone https://github.com/ethereum-optimism/asterisc \ + -b $ASTERISC_REV && \ + cp ./asterisc/rvsol/src/RISCV.sol $REMOTE_ASTERISC_PATH + + # Replace import paths + sed -i -e 's/@optimism\///' $REMOTE_ASTERISC_PATH + # Replace legacy interface paths + sed -i -e 's/src\/cannon\/interfaces\//interfaces\/cannon\//g' $REMOTE_ASTERISC_PATH + sed -i -e 's/src\/dispute\/interfaces\//interfaces\/dispute\//g' $REMOTE_ASTERISC_PATH + # Replace contract name + sed -i -e 's/contract RISCV/contract RISCV_Remote/' $REMOTE_ASTERISC_PATH + + # Install deps + forge install + + # Diff bytecode, with both contracts compiled in the local environment. + REMOTE_ASTERISC_CODE="$(forge inspect RISCV_Remote bytecode | tr -d '\n')" + LOCAL_ASTERISC_CODE="$(forge inspect RISCV bytecode | tr -d '\n')" + if [ "$REMOTE_ASTERISC_CODE" != "$LOCAL_ASTERISC_CODE" ]; then + echo "Asterisc bytecode mismatch. Local version does not match remote. Diff:" + diff <(echo "$REMOTE_ASTERISC_CODE") <(echo "$LOCAL_ASTERISC_CODE") + else + echo "Asterisc version up to date." 
+ fi + - notify-failures-on-develop: + mentions: "@clabby @proofs-team" + contracts-bedrock-build: machine: true resource_class: ethereum-optimism/latitude-1 parameters: - skip_pattern: - description: Glob pattern of tests to skip + build_args: + description: Forge build arguments type: string default: "" + profile: + description: Profile to use for building + type: string + default: ci steps: - checkout - install-contracts-dependencies @@ -259,9 +317,9 @@ jobs: working_directory: packages/contracts-bedrock - run: name: Build contracts - command: forge build --deny-warnings --skip <> + command: forge build <> environment: - FOUNDRY_PROFILE: ci + FOUNDRY_PROFILE: <> working_directory: packages/contracts-bedrock - run: name: Generate allocs @@ -347,7 +405,7 @@ jobs: machine: image: <> resource_class: "<>" - docker_layer_caching: true # we rely on this for faster builds, and actively warm it up for builds with common stages + docker_layer_caching: true # we rely on this for faster builds, and actively warm it up for builds with common stages steps: - checkout - attach_workspace: @@ -458,7 +516,7 @@ jobs: docker save -o /tmp/docker_images/<>.tar $IMAGE_NAME - persist_to_workspace: root: /tmp/docker_images - paths: # only write the one file, to avoid concurrent workspace-file additions + paths: # only write the one file, to avoid concurrent workspace-file additions - "<>.tar" - when: condition: "<>" @@ -471,18 +529,18 @@ jobs: condition: or: - and: - - "<>" - - "<>" + - "<>" + - "<>" - and: - - "<>" - - equal: [develop, << pipeline.git.branch >>] + - "<>" + - equal: [develop, << pipeline.git.branch >>] steps: - gcp-oidc-authenticate: service_account_email: GCP_SERVICE_ATTESTOR_ACCOUNT_EMAIL - run: name: Sign command: | - VER=$(jq -r .binary_signer < versions.json) + VER=$(yq '.tools.binary_signer' mise.toml) wget -O - "https://github.com/ethereum-optimism/binary_signer/archive/refs/tags/v${VER}.tar.gz" | tar xz cd "binary_signer-${VER}/signer" @@ -561,23 +619,17 @@ jobs: command: just coverage-lcov no_output_timeout: 18m environment: - FOUNDRY_PROFILE: ci + FOUNDRY_PROFILE: cicoverage working_directory: packages/contracts-bedrock - - run: - name: upload coverage - command: codecov --verbose --clean --flags contracts-bedrock-tests - environment: - FOUNDRY_PROFILE: ci + - codecov/upload: + disable_search: true + files: ./packages/contracts-bedrock/lcov.info + flags: contracts-bedrock-tests contracts-bedrock-tests: - docker: - - image: <> - resource_class: xlarge + machine: true + resource_class: ethereum-optimism/latitude-1 parameters: - test_parallelism: - description: Number of test jobs to run in parallel - type: integer - default: 4 test_list: description: List of test files to run type: string @@ -601,7 +653,6 @@ jobs: description: Profile to use for testing type: string default: ci - parallelism: <> steps: - checkout - attach_workspace: { at: "." 
} @@ -617,14 +668,6 @@ jobs: working_directory: packages/contracts-bedrock - check-changed: patterns: contracts-bedrock,op-node - - restore_cache: - name: Restore Go modules cache - key: gomod-{{ checksum "go.sum" }} - - restore_cache: - name: Restore Go build cache - keys: - - golang-build-cache-contracts-bedrock-tests-{{ checksum "go.sum" }} - - golang-build-cache-contracts-bedrock-tests- - run: name: Print dependencies command: just dep-status @@ -660,6 +703,10 @@ jobs: FOUNDRY_PROFILE: ci working_directory: packages/contracts-bedrock when: on_fail + - run: + name: Lint forge test names + command: just lint-forge-tests-check-no-build + working_directory: packages/contracts-bedrock - save_cache: name: Save Go build cache key: golang-build-cache-contracts-bedrock-tests-{{ checksum "go.sum" }} @@ -668,17 +715,14 @@ jobs: - notify-failures-on-develop contracts-bedrock-checks: - docker: - - image: <> - resource_class: xlarge + machine: true + resource_class: ethereum-optimism/latitude-1 steps: - checkout - attach_workspace: { at: "." } - install-contracts-dependencies - check-changed: patterns: contracts-bedrock,op-node - - setup_remote_docker: - docker_layer_caching: true - run: name: print forge version command: forge --version @@ -707,22 +751,7 @@ jobs: - run-contracts-check: command: unused-imports-check-no-build - run-contracts-check: - command: lint-forge-tests-check-no-build - - contracts-bedrock-validate-spacers: - docker: - - image: <> - resource_class: medium - steps: - - checkout - - attach_workspace: { at: "." } - - install-contracts-dependencies - - check-changed: - patterns: contracts-bedrock - - run: - name: validate spacers - command: just validate-spacers-no-build - working_directory: packages/contracts-bedrock + command: validate-spacers-no-build todo-issues: parameters: @@ -835,115 +864,84 @@ jobs: path: tmp/testlogs when: always - go-test: + go-tests: + parameters: + notify: + description: Whether to notify on failure + type: boolean + default: false + mentions: + description: Slack user or group to mention when notifying of failures + type: string + default: "" + resource_class: + description: Machine resource class + type: string + default: ethereum-optimism/latitude-1-go-e2e + no_output_timeout: + description: Timeout for when CircleCI kills the job if there's no output + type: string + default: 60m + test_timeout: + description: Timeout for running tests + type: string + default: 10m + environment_overrides: + description: Environment overrides + type: string + default: "" + packages: + description: List of packages to test + type: string machine: true - resource_class: ethereum-optimism/latitude-1 + resource_class: <> steps: - checkout - attach_workspace: at: "." + - run: + name: build op-program-client + command: make op-program-client + working_directory: op-program - run: name: run tests + no_output_timeout: <> command: | mkdir -p ./tmp/test-results && mkdir -p ./tmp/testlogs + cd op-e2e && make pre-test && cd .. + packages=( - op-batcher - op-chain-ops - op-node - op-proposer - op-challenger - op-dispute-mon - op-conductor - op-program - op-service - op-supervisor - op-deployer + <> ) formatted_packages="" for package in "${packages[@]}"; do formatted_packages="$formatted_packages ./$package/..." 
done + export OP_E2E_CANNON_ENABLED="false" + export OP_E2E_SKIP_SLOW_TEST=true + export OP_E2E_USE_HTTP=true export ENABLE_ANVIL=true export SEPOLIA_RPC_URL="https://ci-sepolia-l1-archive.optimism.io" + export MAINNET_RPC_URL="https://ci-mainnet-l1-archive.optimism.io" + + <> gotestsum --format=testname \ --junitfile=./tmp/test-results/results.xml \ --jsonfile=./tmp/testlogs/log.json \ - -- -coverpkg=github.com/ethereum-optimism/optimism/... \ - -coverprofile=coverage.out $formatted_packages + --rerun-fails=2 \ + --packages="$formatted_packages" \ + -- -coverprofile=coverage.out -timeout=<> + - codecov/upload: + disable_search: true + files: ./coverage.out - store_test_results: path: ./tmp/test-results - store_artifacts: path: ./tmp/testlogs when: always - - go-e2e-test: - parameters: - module: - description: Go Module Name - type: string - target: - description: The make target to execute - type: string - notify: - description: Whether to notify on failure - type: boolean - default: false - mentions: - description: Slack user or group to mention when notifying of failures - type: string - default: "" - resource_class: - description: Machine resource class - type: string - default: ethereum-optimism/latitude-1 - skip_slow_tests: - description: Indicates that slow tests should be skipped - type: boolean - default: false - machine: true - resource_class: <> - steps: - - checkout - - attach_workspace: - at: ./tmp/workspace - - run: - name: Load devnet-allocs and artifacts - command: | - mkdir -p .devnet - cp -r ./tmp/workspace/.devnet* . - cp -r ./tmp/workspace/packages/contracts-bedrock/forge-artifacts packages/contracts-bedrock/forge-artifacts - cp ./tmp/workspace/packages/contracts-bedrock/deploy-config/devnetL1.json packages/contracts-bedrock/deploy-config/devnetL1.json - cp -r ./tmp/workspace/packages/contracts-bedrock/deployments/devnetL1 packages/contracts-bedrock/deployments/devnetL1 - - run: - name: print go's available MIPS targets - command: go tool dist list | grep mips - - run: - name: run tests - no_output_timeout: 20m - command: | - mkdir -p ./tmp/testlogs - mkdir -p ./tmp/test-results - - # The below env var gets overridden when running make test-cannon, but we - # need to explicitly set it here to prevent Cannon from running when we don't - # want it to. - export OP_E2E_CANNON_ENABLED="false" - export OP_E2E_SKIP_SLOW_TEST=<> - # Note: We don't use circle CI test splits because we need to split by test name, not by package. There is an additional - # constraint that gotestsum does not currently (nor likely will) accept files from different packages when building. 
- JUNIT_FILE=../tmp/test-results/<>_<>.xml JSON_LOG_FILE=../tmp/testlogs/test.log make <> - working_directory: <> - - store_artifacts: - path: ./tmp/testlogs - when: always - - store_artifacts: - path: ./tmp/test-results - when: always - - store_test_results: - path: ./tmp/test-results - when: condition: "<>" steps: @@ -1000,8 +998,7 @@ jobs: steps: - checkout - setup_remote_docker - - run: - make -C op-program verify-reproducibility + - run: make -C op-program verify-reproducibility - notify-failures-on-develop: mentions: "@proofs-team" @@ -1053,7 +1050,7 @@ jobs: - checkout - unless: condition: - equal: [ "develop", << pipeline.git.branch >> ] + equal: ["develop", << pipeline.git.branch >>] steps: - run: # Scan changed files in PRs, block on new issues only (existing issues ignored) @@ -1109,7 +1106,7 @@ jobs: paths: - "/go/pkg/mod" - bedrock-go-tests: # just a helper, that depends on all the actual test jobs + bedrock-go-tests: # just a helper, that depends on all the actual test jobs docker: # Use a smaller base image to avoid pulling the huge ci-builder # image which is not needed for this job and sometimes misses @@ -1120,6 +1117,7 @@ jobs: - run: echo Done fpp-verify: + circleci_ip_ranges: true docker: - image: cimg/go:1.21 steps: @@ -1271,49 +1269,59 @@ jobs: command: | goreleaser release --clean -f ./<>/<> + stale-check: + docker: + - image: cimg/python:3.11 + steps: + - run: + name: Run Stale Check Script + command: | + git clone --branch main --depth 1 https://github.com/ethereum-optimism/circleci-utils.git /tmp/circleci-utils + cd /tmp/circleci-utils/stale-check + pip3 install -r requirements.txt + python3 stale-check.py --repo "ethereum-optimism/${CIRCLE_PROJECT_REPONAME}" --github-token "${STALE_GITHUB_TOKEN}" + workflows: main: when: and: - or: # Trigger on new commits - - equal: [ webhook, << pipeline.trigger_source >> ] + - equal: [webhook, << pipeline.trigger_source >>] # Trigger on manual triggers if explicitly requested - - equal: [ true, << pipeline.parameters.main_dispatch >> ] + - equal: [true, << pipeline.parameters.main_dispatch >>] - not: - equal: [ scheduled_pipeline, << pipeline.trigger_source >> ] + equal: [scheduled_pipeline, << pipeline.trigger_source >>] jobs: - go-mod-download - contracts-bedrock-build: + name: contracts-bedrock-build # Build with just core + script contracts. - skip_pattern: test + build_args: --deny-warnings --skip test + - contracts-bedrock-build: + name: contracts-bedrock-build-coverage + profile: cicoverage - check-kontrol-build: requires: - contracts-bedrock-build - contracts-bedrock-tests: # Test everything except PreimageOracle.t.sol since it's slow. name: contracts-bedrock-tests - test_parallelism: 4 test_list: find test -name "*.t.sol" -not -name "PreimageOracle.t.sol" - contracts-bedrock-tests: # PreimageOracle test is slow, run it separately to unblock CI. name: contracts-bedrock-tests-preimage-oracle - test_parallelism: 1 test_list: find test -name "PreimageOracle.t.sol" - contracts-bedrock-tests: # Heavily fuzz any fuzz tests within added or modified test files. 
name: contracts-bedrock-tests-heavy-fuzz-modified - test_parallelism: 1 - test_list: git diff origin/develop...HEAD --name-only -- './test/**/*.t.sol' | sed 's|packages/contracts-bedrock/||' + test_list: git diff origin/develop...HEAD --name-only --diff-filter=AM -- './test/**/*.t.sol' | sed 's|packages/contracts-bedrock/||' test_timeout: 1h test_profile: ciheavy - - contracts-bedrock-coverage - contracts-bedrock-checks: requires: - contracts-bedrock-build - - contracts-bedrock-validate-spacers: - requires: - - contracts-bedrock-build + - diff-asterisc-bytecode - semgrep-scan: name: semgrep-scan-local scan_command: semgrep scan --timeout=100 --config .semgrep/rules/ --error . @@ -1343,33 +1351,32 @@ workflows: on_changes: op-e2e,packages/contracts-bedrock/src uses_artifacts: true requires: ["contracts-bedrock-build"] - - go-test: - name: go-test-all - requires: - - contracts-bedrock-build - go-test-kurtosis: name: op-deployer-integration module: op-deployer test_directory: ./pkg/deployer/integration_test uses_artifacts: true requires: ["contracts-bedrock-build"] - - go-e2e-test: - name: op-e2e-HTTP-tests - module: op-e2e - target: test-http - requires: - - contracts-bedrock-build - - go-e2e-test: - name: op-e2e-action-tests - module: op-e2e - target: test-actions - requires: - - contracts-bedrock-build - - go-e2e-test: - name: op-e2e-fault-proof-tests - module: op-e2e - target: test-fault-proofs - skip_slow_tests: true + - go-tests: + packages: | + op-batcher + op-chain-ops + op-node + op-proposer + op-challenger + op-dispute-mon + op-conductor + op-program + op-service + op-supervisor + op-deployer + op-e2e/system + op-e2e/e2eutils + op-e2e/opgeth + op-e2e/interop + op-e2e/actions + op-e2e/faultproofs + packages/contracts-bedrock/scripts/checks requires: - contracts-bedrock-build - cannon-prestate @@ -1386,16 +1393,13 @@ workflows: - go-mod-download - op-deployer-integration - op-program-compat - - op-e2e-HTTP-tests - - op-e2e-fault-proof-tests - - op-e2e-action-tests # Not needed for the devnet but we want to make sure they build successfully - cannon-docker-build - op-dispute-mon-docker-build - op-program-docker-build - op-supervisor-docker-build - proofs-tools-docker-build - - go-test-all + - go-tests - docker-build: name: <>-docker-build docker_tags: <>,<> @@ -1425,7 +1429,7 @@ workflows: notify: true matrix: parameters: - mips_word_size: [ 32, 64 ] + mips_word_size: [32, 64] - cannon-build-test-vectors - todo-issues: name: todo-issues-check @@ -1434,8 +1438,7 @@ workflows: name: shell-check # We don't need the `exclude` key as the orb detects the `.shellcheckrc` dir: . 
- ignore-dirs: - ./packages/contracts-bedrock/lib + ignore-dirs: ./packages/contracts-bedrock/lib go-release-deployer: jobs: @@ -1452,7 +1455,7 @@ workflows: release: when: not: - equal: [ scheduled_pipeline, << pipeline.trigger_source >> ] + equal: [scheduled_pipeline, << pipeline.trigger_source >>] jobs: # Wait for approval on the release - hold: @@ -1545,7 +1548,7 @@ workflows: scheduled-todo-issues: when: - equal: [ build_four_hours, <> ] + equal: [build_four_hours, <>] jobs: - todo-issues: name: todo-issue-checks @@ -1554,7 +1557,7 @@ workflows: scheduled-fpp: when: - equal: [ build_hourly, <> ] + equal: [build_hourly, <>] jobs: - fpp-verify: context: @@ -1564,8 +1567,9 @@ workflows: develop-publish-contract-artifacts: when: or: - - equal: [ "develop", <> ] - - equal: [ true, <> ] + - equal: ["develop", <>] + - equal: + [true, <>] jobs: - publish-contract-artifacts @@ -1573,10 +1577,10 @@ workflows: when: and: - or: - - equal: [ "develop", <> ] - - equal: [ true, <> ] + - equal: ["develop", <>] + - equal: [true, <>] - not: - equal: [ scheduled_pipeline, << pipeline.trigger_source >> ] + equal: [scheduled_pipeline, << pipeline.trigger_source >>] jobs: - go-mod-download - cannon-prestate @@ -1586,19 +1590,20 @@ workflows: context: - slack - contracts-bedrock-build: - skip_pattern: test + build_args: --deny-warnings --skip test context: - slack - - go-e2e-test: + - go-tests: name: op-e2e-cannon-tests - module: op-e2e - target: test-cannon notify: true mentions: "@proofs-team" + no_output_timeout: 60m + test_timeout: 59m resource_class: ethereum-optimism/latitude-fps-1 - requires: - - contracts-bedrock-build - - cannon-prestate + environment_overrides: | + export OP_E2E_CANNON_ENABLED="true" + packages: | + op-e2e/faultproofs context: - slack @@ -1606,10 +1611,10 @@ workflows: when: and: - or: - - equal: [ "develop", <> ] - - equal: [ true, <> ] + - equal: ["develop", <>] + - equal: [true, <>] - not: - equal: [ scheduled_pipeline, << pipeline.trigger_source >> ] + equal: [scheduled_pipeline, << pipeline.trigger_source >>] jobs: - kontrol-tests: context: @@ -1619,11 +1624,11 @@ workflows: scheduled-cannon-full-tests: when: or: - - equal: [ build_four_hours, <> ] - - equal: [ true, << pipeline.parameters.cannon_full_test_dispatch >> ] + - equal: [build_four_hours, <>] + - equal: [true, << pipeline.parameters.cannon_full_test_dispatch >>] jobs: - contracts-bedrock-build: - skip_pattern: test + build_args: --deny-warnings --skip test - cannon-go-lint-and-test: name: cannon-go-lint-and-test-<>-bit requires: @@ -1632,14 +1637,25 @@ workflows: - slack matrix: parameters: - mips_word_size: [ 32, 64 ] + mips_word_size: [32, 64] + + scheduled-forge-coverage: + when: + and: + - or: + - equal: ["develop", <>] + - equal: [true, <>] + - not: + equal: [scheduled_pipeline, << pipeline.trigger_source >>] + jobs: + - contracts-bedrock-coverage scheduled-docker-publish: when: or: - - equal: [ build_hourly, <> ] + - equal: [build_hourly, <>] # Trigger on manual triggers if explicitly requested - - equal: [ true, << pipeline.parameters.docker_publish_dispatch >> ] + - equal: [true, << pipeline.parameters.docker_publish_dispatch >>] jobs: - docker-build: matrix: @@ -1677,23 +1693,23 @@ workflows: name: <>-cross-platform requires: - <>-docker-publish - - docker-build: - name: contracts-bedrock-docker-publish - docker_name: contracts-bedrock - docker_tags: <>,<> - resource_class: xlarge - publish: true - context: - - oplabs-gcr - - slack scheduled-preimage-reproducibility: when: or: - - equal: [build_daily, <> ] 
+ - equal: [build_daily, <>] # Trigger on manual triggers if explicitly requested - - equal: [ true, << pipeline.parameters.reproducibility_dispatch >> ] + - equal: [true, << pipeline.parameters.reproducibility_dispatch >>] jobs: - preimage-reproducibility: - context: - slack + context: slack + + scheduled-stale-check: + when: + or: + - equal: [build_daily, <>] + # Trigger on manual triggers if explicitly requested + - equal: [true, << pipeline.parameters.stale_check_dispatch >>] + jobs: + - stale-check: + context: github-token-stale-check diff --git a/.github/workflows/close-stale.yml b/.github/workflows/close-stale.yml deleted file mode 100644 index 68e8b4ec82c5..000000000000 --- a/.github/workflows/close-stale.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: 'Close stale issues and PRs' -on: - schedule: - - cron: '30 1 * * *' - -jobs: - stale: - runs-on: ubuntu-latest - steps: - - uses: actions/stale@v9 - with: - stale-pr-message: 'This PR is stale because it has been open 14 days with no activity. Remove stale label or comment or this will be closed in 5 days.' - stale-issue-label: 'S-stale' - exempt-pr-labels: 'S-exempt-stale' - days-before-issue-stale: 999 - days-before-pr-stale: 14 - days-before-close: 5 - repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/tag-service.yml b/.github/workflows/tag-service.yml deleted file mode 100644 index 439b48f13d4e..000000000000 --- a/.github/workflows/tag-service.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: Tag Service - -on: - workflow_dispatch: - inputs: - bump: - description: 'How much to bump the version by' - required: true - type: choice - options: - - major - - minor - - patch - - prerelease - - finalize-prerelease - service: - description: 'Which service to release' - required: true - type: choice - options: - - ci-builder - - ci-builder-rust - - op-node - - op-batcher - - op-proposer - - op-challenger - - op-program - - op-dispute-mon - - op-ufm - - da-server - - op-contracts - - op-conductor - prerelease: - description: Increment major/minor/patch as prerelease? 
- required: false - type: boolean - default: false - -permissions: - contents: write - -jobs: - release: - runs-on: ubuntu-latest - environment: op-stack-production - - steps: - - uses: actions/checkout@v4 - - name: Fetch tags - run: git fetch --tags origin --force - - name: Setup Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - name: Install deps - run: pip install -r requirements.txt - working-directory: ops/tag-service - - run: ops/tag-service/tag-service.py --bump="$BUMP" --service="$SERVICE" - env: - INPUT_GITHUB_TOKEN: ${{ github.token }} - BUMP: ${{ github.event.inputs.bump }} - SERVICE: ${{ github.event.inputs.service }} - if: ${{ github.event.inputs.prerelease == 'false' }} - - run: ops/tag-service/tag-service.py --bump="$BUMP" --service="$SERVICE" --pre-release - env: - INPUT_GITHUB_TOKEN: ${{ github.token }} - BUMP: ${{ github.event.inputs.bump }} - SERVICE: ${{ github.event.inputs.service }} - if: ${{ github.event.inputs.prerelease == 'true' }} diff --git a/.semgrep/rules/sol-rules.yaml b/.semgrep/rules/sol-rules.yaml index 57dc88a3e51e..177fabb6fa8d 100644 --- a/.semgrep/rules/sol-rules.yaml +++ b/.semgrep/rules/sol-rules.yaml @@ -84,18 +84,18 @@ rules: pattern-regex: function\s+\w+\s*\(\s*([^)]*?\b\w+\s+(?!_)(?!memory\b)(?!calldata\b)(?!storage\b)(?!payable\b)\w+\s*(?=,|\))) paths: exclude: - - packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721.sol - - packages/contracts-bedrock/src/universal/interfaces/IWETH98.sol - - packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol + - packages/contracts-bedrock/interfaces/universal/IOptimismMintableERC721.sol + - packages/contracts-bedrock/interfaces/universal/IWETH98.sol + - packages/contracts-bedrock/interfaces/dispute/IDelayedWETH.sol - op-chain-ops/script/testdata/scripts/ScriptExample.s.sol - packages/contracts-bedrock/test - packages/contracts-bedrock/scripts/libraries/Solarray.sol - packages/contracts-bedrock/scripts/interfaces/IGnosisSafe.sol - - packages/contracts-bedrock/src/universal/interfaces/IWETH.sol + - packages/contracts-bedrock/interfaces/universal/IWETH.sol - packages/contracts-bedrock/src/universal/WETH98.sol - - packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol + - packages/contracts-bedrock/interfaces/L2/ISuperchainWETH.sol - packages/contracts-bedrock/src/L2/SuperchainWETH.sol - - packages/contracts-bedrock/src/governance/interfaces/IGovernanceToken.sol + - packages/contracts-bedrock/interfaces/governance/IGovernanceToken.sol - packages/contracts-bedrock/src/governance/GovernanceToken.sol - id: sol-style-return-arg-fmt @@ -105,13 +105,13 @@ rules: pattern-regex: returns\s*(\w+\s*)?\(\s*([^)]*?\b\w+\s+(?!memory\b)(?!calldata\b)(?!storage\b)(?!payable\b)\w+(?> (((insn >> 6) & 0x1f) + 32)) default: - panic(fmt.Sprintf("invalid instruction: %x", insn)) + panic(fmt.Sprintf("invalid instruction: 0x%08x", insn)) } } else { switch opcode { diff --git a/cannon/mipsevm/multithreaded/instrumented_test.go b/cannon/mipsevm/multithreaded/instrumented_test.go index ec079e50c8ce..659a5b7a5c29 100644 --- a/cannon/mipsevm/multithreaded/instrumented_test.go +++ b/cannon/mipsevm/multithreaded/instrumented_test.go @@ -34,29 +34,178 @@ func TestInstrumentedState_Claim(t *testing.T) { testutil.RunVMTest_Claim(t, CreateInitialState, vmFactory, false) } -func TestInstrumentedState_MultithreadedProgram(t *testing.T) { +func TestInstrumentedState_UtilsCheck(t *testing.T) { + // Sanity check that test running utilities will return a non-zero exit code on 
failure t.Parallel() - state, _ := testutil.LoadELFProgram(t, testutil.ProgramPath("multithreaded"), CreateInitialState, false) - oracle := testutil.StaticOracle(t, []byte{}) - - var stdOutBuf, stdErrBuf bytes.Buffer - us := NewInstrumentedState(state, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger(), nil) - for i := 0; i < 2_000_000; i++ { - if us.GetState().GetExited() { - break - } - _, err := us.Step(false) - require.NoError(t, err) + cases := []struct { + name string + expectedOutput string + }{ + {name: "utilscheck", expectedOutput: "Test failed: ShouldFail"}, + {name: "utilscheck2", expectedOutput: "Test failed: ShouldFail (subtest 2)"}, + {name: "utilscheck3", expectedOutput: "Test panicked: ShouldFail (panic test)"}, + {name: "utilscheck4", expectedOutput: "Test panicked: ShouldFail"}, } - t.Logf("Completed in %d steps", state.Step) - require.True(t, state.Exited, "must complete program") - require.Equal(t, uint8(0), state.ExitCode, "exit with 0") - require.Contains(t, "waitgroup result: 42", stdErrBuf.String()) - require.Contains(t, "channels result: 1234", stdErrBuf.String()) - require.Equal(t, "", stdErrBuf.String(), "should not print any errors") + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + state, meta := testutil.LoadELFProgram(t, testutil.ProgramPath(c.name), CreateInitialState, false) + oracle := testutil.StaticOracle(t, []byte{}) + + var stdOutBuf, stdErrBuf bytes.Buffer + us := NewInstrumentedState(state, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger(), meta) + + for i := 0; i < 1_000_000; i++ { + if us.GetState().GetExited() { + break + } + _, err := us.Step(false) + require.NoError(t, err) + } + t.Logf("Completed in %d steps", state.Step) + + require.True(t, state.Exited, "must complete program") + require.Equal(t, uint8(1), state.ExitCode, "exit with 1") + require.Contains(t, stdOutBuf.String(), c.expectedOutput) + require.NotContains(t, stdOutBuf.String(), "Passed test that should have failed") + require.Equal(t, "", stdErrBuf.String(), "should not print any errors") + }) + } } +func TestInstrumentedState_MultithreadedProgram(t *testing.T) { + if os.Getenv("SKIP_SLOW_TESTS") == "true" { + t.Skip("Skipping slow test because SKIP_SLOW_TESTS is enabled") + } + + t.Parallel() + cases := []struct { + name string + expectedOutput []string + programName string + steps int + }{ + { + name: "general concurrency test", + expectedOutput: []string{ + "waitgroup result: 42", + "channels result: 1234", + "GC complete!", + }, + programName: "mt-general", + steps: 5_000_000, + }, + { + name: "atomic test", + expectedOutput: []string{ + "Atomic tests passed", + }, + programName: "mt-atomic", + steps: 350_000_000, + }, + { + name: "waitgroup test", + expectedOutput: []string{ + "WaitGroup tests passed", + }, + programName: "mt-wg", + steps: 15_000_000, + }, + { + name: "mutex test", + expectedOutput: []string{ + "Mutex test passed", + }, + programName: "mt-mutex", + steps: 5_000_000, + }, + { + name: "cond test", + expectedOutput: []string{ + "Cond test passed", + }, + programName: "mt-cond", + steps: 5_000_000, + }, + { + name: "rwmutex test", + expectedOutput: []string{ + "RWMutex test passed", + }, + programName: "mt-rwmutex", + steps: 5_000_000, + }, + { + name: "once test", + expectedOutput: []string{ + "Once test passed", + }, + programName: "mt-once", + steps: 5_000_000, + }, + { + name: "oncefunc test", + expectedOutput: []string{ + 
"OnceFunc tests passed", + }, + programName: "mt-oncefunc", + steps: 15_000_000, + }, + { + name: "map test", + expectedOutput: []string{ + "Map test passed", + }, + programName: "mt-map", + steps: 150_000_000, + }, + { + name: "pool test", + expectedOutput: []string{ + "Pool test passed", + }, + programName: "mt-pool", + steps: 50_000_000, + }, + { + name: "value test", + expectedOutput: []string{ + "Value tests passed", + }, + programName: "mt-value", + steps: 3_000_000, + }, + } + + for _, test := range cases { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + state, meta := testutil.LoadELFProgram(t, testutil.ProgramPath(test.programName), CreateInitialState, false) + oracle := testutil.StaticOracle(t, []byte{}) + + var stdOutBuf, stdErrBuf bytes.Buffer + us := NewInstrumentedState(state, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger(), meta) + + for i := 0; i < test.steps; i++ { + if us.GetState().GetExited() { + break + } + _, err := us.Step(false) + require.NoError(t, err) + } + t.Logf("Completed in %d steps", state.Step) + + require.True(t, state.Exited, "must complete program") + require.Equal(t, uint8(0), state.ExitCode, "exit with 0") + for _, expected := range test.expectedOutput { + require.Contains(t, stdOutBuf.String(), expected) + } + require.Equal(t, "", stdErrBuf.String(), "should not print any errors") + }) + } +} func TestInstrumentedState_Alloc(t *testing.T) { if os.Getenv("SKIP_SLOW_TESTS") == "true" { t.Skip("Skipping slow test because SKIP_SLOW_TESTS is enabled") diff --git a/cannon/mipsevm/versions/detect_test.go b/cannon/mipsevm/versions/detect_test.go index be849269fff9..bd1acd115eaf 100644 --- a/cannon/mipsevm/versions/detect_test.go +++ b/cannon/mipsevm/versions/detect_test.go @@ -7,10 +7,12 @@ import ( "strconv" "testing" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" "github.com/ethereum-optimism/optimism/cannon/mipsevm/singlethreaded" "github.com/ethereum-optimism/optimism/op-service/ioutil" - "github.com/stretchr/testify/require" ) const statesPath = "testdata/states" @@ -18,7 +20,7 @@ const statesPath = "testdata/states" //go:embed testdata/states var historicStates embed.FS -func TestDetectVersion(t *testing.T) { +func TestDetectVersion_fromFile(t *testing.T) { testDetection := func(t *testing.T, version StateVersion, ext string) { filename := strconv.Itoa(int(version)) + ext dir := t.TempDir() @@ -34,9 +36,6 @@ func TestDetectVersion(t *testing.T) { // Iterate all known versions to ensure we have a test case to detect every state version for _, version := range StateVersionTypes { version := version - if version == VersionMultiThreaded64 { - t.Skip("TODO(#12205)") - } t.Run(version.String(), func(t *testing.T) { testDetection(t, version, ".bin.gz") }) @@ -47,28 +46,38 @@ func TestDetectVersion(t *testing.T) { }) } } +} - // Additionally, check that the latest supported versions write new states in a way that is detected correctly - t.Run("SingleThreadedBinary", func(t *testing.T) { - state, err := NewFromState(singlethreaded.CreateEmptyState()) - require.NoError(t, err) - path := writeToFile(t, "state.bin.gz", state) - version, err := DetectVersion(path) - require.NoError(t, err) - require.Equal(t, VersionSingleThreaded2, version) - }) +// Check that the latest supported versions write new states in a way that is detected correctly +func 
TestDetectVersion_singleThreadedBinary(t *testing.T) { + targetVersion := VersionSingleThreaded2 + if !arch.IsMips32 { + t.Skip("Single-threaded states are not supported for 64-bit VMs") + } - t.Run("MultiThreadedBinary", func(t *testing.T) { - state, err := NewFromState(multithreaded.CreateEmptyState()) - require.NoError(t, err) - path := writeToFile(t, "state.bin.gz", state) - version, err := DetectVersion(path) - require.NoError(t, err) - require.Equal(t, VersionMultiThreaded, version) - }) + state, err := NewFromState(singlethreaded.CreateEmptyState()) + require.NoError(t, err) + path := writeToFile(t, "state.bin.gz", state) + version, err := DetectVersion(path) + require.NoError(t, err) + require.Equal(t, targetVersion, version) +} + +func TestDetectVersion_multiThreadedBinary(t *testing.T) { + targetVersion := VersionMultiThreaded + if !arch.IsMips32 { + targetVersion = VersionMultiThreaded64 + } + + state, err := NewFromState(multithreaded.CreateEmptyState()) + require.NoError(t, err) + path := writeToFile(t, "state.bin.gz", state) + version, err := DetectVersion(path) + require.NoError(t, err) + require.Equal(t, targetVersion, version) } -func TestDetectVersionInvalid(t *testing.T) { +func TestDetectVersion_invalid(t *testing.T) { t.Run("bad gzip", func(t *testing.T) { dir := t.TempDir() filename := "state.bin.gz" diff --git a/cannon/mipsevm/versions/testdata/states/3.bin.gz b/cannon/mipsevm/versions/testdata/states/3.bin.gz new file mode 100644 index 000000000000..b6f6728313a7 Binary files /dev/null and b/cannon/mipsevm/versions/testdata/states/3.bin.gz differ diff --git a/cannon/scripts/build-legacy-cannons.sh b/cannon/scripts/build-legacy-cannons.sh index 62b543839841..3df0a43c31ea 100755 --- a/cannon/scripts/build-legacy-cannons.sh +++ b/cannon/scripts/build-legacy-cannons.sh @@ -3,7 +3,7 @@ set -euo pipefail SCRIPTS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # This script builds a version of the cannon executable that includes support for both current and legacy state versions. -# Each cannon release is built +# Each cannon release is built separately. TMP_DIR=$(mktemp -d) function cleanup() { diff --git a/cannon/testdata/example/mt-atomic/atomic_test_copy.go b/cannon/testdata/example/mt-atomic/atomic_test_copy.go new file mode 100644 index 000000000000..e0cd1ebd69ff --- /dev/null +++ b/cannon/testdata/example/mt-atomic/atomic_test_copy.go @@ -0,0 +1,2567 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/atomic/atomic_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "fmt" + "reflect" + "runtime" + "runtime/debug" + "strings" + . "sync/atomic" + "testing" + "unsafe" + + "utils/testutil" +) + +// Tests of correct behavior, without contention. +// (Does the function work as advertised?) +// +// Test that the Add functions add correctly. +// Test that the CompareAndSwap functions actually +// do the comparison and the swap correctly. +// +// The loop over power-of-two values is meant to +// ensure that the operations apply to the full word size. +// The struct fields x.before and x.after check that the +// operations do not extend past the full word size. + +const ( + magic32 = 0xdedbeef + magic64 = 0xdeddeadbeefbeef +) + +func TestSwapInt32(t *testutil.TestRunner) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + var j int32 + for delta := int32(1); delta+delta > delta; delta += delta { + k := SwapInt32(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestSwapInt32Method(t *testutil.TestRunner) { + var x struct { + before int32 + i Int32 + after int32 + } + x.before = magic32 + x.after = magic32 + var j int32 + for delta := int32(1); delta+delta > delta; delta += delta { + k := x.i.Swap(delta) + if x.i.Load() != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + j = delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestSwapUint32(t *testutil.TestRunner) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + var j uint32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := SwapUint32(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestSwapUint32Method(t *testutil.TestRunner) { + var x struct { + before uint32 + i Uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + var j uint32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := x.i.Swap(delta) + if x.i.Load() != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + j = delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} 
+ +func TestSwapInt64(t *testutil.TestRunner) { + var x struct { + before int64 + i int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + var j int64 + for delta := int64(1); delta+delta > delta; delta += delta { + k := SwapInt64(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestSwapInt64Method(t *testutil.TestRunner) { + var x struct { + before int64 + i Int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + var j int64 + for delta := int64(1); delta+delta > delta; delta += delta { + k := x.i.Swap(delta) + if x.i.Load() != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + j = delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestSwapUint64(t *testutil.TestRunner) { + var x struct { + before uint64 + i uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + var j uint64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := SwapUint64(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestSwapUint64Method(t *testutil.TestRunner) { + var x struct { + before uint64 + i Uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + var j uint64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := x.i.Swap(delta) + if x.i.Load() != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + j = delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestSwapUintptr(t *testutil.TestRunner) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j uintptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := SwapUintptr(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestSwapUintptrMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j uintptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := x.i.Swap(delta) + if x.i.Load() != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + j = delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +var global [1024]byte + +func testPointers() []unsafe.Pointer { + var pointers []unsafe.Pointer + // globals + for i := 0; i < 10; i++ { + pointers = append(pointers, unsafe.Pointer(&global[1< delta; delta += delta { 
+ k := AddInt32(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAddInt32Method(t *testutil.TestRunner) { + var x struct { + before int32 + i Int32 + after int32 + } + x.before = magic32 + x.after = magic32 + var j int32 + for delta := int32(1); delta+delta > delta; delta += delta { + k := x.i.Add(delta) + j += delta + if x.i.Load() != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAddUint32(t *testutil.TestRunner) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + var j uint32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := AddUint32(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAddUint32Method(t *testutil.TestRunner) { + var x struct { + before uint32 + i Uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + var j uint32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := x.i.Add(delta) + j += delta + if x.i.Load() != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAddInt64(t *testutil.TestRunner) { + var x struct { + before int64 + i int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + var j int64 + for delta := int64(1); delta+delta > delta; delta += delta { + k := AddInt64(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestAddInt64Method(t *testutil.TestRunner) { + var x struct { + before int64 + i Int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + var j int64 + for delta := int64(1); delta+delta > delta; delta += delta { + k := x.i.Add(delta) + j += delta + if x.i.Load() != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestAddUint64(t *testutil.TestRunner) { + var x struct { + before uint64 + i uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + var j uint64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := AddUint64(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestAddUint64Method(t *testutil.TestRunner) { + var x struct { + before uint64 + i Uint64 + after uint64 + } + magic64 := 
uint64(magic64) + x.before = magic64 + x.after = magic64 + var j uint64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := x.i.Add(delta) + j += delta + if x.i.Load() != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestAddUintptr(t *testutil.TestRunner) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j uintptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := AddUintptr(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestAddUintptrMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j uintptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := x.i.Add(delta) + j += delta + if x.i.Load() != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestCompareAndSwapInt32(t *testutil.TestRunner) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + for val := int32(1); val+val > val; val += val { + x.i = val + if !CompareAndSwapInt32(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if CompareAndSwapInt32(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestCompareAndSwapInt32Method(t *testutil.TestRunner) { + var x struct { + before int32 + i Int32 + after int32 + } + x.before = magic32 + x.after = magic32 + for val := int32(1); val+val > val; val += val { + x.i.Store(val) + if !x.i.CompareAndSwap(val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + x.i.Store(val + 1) + if x.i.CompareAndSwap(val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestCompareAndSwapUint32(t *testutil.TestRunner) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + for val := uint32(1); val+val > val; val += val { + x.i = val + if !CompareAndSwapUint32(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x 
val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if CompareAndSwapUint32(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestCompareAndSwapUint32Method(t *testutil.TestRunner) { + var x struct { + before uint32 + i Uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + for val := uint32(1); val+val > val; val += val { + x.i.Store(val) + if !x.i.CompareAndSwap(val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + x.i.Store(val + 1) + if x.i.CompareAndSwap(val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestCompareAndSwapInt64(t *testutil.TestRunner) { + var x struct { + before int64 + i int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + for val := int64(1); val+val > val; val += val { + x.i = val + if !CompareAndSwapInt64(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if CompareAndSwapInt64(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestCompareAndSwapInt64Method(t *testutil.TestRunner) { + var x struct { + before int64 + i Int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + for val := int64(1); val+val > val; val += val { + x.i.Store(val) + if !x.i.CompareAndSwap(val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + x.i.Store(val + 1) + if x.i.CompareAndSwap(val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func testCompareAndSwapUint64(t testing.TB, cas func(*uint64, uint64, uint64) bool) { + var x struct { + before uint64 + i uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + for val := uint64(1); val+val > val; val += val { + x.i = val + if !cas(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if cas(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magic64 || x.after != magic64 { + 
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestCompareAndSwapUint64(t *testutil.TestRunner) { + testCompareAndSwapUint64(t, CompareAndSwapUint64) +} + +func TestCompareAndSwapUint64Method(t *testutil.TestRunner) { + var x struct { + before uint64 + i Uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + for val := uint64(1); val+val > val; val += val { + x.i.Store(val) + if !x.i.CompareAndSwap(val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + x.i.Store(val + 1) + if x.i.CompareAndSwap(val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestCompareAndSwapUintptr(t *testutil.TestRunner) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for val := uintptr(1); val+val > val; val += val { + x.i = val + if !CompareAndSwapUintptr(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if CompareAndSwapUintptr(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestCompareAndSwapUintptrMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for val := uintptr(1); val+val > val; val += val { + x.i.Store(val) + if !x.i.CompareAndSwap(val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + x.i.Store(val + 1) + if x.i.CompareAndSwap(val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uintptr(magicptr), uintptr(magicptr)) + } +} + +func TestCompareAndSwapPointer(t *testutil.TestRunner) { + var x struct { + before uintptr + i unsafe.Pointer + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + q := unsafe.Pointer(new(byte)) + for _, p := range testPointers() { + x.i = p + if !CompareAndSwapPointer(&x.i, p, q) { + t.Fatalf("should have swapped %p %p", p, q) + } + if x.i != q { + t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i, q) + } + if CompareAndSwapPointer(&x.i, p, nil) { + t.Fatalf("should not have swapped %p nil", p) + } + if x.i != q { + t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i, q) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, 
x.after, magicptr, magicptr) + } +} + +func TestCompareAndSwapPointerMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Pointer[byte] + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + q := new(byte) + for _, p := range testPointers() { + p := (*byte)(p) + x.i.Store(p) + if !x.i.CompareAndSwap(p, q) { + t.Fatalf("should have swapped %p %p", p, q) + } + if x.i.Load() != q { + t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i.Load(), q) + } + if x.i.CompareAndSwap(p, nil) { + t.Fatalf("should not have swapped %p nil", p) + } + if x.i.Load() != q { + t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i.Load(), q) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestLoadInt32(t *testutil.TestRunner) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + for delta := int32(1); delta+delta > delta; delta += delta { + k := LoadInt32(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestLoadInt32Method(t *testutil.TestRunner) { + var x struct { + before int32 + i Int32 + after int32 + } + x.before = magic32 + x.after = magic32 + want := int32(0) + for delta := int32(1); delta+delta > delta; delta += delta { + k := x.i.Load() + if k != want { + t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want) + } + x.i.Store(k + delta) + want = k + delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestLoadUint32(t *testutil.TestRunner) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := LoadUint32(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestLoadUint32Method(t *testutil.TestRunner) { + var x struct { + before uint32 + i Uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + want := uint32(0) + for delta := uint32(1); delta+delta > delta; delta += delta { + k := x.i.Load() + if k != want { + t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want) + } + x.i.Store(k + delta) + want = k + delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestLoadInt64(t *testutil.TestRunner) { + var x struct { + before int64 + i int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + for delta := int64(1); delta+delta > delta; delta += delta { + k := LoadInt64(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestLoadInt64Method(t *testutil.TestRunner) { + var x struct { + before int64 + i Int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + want 
:= int64(0) + for delta := int64(1); delta+delta > delta; delta += delta { + k := x.i.Load() + if k != want { + t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want) + } + x.i.Store(k + delta) + want = k + delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestLoadUint64(t *testutil.TestRunner) { + var x struct { + before uint64 + i uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := LoadUint64(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestLoadUint64Method(t *testutil.TestRunner) { + var x struct { + before uint64 + i Uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + want := uint64(0) + for delta := uint64(1); delta+delta > delta; delta += delta { + k := x.i.Load() + if k != want { + t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want) + } + x.i.Store(k + delta) + want = k + delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestLoadUintptr(t *testutil.TestRunner) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := LoadUintptr(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestLoadUintptrMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + want := uintptr(0) + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := x.i.Load() + if k != want { + t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want) + } + x.i.Store(k + delta) + want = k + delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestLoadPointer(t *testutil.TestRunner) { + var x struct { + before uintptr + i unsafe.Pointer + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for _, p := range testPointers() { + x.i = p + k := LoadPointer(&x.i) + if k != p { + t.Fatalf("p=%x k=%x", p, k) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestLoadPointerMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Pointer[byte] + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for _, p := range testPointers() { + p := (*byte)(p) + x.i.Store(p) + k := x.i.Load() + if k != p { + t.Fatalf("p=%x k=%x", p, k) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, 
magicptr) + } +} + +func TestStoreInt32(t *testutil.TestRunner) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + v := int32(0) + for delta := int32(1); delta+delta > delta; delta += delta { + StoreInt32(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestStoreInt32Method(t *testutil.TestRunner) { + var x struct { + before int32 + i Int32 + after int32 + } + x.before = magic32 + x.after = magic32 + v := int32(0) + for delta := int32(1); delta+delta > delta; delta += delta { + x.i.Store(v) + if x.i.Load() != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v) + } + v += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestStoreUint32(t *testutil.TestRunner) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + v := uint32(0) + for delta := uint32(1); delta+delta > delta; delta += delta { + StoreUint32(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestStoreUint32Method(t *testutil.TestRunner) { + var x struct { + before uint32 + i Uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + v := uint32(0) + for delta := uint32(1); delta+delta > delta; delta += delta { + x.i.Store(v) + if x.i.Load() != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v) + } + v += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestStoreInt64(t *testutil.TestRunner) { + var x struct { + before int64 + i int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + v := int64(0) + for delta := int64(1); delta+delta > delta; delta += delta { + StoreInt64(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestStoreInt64Method(t *testutil.TestRunner) { + var x struct { + before int64 + i Int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + v := int64(0) + for delta := int64(1); delta+delta > delta; delta += delta { + x.i.Store(v) + if x.i.Load() != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v) + } + v += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestStoreUint64(t *testutil.TestRunner) { + var x struct { + before uint64 + i uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + v := uint64(0) + for delta := uint64(1); delta+delta > delta; delta += delta { + StoreUint64(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestStoreUint64Method(t 
*testutil.TestRunner) { + var x struct { + before uint64 + i Uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + v := uint64(0) + for delta := uint64(1); delta+delta > delta; delta += delta { + x.i.Store(v) + if x.i.Load() != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v) + } + v += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestStoreUintptr(t *testutil.TestRunner) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + v := uintptr(0) + for delta := uintptr(1); delta+delta > delta; delta += delta { + StoreUintptr(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestStoreUintptrMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + v := uintptr(0) + for delta := uintptr(1); delta+delta > delta; delta += delta { + x.i.Store(v) + if x.i.Load() != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v) + } + v += delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestStorePointer(t *testutil.TestRunner) { + var x struct { + before uintptr + i unsafe.Pointer + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for _, p := range testPointers() { + StorePointer(&x.i, p) + if x.i != p { + t.Fatalf("x.i=%p p=%p", x.i, p) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestStorePointerMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Pointer[byte] + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for _, p := range testPointers() { + p := (*byte)(p) + x.i.Store(p) + if x.i.Load() != p { + t.Fatalf("x.i=%p p=%p", x.i.Load(), p) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +// Tests of correct behavior, with contention. +// (Is the function atomic?) +// +// For each function, we write a "hammer" function that repeatedly +// uses the atomic operation to add 1 to a value. After running +// multiple hammers in parallel, check that we end with the correct +// total. +// Swap can't add 1, so it uses a different scheme. +// The functions repeatedly generate a pseudo-random number such that +// low bits are equal to high bits, swap, check that the old value +// has low and high bits equal. 
+ +var hammer32 = map[string]func(*uint32, int){ + "SwapInt32": hammerSwapInt32, + "SwapUint32": hammerSwapUint32, + "SwapUintptr": hammerSwapUintptr32, + "AddInt32": hammerAddInt32, + "AddUint32": hammerAddUint32, + "AddUintptr": hammerAddUintptr32, + "CompareAndSwapInt32": hammerCompareAndSwapInt32, + "CompareAndSwapUint32": hammerCompareAndSwapUint32, + "CompareAndSwapUintptr": hammerCompareAndSwapUintptr32, + + "SwapInt32Method": hammerSwapInt32Method, + "SwapUint32Method": hammerSwapUint32Method, + "SwapUintptrMethod": hammerSwapUintptr32Method, + "AddInt32Method": hammerAddInt32Method, + "AddUint32Method": hammerAddUint32Method, + "AddUintptrMethod": hammerAddUintptr32Method, + "CompareAndSwapInt32Method": hammerCompareAndSwapInt32Method, + "CompareAndSwapUint32Method": hammerCompareAndSwapUint32Method, + "CompareAndSwapUintptrMethod": hammerCompareAndSwapUintptr32Method, +} + +func init() { + var v uint64 = 1 << 50 + if uintptr(v) != 0 { + // 64-bit system; clear uintptr tests + delete(hammer32, "SwapUintptr") + delete(hammer32, "AddUintptr") + delete(hammer32, "CompareAndSwapUintptr") + delete(hammer32, "SwapUintptrMethod") + delete(hammer32, "AddUintptrMethod") + delete(hammer32, "CompareAndSwapUintptrMethod") + } +} + +func hammerSwapInt32(uaddr *uint32, count int) { + addr := (*int32)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16 + old := uint32(SwapInt32(addr, int32(new))) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old)) + } + } +} + +func hammerSwapInt32Method(uaddr *uint32, count int) { + addr := (*Int32)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16 + old := uint32(addr.Swap(int32(new))) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old)) + } + } +} + +func hammerSwapUint32(addr *uint32, count int) { + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16 + old := SwapUint32(addr, new) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old)) + } + } +} + +func hammerSwapUint32Method(uaddr *uint32, count int) { + addr := (*Uint32)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16 + old := addr.Swap(new) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old)) + } + } +} + +func hammerSwapUintptr32(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16 + old := SwapUintptr(addr, new) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapUintptr is not atomic: %#08x", old)) + } + } +} + +func hammerSwapUintptr32Method(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. 
+ addr := (*Uintptr)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16 + old := addr.Swap(new) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("Uintptr.Swap is not atomic: %#08x", old)) + } + } +} + +func hammerAddInt32(uaddr *uint32, count int) { + addr := (*int32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + AddInt32(addr, 1) + } +} + +func hammerAddInt32Method(uaddr *uint32, count int) { + addr := (*Int32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + addr.Add(1) + } +} + +func hammerAddUint32(addr *uint32, count int) { + for i := 0; i < count; i++ { + AddUint32(addr, 1) + } +} + +func hammerAddUint32Method(uaddr *uint32, count int) { + addr := (*Uint32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + addr.Add(1) + } +} + +func hammerAddUintptr32(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + AddUintptr(addr, 1) + } +} + +func hammerAddUintptr32Method(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. + addr := (*Uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + addr.Add(1) + } +} + +func hammerCompareAndSwapInt32(uaddr *uint32, count int) { + addr := (*int32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := LoadInt32(addr) + if CompareAndSwapInt32(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapInt32Method(uaddr *uint32, count int) { + addr := (*Int32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := addr.Load() + if addr.CompareAndSwap(v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUint32(addr *uint32, count int) { + for i := 0; i < count; i++ { + for { + v := LoadUint32(addr) + if CompareAndSwapUint32(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUint32Method(uaddr *uint32, count int) { + addr := (*Uint32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := addr.Load() + if addr.CompareAndSwap(v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUintptr32(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := LoadUintptr(addr) + if CompareAndSwapUintptr(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUintptr32Method(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. 
+ addr := (*Uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := addr.Load() + if addr.CompareAndSwap(v, v+1) { + break + } + } + } +} + +func TestHammer32(t *testutil.TestRunner) { + const p = 4 + n := 100000 + if short { + n = 1000 + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p)) + + for name, testf := range hammer32 { + c := make(chan int) + var val uint32 + for i := 0; i < p; i++ { + go func() { + defer func() { + if err := recover(); err != nil { + t.Error(err.(string)) + } + c <- 1 + }() + testf(&val, n) + }() + } + for i := 0; i < p; i++ { + <-c + } + if !strings.HasPrefix(name, "Swap") && val != uint32(n)*p { + t.Fatalf("%s: val=%d want %d", name, val, n*p) + } + } +} + +var hammer64 = map[string]func(*uint64, int){ + "SwapInt64": hammerSwapInt64, + "SwapUint64": hammerSwapUint64, + "SwapUintptr": hammerSwapUintptr64, + "AddInt64": hammerAddInt64, + "AddUint64": hammerAddUint64, + "AddUintptr": hammerAddUintptr64, + "CompareAndSwapInt64": hammerCompareAndSwapInt64, + "CompareAndSwapUint64": hammerCompareAndSwapUint64, + "CompareAndSwapUintptr": hammerCompareAndSwapUintptr64, + + "SwapInt64Method": hammerSwapInt64Method, + "SwapUint64Method": hammerSwapUint64Method, + "SwapUintptrMethod": hammerSwapUintptr64Method, + "AddInt64Method": hammerAddInt64Method, + "AddUint64Method": hammerAddUint64Method, + "AddUintptrMethod": hammerAddUintptr64Method, + "CompareAndSwapInt64Method": hammerCompareAndSwapInt64Method, + "CompareAndSwapUint64Method": hammerCompareAndSwapUint64Method, + "CompareAndSwapUintptrMethod": hammerCompareAndSwapUintptr64Method, +} + +func init() { + var v uint64 = 1 << 50 + if uintptr(v) == 0 { + // 32-bit system; clear uintptr tests + delete(hammer64, "SwapUintptr") + delete(hammer64, "SwapUintptrMethod") + delete(hammer64, "AddUintptr") + delete(hammer64, "AddUintptrMethod") + delete(hammer64, "CompareAndSwapUintptr") + delete(hammer64, "CompareAndSwapUintptrMethod") + } +} + +func hammerSwapInt64(uaddr *uint64, count int) { + addr := (*int64)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32 + old := uint64(SwapInt64(addr, int64(new))) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old)) + } + } +} + +func hammerSwapInt64Method(uaddr *uint64, count int) { + addr := (*Int64)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32 + old := uint64(addr.Swap(int64(new))) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old)) + } + } +} + +func hammerSwapUint64(addr *uint64, count int) { + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32 + old := SwapUint64(addr, new) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old)) + } + } +} + +func hammerSwapUint64Method(uaddr *uint64, count int) { + addr := (*Uint64)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32 + old := addr.Swap(new) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old)) + } + } +} + +const arch32 = unsafe.Sizeof(uintptr(0)) == 4 + +func hammerSwapUintptr64(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. 
+ if !arch32 { + addr := (*uintptr)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32 + old := SwapUintptr(addr, new) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old)) + } + } + } +} + +func hammerSwapUintptr64Method(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. + if !arch32 { + addr := (*Uintptr)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32 + old := addr.Swap(new) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old)) + } + } + } +} + +func hammerAddInt64(uaddr *uint64, count int) { + addr := (*int64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + AddInt64(addr, 1) + } +} + +func hammerAddInt64Method(uaddr *uint64, count int) { + addr := (*Int64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + addr.Add(1) + } +} + +func hammerAddUint64(addr *uint64, count int) { + for i := 0; i < count; i++ { + AddUint64(addr, 1) + } +} + +func hammerAddUint64Method(uaddr *uint64, count int) { + addr := (*Uint64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + addr.Add(1) + } +} + +func hammerAddUintptr64(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + AddUintptr(addr, 1) + } +} + +func hammerAddUintptr64Method(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. + addr := (*Uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + addr.Add(1) + } +} + +func hammerCompareAndSwapInt64(uaddr *uint64, count int) { + addr := (*int64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := LoadInt64(addr) + if CompareAndSwapInt64(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapInt64Method(uaddr *uint64, count int) { + addr := (*Int64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := addr.Load() + if addr.CompareAndSwap(v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUint64(addr *uint64, count int) { + for i := 0; i < count; i++ { + for { + v := LoadUint64(addr) + if CompareAndSwapUint64(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUint64Method(uaddr *uint64, count int) { + addr := (*Uint64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := addr.Load() + if addr.CompareAndSwap(v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUintptr64(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := LoadUintptr(addr) + if CompareAndSwapUintptr(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUintptr64Method(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. 
+ addr := (*Uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := addr.Load() + if addr.CompareAndSwap(v, v+1) { + break + } + } + } +} + +func TestHammer64(t *testutil.TestRunner) { + const p = 4 + n := 100000 + if short { + n = 1000 + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p)) + + for name, testf := range hammer64 { + c := make(chan int) + var val uint64 + for i := 0; i < p; i++ { + go func() { + defer func() { + if err := recover(); err != nil { + t.Error(err.(string)) + } + c <- 1 + }() + testf(&val, n) + }() + } + for i := 0; i < p; i++ { + <-c + } + if !strings.HasPrefix(name, "Swap") && val != uint64(n)*p { + t.Fatalf("%s: val=%d want %d", name, val, n*p) + } + } +} + +func hammerStoreLoadInt32(t testing.TB, paddr unsafe.Pointer) { + addr := (*int32)(paddr) + v := LoadInt32(addr) + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Int32: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + StoreInt32(addr, new) +} + +func hammerStoreLoadInt32Method(t testing.TB, paddr unsafe.Pointer) { + addr := (*int32)(paddr) + v := LoadInt32(addr) + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Int32: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + StoreInt32(addr, new) +} + +func hammerStoreLoadUint32(t testing.TB, paddr unsafe.Pointer) { + addr := (*uint32)(paddr) + v := LoadUint32(addr) + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Uint32: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + StoreUint32(addr, new) +} + +func hammerStoreLoadUint32Method(t testing.TB, paddr unsafe.Pointer) { + addr := (*Uint32)(paddr) + v := addr.Load() + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Uint32: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + addr.Store(new) +} + +func hammerStoreLoadInt64(t testing.TB, paddr unsafe.Pointer) { + addr := (*int64)(paddr) + v := LoadInt64(addr) + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Int64: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<32 + StoreInt64(addr, new) +} + +func hammerStoreLoadInt64Method(t testing.TB, paddr unsafe.Pointer) { + addr := (*Int64)(paddr) + v := addr.Load() + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Int64: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<32 + addr.Store(new) +} + +func hammerStoreLoadUint64(t testing.TB, paddr unsafe.Pointer) { + addr := (*uint64)(paddr) + v := LoadUint64(addr) + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Uint64: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<32 + StoreUint64(addr, new) +} + +func hammerStoreLoadUint64Method(t testing.TB, paddr unsafe.Pointer) { + addr := (*Uint64)(paddr) + v := addr.Load() + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Uint64: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<32 + addr.Store(new) +} + +func hammerStoreLoadUintptr(t testing.TB, paddr unsafe.Pointer) { + addr := (*uintptr)(paddr) + v := LoadUintptr(addr) + new := v + if arch32 { + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Uintptr: %#x != %#x", vlo, vhi) + } + new = v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + } else { + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Uintptr: %#x != %#x", vlo, vhi) + } + inc := uint64(1 + 1<<32) + new = v + uintptr(inc) + } + 
StoreUintptr(addr, new) +} + +//go:nocheckptr +func hammerStoreLoadUintptrMethod(t testing.TB, paddr unsafe.Pointer) { + addr := (*Uintptr)(paddr) + v := addr.Load() + new := v + if arch32 { + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Uintptr: %#x != %#x", vlo, vhi) + } + new = v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + } else { + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Uintptr: %#x != %#x", vlo, vhi) + } + inc := uint64(1 + 1<<32) + new = v + uintptr(inc) + } + addr.Store(new) +} + +// This code is just testing that LoadPointer/StorePointer operate +// atomically; it's not actually calculating pointers. +// +//go:nocheckptr +func hammerStoreLoadPointer(t testing.TB, paddr unsafe.Pointer) { + addr := (*unsafe.Pointer)(paddr) + v := uintptr(LoadPointer(addr)) + new := v + if arch32 { + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Pointer: %#x != %#x", vlo, vhi) + } + new = v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + } else { + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Pointer: %#x != %#x", vlo, vhi) + } + inc := uint64(1 + 1<<32) + new = v + uintptr(inc) + } + StorePointer(addr, unsafe.Pointer(new)) +} + +// This code is just testing that LoadPointer/StorePointer operate +// atomically; it's not actually calculating pointers. +// +//go:nocheckptr +func hammerStoreLoadPointerMethod(t testing.TB, paddr unsafe.Pointer) { + addr := (*Pointer[byte])(paddr) + v := uintptr(unsafe.Pointer(addr.Load())) + new := v + if arch32 { + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Pointer: %#x != %#x", vlo, vhi) + } + new = v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + } else { + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Pointer: %#x != %#x", vlo, vhi) + } + inc := uint64(1 + 1<<32) + new = v + uintptr(inc) + } + addr.Store((*byte)(unsafe.Pointer(new))) +} + +func TestHammerStoreLoad(t *testutil.TestRunner) { + tests := []func(testing.TB, unsafe.Pointer){ + hammerStoreLoadInt32, hammerStoreLoadUint32, + hammerStoreLoadUintptr, hammerStoreLoadPointer, + hammerStoreLoadInt32Method, hammerStoreLoadUint32Method, + hammerStoreLoadUintptrMethod, hammerStoreLoadPointerMethod, + hammerStoreLoadInt64, hammerStoreLoadUint64, + hammerStoreLoadInt64Method, hammerStoreLoadUint64Method, + } + n := int(1e6) + if short { + n = int(1e4) + } + const procs = 8 + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(procs)) + // Disable the GC because hammerStoreLoadPointer invokes + // write barriers on values that aren't real pointers. + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + // Ensure any in-progress GC is finished. 
+ runtime.GC() + for _, tt := range tests { + c := make(chan int) + var val uint64 + for p := 0; p < procs; p++ { + go func() { + for i := 0; i < n; i++ { + tt(t, unsafe.Pointer(&val)) + } + c <- 1 + }() + } + for p := 0; p < procs; p++ { + <-c + } + } +} + +func TestStoreLoadSeqCst32(t *testutil.TestRunner) { + if runtime.NumCPU() == 1 { + t.Skipf("Skipping test on %v processor machine", runtime.NumCPU()) + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + N := int32(1e3) + if short { + N = int32(1e2) + } + c := make(chan bool, 2) + X := [2]int32{} + ack := [2][3]int32{{-1, -1, -1}, {-1, -1, -1}} + for p := 0; p < 2; p++ { + go func(me int) { + he := 1 - me + for i := int32(1); i < N; i++ { + StoreInt32(&X[me], i) + my := LoadInt32(&X[he]) + StoreInt32(&ack[me][i%3], my) + for w := 1; LoadInt32(&ack[he][i%3]) == -1; w++ { + if w%1000 == 0 { + runtime.Gosched() + } + } + his := LoadInt32(&ack[he][i%3]) + if (my != i && my != i-1) || (his != i && his != i-1) { + t.Errorf("invalid values: %d/%d (%d)", my, his, i) + break + } + if my != i && his != i { + t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i) + break + } + StoreInt32(&ack[me][(i-1)%3], -1) + } + c <- true + }(p) + } + <-c + <-c +} + +func TestStoreLoadSeqCst64(t *testutil.TestRunner) { + if runtime.NumCPU() == 1 { + t.Skipf("Skipping test on %v processor machine", runtime.NumCPU()) + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + N := int64(1e3) + if short { + N = int64(1e2) + } + c := make(chan bool, 2) + X := [2]int64{} + ack := [2][3]int64{{-1, -1, -1}, {-1, -1, -1}} + for p := 0; p < 2; p++ { + go func(me int) { + he := 1 - me + for i := int64(1); i < N; i++ { + StoreInt64(&X[me], i) + my := LoadInt64(&X[he]) + StoreInt64(&ack[me][i%3], my) + for w := 1; LoadInt64(&ack[he][i%3]) == -1; w++ { + if w%1000 == 0 { + runtime.Gosched() + } + } + his := LoadInt64(&ack[he][i%3]) + if (my != i && my != i-1) || (his != i && his != i-1) { + t.Errorf("invalid values: %d/%d (%d)", my, his, i) + break + } + if my != i && his != i { + t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i) + break + } + StoreInt64(&ack[me][(i-1)%3], -1) + } + c <- true + }(p) + } + <-c + <-c +} + +func TestStoreLoadRelAcq32(t *testutil.TestRunner) { + if runtime.NumCPU() == 1 { + t.Skipf("Skipping test on %v processor machine", runtime.NumCPU()) + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + N := int32(1e3) + if short { + N = int32(1e2) + } + c := make(chan bool, 2) + type Data struct { + signal int32 + pad1 [128]int8 + data1 int32 + pad2 [128]int8 + data2 float32 + } + var X Data + for p := int32(0); p < 2; p++ { + go func(p int32) { + for i := int32(1); i < N; i++ { + if (i+p)%2 == 0 { + X.data1 = i + X.data2 = float32(i) + StoreInt32(&X.signal, i) + } else { + for w := 1; LoadInt32(&X.signal) != i; w++ { + if w%1000 == 0 { + runtime.Gosched() + } + } + d1 := X.data1 + d2 := X.data2 + if d1 != i || d2 != float32(i) { + t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i) + break + } + } + } + c <- true + }(p) + } + <-c + <-c +} + +func TestStoreLoadRelAcq64(t *testutil.TestRunner) { + if runtime.NumCPU() == 1 { + t.Skipf("Skipping test on %v processor machine", runtime.NumCPU()) + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + N := int64(1e3) + if short { + N = int64(1e2) + } + c := make(chan bool, 2) + type Data struct { + signal int64 + pad1 [128]int8 + data1 int64 + pad2 [128]int8 + data2 float64 + } + var X Data + for p := int64(0); p < 2; p++ { + go func(p int64) { + for i := 
int64(1); i < N; i++ { + if (i+p)%2 == 0 { + X.data1 = i + X.data2 = float64(i) + StoreInt64(&X.signal, i) + } else { + for w := 1; LoadInt64(&X.signal) != i; w++ { + if w%1000 == 0 { + runtime.Gosched() + } + } + d1 := X.data1 + d2 := X.data2 + if d1 != i || d2 != float64(i) { + t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i) + break + } + } + } + c <- true + }(p) + } + <-c + <-c +} + +func shouldPanic(t testing.TB, name string, f func()) { + defer func() { + // Check that all GC maps are sane. + runtime.GC() + + err := recover() + want := "unaligned 64-bit atomic operation" + if err == nil { + t.Errorf("%s did not panic", name) + } else if s, _ := err.(string); s != want { + t.Errorf("%s: wanted panic %q, got %q", name, want, err) + } + }() + f() +} + +func TestUnaligned64(t *testutil.TestRunner) { + // Unaligned 64-bit atomics on 32-bit systems are + // a continual source of pain. Test that on 32-bit systems they crash + // instead of failing silently. + if !arch32 { + t.Skip("test only runs on 32-bit systems") + } + + x := make([]uint32, 4) + p := (*uint64)(unsafe.Pointer(&x[1])) // misaligned + + shouldPanic(t, "LoadUint64", func() { LoadUint64(p) }) + shouldPanic(t, "LoadUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Load() }) + shouldPanic(t, "StoreUint64", func() { StoreUint64(p, 1) }) + shouldPanic(t, "StoreUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Store(1) }) + shouldPanic(t, "CompareAndSwapUint64", func() { CompareAndSwapUint64(p, 1, 2) }) + shouldPanic(t, "CompareAndSwapUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).CompareAndSwap(1, 2) }) + shouldPanic(t, "AddUint64", func() { AddUint64(p, 3) }) + shouldPanic(t, "AddUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Add(3) }) +} + +func TestAutoAligned64(t *testutil.TestRunner) { + var signed struct { + _ uint32 + i Int64 + } + if o := reflect.TypeOf(&signed).Elem().Field(1).Offset; o != 8 { + t.Fatalf("Int64 offset = %d, want 8", o) + } + if p := reflect.ValueOf(&signed).Elem().Field(1).Addr().Pointer(); p&7 != 0 { + t.Fatalf("Int64 pointer = %#x, want 8-aligned", p) + } + + var unsigned struct { + _ uint32 + i Uint64 + } + if o := reflect.TypeOf(&unsigned).Elem().Field(1).Offset; o != 8 { + t.Fatalf("Uint64 offset = %d, want 8", o) + } + if p := reflect.ValueOf(&unsigned).Elem().Field(1).Addr().Pointer(); p&7 != 0 { + t.Fatalf("Int64 pointer = %#x, want 8-aligned", p) + } +} + +func TestNilDeref(t *testutil.TestRunner) { + funcs := [...]func(){ + func() { CompareAndSwapInt32(nil, 0, 0) }, + func() { (*Int32)(nil).CompareAndSwap(0, 0) }, + func() { CompareAndSwapInt64(nil, 0, 0) }, + func() { (*Int64)(nil).CompareAndSwap(0, 0) }, + func() { CompareAndSwapUint32(nil, 0, 0) }, + func() { (*Uint32)(nil).CompareAndSwap(0, 0) }, + func() { CompareAndSwapUint64(nil, 0, 0) }, + func() { (*Uint64)(nil).CompareAndSwap(0, 0) }, + func() { CompareAndSwapUintptr(nil, 0, 0) }, + func() { (*Uintptr)(nil).CompareAndSwap(0, 0) }, + func() { CompareAndSwapPointer(nil, nil, nil) }, + func() { (*Pointer[byte])(nil).CompareAndSwap(nil, nil) }, + func() { SwapInt32(nil, 0) }, + func() { (*Int32)(nil).Swap(0) }, + func() { SwapUint32(nil, 0) }, + func() { (*Uint32)(nil).Swap(0) }, + func() { SwapInt64(nil, 0) }, + func() { (*Int64)(nil).Swap(0) }, + func() { SwapUint64(nil, 0) }, + func() { (*Uint64)(nil).Swap(0) }, + func() { SwapUintptr(nil, 0) }, + func() { (*Uintptr)(nil).Swap(0) }, + func() { SwapPointer(nil, nil) }, + func() { (*Pointer[byte])(nil).Swap(nil) }, + func() { AddInt32(nil, 0) }, + func() { 
(*Int32)(nil).Add(0) }, + func() { AddUint32(nil, 0) }, + func() { (*Uint32)(nil).Add(0) }, + func() { AddInt64(nil, 0) }, + func() { (*Int64)(nil).Add(0) }, + func() { AddUint64(nil, 0) }, + func() { (*Uint64)(nil).Add(0) }, + func() { AddUintptr(nil, 0) }, + func() { (*Uintptr)(nil).Add(0) }, + func() { LoadInt32(nil) }, + func() { (*Int32)(nil).Load() }, + func() { LoadInt64(nil) }, + func() { (*Int64)(nil).Load() }, + func() { LoadUint32(nil) }, + func() { (*Uint32)(nil).Load() }, + func() { LoadUint64(nil) }, + func() { (*Uint64)(nil).Load() }, + func() { LoadUintptr(nil) }, + func() { (*Uintptr)(nil).Load() }, + func() { LoadPointer(nil) }, + func() { (*Pointer[byte])(nil).Load() }, + func() { StoreInt32(nil, 0) }, + func() { (*Int32)(nil).Store(0) }, + func() { StoreInt64(nil, 0) }, + func() { (*Int64)(nil).Store(0) }, + func() { StoreUint32(nil, 0) }, + func() { (*Uint32)(nil).Store(0) }, + func() { StoreUint64(nil, 0) }, + func() { (*Uint64)(nil).Store(0) }, + func() { StoreUintptr(nil, 0) }, + func() { (*Uintptr)(nil).Store(0) }, + func() { StorePointer(nil, nil) }, + func() { (*Pointer[byte])(nil).Store(nil) }, + } + for _, f := range funcs { + func() { + defer func() { + runtime.GC() + recover() + }() + f() + }() + } +} + +// Test that this compiles. +// When atomic.Pointer used _ [0]T, it did not. +type List struct { + Next Pointer[List] +} diff --git a/cannon/testdata/example/mt-atomic/go.mod b/cannon/testdata/example/mt-atomic/go.mod new file mode 100644 index 000000000000..042386cf702d --- /dev/null +++ b/cannon/testdata/example/mt-atomic/go.mod @@ -0,0 +1,8 @@ +module atomic + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/mt-atomic/main.go b/cannon/testdata/example/mt-atomic/main.go new file mode 100644 index 000000000000..9d683c5bd598 --- /dev/null +++ b/cannon/testdata/example/mt-atomic/main.go @@ -0,0 +1,80 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestSwapInt32, "TestSwapInt32") + testutil.RunTest(TestSwapInt32Method, "TestSwapInt32Method") + testutil.RunTest(TestSwapUint32, "TestSwapUint32") + testutil.RunTest(TestSwapUint32Method, "TestSwapUint32Method") + testutil.RunTest(TestSwapInt64, "TestSwapInt64") + testutil.RunTest(TestSwapInt64Method, "TestSwapInt64Method") + testutil.RunTest(TestSwapUint64, "TestSwapUint64") + testutil.RunTest(TestSwapUint64Method, "TestSwapUint64Method") + testutil.RunTest(TestSwapUintptr, "TestSwapUintptr") + testutil.RunTest(TestSwapUintptrMethod, "TestSwapUintptrMethod") + testutil.RunTest(TestSwapPointer, "TestSwapPointer") + testutil.RunTest(TestSwapPointerMethod, "TestSwapPointerMethod") + testutil.RunTest(TestAddInt32, "TestAddInt32") + testutil.RunTest(TestAddInt32Method, "TestAddInt32Method") + testutil.RunTest(TestAddUint32, "TestAddUint32") + testutil.RunTest(TestAddUint32Method, "TestAddUint32Method") + testutil.RunTest(TestAddInt64, "TestAddInt64") + testutil.RunTest(TestAddInt64Method, "TestAddInt64Method") + testutil.RunTest(TestAddUint64, "TestAddUint64") + testutil.RunTest(TestAddUint64Method, "TestAddUint64Method") + testutil.RunTest(TestAddUintptr, "TestAddUintptr") + testutil.RunTest(TestAddUintptrMethod, "TestAddUintptrMethod") + testutil.RunTest(TestCompareAndSwapInt32, "TestCompareAndSwapInt32") + testutil.RunTest(TestCompareAndSwapInt32Method, "TestCompareAndSwapInt32Method") + testutil.RunTest(TestCompareAndSwapUint32, "TestCompareAndSwapUint32") + 
testutil.RunTest(TestCompareAndSwapUint32Method, "TestCompareAndSwapUint32Method") + testutil.RunTest(TestCompareAndSwapInt64, "TestCompareAndSwapInt64") + testutil.RunTest(TestCompareAndSwapInt64Method, "TestCompareAndSwapInt64Method") + testutil.RunTest(TestCompareAndSwapUint64, "TestCompareAndSwapUint64") + testutil.RunTest(TestCompareAndSwapUint64Method, "TestCompareAndSwapUint64Method") + testutil.RunTest(TestCompareAndSwapUintptr, "TestCompareAndSwapUintptr") + testutil.RunTest(TestCompareAndSwapUintptrMethod, "TestCompareAndSwapUintptrMethod") + testutil.RunTest(TestCompareAndSwapPointer, "TestCompareAndSwapPointer") + testutil.RunTest(TestCompareAndSwapPointerMethod, "TestCompareAndSwapPointerMethod") + testutil.RunTest(TestLoadInt32, "TestLoadInt32") + testutil.RunTest(TestLoadInt32Method, "TestLoadInt32Method") + testutil.RunTest(TestLoadUint32, "TestLoadUint32") + testutil.RunTest(TestLoadUint32Method, "TestLoadUint32Method") + testutil.RunTest(TestLoadInt64, "TestLoadInt64") + testutil.RunTest(TestLoadInt64Method, "TestLoadInt64Method") + testutil.RunTest(TestLoadUint64, "TestLoadUint64") + testutil.RunTest(TestLoadUint64Method, "TestLoadUint64Method") + testutil.RunTest(TestLoadUintptr, "TestLoadUintptr") + testutil.RunTest(TestLoadUintptrMethod, "TestLoadUintptrMethod") + testutil.RunTest(TestLoadPointer, "TestLoadPointer") + testutil.RunTest(TestLoadPointerMethod, "TestLoadPointerMethod") + testutil.RunTest(TestStoreInt32, "TestStoreInt32") + testutil.RunTest(TestStoreInt32Method, "TestStoreInt32Method") + testutil.RunTest(TestStoreUint32, "TestStoreUint32") + testutil.RunTest(TestStoreUint32Method, "TestStoreUint32Method") + testutil.RunTest(TestStoreInt64, "TestStoreInt64") + testutil.RunTest(TestStoreInt64Method, "TestStoreInt64Method") + testutil.RunTest(TestStoreUint64, "TestStoreUint64") + testutil.RunTest(TestStoreUint64Method, "TestStoreUint64Method") + testutil.RunTest(TestStoreUintptr, "TestStoreUintptr") + testutil.RunTest(TestStoreUintptrMethod, "TestStoreUintptrMethod") + testutil.RunTest(TestStorePointer, "TestStorePointer") + testutil.RunTest(TestStorePointerMethod, "TestStorePointerMethod") + testutil.RunTest(TestHammer32, "TestHammer32") + testutil.RunTest(TestHammer64, "TestHammer64") + testutil.RunTest(TestAutoAligned64, "TestAutoAligned64") + testutil.RunTest(TestNilDeref, "TestNilDeref") + testutil.RunTest(TestStoreLoadSeqCst32, "TestStoreLoadSeqCst32") + testutil.RunTest(TestStoreLoadSeqCst64, "TestStoreLoadSeqCst64") + testutil.RunTest(TestStoreLoadRelAcq32, "TestStoreLoadRelAcq32") + testutil.RunTest(TestStoreLoadRelAcq64, "TestStoreLoadRelAcq64") + testutil.RunTest(TestUnaligned64, "TestUnaligned64") + testutil.RunTest(TestHammerStoreLoad, "TestHammerStoreLoad") + + fmt.Println("Atomic tests passed") +} diff --git a/cannon/testdata/example/multithreaded/go.mod b/cannon/testdata/example/mt-cond/go.mod similarity index 58% rename from cannon/testdata/example/multithreaded/go.mod rename to cannon/testdata/example/mt-cond/go.mod index e1bdb77a9aff..d6d1853d5af2 100644 --- a/cannon/testdata/example/multithreaded/go.mod +++ b/cannon/testdata/example/mt-cond/go.mod @@ -1,4 +1,4 @@ -module multithreaded +module cond go 1.22 diff --git a/cannon/testdata/example/mt-cond/main.go b/cannon/testdata/example/mt-cond/main.go new file mode 100644 index 000000000000..2b584cec999b --- /dev/null +++ b/cannon/testdata/example/mt-cond/main.go @@ -0,0 +1,302 @@ +// Portions of this code are derived from code written by The Go Authors. 
+// See original source: https://github.com/golang/go/blob/400433af3660905ecaceaf19ddad3e6c24b141df/src/sync/cond_test.go +// +// --- Original License Notice --- +// +// Copyright 2009 The Go Authors. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google LLC nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +package main + +import ( + "fmt" + "os" + "reflect" + "runtime" + "sync" +) + +func main() { + TestCondSignal() + TestCondSignalGenerations() + TestCondBroadcast() + TestRace() + TestCondSignalStealing() + TestCondCopy() + + fmt.Println("Cond test passed") +} + +func TestCondSignal() { + var m sync.Mutex + c := sync.NewCond(&m) + n := 2 + running := make(chan bool, n) + awake := make(chan bool, n) + for i := 0; i < n; i++ { + go func() { + m.Lock() + running <- true + c.Wait() + awake <- true + m.Unlock() + }() + } + for i := 0; i < n; i++ { + <-running // Wait for everyone to run. + } + for n > 0 { + select { + case <-awake: + _, _ = fmt.Fprintln(os.Stderr, "goroutine not asleep") + os.Exit(1) + default: + } + m.Lock() + c.Signal() + m.Unlock() + <-awake // Will deadlock if no goroutine wakes up + select { + case <-awake: + _, _ = fmt.Fprintln(os.Stderr, "too many goroutines awake") + os.Exit(1) + default: + } + n-- + } + c.Signal() +} + +func TestCondSignalGenerations() { + var m sync.Mutex + c := sync.NewCond(&m) + n := 100 + running := make(chan bool, n) + awake := make(chan int, n) + for i := 0; i < n; i++ { + go func(i int) { + m.Lock() + running <- true + c.Wait() + awake <- i + m.Unlock() + }(i) + if i > 0 { + a := <-awake + if a != i-1 { + _, _ = fmt.Fprintf(os.Stderr, "wrong goroutine woke up: want %d, got %d\n", i-1, a) + os.Exit(1) + } + } + <-running + m.Lock() + c.Signal() + m.Unlock() + } +} + +func TestCondBroadcast() { + var m sync.Mutex + c := sync.NewCond(&m) + n := 5 + running := make(chan int, n) + awake := make(chan int, n) + exit := false + for i := 0; i < n; i++ { + go func(g int) { + m.Lock() + for !exit { + running <- g + c.Wait() + awake <- g + } + m.Unlock() + }(i) + } + for i := 0; i < n; i++ { + for i := 0; i < n; i++ { + <-running // Will deadlock unless n are running. 
+ } + if i == n-1 { + m.Lock() + exit = true + m.Unlock() + } + select { + case <-awake: + _, _ = fmt.Fprintln(os.Stderr, "goroutine not asleep") + os.Exit(1) + default: + } + m.Lock() + c.Broadcast() + m.Unlock() + seen := make([]bool, n) + for i := 0; i < n; i++ { + g := <-awake + if seen[g] { + _, _ = fmt.Fprintln(os.Stderr, "goroutine woke up twice") + os.Exit(1) + } + seen[g] = true + } + } + select { + case <-running: + _, _ = fmt.Fprintln(os.Stderr, "goroutine still running") + os.Exit(1) + default: + } + c.Broadcast() +} + +func TestRace() { + x := 0 + c := sync.NewCond(&sync.Mutex{}) + done := make(chan bool) + go func() { + c.L.Lock() + x = 1 + c.Wait() + if x != 2 { + _, _ = fmt.Fprintln(os.Stderr, "want 2") + os.Exit(1) + } + x = 3 + c.Signal() + c.L.Unlock() + done <- true + }() + go func() { + c.L.Lock() + for { + if x == 1 { + x = 2 + c.Signal() + break + } + c.L.Unlock() + runtime.Gosched() + c.L.Lock() + } + c.L.Unlock() + done <- true + }() + go func() { + c.L.Lock() + for { + if x == 2 { + c.Wait() + if x != 3 { + _, _ = fmt.Fprintln(os.Stderr, "want 3") + os.Exit(1) + } + break + } + if x == 3 { + break + } + c.L.Unlock() + runtime.Gosched() + c.L.Lock() + } + c.L.Unlock() + done <- true + }() + <-done + <-done + <-done +} + +func TestCondSignalStealing() { + for iters := 0; iters < 5; iters++ { + var m sync.Mutex + cond := sync.NewCond(&m) + + // Start a waiter. + ch := make(chan struct{}) + go func() { + m.Lock() + ch <- struct{}{} + cond.Wait() + m.Unlock() + + ch <- struct{}{} + }() + + <-ch + m.Lock() + m.Unlock() + + // We know that the waiter is in the cond.Wait() call because we + // synchronized with it, then acquired/released the mutex it was + // holding when we synchronized. + // + // Start two goroutines that will race: one will broadcast on + // the cond var, the other will wait on it. + // + // The new waiter may or may not get notified, but the first one + // has to be notified. + done := false + go func() { + cond.Broadcast() + }() + + go func() { + m.Lock() + for !done { + cond.Wait() + } + m.Unlock() + }() + + // Check that the first waiter does get signaled. + <-ch + + // Release the second waiter in case it didn't get the + // broadcast. 
+ m.Lock() + done = true + m.Unlock() + cond.Broadcast() + } +} + +func TestCondCopy() { + defer func() { + err := recover() + if err == nil || err.(string) != "sync.Cond is copied" { + _, _ = fmt.Fprintf(os.Stderr, "got %v, expect sync.Cond is copied", err) + os.Exit(1) + } + }() + c := sync.Cond{L: &sync.Mutex{}} + c.Signal() + var c2 sync.Cond + reflect.ValueOf(&c2).Elem().Set(reflect.ValueOf(&c).Elem()) // c2 := c, hidden from vet + c2.Signal() +} diff --git a/cannon/testdata/example/mt-general/go.mod b/cannon/testdata/example/mt-general/go.mod new file mode 100644 index 000000000000..3a7bf3680f5f --- /dev/null +++ b/cannon/testdata/example/mt-general/go.mod @@ -0,0 +1,5 @@ +module mtgeneral + +go 1.22 + +toolchain go1.22.0 diff --git a/cannon/testdata/example/multithreaded/main.go b/cannon/testdata/example/mt-general/main.go similarity index 100% rename from cannon/testdata/example/multithreaded/main.go rename to cannon/testdata/example/mt-general/main.go diff --git a/cannon/testdata/example/mt-map/go.mod b/cannon/testdata/example/mt-map/go.mod new file mode 100644 index 000000000000..7290b372361e --- /dev/null +++ b/cannon/testdata/example/mt-map/go.mod @@ -0,0 +1,8 @@ +module map + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/mt-map/main.go b/cannon/testdata/example/mt-map/main.go new file mode 100644 index 000000000000..577a19a3ed8a --- /dev/null +++ b/cannon/testdata/example/mt-map/main.go @@ -0,0 +1,19 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestMapMatchesRWMutex, "TestMapMatchesRWMutex") + testutil.RunTest(TestMapMatchesDeepCopy, "TestMapMatchesDeepCopy") + testutil.RunTest(TestConcurrentRange, "TestConcurrentRange") + testutil.RunTest(TestIssue40999, "TestIssue40999") + testutil.RunTest(TestMapRangeNestedCall, "TestMapRangeNestedCall") + testutil.RunTest(TestCompareAndSwap_NonExistingKey, "TestCompareAndSwap_NonExistingKey") + testutil.RunTest(TestMapRangeNoAllocations, "TestMapRangeNoAllocations") + + fmt.Println("Map test passed") +} diff --git a/cannon/testdata/example/mt-map/map_reference_test_copy.go b/cannon/testdata/example/mt-map/map_reference_test_copy.go new file mode 100644 index 000000000000..3beeb1501c55 --- /dev/null +++ b/cannon/testdata/example/mt-map/map_reference_test_copy.go @@ -0,0 +1,299 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/map_reference_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +package main + +import ( + "sync" + "sync/atomic" +) + +// This file contains reference map implementations for unit-tests. + +// mapInterface is the interface Map implements. +type mapInterface interface { + Load(any) (any, bool) + Store(key, value any) + LoadOrStore(key, value any) (actual any, loaded bool) + LoadAndDelete(key any) (value any, loaded bool) + Delete(any) + Swap(key, value any) (previous any, loaded bool) + CompareAndSwap(key, old, new any) (swapped bool) + CompareAndDelete(key, old any) (deleted bool) + Range(func(key, value any) (shouldContinue bool)) +} + +var ( + _ mapInterface = &RWMutexMap{} + _ mapInterface = &DeepCopyMap{} +) + +// RWMutexMap is an implementation of mapInterface using a sync.RWMutex. +type RWMutexMap struct { + mu sync.RWMutex + dirty map[any]any +} + +func (m *RWMutexMap) Load(key any) (value any, ok bool) { + m.mu.RLock() + value, ok = m.dirty[key] + m.mu.RUnlock() + return +} + +func (m *RWMutexMap) Store(key, value any) { + m.mu.Lock() + if m.dirty == nil { + m.dirty = make(map[any]any) + } + m.dirty[key] = value + m.mu.Unlock() +} + +func (m *RWMutexMap) LoadOrStore(key, value any) (actual any, loaded bool) { + m.mu.Lock() + actual, loaded = m.dirty[key] + if !loaded { + actual = value + if m.dirty == nil { + m.dirty = make(map[any]any) + } + m.dirty[key] = value + } + m.mu.Unlock() + return actual, loaded +} + +func (m *RWMutexMap) Swap(key, value any) (previous any, loaded bool) { + m.mu.Lock() + if m.dirty == nil { + m.dirty = make(map[any]any) + } + + previous, loaded = m.dirty[key] + m.dirty[key] = value + m.mu.Unlock() + return +} + +func (m *RWMutexMap) LoadAndDelete(key any) (value any, loaded bool) { + m.mu.Lock() + value, loaded = m.dirty[key] + if !loaded { + m.mu.Unlock() + return nil, false + } + delete(m.dirty, key) + m.mu.Unlock() + return value, loaded +} + +func (m *RWMutexMap) Delete(key any) { + m.mu.Lock() + delete(m.dirty, key) + m.mu.Unlock() +} + +func (m *RWMutexMap) CompareAndSwap(key, old, new any) (swapped bool) { + m.mu.Lock() + defer m.mu.Unlock() + if m.dirty == nil { + return false + } + + value, loaded := m.dirty[key] + if loaded && value == old { + m.dirty[key] = new + return true + } + return false +} + +func (m *RWMutexMap) CompareAndDelete(key, old any) (deleted bool) { + m.mu.Lock() + defer m.mu.Unlock() + if m.dirty == nil { + return false + } + + value, loaded := m.dirty[key] + if loaded && value == old { + delete(m.dirty, key) + return true + } + return false +} + +func (m *RWMutexMap) Range(f func(key, value any) (shouldContinue bool)) { + m.mu.RLock() + keys := make([]any, 0, len(m.dirty)) + for k := range m.dirty { + keys = append(keys, k) + } + m.mu.RUnlock() + + for _, k := range keys { + v, ok := m.Load(k) + 
if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +// DeepCopyMap is an implementation of mapInterface using a Mutex and +// atomic.Value. It makes deep copies of the map on every write to avoid +// acquiring the Mutex in Load. +type DeepCopyMap struct { + mu sync.Mutex + clean atomic.Value +} + +func (m *DeepCopyMap) Load(key any) (value any, ok bool) { + clean, _ := m.clean.Load().(map[any]any) + value, ok = clean[key] + return value, ok +} + +func (m *DeepCopyMap) Store(key, value any) { + m.mu.Lock() + dirty := m.dirty() + dirty[key] = value + m.clean.Store(dirty) + m.mu.Unlock() +} + +func (m *DeepCopyMap) LoadOrStore(key, value any) (actual any, loaded bool) { + clean, _ := m.clean.Load().(map[any]any) + actual, loaded = clean[key] + if loaded { + return actual, loaded + } + + m.mu.Lock() + // Reload clean in case it changed while we were waiting on m.mu. + clean, _ = m.clean.Load().(map[any]any) + actual, loaded = clean[key] + if !loaded { + dirty := m.dirty() + dirty[key] = value + actual = value + m.clean.Store(dirty) + } + m.mu.Unlock() + return actual, loaded +} + +func (m *DeepCopyMap) Swap(key, value any) (previous any, loaded bool) { + m.mu.Lock() + dirty := m.dirty() + previous, loaded = dirty[key] + dirty[key] = value + m.clean.Store(dirty) + m.mu.Unlock() + return +} + +func (m *DeepCopyMap) LoadAndDelete(key any) (value any, loaded bool) { + m.mu.Lock() + dirty := m.dirty() + value, loaded = dirty[key] + delete(dirty, key) + m.clean.Store(dirty) + m.mu.Unlock() + return +} + +func (m *DeepCopyMap) Delete(key any) { + m.mu.Lock() + dirty := m.dirty() + delete(dirty, key) + m.clean.Store(dirty) + m.mu.Unlock() +} + +func (m *DeepCopyMap) CompareAndSwap(key, old, new any) (swapped bool) { + clean, _ := m.clean.Load().(map[any]any) + if previous, ok := clean[key]; !ok || previous != old { + return false + } + + m.mu.Lock() + defer m.mu.Unlock() + dirty := m.dirty() + value, loaded := dirty[key] + if loaded && value == old { + dirty[key] = new + m.clean.Store(dirty) + return true + } + return false +} + +func (m *DeepCopyMap) CompareAndDelete(key, old any) (deleted bool) { + clean, _ := m.clean.Load().(map[any]any) + if previous, ok := clean[key]; !ok || previous != old { + return false + } + + m.mu.Lock() + defer m.mu.Unlock() + + dirty := m.dirty() + value, loaded := dirty[key] + if loaded && value == old { + delete(dirty, key) + m.clean.Store(dirty) + return true + } + return false +} + +func (m *DeepCopyMap) Range(f func(key, value any) (shouldContinue bool)) { + clean, _ := m.clean.Load().(map[any]any) + for k, v := range clean { + if !f(k, v) { + break + } + } +} + +func (m *DeepCopyMap) dirty() map[any]any { + clean, _ := m.clean.Load().(map[any]any) + dirty := make(map[any]any, len(clean)+1) + for k, v := range clean { + dirty[k] = v + } + return dirty +} diff --git a/cannon/testdata/example/mt-map/map_test_copy.go b/cannon/testdata/example/mt-map/map_test_copy.go new file mode 100644 index 000000000000..7b8806698f9f --- /dev/null +++ b/cannon/testdata/example/mt-map/map_test_copy.go @@ -0,0 +1,325 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/map_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +package main + +import ( + "math/rand" + "reflect" + "runtime" + "sync" + "sync/atomic" + "testing" + "testing/quick" + + "utils/testutil" +) + +type mapOp string + +const ( + opLoad = mapOp("Load") + opStore = mapOp("Store") + opLoadOrStore = mapOp("LoadOrStore") + opLoadAndDelete = mapOp("LoadAndDelete") + opDelete = mapOp("Delete") + opSwap = mapOp("Swap") + opCompareAndSwap = mapOp("CompareAndSwap") + opCompareAndDelete = mapOp("CompareAndDelete") +) + +var mapOps = [...]mapOp{ + opLoad, + opStore, + opLoadOrStore, + opLoadAndDelete, + opDelete, + opSwap, + opCompareAndSwap, + opCompareAndDelete, +} + +// mapCall is a quick.Generator for calls on mapInterface. 
+type mapCall struct { + op mapOp + k, v any +} + +func (c mapCall) apply(m mapInterface) (any, bool) { + switch c.op { + case opLoad: + return m.Load(c.k) + case opStore: + m.Store(c.k, c.v) + return nil, false + case opLoadOrStore: + return m.LoadOrStore(c.k, c.v) + case opLoadAndDelete: + return m.LoadAndDelete(c.k) + case opDelete: + m.Delete(c.k) + return nil, false + case opSwap: + return m.Swap(c.k, c.v) + case opCompareAndSwap: + if m.CompareAndSwap(c.k, c.v, rand.Int()) { + m.Delete(c.k) + return c.v, true + } + return nil, false + case opCompareAndDelete: + if m.CompareAndDelete(c.k, c.v) { + if _, ok := m.Load(c.k); !ok { + return nil, true + } + } + return nil, false + default: + panic("invalid mapOp") + } +} + +type mapResult struct { + value any + ok bool +} + +func randValue(r *rand.Rand) any { + b := make([]byte, r.Intn(4)) + for i := range b { + b[i] = 'a' + byte(rand.Intn(26)) + } + return string(b) +} + +func (mapCall) Generate(r *rand.Rand, size int) reflect.Value { + c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)} + switch c.op { + case opStore, opLoadOrStore: + c.v = randValue(r) + } + return reflect.ValueOf(c) +} + +func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[any]any) { + for _, c := range calls { + v, ok := c.apply(m) + results = append(results, mapResult{v, ok}) + } + + final = make(map[any]any) + m.Range(func(k, v any) bool { + final[k] = v + return true + }) + + return results, final +} + +func applyMap(calls []mapCall) ([]mapResult, map[any]any) { + return applyCalls(new(sync.Map), calls) +} + +func applyRWMutexMap(calls []mapCall) ([]mapResult, map[any]any) { + return applyCalls(new(RWMutexMap), calls) +} + +func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[any]any) { + return applyCalls(new(DeepCopyMap), calls) +} + +func TestMapMatchesRWMutex(t *testutil.TestRunner) { + if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil { + t.Error(err) + } +} + +func TestMapMatchesDeepCopy(t *testutil.TestRunner) { + if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil { + t.Error(err) + } +} + +func TestConcurrentRange(t *testutil.TestRunner) { + const mapSize = 1 << 10 + + m := new(sync.Map) + for n := int64(1); n <= mapSize; n++ { + m.Store(n, int64(n)) + } + + done := make(chan struct{}) + var wg sync.WaitGroup + defer func() { + close(done) + wg.Wait() + }() + for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- { + r := rand.New(rand.NewSource(g)) + wg.Add(1) + go func(g int64) { + defer wg.Done() + for i := int64(0); ; i++ { + select { + case <-done: + return + default: + } + for n := int64(1); n < mapSize; n++ { + if r.Int63n(mapSize) == 0 { + m.Store(n, n*i*g) + } else { + m.Load(n) + } + } + } + }(g) + } + + //iters := 1 << 10 + //if testing.Short() { + // iters = 16 + //} + iters := 16 + for n := iters; n > 0; n-- { + seen := make(map[int64]bool, mapSize) + + m.Range(func(ki, vi any) bool { + k, v := ki.(int64), vi.(int64) + if v%k != 0 { + t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v) + } + if seen[k] { + t.Fatalf("Range visited key %v twice", k) + } + seen[k] = true + return true + }) + + if len(seen) != mapSize { + t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize) + } + } +} + +func TestIssue40999(t *testutil.TestRunner) { + var m sync.Map + + // Since the miss-counting in missLocked (via Delete) + // compares the miss count with len(m.dirty), + // add an initial entry to bias len(m.dirty) above the miss count. 
+ m.Store(nil, struct{}{}) + + var finalized uint32 + + // Set finalizers that count for collected keys. A non-zero count + // indicates that keys have not been leaked. + for atomic.LoadUint32(&finalized) == 0 { + p := new(int) + runtime.SetFinalizer(p, func(*int) { + atomic.AddUint32(&finalized, 1) + }) + m.Store(p, struct{}{}) + m.Delete(p) + runtime.GC() + } +} + +func TestMapRangeNestedCall(t *testutil.TestRunner) { // Issue 46399 + var m sync.Map + for i, v := range [3]string{"hello", "world", "Go"} { + m.Store(i, v) + } + m.Range(func(key, value any) bool { + m.Range(func(key, value any) bool { + // We should be able to load the key offered in the Range callback, + // because no concurrent Delete is involved in this test. + if v, ok := m.Load(key); !ok || !reflect.DeepEqual(v, value) { + t.Fatalf("Nested Range loads unexpected value, got %+v want %+v", v, value) + } + + // We never stored a value under key 42, so if one is loaded here there + // must be an internal bug in the Map's nested Range handling. + if _, loaded := m.LoadOrStore(42, "dummy"); loaded { + t.Fatalf("Nested Range loads unexpected value, want store a new value") + } + + // Store a value under key 42 and then LoadAndDelete it, which should + // remove key 42 and its value from the Map, so no future Range will + // observe key 42 (as checked above). + val := "sync.Map" + m.Store(42, val) + if v, loaded := m.LoadAndDelete(42); !loaded || !reflect.DeepEqual(v, val) { + t.Fatalf("Nested Range loads unexpected value, got %v, want %v", v, val) + } + return true + }) + + // Remove key from Map on-the-fly. + m.Delete(key) + return true + }) + + // After deleting every key during the Range, the Map should be empty and + // a further Range should never invoke the callback, so length stays 0. + length := 0 + m.Range(func(key, value any) bool { + length++ + return true + }) + + if length != 0 { + t.Fatalf("Unexpected sync.Map size, got %v want %v", length, 0) + } +} + +func TestCompareAndSwap_NonExistingKey(t *testutil.TestRunner) { + m := &sync.Map{} + if m.CompareAndSwap(m, nil, 42) { + // See https://go.dev/issue/51972#issuecomment-1126408637. 
+ t.Fatalf("CompareAndSwap on a non-existing key succeeded") + } +} + +func TestMapRangeNoAllocations(t *testutil.TestRunner) { // Issue 62404 + var m sync.Map + allocs := testing.AllocsPerRun(10, func() { + m.Range(func(key, value any) bool { + return true + }) + }) + if allocs > 0 { + t.Errorf("AllocsPerRun of m.Range = %v; want 0", allocs) + } +} diff --git a/cannon/testdata/example/mt-mutex/go.mod b/cannon/testdata/example/mt-mutex/go.mod new file mode 100644 index 000000000000..3aceb4c8cebf --- /dev/null +++ b/cannon/testdata/example/mt-mutex/go.mod @@ -0,0 +1,8 @@ +module mutex + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/mt-mutex/main.go b/cannon/testdata/example/mt-mutex/main.go new file mode 100644 index 000000000000..1a3b75c231bc --- /dev/null +++ b/cannon/testdata/example/mt-mutex/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestSemaphore, "TestSemaphore") + testutil.RunTest(TestMutex, "TestMutex") + testutil.RunTest(TestMutexFairness, "TestMutexFairness") + + fmt.Println("Mutex test passed") +} diff --git a/cannon/testdata/example/mt-mutex/mutex_test_copy.go b/cannon/testdata/example/mt-mutex/mutex_test_copy.go new file mode 100644 index 000000000000..d3ed9343c3d4 --- /dev/null +++ b/cannon/testdata/example/mt-mutex/mutex_test_copy.go @@ -0,0 +1,135 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/mutex_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "runtime" + . 
"sync" + "time" + + "utils/testutil" +) + +func HammerSemaphore(s *uint32, loops int, cdone chan bool) { + for i := 0; i < loops; i++ { + Runtime_Semacquire(s) + Runtime_Semrelease(s, false, 0) + } + cdone <- true +} + +func TestSemaphore(t *testutil.TestRunner) { + s := new(uint32) + *s = 1 + c := make(chan bool) + for i := 0; i < 10; i++ { + go HammerSemaphore(s, 1000, c) + } + for i := 0; i < 10; i++ { + <-c + } +} + +func HammerMutex(m *Mutex, loops int, cdone chan bool) { + for i := 0; i < loops; i++ { + if i%3 == 0 { + if m.TryLock() { + m.Unlock() + } + continue + } + m.Lock() + m.Unlock() + } + cdone <- true +} + +func TestMutex(t *testutil.TestRunner) { + if n := runtime.SetMutexProfileFraction(1); n != 0 { + t.Logf("got mutexrate %d expected 0", n) + } + defer runtime.SetMutexProfileFraction(0) + + m := new(Mutex) + + m.Lock() + if m.TryLock() { + t.Fatalf("TryLock succeeded with mutex locked") + } + m.Unlock() + if !m.TryLock() { + t.Fatalf("TryLock failed with mutex unlocked") + } + m.Unlock() + + c := make(chan bool) + for i := 0; i < 10; i++ { + go HammerMutex(m, 1000, c) + } + for i := 0; i < 10; i++ { + <-c + } +} + +func TestMutexFairness(t *testutil.TestRunner) { + var mu Mutex + stop := make(chan bool) + defer close(stop) + go func() { + for { + mu.Lock() + time.Sleep(100 * time.Microsecond) + mu.Unlock() + select { + case <-stop: + return + default: + } + } + }() + done := make(chan bool, 1) + go func() { + for i := 0; i < 10; i++ { + time.Sleep(100 * time.Microsecond) + mu.Lock() + mu.Unlock() + } + done <- true + }() + select { + case <-done: + case <-time.After(10 * time.Second): + t.Fatalf("can't acquire Mutex in 10 seconds") + } +} diff --git a/cannon/testdata/example/mt-mutex/runtime.go b/cannon/testdata/example/mt-mutex/runtime.go new file mode 100644 index 000000000000..8d70cb10b354 --- /dev/null +++ b/cannon/testdata/example/mt-mutex/runtime.go @@ -0,0 +1,14 @@ +package main + +import ( + _ "unsafe" // Required for go:linkname +) + +var Runtime_Semacquire = runtime_Semacquire +var Runtime_Semrelease = runtime_Semrelease + +//go:linkname runtime_Semacquire sync.runtime_Semacquire +func runtime_Semacquire(s *uint32) + +//go:linkname runtime_Semrelease sync.runtime_Semrelease +func runtime_Semrelease(s *uint32, handoff bool, skipframes int) diff --git a/cannon/testdata/example/mt-once/go.mod b/cannon/testdata/example/mt-once/go.mod new file mode 100644 index 000000000000..7595e1de483f --- /dev/null +++ b/cannon/testdata/example/mt-once/go.mod @@ -0,0 +1,5 @@ +module once + +go 1.22 + +toolchain go1.22.0 diff --git a/cannon/testdata/example/mt-once/main.go b/cannon/testdata/example/mt-once/main.go new file mode 100644 index 000000000000..3be753e2f702 --- /dev/null +++ b/cannon/testdata/example/mt-once/main.go @@ -0,0 +1,98 @@ +// Portions of this code are derived from code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/400433af3660905ecaceaf19ddad3e6c24b141df/src/sync/once_test.go +// +// --- Original License Notice --- +// +// Copyright 2009 The Go Authors. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google LLC nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +package main + +import ( + "fmt" + "os" + "sync" +) + +func main() { + TestOnce() + TestOncePanic() + + fmt.Println("Once test passed") +} + +type one int + +func (o *one) Increment() { + *o++ +} + +func run(once *sync.Once, o *one, c chan bool) { + once.Do(func() { o.Increment() }) + if v := *o; v != 1 { + _, _ = fmt.Fprintf(os.Stderr, "once failed inside run: %d is not 1\n", v) + os.Exit(1) + } + c <- true +} + +func TestOnce() { + o := new(one) + once := new(sync.Once) + c := make(chan bool) + const N = 10 + for i := 0; i < N; i++ { + go run(once, o, c) + } + for i := 0; i < N; i++ { + <-c + } + if *o != 1 { + _, _ = fmt.Fprintf(os.Stderr, "once failed outside run: %d is not 1\n", *o) + os.Exit(1) + } +} + +func TestOncePanic() { + var once sync.Once + func() { + defer func() { + if r := recover(); r == nil { + _, _ = fmt.Fprintf(os.Stderr, "Once.Do did not panic") + os.Exit(1) + } + }() + once.Do(func() { + panic("failed") + }) + }() + + once.Do(func() { + _, _ = fmt.Fprintf(os.Stderr, "Once.Do called twice") + os.Exit(1) + }) +} diff --git a/cannon/testdata/example/mt-oncefunc/go.mod b/cannon/testdata/example/mt-oncefunc/go.mod new file mode 100644 index 000000000000..e0f45e8c8790 --- /dev/null +++ b/cannon/testdata/example/mt-oncefunc/go.mod @@ -0,0 +1,8 @@ +module oncefunc + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/mt-oncefunc/main.go b/cannon/testdata/example/mt-oncefunc/main.go new file mode 100644 index 000000000000..d5b0badc292a --- /dev/null +++ b/cannon/testdata/example/mt-oncefunc/main.go @@ -0,0 +1,22 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestOnceFunc, "TestOnceFunc") + testutil.RunTest(TestOnceValue, "TestOnceValue") + testutil.RunTest(TestOnceValues, "TestOnceValues") + testutil.RunTest(TestOnceFuncPanic, "TestOnceFuncPanic") + testutil.RunTest(TestOnceValuePanic, "TestOnceValuePanic") + testutil.RunTest(TestOnceValuesPanic, "TestOnceValuesPanic") + testutil.RunTest(TestOnceFuncPanicNil, "TestOnceFuncPanicNil") + testutil.RunTest(TestOnceFuncGoexit, "TestOnceFuncGoexit") + testutil.RunTest(TestOnceFuncPanicTraceback, "TestOnceFuncPanicTraceback") + testutil.RunTest(TestOnceXGC, "TestOnceXGC") + + fmt.Println("OnceFunc tests passed") +} diff 
--git a/cannon/testdata/example/mt-oncefunc/oncefunc_test_copy.go b/cannon/testdata/example/mt-oncefunc/oncefunc_test_copy.go new file mode 100644 index 000000000000..fdbe93c260aa --- /dev/null +++ b/cannon/testdata/example/mt-oncefunc/oncefunc_test_copy.go @@ -0,0 +1,265 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/oncefunc_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "bytes" + "math" + "runtime" + "runtime/debug" + "sync" + "sync/atomic" + "testing" + _ "unsafe" + + "utils/testutil" +) + +// We assume that the Once.Do tests have already covered parallelism. 
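For orientation, the tests that follow exercise the Go 1.21+ sync.OnceFunc / sync.OnceValue / sync.OnceValues wrappers. A minimal standalone sketch of the stdlib behaviour they assert (independent of the testutil harness used above): the wrapped function runs at most once, and for OnceValue its result is cached for every later call.

package main

import (
	"fmt"
	"sync"
)

func main() {
	// OnceFunc: the wrapped function runs at most once, no matter how many
	// times (or from how many goroutines) the returned func is invoked.
	calls := 0
	f := sync.OnceFunc(func() { calls++ })
	f()
	f()
	fmt.Println("calls:", calls) // calls: 1

	// OnceValue: the function runs once and its result is cached, so every
	// call returns the same value.
	n := 0
	v := sync.OnceValue(func() int { n++; return n })
	fmt.Println(v(), v()) // 1 1
}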
+ +func TestOnceFunc(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceFunc(func() { calls++ }) + allocs := testing.AllocsPerRun(10, f) + if calls != 1 { + t.Errorf("want calls==1, got %d", calls) + } + if allocs != 0 { + t.Errorf("want 0 allocations per call, got %v", allocs) + } +} + +func TestOnceValue(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceValue(func() int { + calls++ + return calls + }) + allocs := testing.AllocsPerRun(10, func() { f() }) + value := f() + if calls != 1 { + t.Errorf("want calls==1, got %d", calls) + } + if value != 1 { + t.Errorf("want value==1, got %d", value) + } + if allocs != 0 { + t.Errorf("want 0 allocations per call, got %v", allocs) + } +} + +func TestOnceValues(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceValues(func() (int, int) { + calls++ + return calls, calls + 1 + }) + allocs := testing.AllocsPerRun(10, func() { f() }) + v1, v2 := f() + if calls != 1 { + t.Errorf("want calls==1, got %d", calls) + } + if v1 != 1 || v2 != 2 { + t.Errorf("want v1==1 and v2==2, got %d and %d", v1, v2) + } + if allocs != 0 { + t.Errorf("want 0 allocations per call, got %v", allocs) + } +} + +func testOncePanicX(t testing.TB, calls *int, f func()) { + testOncePanicWith(t, calls, f, func(label string, p any) { + if p != "x" { + t.Fatalf("%s: want panic %v, got %v", label, "x", p) + } + }) +} + +func testOncePanicWith(t testing.TB, calls *int, f func(), check func(label string, p any)) { + // Check that the each call to f panics with the same value, but the + // underlying function is only called once. + for _, label := range []string{"first time", "second time"} { + var p any + panicked := true + func() { + defer func() { + p = recover() + }() + f() + panicked = false + }() + if !panicked { + t.Fatalf("%s: f did not panic", label) + } + check(label, p) + } + if *calls != 1 { + t.Errorf("want calls==1, got %d", *calls) + } +} + +func TestOnceFuncPanic(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceFunc(func() { + calls++ + panic("x") + }) + testOncePanicX(t, &calls, f) +} + +func TestOnceValuePanic(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceValue(func() int { + calls++ + panic("x") + }) + testOncePanicX(t, &calls, func() { f() }) +} + +func TestOnceValuesPanic(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceValues(func() (int, int) { + calls++ + panic("x") + }) + testOncePanicX(t, &calls, func() { f() }) +} + +func TestOnceFuncPanicNil(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceFunc(func() { + calls++ + panic(nil) + }) + testOncePanicWith(t, &calls, f, func(label string, p any) { + switch p.(type) { + case nil, *runtime.PanicNilError: + return + } + t.Fatalf("%s: want nil panic, got %v", label, p) + }) +} + +func TestOnceFuncGoexit(t *testutil.TestRunner) { + // If f calls Goexit, the results are unspecified. But check that f doesn't + // get called twice. + calls := 0 + f := sync.OnceFunc(func() { + calls++ + runtime.Goexit() + }) + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + defer func() { recover() }() + f() + }() + wg.Wait() + } + if calls != 1 { + t.Errorf("want calls==1, got %d", calls) + } +} + +func TestOnceFuncPanicTraceback(t *testutil.TestRunner) { + // Test that on the first invocation of a OnceFunc, the stack trace goes all + // the way to the origin of the panic. 
+ f := sync.OnceFunc(onceFuncPanic) + + defer func() { + if p := recover(); p != "x" { + t.Fatalf("want panic %v, got %v", "x", p) + } + stack := debug.Stack() + //want := "sync_test.onceFuncPanic" + want := "main.onceFuncPanic" + if !bytes.Contains(stack, []byte(want)) { + t.Fatalf("want stack containing %v, got:\n%s", want, string(stack)) + } + }() + f() +} + +func onceFuncPanic() { + panic("x") +} + +func TestOnceXGC(t *testutil.TestRunner) { + fns := map[string]func([]byte) func(){ + "OnceFunc": func(buf []byte) func() { + return sync.OnceFunc(func() { buf[0] = 1 }) + }, + "OnceValue": func(buf []byte) func() { + f := sync.OnceValue(func() any { buf[0] = 1; return nil }) + return func() { f() } + }, + "OnceValues": func(buf []byte) func() { + f := sync.OnceValues(func() (any, any) { buf[0] = 1; return nil, nil }) + return func() { f() } + }, + } + for n, fn := range fns { + t.Run(n, func(t testing.TB) { + buf := make([]byte, 1024) + var gc atomic.Bool + runtime.SetFinalizer(&buf[0], func(_ *byte) { + gc.Store(true) + }) + f := fn(buf) + gcwaitfin() + if gc.Load() != false { + t.Fatal("wrapped function garbage collected too early") + } + f() + gcwaitfin() + if gc.Load() != true { + // Even if f is still alive, the function passed to Once(Func|Value|Values) + // is not kept alive after the first call to f. + t.Fatal("wrapped function should be garbage collected, but still live") + } + f() + }) + } +} + +// gcwaitfin performs garbage collection and waits for all finalizers to run. +func gcwaitfin() { + runtime.GC() + runtime_blockUntilEmptyFinalizerQueue(math.MaxInt64) +} + +//go:linkname runtime_blockUntilEmptyFinalizerQueue runtime.blockUntilEmptyFinalizerQueue +func runtime_blockUntilEmptyFinalizerQueue(int64) bool diff --git a/cannon/testdata/example/mt-pool/export_test_copy.go b/cannon/testdata/example/mt-pool/export_test_copy.go new file mode 100644 index 000000000000..37b7c4a92bf5 --- /dev/null +++ b/cannon/testdata/example/mt-pool/export_test_copy.go @@ -0,0 +1,86 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/export_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +// Export for testing. +// var Runtime_Semacquire = runtime_Semacquire +// var Runtime_Semrelease = runtime_Semrelease +var Runtime_procPin = runtime_procPin +var Runtime_procUnpin = runtime_procUnpin + +// poolDequeue testing. +type PoolDequeue interface { + PushHead(val any) bool + PopHead() (any, bool) + PopTail() (any, bool) +} + +func NewPoolDequeue(n int) PoolDequeue { + d := &poolDequeue{ + vals: make([]eface, n), + } + // For testing purposes, set the head and tail indexes close + // to wrapping around. + d.headTail.Store(d.pack(1< ../../utils diff --git a/cannon/testdata/example/mt-pool/main.go b/cannon/testdata/example/mt-pool/main.go new file mode 100644 index 000000000000..2c138b07cd97 --- /dev/null +++ b/cannon/testdata/example/mt-pool/main.go @@ -0,0 +1,20 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestPool, "TestPool") + testutil.RunTest(TestPoolNew, "TestPoolNew") + testutil.RunTest(TestPoolGC, "TestPoolGC") + testutil.RunTest(TestPoolRelease, "TestPoolRelease") + testutil.RunTest(TestPoolStress, "TestPoolStress") + testutil.RunTest(TestPoolDequeue, "TestPoolDequeue") + testutil.RunTest(TestPoolChain, "TestPoolChain") + testutil.RunTest(TestNilPool, "TestNilPool") + + fmt.Println("Pool test passed") +} diff --git a/cannon/testdata/example/mt-pool/pool_test_copy.go b/cannon/testdata/example/mt-pool/pool_test_copy.go new file mode 100644 index 000000000000..962cbfce7b5e --- /dev/null +++ b/cannon/testdata/example/mt-pool/pool_test_copy.go @@ -0,0 +1,298 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/pool_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "runtime" + "runtime/debug" + . "sync" + "sync/atomic" + "testing" + "time" + + "utils/testutil" +) + +var short bool = true + +func TestPool(t *testutil.TestRunner) { + // disable GC so we can control when it happens. + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + var p Pool + if p.Get() != nil { + t.Fatal("expected empty") + } + + // Make sure that the goroutine doesn't migrate to another P + // between Put and Get calls. + Runtime_procPin() + p.Put("a") + p.Put("b") + if g := p.Get(); g != "a" { + t.Fatalf("got %#v; want a", g) + } + if g := p.Get(); g != "b" { + t.Fatalf("got %#v; want b", g) + } + if g := p.Get(); g != nil { + t.Fatalf("got %#v; want nil", g) + } + Runtime_procUnpin() + + // Put in a large number of objects so they spill into + // stealable space. + for i := 0; i < 100; i++ { + p.Put("c") + } + // After one GC, the victim cache should keep them alive. + runtime.GC() + if g := p.Get(); g != "c" { + t.Fatalf("got %#v; want c after GC", g) + } + // A second GC should drop the victim cache. + runtime.GC() + if g := p.Get(); g != nil { + t.Fatalf("got %#v; want nil after second GC", g) + } +} + +func TestPoolNew(t *testutil.TestRunner) { + // disable GC so we can control when it happens. + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + + i := 0 + p := Pool{ + New: func() any { + i++ + return i + }, + } + if v := p.Get(); v != 1 { + t.Fatalf("got %v; want 1", v) + } + if v := p.Get(); v != 2 { + t.Fatalf("got %v; want 2", v) + } + + // Make sure that the goroutine doesn't migrate to another P + // between Put and Get calls. + Runtime_procPin() + p.Put(42) + if v := p.Get(); v != 42 { + t.Fatalf("got %v; want 42", v) + } + Runtime_procUnpin() + + if v := p.Get(); v != 3 { + t.Fatalf("got %v; want 3", v) + } +} + +// Test that Pool does not hold pointers to previously cached resources. +func TestPoolGC(t *testutil.TestRunner) { + testPool(t, true) +} + +// Test that Pool releases resources on GC. 
+func TestPoolRelease(t *testutil.TestRunner) { + testPool(t, false) +} + +func testPool(t testing.TB, drain bool) { + var p Pool + const N = 100 +loop: + for try := 0; try < 3; try++ { + if try == 1 && short { + break + } + var fin, fin1 uint32 + for i := 0; i < N; i++ { + v := new(string) + runtime.SetFinalizer(v, func(vv *string) { + atomic.AddUint32(&fin, 1) + }) + p.Put(v) + } + if drain { + for i := 0; i < N; i++ { + p.Get() + } + } + for i := 0; i < 5; i++ { + runtime.GC() + time.Sleep(time.Duration(i*100+10) * time.Millisecond) + // 1 pointer can remain on stack or elsewhere + if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 { + continue loop + } + } + t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try) + } +} + +func TestPoolStress(t *testutil.TestRunner) { + const P = 10 + N := int(1e6) + if short { + N /= 100 + } + var p Pool + done := make(chan bool) + for i := 0; i < P; i++ { + go func() { + var v any = 0 + for j := 0; j < N; j++ { + if v == nil { + v = 0 + } + p.Put(v) + v = p.Get() + if v != nil && v.(int) != 0 { + t.Errorf("expect 0, got %v", v) + break + } + } + done <- true + }() + } + for i := 0; i < P; i++ { + <-done + } +} + +func TestPoolDequeue(t *testutil.TestRunner) { + testPoolDequeue(t, NewPoolDequeue(16)) +} + +func TestPoolChain(t *testutil.TestRunner) { + testPoolDequeue(t, NewPoolChain()) +} + +func testPoolDequeue(t testing.TB, d PoolDequeue) { + const P = 10 + var N int = 2e6 + if short { + N = 1e3 + } + have := make([]int32, N) + var stop int32 + var wg WaitGroup + record := func(val int) { + atomic.AddInt32(&have[val], 1) + if val == N-1 { + atomic.StoreInt32(&stop, 1) + } + } + + // Start P-1 consumers. + for i := 1; i < P; i++ { + wg.Add(1) + go func() { + fail := 0 + for atomic.LoadInt32(&stop) == 0 { + val, ok := d.PopTail() + if ok { + fail = 0 + record(val.(int)) + } else { + // Speed up the test by + // allowing the pusher to run. + if fail++; fail%100 == 0 { + runtime.Gosched() + } + } + } + wg.Done() + }() + } + + // Start 1 producer. + nPopHead := 0 + wg.Add(1) + go func() { + for j := 0; j < N; j++ { + for !d.PushHead(j) { + // Allow a popper to run. + runtime.Gosched() + } + if j%10 == 0 { + val, ok := d.PopHead() + if ok { + nPopHead++ + record(val.(int)) + } + } + } + wg.Done() + }() + wg.Wait() + + // Check results. + for i, count := range have { + if count != 1 { + t.Errorf("expected have[%d] = 1, got %d", i, count) + } + } + // Check that at least some PopHeads succeeded. We skip this + // check in short mode because it's common enough that the + // queue will stay nearly empty all the time and a PopTail + // will happen during the window between every PushHead and + // PopHead. + if !short && nPopHead == 0 { + t.Errorf("popHead never succeeded") + } +} + +func TestNilPool(t *testutil.TestRunner) { + catch := func() { + if recover() == nil { + t.Error("expected panic") + } + } + + var p *Pool + t.Run("Get", func(t testing.TB) { + defer catch() + if p.Get() != nil { + t.Error("expected empty") + } + t.Error("should have panicked already") + }) + t.Run("Put", func(t testing.TB) { + defer catch() + p.Put("a") + t.Error("should have panicked already") + }) +} diff --git a/cannon/testdata/example/mt-pool/poolqueue_copy.go b/cannon/testdata/example/mt-pool/poolqueue_copy.go new file mode 100644 index 000000000000..5aa7b1ffa7b9 --- /dev/null +++ b/cannon/testdata/example/mt-pool/poolqueue_copy.go @@ -0,0 +1,338 @@ +// This file is based on code written by The Go Authors. 
+// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/poolqueue.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "sync/atomic" + "unsafe" +) + +// poolDequeue is a lock-free fixed-size single-producer, +// multi-consumer queue. The single producer can both push and pop +// from the head, and consumers can pop from the tail. +// +// It has the added feature that it nils out unused slots to avoid +// unnecessary retention of objects. This is important for sync.Pool, +// but not typically a property considered in the literature. +type poolDequeue struct { + // headTail packs together a 32-bit head index and a 32-bit + // tail index. Both are indexes into vals modulo len(vals)-1. + // + // tail = index of oldest data in queue + // head = index of next slot to fill + // + // Slots in the range [tail, head) are owned by consumers. + // A consumer continues to own a slot outside this range until + // it nils the slot, at which point ownership passes to the + // producer. + // + // The head index is stored in the most-significant bits so + // that we can atomically add to it and the overflow is + // harmless. + headTail atomic.Uint64 + + // vals is a ring buffer of interface{} values stored in this + // dequeue. The size of this must be a power of 2. + // + // vals[i].typ is nil if the slot is empty and non-nil + // otherwise. A slot is still in use until *both* the tail + // index has moved beyond it and typ has been set to nil. This + // is set to nil atomically by the consumer and read + // atomically by the producer. + vals []eface +} + +type eface struct { + typ, val unsafe.Pointer +} + +const dequeueBits = 32 + +// dequeueLimit is the maximum size of a poolDequeue. +// +// This must be at most (1<> dequeueBits) & mask) + tail = uint32(ptrs & mask) + return +} + +func (d *poolDequeue) pack(head, tail uint32) uint64 { + const mask = 1<= dequeueLimit { + // Can't make it any bigger. 
+ newSize = dequeueLimit + } + + d2 := &poolChainElt{prev: d} + d2.vals = make([]eface, newSize) + c.head = d2 + storePoolChainElt(&d.next, d2) + d2.pushHead(val) +} + +func (c *poolChain) popHead() (any, bool) { + d := c.head + for d != nil { + if val, ok := d.popHead(); ok { + return val, ok + } + // There may still be unconsumed elements in the + // previous dequeue, so try backing up. + d = loadPoolChainElt(&d.prev) + } + return nil, false +} + +func (c *poolChain) popTail() (any, bool) { + d := loadPoolChainElt(&c.tail) + if d == nil { + return nil, false + } + + for { + // It's important that we load the next pointer + // *before* popping the tail. In general, d may be + // transiently empty, but if next is non-nil before + // the pop and the pop fails, then d is permanently + // empty, which is the only condition under which it's + // safe to drop d from the chain. + d2 := loadPoolChainElt(&d.next) + + if val, ok := d.popTail(); ok { + return val, ok + } + + if d2 == nil { + // This is the only dequeue. It's empty right + // now, but could be pushed to in the future. + return nil, false + } + + // The tail of the chain has been drained, so move on + // to the next dequeue. Try to drop it from the chain + // so the next pop doesn't have to look at the empty + // dequeue again. + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&c.tail)), unsafe.Pointer(d), unsafe.Pointer(d2)) { + // We won the race. Clear the prev pointer so + // the garbage collector can collect the empty + // dequeue and so popHead doesn't back up + // further than necessary. + storePoolChainElt(&d2.prev, nil) + } + d = d2 + } +} diff --git a/cannon/testdata/example/mt-pool/runtime.go b/cannon/testdata/example/mt-pool/runtime.go new file mode 100644 index 000000000000..1b6dbe3e6cd5 --- /dev/null +++ b/cannon/testdata/example/mt-pool/runtime.go @@ -0,0 +1,11 @@ +package main + +import ( + _ "unsafe" // Required for go:linkname +) + +//go:linkname runtime_procPin runtime.procPin +func runtime_procPin() int + +//go:linkname runtime_procUnpin runtime.procUnpin +func runtime_procUnpin() diff --git a/cannon/testdata/example/mt-rwmutex/go.mod b/cannon/testdata/example/mt-rwmutex/go.mod new file mode 100644 index 000000000000..a0a433e91199 --- /dev/null +++ b/cannon/testdata/example/mt-rwmutex/go.mod @@ -0,0 +1,5 @@ +module rwmutex + +go 1.22 + +toolchain go1.22.0 diff --git a/cannon/testdata/example/mt-rwmutex/main.go b/cannon/testdata/example/mt-rwmutex/main.go new file mode 100644 index 000000000000..8553bba75ef4 --- /dev/null +++ b/cannon/testdata/example/mt-rwmutex/main.go @@ -0,0 +1,226 @@ +// Portions of this code are derived from code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/400433af3660905ecaceaf19ddad3e6c24b141df/src/sync/rwmutex_test.go +// +// --- Original License Notice --- +// +// Copyright 2009 The Go Authors. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google LLC nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +package main + +import ( + "fmt" + "os" + "runtime" + "sync" + "sync/atomic" +) + +func main() { + TestParallelReaders() + TestRLocker() + TestRWMutex() + + fmt.Println("RWMutex test passed") +} + +func parallelReader(m *sync.RWMutex, clocked, cunlock, cdone chan bool) { + m.RLock() + clocked <- true + <-cunlock + m.RUnlock() + cdone <- true +} + +func doTestParallelReaders(numReaders, gomaxprocs int) { + runtime.GOMAXPROCS(gomaxprocs) + var m sync.RWMutex + clocked := make(chan bool) + cunlock := make(chan bool) + cdone := make(chan bool) + for i := 0; i < numReaders; i++ { + go parallelReader(&m, clocked, cunlock, cdone) + } + // Wait for all parallel RLock()s to succeed. + for i := 0; i < numReaders; i++ { + <-clocked + } + for i := 0; i < numReaders; i++ { + cunlock <- true + } + // Wait for the goroutines to finish. + for i := 0; i < numReaders; i++ { + <-cdone + } +} + +func TestParallelReaders() { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1)) + doTestParallelReaders(1, 4) + doTestParallelReaders(3, 4) + doTestParallelReaders(4, 2) +} + +func reader(rwm *sync.RWMutex, num_iterations int, activity *int32, cdone chan bool) { + for i := 0; i < num_iterations; i++ { + rwm.RLock() + n := atomic.AddInt32(activity, 1) + if n < 1 || n >= 10000 { + rwm.RUnlock() + panic(fmt.Sprintf("wlock(%d)\n", n)) + } + for i := 0; i < 100; i++ { + } + atomic.AddInt32(activity, -1) + rwm.RUnlock() + } + cdone <- true +} + +func writer(rwm *sync.RWMutex, num_iterations int, activity *int32, cdone chan bool) { + for i := 0; i < num_iterations; i++ { + rwm.Lock() + n := atomic.AddInt32(activity, 10000) + if n != 10000 { + rwm.Unlock() + panic(fmt.Sprintf("wlock(%d)\n", n)) + } + for i := 0; i < 100; i++ { + } + atomic.AddInt32(activity, -10000) + rwm.Unlock() + } + cdone <- true +} + +func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) { + runtime.GOMAXPROCS(gomaxprocs) + // Number of active readers + 10000 * number of active writers. + var activity int32 + var rwm sync.RWMutex + cdone := make(chan bool) + go writer(&rwm, num_iterations, &activity, cdone) + var i int + for i = 0; i < numReaders/2; i++ { + go reader(&rwm, num_iterations, &activity, cdone) + } + go writer(&rwm, num_iterations, &activity, cdone) + for ; i < numReaders; i++ { + go reader(&rwm, num_iterations, &activity, cdone) + } + // Wait for the 2 writers and all readers to finish. 
+ for i := 0; i < 2+numReaders; i++ { + <-cdone + } +} + +func TestRWMutex() { + var m sync.RWMutex + + m.Lock() + if m.TryLock() { + _, _ = fmt.Fprintln(os.Stderr, "TryLock succeeded with mutex locked") + os.Exit(1) + } + if m.TryRLock() { + _, _ = fmt.Fprintln(os.Stderr, "TryRLock succeeded with mutex locked") + os.Exit(1) + } + m.Unlock() + + if !m.TryLock() { + _, _ = fmt.Fprintln(os.Stderr, "TryLock failed with mutex unlocked") + os.Exit(1) + } + m.Unlock() + + if !m.TryRLock() { + _, _ = fmt.Fprintln(os.Stderr, "TryRLock failed with mutex unlocked") + os.Exit(1) + } + if !m.TryRLock() { + _, _ = fmt.Fprintln(os.Stderr, "TryRLock failed with mutex unlocked") + os.Exit(1) + } + if m.TryLock() { + _, _ = fmt.Fprintln(os.Stderr, "TryLock succeeded with mutex rlocked") + os.Exit(1) + } + m.RUnlock() + m.RUnlock() + + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1)) + n := 5 + + HammerRWMutex(1, 1, n) + HammerRWMutex(1, 3, n) + HammerRWMutex(1, 10, n) + HammerRWMutex(4, 1, n) + HammerRWMutex(4, 3, n) + HammerRWMutex(4, 10, n) + HammerRWMutex(10, 1, n) + HammerRWMutex(10, 3, n) + HammerRWMutex(10, 10, n) + HammerRWMutex(10, 5, n) +} + +func TestRLocker() { + var wl sync.RWMutex + var rl sync.Locker + wlocked := make(chan bool, 1) + rlocked := make(chan bool, 1) + rl = wl.RLocker() + n := 10 + go func() { + for i := 0; i < n; i++ { + rl.Lock() + rl.Lock() + rlocked <- true + wl.Lock() + wlocked <- true + } + }() + for i := 0; i < n; i++ { + <-rlocked + rl.Unlock() + select { + case <-wlocked: + _, _ = fmt.Fprintln(os.Stderr, "RLocker() didn't read-lock it") + os.Exit(1) + default: + } + rl.Unlock() + <-wlocked + select { + case <-rlocked: + _, _ = fmt.Fprintln(os.Stderr, "RLocker() didn't respect the write lock") + os.Exit(1) + default: + } + wl.Unlock() + } +} diff --git a/cannon/testdata/example/mt-value/go.mod b/cannon/testdata/example/mt-value/go.mod new file mode 100644 index 000000000000..602687cbcca2 --- /dev/null +++ b/cannon/testdata/example/mt-value/go.mod @@ -0,0 +1,8 @@ +module mtvalue + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/mt-value/main.go b/cannon/testdata/example/mt-value/main.go new file mode 100644 index 000000000000..51fd1b2b7300 --- /dev/null +++ b/cannon/testdata/example/mt-value/main.go @@ -0,0 +1,20 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestValue, "TestValue") + testutil.RunTest(TestValueLarge, "TestValueLarge") + testutil.RunTest(TestValuePanic, "TestValuePanic") + testutil.RunTest(TestValueConcurrent, "TestValueConcurrent") + testutil.RunTest(TestValue_Swap, "TestValue_Swap") + testutil.RunTest(TestValueSwapConcurrent, "TestValueSwapConcurrent") + testutil.RunTest(TestValue_CompareAndSwap, "TestValue_CompareAndSwap") + testutil.RunTest(TestValueCompareAndSwapConcurrent, "TestValueCompareAndSwapConcurrent") + + fmt.Println("Value tests passed") +} diff --git a/cannon/testdata/example/mt-value/value_test_copy.go b/cannon/testdata/example/mt-value/value_test_copy.go new file mode 100644 index 000000000000..32ccc0067408 --- /dev/null +++ b/cannon/testdata/example/mt-value/value_test_copy.go @@ -0,0 +1,312 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/atomic/value_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "math/rand" + "runtime" + "strconv" + "sync" + "sync/atomic" + . "sync/atomic" + "testing" + + "utils/testutil" +) + +var short bool = true + +func TestValue(t *testutil.TestRunner) { + var v Value + if v.Load() != nil { + t.Fatal("initial Value is not nil") + } + v.Store(42) + x := v.Load() + if xx, ok := x.(int); !ok || xx != 42 { + t.Fatalf("wrong value: got %+v, want 42", x) + } + v.Store(84) + x = v.Load() + if xx, ok := x.(int); !ok || xx != 84 { + t.Fatalf("wrong value: got %+v, want 84", x) + } +} + +func TestValueLarge(t *testutil.TestRunner) { + var v Value + v.Store("foo") + x := v.Load() + if xx, ok := x.(string); !ok || xx != "foo" { + t.Fatalf("wrong value: got %+v, want foo", x) + } + v.Store("barbaz") + x = v.Load() + if xx, ok := x.(string); !ok || xx != "barbaz" { + t.Fatalf("wrong value: got %+v, want barbaz", x) + } +} + +func TestValuePanic(t *testutil.TestRunner) { + const nilErr = "sync/atomic: store of nil value into Value" + const badErr = "sync/atomic: store of inconsistently typed value into Value" + var v Value + func() { + defer func() { + err := recover() + if err != nilErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr) + } + }() + v.Store(nil) + }() + v.Store(42) + func() { + defer func() { + err := recover() + if err != badErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, badErr) + } + }() + v.Store("foo") + }() + func() { + defer func() { + err := recover() + if err != nilErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr) + } + }() + v.Store(nil) + }() +} + +func TestValueConcurrent(t *testutil.TestRunner) { + tests := [][]any{ + {uint16(0), ^uint16(0), uint16(1 + 2<<8), uint16(3 + 4<<8)}, + {uint32(0), ^uint32(0), uint32(1 + 2<<16), uint32(3 + 4<<16)}, + {uint64(0), ^uint64(0), uint64(1 + 2<<32), uint64(3 + 4<<32)}, + {complex(0, 0), complex(1, 2), complex(3, 4), complex(5, 6)}, + } + p := 4 * runtime.GOMAXPROCS(0) + N := int(1e5) + if short { + p /= 2 + //N = 1e3 + 
N = 1e2 + } + for _, test := range tests { + var v Value + done := make(chan bool, p) + for i := 0; i < p; i++ { + go func() { + r := rand.New(rand.NewSource(rand.Int63())) + expected := true + loop: + for j := 0; j < N; j++ { + x := test[r.Intn(len(test))] + v.Store(x) + x = v.Load() + for _, x1 := range test { + if x == x1 { + continue loop + } + } + t.Logf("loaded unexpected value %+v, want %+v", x, test) + expected = false + break + } + done <- expected + }() + } + for i := 0; i < p; i++ { + if !<-done { + t.FailNow() + } + } + } +} + +func BenchmarkValueRead(b *testing.B) { + var v Value + v.Store(new(int)) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + x := v.Load().(*int) + if *x != 0 { + b.Fatalf("wrong value: got %v, want 0", *x) + } + } + }) +} + +var Value_SwapTests = []struct { + init any + new any + want any + err any +}{ + {init: nil, new: nil, err: "sync/atomic: swap of nil value into Value"}, + {init: nil, new: true, want: nil, err: nil}, + {init: true, new: "", err: "sync/atomic: swap of inconsistently typed value into Value"}, + {init: true, new: false, want: true, err: nil}, +} + +func TestValue_Swap(t *testutil.TestRunner) { + for i, tt := range Value_SwapTests { + t.Run(strconv.Itoa(i), func(t testing.TB) { + var v Value + if tt.init != nil { + v.Store(tt.init) + } + defer func() { + err := recover() + switch { + case tt.err == nil && err != nil: + t.Errorf("should not panic, got %v", err) + case tt.err != nil && err == nil: + t.Errorf("should panic %v, got ", tt.err) + } + }() + if got := v.Swap(tt.new); got != tt.want { + t.Errorf("got %v, want %v", got, tt.want) + } + if got := v.Load(); got != tt.new { + t.Errorf("got %v, want %v", got, tt.new) + } + }) + } +} + +func TestValueSwapConcurrent(t *testutil.TestRunner) { + var v Value + var count uint64 + var g sync.WaitGroup + var m, n uint64 = 10000, 10000 + if short { + //m = 1000 + //n = 1000 + m = 10 + n = 10 + } + for i := uint64(0); i < m*n; i += n { + i := i + g.Add(1) + go func() { + var c uint64 + for new := i; new < i+n; new++ { + if old := v.Swap(new); old != nil { + c += old.(uint64) + } + } + atomic.AddUint64(&count, c) + g.Done() + }() + } + g.Wait() + if want, got := (m*n-1)*(m*n)/2, count+v.Load().(uint64); got != want { + t.Errorf("sum from 0 to %d was %d, want %v", m*n-1, got, want) + } +} + +var heapA, heapB = struct{ uint }{0}, struct{ uint }{0} + +var Value_CompareAndSwapTests = []struct { + init any + new any + old any + want bool + err any +}{ + {init: nil, new: nil, old: nil, err: "sync/atomic: compare and swap of nil value into Value"}, + {init: nil, new: true, old: "", err: "sync/atomic: compare and swap of inconsistently typed values into Value"}, + {init: nil, new: true, old: true, want: false, err: nil}, + {init: nil, new: true, old: nil, want: true, err: nil}, + {init: true, new: "", err: "sync/atomic: compare and swap of inconsistently typed value into Value"}, + {init: true, new: true, old: false, want: false, err: nil}, + {init: true, new: true, old: true, want: true, err: nil}, + {init: heapA, new: struct{ uint }{1}, old: heapB, want: true, err: nil}, +} + +func TestValue_CompareAndSwap(t *testutil.TestRunner) { + for i, tt := range Value_CompareAndSwapTests { + t.Run(strconv.Itoa(i), func(t testing.TB) { + var v Value + if tt.init != nil { + v.Store(tt.init) + } + defer func() { + err := recover() + switch { + case tt.err == nil && err != nil: + t.Errorf("got %v, wanted no panic", err) + case tt.err != nil && err == nil: + t.Errorf("did not panic, want %v", tt.err) + } + 
}() + if got := v.CompareAndSwap(tt.old, tt.new); got != tt.want { + t.Errorf("got %v, want %v", got, tt.want) + } + }) + } +} + +func TestValueCompareAndSwapConcurrent(t *testutil.TestRunner) { + var v Value + var w sync.WaitGroup + v.Store(0) + m, n := 1000, 100 + if short { + //m = 100 + //n = 100 + m = 10 + n = 10 + } + for i := 0; i < m; i++ { + i := i + w.Add(1) + go func() { + for j := i; j < m*n; runtime.Gosched() { + if v.CompareAndSwap(j, j+1) { + j += m + } + } + w.Done() + }() + } + w.Wait() + if stop := v.Load().(int); stop != m*n { + t.Errorf("did not get to %v, stopped at %v", m*n, stop) + } +} diff --git a/cannon/testdata/example/mt-wg/go.mod b/cannon/testdata/example/mt-wg/go.mod new file mode 100644 index 000000000000..0c10638b3d1d --- /dev/null +++ b/cannon/testdata/example/mt-wg/go.mod @@ -0,0 +1,8 @@ +module wg + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/mt-wg/main.go b/cannon/testdata/example/mt-wg/main.go new file mode 100644 index 000000000000..cfdb3e56b8ed --- /dev/null +++ b/cannon/testdata/example/mt-wg/main.go @@ -0,0 +1,16 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestWaitGroup, "TestWaitGroup") + testutil.RunTest(TestWaitGroupMisuse, "TestWaitGroupMisuse") + testutil.RunTest(TestWaitGroupRace, "TestWaitGroupRace") + testutil.RunTest(TestWaitGroupAlign, "TestWaitGroupAlign") + + fmt.Println("WaitGroup tests passed") +} diff --git a/cannon/testdata/example/mt-wg/waitgroup_test_copy.go b/cannon/testdata/example/mt-wg/waitgroup_test_copy.go new file mode 100644 index 000000000000..81f0cd0ce4cd --- /dev/null +++ b/cannon/testdata/example/mt-wg/waitgroup_test_copy.go @@ -0,0 +1,130 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/waitgroup_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package main + +import ( + . "sync" + "sync/atomic" + "testing" + + "utils/testutil" +) + +func testWaitGroup(t testing.TB, wg1 *WaitGroup, wg2 *WaitGroup) { + n := 16 + wg1.Add(n) + wg2.Add(n) + exited := make(chan bool, n) + for i := 0; i != n; i++ { + go func() { + wg1.Done() + wg2.Wait() + exited <- true + }() + } + wg1.Wait() + for i := 0; i != n; i++ { + select { + case <-exited: + t.Fatal("WaitGroup released group too soon") + default: + } + wg2.Done() + } + for i := 0; i != n; i++ { + <-exited // Will block if barrier fails to unlock someone. + } +} + +func TestWaitGroup(t *testutil.TestRunner) { + wg1 := &WaitGroup{} + wg2 := &WaitGroup{} + + // Run the same test a few times to ensure barrier is in a proper state. + for i := 0; i != 8; i++ { + testWaitGroup(t, wg1, wg2) + } +} + +func TestWaitGroupMisuse(t *testutil.TestRunner) { + defer func() { + err := recover() + if err != "sync: negative WaitGroup counter" { + t.Fatalf("Unexpected panic: %#v", err) + } + }() + wg := &WaitGroup{} + wg.Add(1) + wg.Done() + wg.Done() + t.Fatal("Should panic") +} + +func TestWaitGroupRace(t *testutil.TestRunner) { + // Run this test for about 1ms. + for i := 0; i < 1000; i++ { + wg := &WaitGroup{} + n := new(int32) + // spawn goroutine 1 + wg.Add(1) + go func() { + atomic.AddInt32(n, 1) + wg.Done() + }() + // spawn goroutine 2 + wg.Add(1) + go func() { + atomic.AddInt32(n, 1) + wg.Done() + }() + // Wait for goroutine 1 and 2 + wg.Wait() + if atomic.LoadInt32(n) != 2 { + t.Fatal("Spurious wakeup from Wait") + } + } +} + +func TestWaitGroupAlign(t *testutil.TestRunner) { + type X struct { + x byte + wg WaitGroup + } + var x X + x.wg.Add(1) + go func(x *X) { + x.wg.Done() + }(&x) + x.wg.Wait() +} diff --git a/cannon/testdata/example/utilscheck/go.mod b/cannon/testdata/example/utilscheck/go.mod new file mode 100644 index 000000000000..5a109a7ca8cd --- /dev/null +++ b/cannon/testdata/example/utilscheck/go.mod @@ -0,0 +1,8 @@ +module utilscheck + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/utilscheck/main.go b/cannon/testdata/example/utilscheck/main.go new file mode 100644 index 000000000000..ad66a3211ad8 --- /dev/null +++ b/cannon/testdata/example/utilscheck/main.go @@ -0,0 +1,17 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(ShouldFail, "ShouldFail") + + fmt.Println("Passed test that should have failed") +} + +func ShouldFail(t *testutil.TestRunner) { + t.Fail() +} diff --git a/cannon/testdata/example/utilscheck2/go.mod b/cannon/testdata/example/utilscheck2/go.mod new file mode 100644 index 000000000000..ee0430168ad1 --- /dev/null +++ b/cannon/testdata/example/utilscheck2/go.mod @@ -0,0 +1,8 @@ +module utilscheck2 + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/utilscheck2/main.go b/cannon/testdata/example/utilscheck2/main.go new file mode 100644 index 000000000000..6fc619f18fe2 --- /dev/null +++ b/cannon/testdata/example/utilscheck2/main.go @@ -0,0 +1,24 @@ +package main + +import ( + "fmt" + "testing" + + "utils/testutil" +) + +func main() { + testutil.RunTest(ShouldFail, "ShouldFail") + + fmt.Println("Passed test that should have failed") +} + +func ShouldFail(t *testutil.TestRunner) { + t.Run("subtest 1", func(t testing.TB) { + // Do something + }) + + t.Run("subtest 2", func(t testing.TB) { + t.Fail() + }) +} diff --git a/cannon/testdata/example/utilscheck3/go.mod 
b/cannon/testdata/example/utilscheck3/go.mod new file mode 100644 index 000000000000..3bc116499beb --- /dev/null +++ b/cannon/testdata/example/utilscheck3/go.mod @@ -0,0 +1,8 @@ +module utilscheck3 + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/utilscheck3/main.go b/cannon/testdata/example/utilscheck3/main.go new file mode 100644 index 000000000000..248c891808b0 --- /dev/null +++ b/cannon/testdata/example/utilscheck3/main.go @@ -0,0 +1,20 @@ +package main + +import ( + "fmt" + "testing" + + "utils/testutil" +) + +func main() { + testutil.RunTest(ShouldFail, "ShouldFail") + + fmt.Println("Passed test that should have failed") +} + +func ShouldFail(t *testutil.TestRunner) { + t.Run("panic test", func(t testing.TB) { + panic("oops") + }) +} diff --git a/cannon/testdata/example/utilscheck4/go.mod b/cannon/testdata/example/utilscheck4/go.mod new file mode 100644 index 000000000000..7f80460beb9e --- /dev/null +++ b/cannon/testdata/example/utilscheck4/go.mod @@ -0,0 +1,8 @@ +module utilscheck4 + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/utilscheck4/main.go b/cannon/testdata/example/utilscheck4/main.go new file mode 100644 index 000000000000..deb78e2cb4d3 --- /dev/null +++ b/cannon/testdata/example/utilscheck4/main.go @@ -0,0 +1,17 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(ShouldFail, "ShouldFail") + + fmt.Println("Passed test that should have failed") +} + +func ShouldFail(t *testutil.TestRunner) { + panic("oops") +} diff --git a/cannon/testdata/utils/go.mod b/cannon/testdata/utils/go.mod new file mode 100644 index 000000000000..45f262e0b16d --- /dev/null +++ b/cannon/testdata/utils/go.mod @@ -0,0 +1,5 @@ +module utils + +go 1.22 + +toolchain go1.22.0 diff --git a/cannon/testdata/utils/testutil/testing.go b/cannon/testdata/utils/testutil/testing.go new file mode 100644 index 000000000000..f6c79c63655e --- /dev/null +++ b/cannon/testdata/utils/testutil/testing.go @@ -0,0 +1,170 @@ +package testutil + +import ( + "fmt" + "os" + "runtime" + "sync" + "testing" +) + +func RunTest(testFunc func(*TestRunner), name string) { + goRunTest(name, testFunc, newTestRunner(name)) +} + +type TestRunner struct { + *mockT + baseName string +} + +func newTestRunner(baseName string) *TestRunner { + return &TestRunner{mockT: newMockT(), baseName: baseName} +} + +func (r *TestRunner) Run(name string, testFunc func(t testing.TB)) bool { + testName := r.baseName + if name != "" { + testName = fmt.Sprintf("%v (%v)", r.baseName, name) + } + + var tester testing.TB = r + goRunTest(testName, testFunc, tester) + return !r.Failed() +} + +func goRunTest[T testing.TB](testName string, testFunc func(t T), t T) { + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer func() { + if err := recover(); err != nil { + fmt.Printf("Test panicked: %v\n\t%v", testName, err) + os.Exit(1) + } + + if t.Failed() { + fmt.Printf("Test failed: %v\n", testName) + os.Exit(1) + } else if t.Skipped() { + fmt.Printf("Test skipped: %v\n", testName) + } else { + fmt.Printf("Test passed: %v\n", testName) + } + + wg.Done() + }() + + testFunc(t) + }() + + wg.Wait() +} + +type mockT struct { + *testing.T + mu sync.Mutex + failed bool + skipped bool +} + +var _ testing.TB = (*mockT)(nil) + +func newMockT() *mockT { + return &mockT{} +} + +func (t *mockT) Cleanup(func()) { + t.Fatalf("Cleanup not supported") +} + +func (t *mockT) Error(args 
...any) { + fmt.Print(args...) + t.fail() +} + +func (t *mockT) Errorf(format string, args ...any) { + fmt.Printf(format, args...) + t.fail() +} + +func (t *mockT) Fail() { + t.fail() +} + +func (t *mockT) FailNow() { + fmt.Println("Fatal") + t.fail() +} + +func (t *mockT) Failed() bool { + t.mu.Lock() + defer t.mu.Unlock() + return t.failed +} + +func (t *mockT) Fatal(args ...any) { + fmt.Print(args...) + t.fail() +} + +func (t *mockT) Fatalf(format string, args ...any) { + fmt.Printf(format, args...) + t.fail() +} + +func (t *mockT) Helper() {} + +func (t *mockT) Log(args ...any) { + fmt.Print(args...) +} + +func (t *mockT) Logf(format string, args ...any) { + fmt.Printf(format, args...) +} + +func (t *mockT) Name() string { + return "" +} + +func (t *mockT) Setenv(key, value string) { + t.Fatalf("Setenv not supported") +} + +func (t *mockT) Skip(args ...any) { + fmt.Println(args...) + t.skip() +} + +func (t *mockT) SkipNow() { + t.skip() +} + +func (t *mockT) Skipf(format string, args ...any) { + fmt.Printf(format, args...) + t.skip() +} +func (t *mockT) Skipped() bool { + t.mu.Lock() + defer t.mu.Unlock() + return t.skipped +} + +func (t *mockT) skip() { + t.mu.Lock() + defer t.mu.Unlock() + t.skipped = true + runtime.Goexit() +} + +func (t *mockT) fail() { + t.mu.Lock() + defer t.mu.Unlock() + t.failed = true + runtime.Goexit() +} + +func (t *mockT) TempDir() string { + t.Fatalf("TempDir not supported") + return "" +} diff --git a/docker-bake.hcl b/docker-bake.hcl index 5f7f311cefe4..64e22327629c 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -6,6 +6,10 @@ variable "REPOSITORY" { default = "oplabs-tools-artifacts/images" } +variable "KONA_VERSION" { + default = "kona-client-v0.1.0-beta.5" +} + variable "GIT_COMMIT" { default = "dev" } @@ -119,6 +123,7 @@ target "op-challenger" { GIT_COMMIT = "${GIT_COMMIT}" GIT_DATE = "${GIT_DATE}" OP_CHALLENGER_VERSION = "${OP_CHALLENGER_VERSION}" + KONA_VERSION="${KONA_VERSION}" } target = "op-challenger-target" platforms = split(",", PLATFORMS) @@ -207,13 +212,24 @@ target "proofs-tools" { context = "." args = { CHALLENGER_VERSION="b46bffed42db3442d7484f089278d59f51503049" - KONA_VERSION="kona-client-v0.1.0-alpha.7" + KONA_VERSION="${KONA_VERSION}" } target="proofs-tools" platforms = split(",", PLATFORMS) tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/proofs-tools:${tag}"] } +target "holocene-deployer" { + dockerfile = "./packages/contracts-bedrock/scripts/upgrades/holocene/upgrade.dockerfile" + context = "./packages/contracts-bedrock/scripts/upgrades/holocene" + args = { + REV = "op-contracts/v1.8.0-rc.1" + } + target="holocene-deployer" + platforms = split(",", PLATFORMS) + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/holocene-deployer:${tag}"] +} + target "ci-builder" { dockerfile = "./ops/docker/ci-builder/Dockerfile" context = "." @@ -230,15 +246,6 @@ target "ci-builder-rust" { tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ci-builder-rust:${tag}"] } -target "contracts-bedrock" { - dockerfile = "./ops/docker/Dockerfile.packages" - context = "." - target = "contracts-bedrock" - # See comment in Dockerfile.packages for why we only build for linux/amd64. - platforms = ["linux/amd64"] - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/contracts-bedrock:${tag}"] -} - target "op-deployer" { dockerfile = "ops/docker/op-stack-go/Dockerfile" context = "." 
diff --git a/docs/handbook/pr-guidelines.md b/docs/handbook/pr-guidelines.md index cf133ada6c57..57913ebeaa51 100644 --- a/docs/handbook/pr-guidelines.md +++ b/docs/handbook/pr-guidelines.md @@ -46,4 +46,4 @@ This is organized by current state of PR, so it can be easily referenced frequen ### Merging PRs - **Resolve all Comments**: Comments can be resolved by (1) the PR author for nits/optionals, (2) the author or reviewer after discussions, or (3) extracting the comment into an issue to address in a future PR. For (3), ensure the new issue links to the specific comment thread. This is currently enforced by GitHub's merge requirements. -- **Other Standard Merge Requirements**: The PR must be approved by the appropriate reviewers, CI must passing, and other standard merge requirements apply. +- **Other Standard Merge Requirements**: The PR must be approved by the appropriate reviewers, CI must pass, and other standard merge requirements apply. diff --git a/docs/postmortems/2022-02-02-inflation-vuln.md b/docs/postmortems/2022-02-02-inflation-vuln.md index a755b0fdfe38..a2a23e382303 100644 --- a/docs/postmortems/2022-02-02-inflation-vuln.md +++ b/docs/postmortems/2022-02-02-inflation-vuln.md @@ -58,7 +58,7 @@ timeline and activities were as follows: (Using github handles as identifiers) - 2022-02-02 1625: smartcontracts receives an e-mail from saurik claiming to have found a critical - issue in L2Geth. E-mail was sent to securityoptimism.io. + issue in L2Geth. E-mail was sent to security@optimism.io. - 2022-02-02 X: saurik messaged smartcontracts on Discord to make sure we checked the e-mail since he knew we had a prior problem where security advisories went to spam. - 2022-02-02 1650: Huddle begins in #security on Slack. diff --git a/docs/security-reviews/2024_10-Cannon-FGETFD-3DocSecurity.md b/docs/security-reviews/2024_10-Cannon-FGETFD-3DocSecurity.md new file mode 100644 index 000000000000..f157356e4905 --- /dev/null +++ b/docs/security-reviews/2024_10-Cannon-FGETFD-3DocSecurity.md @@ -0,0 +1,114 @@ +# Audit Report - OP Cannon + +| | | +| -------------- | ------------------------------------------------------------------------- | +| **Audit Date** | Oct 2nd 2024 - Oct 3rd 2024 | +| **Auditor** | 3DOC Security ([@3docSec](https://x.com/3docSec)) | +| **Version 1** | Oct 3rd 2024. | + +
+ +# Contents +- [Audit Report - OP cannon](#audit-report---op-cannon) +- [Contents](#contents) +- [Disclaimer](#disclaimer) +- [About 3DOC](#about-3doc) +- [Scope](#scope) +- [Severity Classification](#severity-classification) +- [Summary](#summary) +- [Findings](#findings) + - [Low Risk Findings (1)](#low-risk-findings-1) + - [1. Op-challenger Docker image does not include Cannon embeds](#-op-challenger-docker-image-does-not-include-cannon-embeds) + +# Disclaimer +_The following audit report is based on the information and code provided by the client, and any findings or recommendations are made solely on the basis of this information. While the Auditor has exercised due care and skill in conducting the audit, it cannot be guaranteed that all issues have been identified and that there are no undiscovered errors or vulnerabilities in the code._ + +_Furthermore, this report is not an endorsement or certification of the protocol, and the Auditor does not assume any responsibility for any losses or damages that may result from the use of the smart contracts, either in their current form or in any modified version thereof._ + +# About 3DOC +3DOC is a top ranked Smart Contract Auditor doing audits on Code4rena (www.code4rena.com), having ranked 1st in multiple contests in [solo](https://code4rena.com/@3docSec) and [team](https://code4rena.com/@RadiantLabs) audits, including the [Optimism superchain contest](https://code4rena.com/audits/2024-07-optimism-superchain) in July 2024.
+He can also be booked for conducting Private Audits. + +Contact:
+
+X: [@3DocSec](https://x.com/3DocSec)
+
+e-mail: [hello@3doc.fr](mailto:hello@3doc.fr)
+
+# Scope
+The scope of the audit is the following Pull Request in the client's GitHub repository:
+
+https://github.com/ethereum-optimism/optimism/pull/12050
+
+The change consists of a core update for supporting the `F_GETFD` syscall in the MIPS VM, [provided with this commit](https://github.com/ethereum-optimism/optimism/pull/12050/commits/7c8257d3574a2a76ab90f8129c7b532d68049944), and several additional updates accommodating the VM version bump that came with the core change.
+
+# Severity Classification
+| Severity | Impact: High | Impact: Medium | Impact: Low |
+| ---------------------- | ------------ | -------------- | ----------- |
+| **Likelihood: High** | ![high] | ![high] | ![medium] |
+| **Likelihood: Medium** | ![high] | ![medium] | ![low] |
+| **Likelihood: Low** | ![medium] | ![low] | ![low] |
+
+**Impact** - the technical, economic and reputation damage of a successful attack
+
+**Likelihood** - the chance that a particular vulnerability is discovered and exploited
+
+# Summary
+
+| Severity | Total |
+| -------------- | ----- |
+| ![high] | 0 |
+| ![medium] | 0 |
+| ![low] | 0 |
+| ![information] | 0 |
+
+
+# Findings
+## Low Risk findings (0)
+
+### [False positive] Op-challenger Docker image does not include Cannon embeds
+#### Description
+The change in scope added a new implementation of the Cannon VM, called `VersionSingleThreaded2`. Cannon now has three versions (`VersionSingleThreaded`, `VersionSingleThreaded2`, and `VersionMultiThreaded`).
+
+The op-challenger program makes use of the Cannon VM in several places via the configured `VmBin` path, which points to the `multicannon` command line. It reads the State version from the input state and selects the right Cannon VM accordingly (`cannon/multicannon/exec.go:L81`).
+
+If we look at the Docker challenger image generated by the `make golang-docker` command, however, we can see it doesn't contain an `embeds` folder:
+
+```
+docker run -t us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger find / -name embeds
+```
+
+It does, however, have the `cannon` command pointing to the `multicannon` multiplexer:
+
+```
+➜ optimism git:(52d0e60c1) ✗ docker run -t us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger cannon | head -2
+NAME:
+   multicannon - MIPS Fault Proof tool
+➜ optimism git:(52d0e60c1) ✗
+```
+
+This issue appears to pre-date the changes in scope; using Docker images to run the challenger is [mentioned as an option](https://docs.optimism.io/builders/chain-operators/tools/op-challenger), but only as an alternative, hence the Low risk assessed for this finding.
+
+#### Impact
+Because of this issue, challenger instances operated in a Docker container won't be able to function properly.
+
+#### Recommendation
+Consider modifying the Docker build chain to include the `embeds` folder.
+Consider extending the current e2e test suite to cover execution from Docker images.
+
+#### Discussion
+
+> @inphi The cannon-2 implementation that supports go1.22 is now embedded into the cannon cli binary. Note that these embeds are not actual files that you can find in the docker container filesystem. But rather an embedded filesystem inside the Go binary - https://pkg.go.dev/embed.
+
+> @3DOC Oh yes I see that.
So those are included in an embedded filesystem, I missed that + + +[high]: https://img.shields.io/badge/-HIGH-b02319 "HIGH" +[medium]: https://img.shields.io/badge/-MEDIUM-orange "MEDIUM" +[low]: https://img.shields.io/badge/-LOW-FFD700 "LOW" +[information]: https://img.shields.io/badge/-INFORMATION-darkgreen "INFORMATION" +[fixed]: https://img.shields.io/badge/-FIXED-brightgreen "FIXED" +[acknowledged]: https://img.shields.io/badge/-ACKNOWLEDGED-blue "ACKNOWLEDGED" +[disputed]: https://img.shields.io/badge/-DISPUTED-lightgrey "DISPUTED" +[reported]: https://img.shields.io/badge/-REPORTED-lightblue "REPORTED" +[partiallyfixed]: https://img.shields.io/badge/-PARTIALLY_FIXED-lightgreen "PARTIALLTY FIXED" diff --git a/docs/security-reviews/README.md b/docs/security-reviews/README.md index 483dc1541858..265d0a65f903 100644 --- a/docs/security-reviews/README.md +++ b/docs/security-reviews/README.md @@ -6,7 +6,7 @@ Each review is focused on a different part of the codebase, and at a different p Please see the report for the specific details. | Date | Reviewer | Focus and Scope | Report Link | Commit | Subsequent Release | -| ------- | -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------- | ------------------- | +|---------|----------------------| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |------------------------------------------------------------------------------------------------------------------------------------------------------------| -------------------------------------------- |---------------------| | 2020-10 | Trail of Bits | Rollup | [2020_10-TrailOfBits.pdf](./2020_10-Rollup-TrailOfBits.pdf) | | | | 2020-11 | Dapphub | ECDSA Wallet | [2020_11-Dapphub-ECDSA_Wallet.pdf](./2020_11-Dapphub-ECDSA_Wallet.pdf) | | | | 2021-03 | OpenZeppelin | OVM and Rollup | [2021_03-OVM_and_Rollup-OpenZeppelin.pdf](./2021_03-OVM_and_Rollup-OpenZeppelin.pdf) | | | @@ -25,7 +25,8 @@ Please see the report for the specific details. 
| 2024-02 | Runtime Verification | Pausability | [Kontrol Verification][kontrol] | | | | 2024-02 | Cantina | MCP L1: `OptimismPortal.sol`, `L1CrossDomainMessenger.sol`, `L1StandardBridge.sol`, `L1ERC721Bridge.sol`, `OptimismMintableERC20Factory.sol`, `L2OutputOracle.sol`, `SystemConfig.sol` | [2024_02-MCP_L1-Cantina.pdf](./2024_02-MCP_L1-Cantina.pdf) | e6ef3a900c42c8722e72c2e2314027f85d12ced5 | op-contracts/v1.3.0 | | 2024-03 | Sherlock | Fault Proofs | Sherlock Optimism Fault Proofs Contest ([site](https://audits.sherlock.xyz/contests/205), [repo](https://github.com/sherlock-audit/2024-02-optimism-2024)) | | | -| 2024-08 | Cantina | Fault proof no-MIPS: All contracts in the `packages/contracts-bedrock/src/dispute` directory | [./2024_08_Fault-Proofs-MIPS_Cantina.pdf](./2024_08_Fault-Proofs-MIPS_Cantina.pdf) | 1f7081798ce2d49b8643514663d10681cb853a3d | op-contracts/v1.4.0 | -| 2024-08 | Spearbit | Fault proof MIPS: `MIPS.sol` | [./2024_08_Fault-Proofs-No-MIPS_Spearbit.pdf](./2024_08_Fault-Proofs-No-MIPS_Spearbit.pdf) | 71b93116738ee98c9f8713b1a5dfe626ce06c1b2 | op-contracts/v1.6.0 | +| 2024-08 | Cantina | Fault proof MIPS: `MIPS.sol` | [./2024_08_Fault-Proofs-MIPS_Cantina.pdf](./2024_08_Fault-Proofs-MIPS_Cantina.pdf) | 71b93116738ee98c9f8713b1a5dfe626ce06c1b2 | op-contracts/v1.4.0 | +| 2024-08 | Spearbit | Fault proof no-MIPS: All contracts in the `packages/contracts-bedrock/src/dispute` directory | [./2024_08_Fault-Proofs-No-MIPS_Spearbit.pdf](./2024_08_Fault-Proofs-No-MIPS_Spearbit.pdf) | 1f7081798ce2d49b8643514663d10681cb853a3d | op-contracts/v1.6.0 | +| 2024-10 | 3Doc Security | Fault proof MIPS: `MIPS.sol` | [./2024_10-Cannon-FGETFD-3DocSecurity.md](./2024_10-Cannon-FGETFD-3DocSecurity.md) | 52d0e60c16498ad4efec8798e3fc1b36b13f46a2 | op-contracts/v1.8.0 | [kontrol]: https://github.com/ethereum-optimism/optimism/blob/876e16ad04968f0bb641eb76f98eb77e7e1a3e16/packages/contracts-bedrock/test/kontrol/README.md diff --git a/go.mod b/go.mod index d1be77ddc0e1..e3b57ace633c 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ toolchain go1.22.7 require ( github.com/BurntSushi/toml v1.4.0 github.com/andybalholm/brotli v1.1.0 + github.com/bmatcuk/doublestar/v4 v4.7.1 github.com/btcsuite/btcd v0.24.2 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 github.com/cockroachdb/pebble v1.1.2 @@ -14,7 +15,7 @@ require ( github.com/crate-crypto/go-kzg-4844 v1.0.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 - github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20241113154227-e72c6311f6e7 + github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20241126105717-d31591e83048 github.com/ethereum/go-ethereum v1.14.11 github.com/fsnotify/fsnotify v1.8.0 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb @@ -28,27 +29,27 @@ require ( github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-ds-leveldb v0.5.0 github.com/klauspost/compress v1.17.11 - github.com/kurtosis-tech/kurtosis/api/golang v1.4.1 + github.com/kurtosis-tech/kurtosis/api/golang v1.4.3 github.com/libp2p/go-libp2p v0.36.2 github.com/libp2p/go-libp2p-mplex v0.9.0 github.com/libp2p/go-libp2p-pubsub v0.12.0 github.com/libp2p/go-libp2p-testing v0.12.0 github.com/mattn/go-isatty v0.0.20 - github.com/minio/minio-go/v7 v7.0.80 + github.com/minio/minio-go/v7 v7.0.81 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.14.0 - github.com/multiformats/go-multiaddr-dns v0.4.0 + github.com/multiformats/go-multiaddr-dns v0.4.1 
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.5 github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.7.0 github.com/prometheus/client_golang v1.20.5 github.com/protolambda/ctxlock v0.1.0 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/urfave/cli/v2 v2.27.5 golang.org/x/crypto v0.28.0 golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c - golang.org/x/sync v0.8.0 + golang.org/x/sync v0.10.0 golang.org/x/term v0.25.0 golang.org/x/time v0.7.0 ) @@ -250,7 +251,7 @@ require ( rsc.io/tmplfunc v0.0.3 // indirect ) -replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101411.2-rc.1 +replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101411.3-rc.1.0.20241126165630-b84907bf4d95 //replace github.com/ethereum/go-ethereum => ../go-ethereum diff --git a/go.sum b/go.sum index 5eabf529b0df..bcae4d056f09 100644 --- a/go.sum +++ b/go.sum @@ -50,6 +50,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q= +github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= @@ -187,10 +189,10 @@ github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/u github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs= -github.com/ethereum-optimism/op-geth v1.101411.2-rc.1 h1:v314tR5EzG+QNE9aLf+goWCDsTT+RT2EsdOOlJT6CwM= -github.com/ethereum-optimism/op-geth v1.101411.2-rc.1/go.mod h1:RrPkuqfeIXkW28lQJwc5AG/BKbhkHRXPD5YezeeK4w8= -github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20241113154227-e72c6311f6e7 h1:Mbgsp5T52F2pEULHccLr4NtnT6cKnJgabpAPlTfPxrk= -github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20241113154227-e72c6311f6e7/go.mod h1:9feO8jcL5OZ1tvRjEfNAHz4Aggvd6373l+ZxmZZAyZs= +github.com/ethereum-optimism/op-geth v1.101411.3-rc.1.0.20241126165630-b84907bf4d95 h1:4bgr/Y/Vl2lntFvFM6l/W6P8EWLMxAdF5CRnJStHGGI= +github.com/ethereum-optimism/op-geth v1.101411.3-rc.1.0.20241126165630-b84907bf4d95/go.mod h1:zBADVb3+aon0Idb3uEg/1TFpep+Jdkz3ge9SLFDBXOo= +github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20241126105717-d31591e83048 h1:kb220NeqVRRt/XP5JHt3i4zpLsYNCdWMM/0tDnOFk3o= +github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20241126105717-d31591e83048/go.mod h1:9feO8jcL5OZ1tvRjEfNAHz4Aggvd6373l+ZxmZZAyZs= github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9 
h1:8NfxH2iXvJ60YRB8ChToFTUzl8awsc3cJ8CbLjGIl/A= @@ -440,8 +442,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kurtosis-tech/kurtosis-portal/api/golang v0.0.0-20230818182330-1a86869414d2 h1:izciXrFyFR+ihJ7nLTOkoIX5GzBPIp8gVKlw94gIc98= github.com/kurtosis-tech/kurtosis-portal/api/golang v0.0.0-20230818182330-1a86869414d2/go.mod h1:bWSMQK3WHVTGHX9CjxPAb/LtzcmfOxID2wdzakSWQxo= -github.com/kurtosis-tech/kurtosis/api/golang v1.4.1 h1:V/T5k7t1iKgFof1cGhyLh396YKdTehUqO97AsTPDy+k= -github.com/kurtosis-tech/kurtosis/api/golang v1.4.1/go.mod h1:9T22P7Vv3j5g6sbm78DxHQ4s9C4Cj3s9JjFQ7DFyYpM= +github.com/kurtosis-tech/kurtosis/api/golang v1.4.3 h1:CkrfwpBAOQ9TOCUrVWSv5C7d3hLBNjU4kAYSbL6EHf0= +github.com/kurtosis-tech/kurtosis/api/golang v1.4.3/go.mod h1:9T22P7Vv3j5g6sbm78DxHQ4s9C4Cj3s9JjFQ7DFyYpM= github.com/kurtosis-tech/kurtosis/contexts-config-store v0.0.0-20230818184218-f4e3e773463b h1:hMoIM99QKcYQqsnK4AF7Lovi9ZD9ac6lZLZ5D/jx2x8= github.com/kurtosis-tech/kurtosis/contexts-config-store v0.0.0-20230818184218-f4e3e773463b/go.mod h1:4pFdrRwDz5R+Fov2ZuTaPhAVgjA2jhGh1Izf832sX7A= github.com/kurtosis-tech/kurtosis/grpc-file-transfer/golang v0.0.0-20230803130419-099ee7a4e3dc h1:7IlEpSehmWcNXOFpNP24Cu5HQI3af7GCBQw//m+LnvQ= @@ -524,8 +526,8 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4S github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.80 h1:2mdUHXEykRdY/BigLt3Iuu1otL0JTogT0Nmltg0wujk= -github.com/minio/minio-go/v7 v7.0.80/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= +github.com/minio/minio-go/v7 v7.0.81 h1:SzhMN0TQ6T/xSBu6Nvw3M5M8voM+Ht8RH3hE8S7zxaA= +github.com/minio/minio-go/v7 v7.0.81/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= @@ -551,8 +553,8 @@ github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= -github.com/multiformats/go-multiaddr-dns v0.4.0 h1:P76EJ3qzBXpUXZ3twdCDx/kvagMsNo0LMFXpyms/zgU= -github.com/multiformats/go-multiaddr-dns v0.4.0/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= +github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= +github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= @@ -778,8 +780,9 @@ github.com/stretchr/testify v1.7.2/go.mod 
h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk= github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= @@ -927,8 +930,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/interop-devnet/create-chains.sh b/interop-devnet/create-chains.sh index 506682099154..05e44e67ff21 100755 --- a/interop-devnet/create-chains.sh +++ b/interop-devnet/create-chains.sh @@ -3,7 +3,7 @@ set -eu # Run this with workdir set as root of the repo -if [ -f "../versions.json" ]; then +if [ -f "../mise.toml" ]; then echo "Running create-chains script." 
else echo "Cannot run create-chains script, must be in interop-devnet dir, but currently in:" diff --git a/interop-devnet/docker-compose.yml b/interop-devnet/docker-compose.yml index de97c9967b64..c4cbab978a12 100644 --- a/interop-devnet/docker-compose.yml +++ b/interop-devnet/docker-compose.yml @@ -319,6 +319,7 @@ services: OP_BATCHER_METRICS_ENABLED: "true" OP_BATCHER_RPC_ENABLE_ADMIN: "true" OP_BATCHER_BATCH_TYPE: + OP_BATCHER_THROTTLE_INTERVAL: 0 # uncomment to use blobs # OP_BATCHER_DATA_AVAILABILITY_TYPE: blobs env_file: @@ -350,6 +351,7 @@ services: OP_BATCHER_METRICS_ENABLED: "true" OP_BATCHER_RPC_ENABLE_ADMIN: "true" OP_BATCHER_BATCH_TYPE: + OP_BATCHER_THROTTLE_INTERVAL: 0 # uncomment to use blobs # OP_BATCHER_DATA_AVAILABILITY_TYPE: blobs env_file: diff --git a/just/default.just b/just/default.just new file mode 100644 index 000000000000..92503b23ba58 --- /dev/null +++ b/just/default.just @@ -0,0 +1,6 @@ +set shell := ["bash", "-c"] + +PARALLEL_JOBS := num_cpus() + +# TODO: this fails in CI for some reason +MAP_JUST := "/usr/bin/env -S parallel --shebang --jobs " + PARALLEL_JOBS + " --colsep ' ' -r " + just_executable() diff --git a/just/deprecated.mk b/just/deprecated.mk new file mode 100644 index 000000000000..655f901348d6 --- /dev/null +++ b/just/deprecated.mk @@ -0,0 +1,27 @@ +ifeq (, $(shell which tput)) + # CI environment typically does not support tput. + banner-style = $1 +else ifeq (, $(TERM)) + # Terminal type not set, so tput would fail. + banner-style = $1 +else + # print in bold red to bring attention. + banner-style = $(shell tput bold)$(shell tput setaf 1)$1$(shell tput sgr0) +endif + +SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST))) +include $(SELF_DIR)/flags.mk + +define make-deprecated-target +$1: + @echo + @printf %s\\n '$(call banner-style,Deprecated make call: make $1 $(JUSTFLAGS))' + @printf %s\\n '$(call banner-style,Consider using just instead: just $(JUSTFLAGS) $1)' + @echo + just $(JUSTFLAGS) $1 +endef + +$(foreach element,$(DEPRECATED_TARGETS),$(eval $(call make-deprecated-target,$(element)))) + +.PHONY: + $(DEPRECATED_TARGETS) diff --git a/just/flags.mk b/just/flags.mk new file mode 100644 index 000000000000..121a3eb70e98 --- /dev/null +++ b/just/flags.mk @@ -0,0 +1,24 @@ +# Variable assignments can affect the semantic of the make targets. +# Typical use-case: setting VERSION in a release build, since CI +# doesn't preserve the git environment. +# +# We need to translate: +# "make target VAR=val" to "just VAR=val target" +# +# MAKEFLAGS is a string of the form: +# "abc --foo --bar=baz -- VAR1=val1 VAR2=val2", namely: +# - abc is the concatnation of all short flags +# - --foo and --bar=baz are long options, +# - -- is the separator between flags and variable assignments, +# - VAR1=val1 and VAR2=val2 are variable assignments +# +# Goal: ignore all CLI flags, keep only variable assignments. +# +# First remove the short flags at the beginning, or the first long-flag, +# or if there is no flag at all, the -- separator (which then makes the +# next step a noop). If there's no flag and no variable assignment, the +# result is empty anyway, so the wordlist call is safe (everything is a noop). +tmp-flags := $(wordlist 2,$(words $(MAKEFLAGS)),$(MAKEFLAGS)) +# Then remove all long options, including the -- separator, if needed. That +# leaves only variable assignments. 
+JUSTFLAGS := $(patsubst --%,,$(tmp-flags)) \ No newline at end of file diff --git a/just/git.just b/just/git.just new file mode 100644 index 000000000000..ac50e4e762d1 --- /dev/null +++ b/just/git.just @@ -0,0 +1,28 @@ +import 'default.just' + +# Set default values for git info +GITCOMMIT := env('GITCOMMIT', `git rev-parse HEAD 2> /dev/null || true`) +GITDATE := env('GITDATE', `git show -s --format='%ct' 2> /dev/null|| true`) + +_PROJECT := shell("basename $1", justfile_directory()) + +_ALL_TAGS := shell("git tag --points-at $1 2> /dev/null || true", GITCOMMIT) + +_PROJECT_TAGS := shell("echo $1 | grep ^$2/ | sed s:$2/:: | sort -V", _ALL_TAGS, _PROJECT) + +_PREFERRED_TAG := shell("echo $1 | grep -v -- '-rc' | tail -n 1", _PROJECT_TAGS) + +_LAST_TAG := shell("echo $1 | tail -n 1", _PROJECT_TAGS) + +# Find version tag, prioritizing non-rc release tags +VERSION := shell('if [ -z "$1" ]; then + if [ -z "$2" ]; then + echo "untagged" + else + echo "$2" + fi +else + echo $1 +fi', _PREFERRED_TAG, _LAST_TAG) + +VERSION_META := "" diff --git a/just/go.just b/just/go.just new file mode 100644 index 000000000000..3a394e3ec94c --- /dev/null +++ b/just/go.just @@ -0,0 +1,31 @@ +import 'git.just' + +_EXTRALDFLAGS := if os() == "macos" { "-ldflags=-extldflags=-Wl,-ld_classic" } else { "" } + +# We use both GOOS/GOARCH and TARGETOS/TARGETARCH to set the build targets. +# From the usage patterns, it looks like TARGETOS/TARGETARCH should take +# precedence if set, and default to GOOS/GOARCH if not set. +# TODO: should we just remove TARGETOS/TARGETARCH altogether eventually? +GOOS := env('GOOS', `go env GOOS`) +GOARCH := env('GOARCH', `go env GOARCH`) +TARGETOS := env('TARGETOS', GOOS) +TARGETARCH := env('TARGETARCH', GOARCH) + +GORACE := "0" + +_GORACE_FLAG := if GORACE == "1" { "-race " } else { "" } + +[private] +go_build BIN PKG *FLAGS: + env GO111MODULE=on GOOS={{TARGETOS}} GOARCH={{TARGETARCH}} CGO_ENABLED=0 go build -v {{_GORACE_FLAG}} {{FLAGS}} -o {{BIN}} {{PKG}} + +[private] +go_test SELECTOR *FLAGS: + go test -v {{_GORACE_FLAG}} {{FLAGS}} {{SELECTOR}} + +[private] +go_fuzz FUZZ TIME='10s' PKG='': (go_test PKG _EXTRALDFLAGS "-fuzztime" TIME "-fuzz" FUZZ "-run" "NOTAREALTEST") + +[private] +go_generate SELECTOR *FLAGS: + go generate -v {{FLAGS}} {{SELECTOR}} \ No newline at end of file diff --git a/justfile b/justfile index e40186f75dff..35900f2a50bf 100644 --- a/justfile +++ b/justfile @@ -1,4 +1,5 @@ -issues: +# Checks that TODO comments have corresponding issues. +todo-checker: ./ops/scripts/todo-checker.sh # Runs semgrep on the entire monorepo. @@ -9,56 +10,6 @@ semgrep: semgrep-test: semgrep scan --test --config .semgrep/rules/ .semgrep/tests/ -lint-shellcheck: +# Runs shellcheck. +shellcheck: find . 
-type f -name '*.sh' -not -path '*/node_modules/*' -not -path './packages/contracts-bedrock/lib/*' -not -path './packages/contracts-bedrock/kout*/*' -exec sh -c 'echo "Checking $1"; shellcheck "$1"' _ {} \; - -install-foundry: - curl -L https://foundry.paradigm.xyz | bash && just update-foundry - -update-foundry: - bash ./ops/scripts/install-foundry.sh - -check-foundry: - bash ./ops/scripts/check-foundry.sh - -install-kontrol: - curl -L https://kframework.org/install | bash && just update-kontrol - -update-kontrol: - kup install kontrol --version v$(jq -r .kontrol < versions.json) - -install-abigen: - go install github.com/ethereum/go-ethereum/cmd/abigen@$(jq -r .abigen < versions.json) - -print-abigen: - abigen --version | sed -e 's/[^0-9]/ /g' -e 's/^ *//g' -e 's/ *$//g' -e 's/ /./g' -e 's/^/v/' - -check-abigen: - [[ $(just print-abigen) = $(cat versions.json | jq -r '.abigen') ]] && echo '✓ abigen versions match' || (echo '✗ abigen version mismatch. Run `just upgrade:abigen` to upgrade.' && exit 1) - -upgrade-abigen: - jq '.abigen = $v' --arg v $(just print:abigen) <<<$(cat versions.json) > versions.json - -install-slither: - pip3 install slither-analyzer==$(jq -r .slither < versions.json) - -print-slither: - slither --version - -check-slither: - [[ $(just print-slither) = $(jq -r .slither < versions.json) ]] && echo '✓ slither versions match' || (echo '✗ slither version mismatch. Run `just upgrade-slither` to upgrade.' && exit 1) - -upgrade-slither: - jq '.slither = $v' --arg v $(just print-slither) <<<$(cat versions.json) > versions.json - -install-semgrep: - pip3 install semgrep - -print-semgrep: - semgrep --version - -check-semgrep: - [ "$(just print-semgrep)" = "$(jq -r .semgrep < versions.json)" ] && echo '✓ semgrep versions match' || (echo '✗ semgrep version mismatch. Run `just upgrade-semgrep` to upgrade.' && exit 1) - -upgrade-semgrep: - jq '.semgrep = $v' --arg v $(just print-semgrep) <<<$(cat versions.json) > versions.json diff --git a/mise.toml b/mise.toml new file mode 100644 index 000000000000..5fb473de8891 --- /dev/null +++ b/mise.toml @@ -0,0 +1,54 @@ +[tools] + +# Core dependencies +go = "1.22.7" +rust = "1.83.0" +python = "3.12.0" +uv = "0.5.5" +jq = "1.7.1" +yq = "4.44.5" +shellcheck = "0.10.0" +direnv = "2.35.0" +just = "1.37.0" + +# Cargo dependencies +"cargo:just" = "1.37.0" +"cargo:svm-rs" = "0.5.8" + +# Go dependencies +"go:github.com/ethereum/go-ethereum/cmd/abigen" = "1.10.25" +"go:github.com/ethereum/go-ethereum/cmd/geth" = "1.14.7" +"go:github.com/protolambda/eth2-testnet-genesis" = "0.10.0" +"go:gotest.tools/gotestsum" = "1.12.0" +"go:github.com/vektra/mockery/v2" = "2.46.0" +"go:github.com/golangci/golangci-lint/cmd/golangci-lint" = "1.61.0" +"go:github.com/mikefarah/yq/v4" = "4.44.3" + +# Python dependencies +"pipx:slither-analyzer" = "0.10.2" +"pipx:semgrep" = "1.90.0" + +# Foundry dependencies +# Foundry is a special case because it supplies multiple binaries at the same +# GitHub release, so we need to use the aliasing trick to get mise to not error +forge = "nightly-e5dbb7a320c2b871c4a4a1006ad3c15a08fcf17b" +cast = "nightly-e5dbb7a320c2b871c4a4a1006ad3c15a08fcf17b" +anvil = "nightly-e5dbb7a320c2b871c4a4a1006ad3c15a08fcf17b" + +# Fake dependencies +# Put things here if you need to track versions of tools or projects that can't +# actually be managed by mise (yet). Make sure that anything you put in here is +# also found inside of disabled_tools or mise will try to install it. 
+asterisc = "1.2.0" +kontrol = "1.0.53" +binary_signer = "1.0.4" + +[alias] +forge = "ubi:foundry-rs/foundry[exe=forge]" +cast = "ubi:foundry-rs/foundry[exe=cast]" +anvil = "ubi:foundry-rs/foundry[exe=anvil]" + +[settings] +experimental = true +pipx.uvx = true +disable_tools = ["asterisc", "kontrol", "binary_signer"] diff --git a/op-alt-da/Makefile b/op-alt-da/Makefile index c98ea24c2095..eb6fb9b78043 100644 --- a/op-alt-da/Makefile +++ b/op-alt-da/Makefile @@ -1,22 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -VERSION ?= v0.0.0 +DEPRECATED_TARGETS := da-server clean test -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X main.Version=$(VERSION) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -da-server: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/da-server ./cmd/daserver - -clean: - rm bin/da-server - -test: - go test -v ./... - -.PHONY: \ - op-batcher \ - clean \ - test +include ../just/deprecated.mk diff --git a/op-alt-da/cli.go b/op-alt-da/cli.go index 30ce2168f570..84364e47952a 100644 --- a/op-alt-da/cli.go +++ b/op-alt-da/cli.go @@ -57,22 +57,25 @@ func CLIFlags(envPrefix string, category string) []cli.Flag { Category: category, }, &cli.DurationFlag{ - Name: PutTimeoutFlagName, - Usage: "Timeout for put requests. 0 means no timeout.", - Value: time.Duration(0), - EnvVars: altDAEnvs(envPrefix, "PUT_TIMEOUT"), + Name: PutTimeoutFlagName, + Usage: "Timeout for put requests. 0 means no timeout.", + Value: time.Duration(0), + EnvVars: altDAEnvs(envPrefix, "PUT_TIMEOUT"), + Category: category, }, &cli.DurationFlag{ - Name: GetTimeoutFlagName, - Usage: "Timeout for get requests. 0 means no timeout.", - Value: time.Duration(0), - EnvVars: altDAEnvs(envPrefix, "GET_TIMEOUT"), + Name: GetTimeoutFlagName, + Usage: "Timeout for get requests. 0 means no timeout.", + Value: time.Duration(0), + EnvVars: altDAEnvs(envPrefix, "GET_TIMEOUT"), + Category: category, }, &cli.Uint64Flag{ - Name: MaxConcurrentRequestsFlagName, - Usage: "Maximum number of concurrent requests to the DA server", - Value: 1, - EnvVars: altDAEnvs(envPrefix, "MAX_CONCURRENT_DA_REQUESTS"), + Name: MaxConcurrentRequestsFlagName, + Usage: "Maximum number of concurrent requests to the DA server", + Value: 1, + EnvVars: altDAEnvs(envPrefix, "MAX_CONCURRENT_DA_REQUESTS"), + Category: category, }, } } diff --git a/op-alt-da/cmd/daserver/main.go b/op-alt-da/cmd/daserver/main.go index 3ed37bd05321..bcaa9c47280e 100644 --- a/op-alt-da/cmd/daserver/main.go +++ b/op-alt-da/cmd/daserver/main.go @@ -13,7 +13,7 @@ import ( oplog "github.com/ethereum-optimism/optimism/op-service/log" ) -var Version = "v0.0.1" +var Version = "v0.0.0" func main() { oplog.SetupDefaults() diff --git a/op-alt-da/commitment.go b/op-alt-da/commitment.go index cc5829ad4dc6..a6fa5424665b 100644 --- a/op-alt-da/commitment.go +++ b/op-alt-da/commitment.go @@ -108,7 +108,7 @@ func (c Keccak256Commitment) CommitmentType() CommitmentType { return Keccak256CommitmentType } -// Encode adds a commitment type prefix self describing the commitment. +// Encode adds a commitment type prefix that describes the commitment. func (c Keccak256Commitment) Encode() []byte { return append([]byte{byte(Keccak256CommitmentType)}, c...) 
} diff --git a/op-alt-da/justfile b/op-alt-da/justfile new file mode 100644 index 000000000000..f1dbced35b69 --- /dev/null +++ b/op-alt-da/justfile @@ -0,0 +1,20 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X main.Version=" + VERSION + " " + \ + "") + "'" + +BINARY := "./bin/da-server" + +# Build the da-server binary +da-server: (go_build BINARY "./cmd/daserver" "-ldflags" _LDFLAGSSTRING) + +# Remove build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") diff --git a/op-batcher/Makefile b/op-batcher/Makefile index 22bb7a613861..1501e0242d9b 100644 --- a/op-batcher/Makefile +++ b/op-batcher/Makefile @@ -1,52 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -# Find the github tag that points to this commit. If none are found, set the version string to "untagged" -# Prioritizes release tag, if one exists, over tags suffixed with "-rc" -VERSION ?= $(shell tags=$$(git tag --points-at $(GITCOMMIT) | grep '^op-batcher/' | sed 's/op-batcher\///' | sort -V); \ - preferred_tag=$$(echo "$$tags" | grep -v -- '-rc' | tail -n 1); \ - if [ -z "$$preferred_tag" ]; then \ - if [ -z "$$tags" ]; then \ - echo "untagged"; \ - else \ - echo "$$tags" | tail -n 1; \ - fi \ - else \ - echo $$preferred_tag; \ - fi) +DEPRECATED_TARGETS := op-batcher clean test fuzz -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X main.Version=$(VERSION) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169 -ifeq ($(shell uname),Darwin) - FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic -endif - -op-batcher: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-batcher ./cmd - -clean: - rm bin/op-batcher - -test: - go test -v ./... 
- -fuzz: - printf "%s\n" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelConfig_CheckTimeout ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationZero ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutMaxChannelDuration ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutZeroMaxChannelDuration ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelCloseTimeout ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelZeroCloseTimeout ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzSeqWindowClose ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzSeqWindowZeroTimeoutClose ./batcher" \ - | parallel -j 8 {} - -.PHONY: \ - op-batcher \ - clean \ - test \ - fuzz +include ../just/deprecated.mk diff --git a/op-batcher/architecture.png b/op-batcher/architecture.png new file mode 100644 index 000000000000..0eab940fbb52 Binary files /dev/null and b/op-batcher/architecture.png differ diff --git a/op-batcher/batcher/channel.go b/op-batcher/batcher/channel.go index dd0827d4686c..95abcb46a7fa 100644 --- a/op-batcher/batcher/channel.go +++ b/op-batcher/batcher/channel.go @@ -49,10 +49,10 @@ func newChannel(log log.Logger, metr metrics.Metricer, cfg ChannelConfig, rollup func (c *channel) TxFailed(id string) { if data, ok := c.pendingTransactions[id]; ok { c.log.Trace("marked transaction as failed", "id", id) - // Note: when the batcher is changed to send multiple frames per tx, - // this needs to be changed to iterate over all frames of the tx data - // and re-queue them. - c.channelBuilder.PushFrames(data.Frames()...) + // Rewind to the first frame of the failed tx + // -- the frames are ordered, and we want to send them + // all again. + c.channelBuilder.RewindFrameCursor(data.Frames()[0]) delete(c.pendingTransactions, id) } else { c.log.Warn("unknown transaction marked as failed", "id", id) @@ -61,18 +61,16 @@ func (c *channel) TxFailed(id string) { c.metr.RecordBatchTxFailed() } -// TxConfirmed marks a transaction as confirmed on L1. Unfortunately even if all frames in -// a channel have been marked as confirmed on L1 the channel may be invalid & need to be -// resubmitted. -// This function may reset the pending channel if the pending channel has timed out. -func (c *channel) TxConfirmed(id string, inclusionBlock eth.BlockID) (bool, []*types.Block) { - c.metr.RecordBatchTxSubmitted() +// TxConfirmed marks a transaction as confirmed on L1. Returns a bool indicating +// whether the channel timed out on chain. 
+func (c *channel) TxConfirmed(id string, inclusionBlock eth.BlockID) bool { + c.metr.RecordBatchTxSuccess() c.log.Debug("marked transaction as confirmed", "id", id, "block", inclusionBlock) if _, ok := c.pendingTransactions[id]; !ok { c.log.Warn("unknown transaction marked as confirmed", "id", id, "block", inclusionBlock) // TODO: This can occur if we clear the channel while there are still pending transactions // We need to keep track of stale transactions instead - return false, nil + return false } delete(c.pendingTransactions, id) c.confirmedTransactions[id] = inclusionBlock @@ -82,21 +80,20 @@ func (c *channel) TxConfirmed(id string, inclusionBlock eth.BlockID) (bool, []*t c.minInclusionBlock = min(c.minInclusionBlock, inclusionBlock.Number) c.maxInclusionBlock = max(c.maxInclusionBlock, inclusionBlock.Number) + if c.isFullySubmitted() { + c.metr.RecordChannelFullySubmitted(c.ID()) + c.log.Info("Channel is fully submitted", "id", c.ID(), "min_inclusion_block", c.minInclusionBlock, "max_inclusion_block", c.maxInclusionBlock) + } + // If this channel timed out, put the pending blocks back into the local saved blocks // and then reset this state so it can try to build a new channel. if c.isTimedOut() { c.metr.RecordChannelTimedOut(c.ID()) c.log.Warn("Channel timed out", "id", c.ID(), "min_inclusion_block", c.minInclusionBlock, "max_inclusion_block", c.maxInclusionBlock) - return true, c.channelBuilder.Blocks() - } - // If we are done with this channel, record that. - if c.isFullySubmitted() { - c.metr.RecordChannelFullySubmitted(c.ID()) - c.log.Info("Channel is fully submitted", "id", c.ID(), "min_inclusion_block", c.minInclusionBlock, "max_inclusion_block", c.maxInclusionBlock) - return true, nil + return true } - return false, nil + return false } // Timeout returns the channel timeout L1 block number. If there is no timeout set, it returns 0. @@ -136,7 +133,7 @@ func (c *channel) ID() derive.ChannelID { func (c *channel) NextTxData() txData { nf := c.cfg.MaxFramesPerTx() txdata := txData{frames: make([]frameData, 0, nf), asBlob: c.cfg.UseBlobs} - for i := 0; i < nf && c.channelBuilder.HasFrame(); i++ { + for i := 0; i < nf && c.channelBuilder.HasPendingFrame(); i++ { frame := c.channelBuilder.NextFrame() txdata.frames = append(txdata.frames, frame) } @@ -151,7 +148,7 @@ func (c *channel) NextTxData() txData { func (c *channel) HasTxData() bool { if c.IsFull() || // If the channel is full, we should start to submit it !c.cfg.UseBlobs { // If using calldata, we only send one frame per tx - return c.channelBuilder.HasFrame() + return c.channelBuilder.HasPendingFrame() } // Collect enough frames if channel is not full yet return c.channelBuilder.PendingFrames() >= int(c.cfg.MaxFramesPerTx()) diff --git a/op-batcher/batcher/channel_builder.go b/op-batcher/batcher/channel_builder.go index ae1fb03d2841..597b5ed3e144 100644 --- a/op-batcher/batcher/channel_builder.go +++ b/op-batcher/batcher/channel_builder.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/queue" "github.com/ethereum/go-ethereum/core/types" ) @@ -65,7 +66,7 @@ type ChannelBuilder struct { // current channel co derive.ChannelOut // list of blocks in the channel. 
Saved in case the channel must be rebuilt - blocks []*types.Block + blocks queue.Queue[*types.Block] // latestL1Origin is the latest L1 origin of all the L2 blocks that have been added to the channel latestL1Origin eth.BlockID // oldestL1Origin is the oldest L1 origin of all the L2 blocks that have been added to the channel @@ -75,7 +76,12 @@ type ChannelBuilder struct { // oldestL2 is the oldest L2 block of all the L2 blocks that have been added to the channel oldestL2 eth.BlockID // frames data queue, to be send as txs - frames []frameData + frames queue.Queue[frameData] + // frameCursor tracks which frames in the queue were submitted + // frames[frameCursor] is the next unsubmitted (pending) frame + // frameCursor = len(frames) is reserved for when + // there are no pending (next unsubmitted) frames + frameCursor int // total frames counter numFrames int // total amount of output data of all frames created yet @@ -190,7 +196,7 @@ func (c *ChannelBuilder) AddBlock(block *types.Block) (*derive.L1BlockInfo, erro return l1info, fmt.Errorf("adding block to channel out: %w", err) } - c.blocks = append(c.blocks, block) + c.blocks.Enqueue(block) c.updateSwTimeout(l1info.Number) if l1info.Number > c.latestL1Origin.Number { @@ -312,11 +318,11 @@ func (c *ChannelBuilder) setFullErr(err error) { } // OutputFrames creates new frames with the channel out. It should be called -// after AddBlock and before iterating over available frames with HasFrame and +// after AddBlock and before iterating over pending frames with HasPendingFrame and // NextFrame. // // If the channel isn't full yet, it will conservatively only -// pull readily available frames from the compression output. +// pull pending frames from the compression output. // If it is full, the channel is closed and all remaining // frames will be created, possibly with a small leftover frame. func (c *ChannelBuilder) OutputFrames() error { @@ -387,7 +393,7 @@ func (c *ChannelBuilder) outputFrame() error { id: frameID{chID: c.co.ID(), frameNumber: fn}, data: buf.Bytes(), } - c.frames = append(c.frames, frame) + c.frames.Enqueue(frame) c.numFrames++ c.outputBytes += len(frame.data) return err // possibly io.EOF (last frame) @@ -402,46 +408,47 @@ func (c *ChannelBuilder) Close() { } // TotalFrames returns the total number of frames that were created in this channel so far. -// It does not decrease when the frames queue is being emptied. func (c *ChannelBuilder) TotalFrames() int { return c.numFrames } -// HasFrame returns whether there's any available frame. If true, it can be -// popped using NextFrame(). +// HasPendingFrame returns whether there's any pending frame. If true, it can be +// dequeued using NextFrame(). // // Call OutputFrames before to create new frames from the channel out // compression pipeline. -func (c *ChannelBuilder) HasFrame() bool { - return len(c.frames) > 0 +func (c *ChannelBuilder) HasPendingFrame() bool { + return c.frameCursor < c.frames.Len() } // PendingFrames returns the number of pending frames in the frames queue. -// It is larger zero iff HasFrame() returns true. +// It is larger than zero iff HasPendingFrame() returns true. func (c *ChannelBuilder) PendingFrames() int { - return len(c.frames) + return c.frames.Len() - c.frameCursor } -// NextFrame dequeues the next available frame. -// HasFrame must be called prior to check if there's a next frame available. +// NextFrame returns the next pending frame and increments the frameCursor. +// HasPendingFrame must be called prior to check whether a next pending frame exists.
// Panics if called when there's no next frame. func (c *ChannelBuilder) NextFrame() frameData { - if len(c.frames) == 0 { + if len(c.frames) <= c.frameCursor { panic("no next frame") } - - f := c.frames[0] - c.frames = c.frames[1:] + f := c.frames[c.frameCursor] + c.frameCursor++ return f } -// PushFrames adds the frames back to the internal frames queue. Panics if not of -// the same channel. -func (c *ChannelBuilder) PushFrames(frames ...frameData) { - for _, f := range frames { - if f.id.chID != c.ID() { - panic("wrong channel") - } - c.frames = append(c.frames, f) +// RewindFrameCursor moves the frameCursor to point at the supplied frame +// only if it is ahead of it. +// Panics if the frame is not in this channel. +func (c *ChannelBuilder) RewindFrameCursor(frame frameData) { + if c.frames.Len() <= int(frame.id.frameNumber) || + len(c.frames[frame.id.frameNumber].data) != len(frame.data) || + c.frames[frame.id.frameNumber].id.chID != frame.id.chID { + panic("cannot rewind to unknown frame") + } + if c.frameCursor > int(frame.id.frameNumber) { + c.frameCursor = int(frame.id.frameNumber) } } diff --git a/op-batcher/batcher/channel_builder_test.go b/op-batcher/batcher/channel_builder_test.go index 957f9ae59739..6994186b7f07 100644 --- a/op-batcher/batcher/channel_builder_test.go +++ b/op-batcher/batcher/channel_builder_test.go @@ -299,6 +299,7 @@ func TestChannelBuilderBatchType(t *testing.T) { {"ChannelBuilder_PendingFrames_TotalFrames", ChannelBuilder_PendingFrames_TotalFrames}, {"ChannelBuilder_InputBytes", ChannelBuilder_InputBytes}, {"ChannelBuilder_OutputBytes", ChannelBuilder_OutputBytes}, + {"ChannelBuilder_OutputWrongFramePanic", ChannelBuilder_OutputWrongFramePanic}, } for _, test := range tests { test := test @@ -340,7 +341,7 @@ func TestChannelBuilder_NextFrame(t *testing.T) { }, data: expectedBytes, } - cb.PushFrames(frameData) + cb.frames = append(cb.frames, frameData) // There should only be 1 frame in the channel builder require.Equal(t, 1, cb.PendingFrames()) @@ -355,7 +356,7 @@ func TestChannelBuilder_NextFrame(t *testing.T) { require.PanicsWithValue(t, "no next frame", func() { cb.NextFrame() }) } -// TestChannelBuilder_OutputWrongFramePanic tests that a panic is thrown when a frame is pushed with an invalid frame id +// TestChannelBuilder_OutputWrongFramePanic tests that a panic is thrown when we try to rewind the cursor with an invalid frame id func ChannelBuilder_OutputWrongFramePanic(t *testing.T, batchType uint) { channelConfig := defaultTestChannelConfig() channelConfig.BatchType = batchType @@ -377,7 +378,7 @@ func ChannelBuilder_OutputWrongFramePanic(t *testing.T, batchType uint) { // The frame push should panic since we constructed a new channel out // so the channel out id won't match - require.PanicsWithValue(t, "wrong channel", func() { + require.PanicsWithValue(t, "cannot rewind to unknown frame", func() { frame := frameData{ id: frameID{ chID: co.ID(), @@ -385,7 +386,7 @@ func ChannelBuilder_OutputWrongFramePanic(t *testing.T, batchType uint) { }, data: buf.Bytes(), } - cb.PushFrames(frame) + cb.RewindFrameCursor(frame) }) } @@ -625,11 +626,11 @@ func TestChannelBuilder_FullShadowCompressor(t *testing.T) { require.NoError(cb.OutputFrames()) - require.True(cb.HasFrame()) + require.True(cb.HasPendingFrame()) f := cb.NextFrame() require.Less(len(f.data), int(cfg.MaxFrameSize)) // would fail without fix, full frame - require.False(cb.HasFrame(), "no leftover frame expected") // would fail without fix + require.False(cb.HasPendingFrame(), "no leftover frame 
expected") // would fail without fix } func ChannelBuilder_AddBlock(t *testing.T, batchType uint) { @@ -656,8 +657,8 @@ func ChannelBuilder_AddBlock(t *testing.T, batchType uint) { expectedInputBytes = 47 } require.Equal(t, expectedInputBytes, cb.co.InputBytes()) - require.Equal(t, 1, len(cb.blocks)) - require.Equal(t, 0, len(cb.frames)) + require.Equal(t, 1, cb.blocks.Len()) + require.Equal(t, 0, cb.frames.Len()) require.True(t, cb.IsFull()) // Since the channel output is full, the next call to AddBlock @@ -858,7 +859,7 @@ func ChannelBuilder_PendingFrames_TotalFrames(t *testing.T, batchType uint) { // empty queue for pf := nf - 1; pf >= 0; pf-- { - require.True(cb.HasFrame()) + require.True(cb.HasPendingFrame()) _ = cb.NextFrame() require.Equal(cb.PendingFrames(), pf) require.Equal(cb.TotalFrames(), nf) @@ -932,7 +933,7 @@ func ChannelBuilder_OutputBytes(t *testing.T, batchType uint) { require.Greater(cb.PendingFrames(), 1) var flen int - for cb.HasFrame() { + for cb.HasPendingFrame() { f := cb.NextFrame() flen += len(f.data) } diff --git a/op-batcher/batcher/channel_config.go b/op-batcher/batcher/channel_config.go index 45dc1d4dcfa4..e62ea26eee45 100644 --- a/op-batcher/batcher/channel_config.go +++ b/op-batcher/batcher/channel_config.go @@ -104,7 +104,7 @@ func (cc *ChannelConfig) Check() error { // The [ChannelTimeout] must be larger than the [SubSafetyMargin]. // Otherwise, new blocks would always be considered timed out. if cc.ChannelTimeout < cc.SubSafetyMargin { - return ErrInvalidChannelTimeout + return fmt.Errorf("%w: %d < %d", ErrInvalidChannelTimeout, cc.ChannelTimeout, cc.SubSafetyMargin) } // The max frame size must at least be able to accommodate the constant diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index 06403645ea4d..1da2def78da6 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -39,8 +39,13 @@ type channelManager struct { // All blocks since the last request for new tx data. blocks queue.Queue[*types.Block] - // The latest L1 block from all the L2 blocks in the most recently closed channel - l1OriginLastClosedChannel eth.BlockID + // blockCursor is an index into blocks queue. It points at the next block + // to build a channel with. blockCursor = len(blocks) is reserved for when + // there are no blocks ready to build with. + blockCursor int + // The latest L1 block from all the L2 blocks in the most recently submitted channel. + // Used to track channel duration timeouts. + l1OriginLastSubmittedChannel eth.BlockID // The default ChannelConfig to use for the next channel defaultCfg ChannelConfig // last block hash - for reorg detection @@ -52,9 +57,6 @@ type channelManager struct { channelQueue []*channel // used to lookup channels by tx ID upon tx success / failure txChannels map[string]*channel - - // if set to true, prevents production of any new channel frames - closed bool } func NewChannelManager(log log.Logger, metr metrics.Metricer, cfgProvider ChannelConfigProvider, rollupCfg *rollup.Config) *channelManager { @@ -75,19 +77,23 @@ func (s *channelManager) SetChannelOutFactory(outFactory ChannelOutFactory) { // Clear clears the entire state of the channel manager. // It is intended to be used before launching op-batcher and after an L2 reorg. 
-func (s *channelManager) Clear(l1OriginLastClosedChannel eth.BlockID) { +func (s *channelManager) Clear(l1OriginLastSubmittedChannel eth.BlockID) { s.mu.Lock() defer s.mu.Unlock() s.log.Trace("clearing channel manager state") s.blocks.Clear() - s.l1OriginLastClosedChannel = l1OriginLastClosedChannel + s.blockCursor = 0 + s.l1OriginLastSubmittedChannel = l1OriginLastSubmittedChannel s.tip = common.Hash{} - s.closed = false s.currentChannel = nil s.channelQueue = nil s.txChannels = make(map[string]*channel) } +func (s *channelManager) pendingBlocks() int { + return s.blocks.Len() - s.blockCursor +} + // TxFailed records a transaction as failed. It will attempt to resubmit the data // in the failed transaction. func (s *channelManager) TxFailed(_id txID) { @@ -97,34 +103,21 @@ func (s *channelManager) TxFailed(_id txID) { if channel, ok := s.txChannels[id]; ok { delete(s.txChannels, id) channel.TxFailed(id) - if s.closed && channel.NoneSubmitted() { - s.log.Info("Channel has no submitted transactions, clearing for shutdown", "chID", channel.ID()) - s.removePendingChannel(channel) - } } else { s.log.Warn("transaction from unknown channel marked as failed", "id", id) } } -// TxConfirmed marks a transaction as confirmed on L1. Unfortunately even if all frames in -// a channel have been marked as confirmed on L1 the channel may be invalid & need to be -// resubmitted. -// This function may reset the pending channel if the pending channel has timed out. +// TxConfirmed marks a transaction as confirmed on L1. Only if the channel timed out +// the channelManager's state is modified. func (s *channelManager) TxConfirmed(_id txID, inclusionBlock eth.BlockID) { s.mu.Lock() defer s.mu.Unlock() id := _id.String() if channel, ok := s.txChannels[id]; ok { delete(s.txChannels, id) - done, blocksToRequeue := channel.TxConfirmed(id, inclusionBlock) - if done { - s.removePendingChannel(channel) - if len(blocksToRequeue) > 0 { - s.blocks.Prepend(blocksToRequeue...) - } - for _, b := range blocksToRequeue { - s.metr.RecordL2BlockInPendingQueue(b) - } + if timedOut := channel.TxConfirmed(id, inclusionBlock); timedOut { + s.handleChannelInvalidated(channel) } } else { s.log.Warn("transaction from unknown channel marked as confirmed", "id", id) @@ -133,23 +126,48 @@ func (s *channelManager) TxConfirmed(_id txID, inclusionBlock eth.BlockID) { s.log.Debug("marked transaction as confirmed", "id", id, "block", inclusionBlock) } -// removePendingChannel removes the given completed channel from the manager's state. -func (s *channelManager) removePendingChannel(channel *channel) { - if s.currentChannel == channel { - s.currentChannel = nil +// rewindToBlock updates the blockCursor to point at +// the block with the supplied hash, only if that block exists +// in the block queue and the blockCursor is ahead of it. +// Panics if the block is not in state. +func (s *channelManager) rewindToBlock(block eth.BlockID) { + idx := block.Number - s.blocks[0].Number().Uint64() + if s.blocks[idx].Hash() == block.Hash && idx < uint64(s.blockCursor) { + s.blockCursor = int(idx) + } else { + panic("tried to rewind to nonexistent block") } - index := -1 - for i, c := range s.channelQueue { - if c == channel { - index = i - break +} + +// handleChannelInvalidated rewinds the channelManager's blockCursor +// to point at the first block added to the provided channel, +// and removes the channel from the channelQueue, along with +// any channels which are newer than the provided channel. 
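The channelManager now keeps every loaded block in one queue and tracks progress with blockCursor, rewinding the cursor when a channel is invalidated rather than prepending blocks back onto the queue. A simplified standalone sketch of that bookkeeping, using bare block numbers in place of full L2 blocks (blockTracker and its methods are illustrative names):

package main

import "fmt"

// blockTracker approximates the channelManager's blocks queue plus blockCursor.
type blockTracker struct {
    blocks []uint64 // block numbers, oldest first
    cursor int      // blocks[cursor] is the next block to put into a channel
}

func (t *blockTracker) pendingBlocks() int { return len(t.blocks) - t.cursor }

// nextForChannel consumes the next pending block.
func (t *blockTracker) nextForChannel() uint64 {
    b := t.blocks[t.cursor]
    t.cursor++
    return b
}

// rewindTo points the cursor back at the given block number if it exists
// in the queue and the cursor has already moved past it.
func (t *blockTracker) rewindTo(number uint64) {
    idx := int(number - t.blocks[0])
    if idx >= 0 && idx < len(t.blocks) && idx < t.cursor {
        t.cursor = idx
    }
}

func main() {
    t := &blockTracker{blocks: []uint64{100, 101, 102}}
    t.nextForChannel()
    t.nextForChannel() // blocks 100 and 101 now in a channel
    t.rewindTo(100)    // channel invalidated: make them pending again
    fmt.Println(t.pendingBlocks()) // 3
}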
+func (s *channelManager) handleChannelInvalidated(c *channel) { + if len(c.channelBuilder.blocks) > 0 { + // This is usually true, but there is an edge case + // where a channel timed out before any blocks got added. + // In that case we end up with an empty frame (header only), + // and there are no blocks to requeue. + blockID := eth.ToBlockID(c.channelBuilder.blocks[0]) + for _, block := range c.channelBuilder.blocks { + s.metr.RecordL2BlockInPendingQueue(block) } + s.rewindToBlock(blockID) + } else { + s.log.Debug("channelManager.handleChannelInvalidated: channel had no blocks") } - if index < 0 { - s.log.Warn("channel not found in channel queue", "id", channel.ID()) - return + + // Trim provided channel and any newer channels: + for i := range s.channelQueue { + if s.channelQueue[i] == c { + s.channelQueue = s.channelQueue[:i] + break + } } - s.channelQueue = append(s.channelQueue[:index], s.channelQueue[index+1:]...) + + // We want to start writing to a new channel, so reset currentChannel. + s.currentChannel = nil } // nextTxData dequeues frames from the channel and returns them encoded in a transaction. @@ -160,6 +178,12 @@ func (s *channelManager) nextTxData(channel *channel) (txData, error) { return txData{}, io.EOF // TODO: not enough data error instead } tx := channel.NextTxData() + + // update s.l1OriginLastSubmittedChannel so that the next + // channel's duration timeout will trigger properly + if channel.LatestL1Origin().Number > s.l1OriginLastSubmittedChannel.Number { + s.l1OriginLastSubmittedChannel = channel.LatestL1Origin() + } s.txChannels[tx.ID().String()] = channel return tx, nil } @@ -200,7 +224,16 @@ func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) { s.log.Info("Recomputing optimal ChannelConfig: changing DA type and requeing blocks...", "useBlobsBefore", s.defaultCfg.UseBlobs, "useBlobsAfter", newCfg.UseBlobs) - s.Requeue(newCfg) + + // Invalidate the channel so its blocks + // get requeued: + s.handleChannelInvalidated(channel) + + // Set the defaultCfg so new channels + // pick up the new ChannelConfig + s.defaultCfg = newCfg + + // Try again to get data to send on chain.
channel, err = s.getReadyChannel(l1Head) if err != nil { return emptyTxData, err @@ -231,14 +264,9 @@ func (s *channelManager) getReadyChannel(l1Head eth.BlockID) (*channel, error) { return firstWithTxData, nil } - if s.closed { - return nil, io.EOF - } - // No pending tx data, so we have to add new blocks to the channel - // If we have no saved blocks, we will not be able to create valid frames - if s.blocks.Len() == 0 { + if s.pendingBlocks() == 0 { return nil, io.EOF } @@ -284,7 +312,7 @@ func (s *channelManager) ensureChannelWithSpace(l1Head eth.BlockID) error { return fmt.Errorf("creating channel out: %w", err) } - pc := newChannel(s.log, s.metr, cfg, s.rollupCfg, s.l1OriginLastClosedChannel.Number, channelOut) + pc := newChannel(s.log, s.metr, cfg, s.rollupCfg, s.l1OriginLastSubmittedChannel.Number, channelOut) s.currentChannel = pc s.channelQueue = append(s.channelQueue, pc) @@ -292,8 +320,8 @@ func (s *channelManager) ensureChannelWithSpace(l1Head eth.BlockID) error { s.log.Info("Created channel", "id", pc.ID(), "l1Head", l1Head, - "l1OriginLastClosedChannel", s.l1OriginLastClosedChannel, - "blocks_pending", s.blocks.Len(), + "blocks_pending", s.pendingBlocks(), + "l1OriginLastSubmittedChannel", s.l1OriginLastSubmittedChannel, "batch_type", cfg.BatchType, "compression_algo", cfg.CompressorConfig.CompressionAlgo, "target_num_frames", cfg.TargetNumFrames, @@ -324,7 +352,7 @@ func (s *channelManager) processBlocks() error { latestL2ref eth.L2BlockRef ) - for i := 0; ; i++ { + for i := s.blockCursor; ; i++ { block, ok := s.blocks.PeekN(i) if !ok { break @@ -348,7 +376,7 @@ func (s *channelManager) processBlocks() error { } } - _, _ = s.blocks.DequeueN(blocksAdded) + s.blockCursor += blocksAdded s.metr.RecordL2BlocksAdded(latestL2ref, blocksAdded, @@ -357,7 +385,7 @@ func (s *channelManager) processBlocks() error { s.currentChannel.ReadyBytes()) s.log.Debug("Added blocks to channel", "blocks_added", blocksAdded, - "blocks_pending", s.blocks.Len(), + "blocks_pending", s.pendingBlocks(), "channel_full", s.currentChannel.IsFull(), "input_bytes", s.currentChannel.InputBytes(), "ready_bytes", s.currentChannel.ReadyBytes(), @@ -374,15 +402,10 @@ func (s *channelManager) outputFrames() error { return nil } - lastClosedL1Origin := s.currentChannel.LatestL1Origin() - if lastClosedL1Origin.Number > s.l1OriginLastClosedChannel.Number { - s.l1OriginLastClosedChannel = lastClosedL1Origin - } - inBytes, outBytes := s.currentChannel.InputBytes(), s.currentChannel.OutputBytes() s.metr.RecordChannelClosed( s.currentChannel.ID(), - s.blocks.Len(), + s.pendingBlocks(), s.currentChannel.TotalFrames(), inBytes, outBytes, @@ -396,17 +419,16 @@ func (s *channelManager) outputFrames() error { s.log.Info("Channel closed", "id", s.currentChannel.ID(), - "blocks_pending", s.blocks.Len(), + "blocks_pending", s.pendingBlocks(), "num_frames", s.currentChannel.TotalFrames(), "input_bytes", inBytes, "output_bytes", outBytes, "oldest_l1_origin", s.currentChannel.OldestL1Origin(), - "l1_origin", lastClosedL1Origin, + "l1_origin", s.currentChannel.LatestL1Origin(), "oldest_l2", s.currentChannel.OldestL2(), "latest_l2", s.currentChannel.LatestL2(), "full_reason", s.currentChannel.FullErr(), "compr_ratio", comprRatio, - "latest_l1_origin", s.l1OriginLastClosedChannel, ) return nil } @@ -442,83 +464,78 @@ func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info *derive.L1BlockInfo var ErrPendingAfterClose = errors.New("pending channels remain after closing channel-manager") -// Close clears any pending channels that are 
not in-flight already, to leave a clean derivation state. -// Close then marks the remaining current open channel, if any, as "full" so it can be submitted as well. -// Close does NOT immediately output frames for the current remaining channel: -// as this might error, due to limitations on a single channel. -// Instead, this is part of the pending-channel submission work: after closing, -// the caller SHOULD drain pending channels by generating TxData repeatedly until there is none left (io.EOF). -// A ErrPendingAfterClose error will be returned if there are any remaining pending channels to submit. -func (s *channelManager) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - if s.closed { - return nil +// pruneSafeBlocks dequeues blocks from the internal blocks queue +// if they have now become safe. +func (s *channelManager) pruneSafeBlocks(newSafeHead eth.L2BlockRef) { + oldestBlock, ok := s.blocks.Peek() + if !ok { + // no blocks to prune + return } - s.closed = true - s.log.Info("Channel manager is closing") - - // Any pending state can be proactively cleared if there are no submitted transactions - for _, ch := range s.channelQueue { - if ch.NoneSubmitted() { - s.log.Info("Channel has no past or pending submission - dropping", "id", ch.ID()) - s.removePendingChannel(ch) - } else { - s.log.Info("Channel is in-flight and will need to be submitted after close", "id", ch.ID(), "confirmed", len(ch.confirmedTransactions), "pending", len(ch.pendingTransactions)) - } + if newSafeHead.Number+1 == oldestBlock.NumberU64() { + // no blocks to prune + return } - s.log.Info("Reviewed all pending channels on close", "remaining", len(s.channelQueue)) - if s.currentChannel == nil { - return nil + if newSafeHead.Number+1 < oldestBlock.NumberU64() { + // This could happen if there was an L1 reorg. + // Or if the sequencer restarted. + s.log.Warn("safe head reversed, clearing channel manager state", + "oldestBlock", eth.ToBlockID(oldestBlock), + "newSafeBlock", newSafeHead) + // We should restart work from the new safe head, + // and therefore prune all the blocks. + s.Clear(newSafeHead.L1Origin) + return } - // If the channel is already full, we don't need to close it or output frames. - // This would already have happened in TxData. - if !s.currentChannel.IsFull() { - // Force-close the remaining open channel early (if not already closed): - // it will be marked as "full" due to service termination. - s.currentChannel.Close() + numBlocksToDequeue := newSafeHead.Number + 1 - oldestBlock.NumberU64() - // Final outputFrames call in case there was unflushed data in the compressor. - if err := s.outputFrames(); err != nil { - return fmt.Errorf("outputting frames during close: %w", err) - } + if numBlocksToDequeue > uint64(s.blocks.Len()) { + // This could happen if the batcher restarted. + // The sequencer may have derived the safe chain + // from channels sent by a previous batcher instance. + s.log.Warn("safe head above unsafe head, clearing channel manager state", + "unsafeBlock", eth.ToBlockID(s.blocks[s.blocks.Len()-1]), + "newSafeBlock", newSafeHead) + // We should restart work from the new safe head, + // and therefore prune all the blocks. + s.Clear(newSafeHead.L1Origin) + return } - if s.currentChannel.HasTxData() { - // Make it clear to the caller that there is remaining pending work. 
- return ErrPendingAfterClose + if s.blocks[numBlocksToDequeue-1].Hash() != newSafeHead.Hash { + s.log.Warn("safe chain reorg, clearing channel manager state", + "existingBlock", eth.ToBlockID(s.blocks[numBlocksToDequeue-1]), + "newSafeBlock", newSafeHead) + // We should restart work from the new safe head, + // and therefore prune all the blocks. + s.Clear(newSafeHead.L1Origin) + return } - return nil -} -// Requeue rebuilds the channel manager state by -// rewinding blocks back from the channel queue, and setting the defaultCfg. -func (s *channelManager) Requeue(newCfg ChannelConfig) { - newChannelQueue := []*channel{} - blocksToRequeue := []*types.Block{} - for _, channel := range s.channelQueue { - if !channel.NoneSubmitted() { - newChannelQueue = append(newChannelQueue, channel) - continue - } - blocksToRequeue = append(blocksToRequeue, channel.channelBuilder.Blocks()...) - } + // This shouldn't return an error because + // We already checked numBlocksToDequeue <= s.blocks.Len() + _, _ = s.blocks.DequeueN(int(numBlocksToDequeue)) + s.blockCursor -= int(numBlocksToDequeue) - // We put the blocks back at the front of the queue: - s.blocks.Prepend(blocksToRequeue...) - for _, b := range blocksToRequeue { - s.metr.RecordL2BlockInPendingQueue(b) + if s.blockCursor < 0 { + panic("negative blockCursor") } +} - // Channels which where already being submitted are put back - s.channelQueue = newChannelQueue - s.currentChannel = nil - // Setting the defaultCfg will cause new channels - // to pick up the new ChannelConfig - s.defaultCfg = newCfg +// pruneChannels dequeues channels from the internal channels queue +// if they were built using blocks which are now safe +func (s *channelManager) pruneChannels(newSafeHead eth.L2BlockRef) { + i := 0 + for _, ch := range s.channelQueue { + if ch.LatestL2().Number > newSafeHead.Number { + break + } + i++ + } + s.channelQueue = s.channelQueue[i:] } // PendingDABytes returns the current number of bytes pending to be written to the DA layer (from blocks fetched from L2 @@ -533,3 +550,26 @@ func (s *channelManager) PendingDABytes() int64 { } return int64(f) } + +// CheckExpectedProgress uses the supplied syncStatus to infer +// whether the node providing the status has made the expected +// safe head progress given fully submitted channels held in +// state. 
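With Close() removed, state is trimmed as the safe head advances instead: pruneSafeBlocks drops queued blocks the new safe head already covers and pruneChannels drops channels whose blocks are all safe. A condensed, happy-path-only sketch of the block pruning, assuming contiguous block numbers and omitting the reorg and hash checks that clear all state in the real code:

package main

import "fmt"

type manager struct {
    blocks []uint64 // contiguous block numbers, oldest first
    cursor int      // next block to build a channel with
}

// pruneSafeBlocks drops queued blocks that the new safe head already covers,
// keeping the cursor pointing at the same logical block.
func (m *manager) pruneSafeBlocks(newSafeHead uint64) {
    if len(m.blocks) == 0 || newSafeHead+1 <= m.blocks[0] {
        return // nothing became safe
    }
    n := int(newSafeHead + 1 - m.blocks[0])
    if n > len(m.blocks) {
        // Safe head ran ahead of everything we hold: start over
        // (the real code also clears state on reorgs and reversals).
        m.blocks, m.cursor = nil, 0
        return
    }
    m.blocks = m.blocks[n:]
    m.cursor -= n
    if m.cursor < 0 {
        panic("cursor moved past pruned blocks")
    }
}

func main() {
    m := &manager{blocks: []uint64{10, 11, 12, 13}, cursor: 3}
    m.pruneSafeBlocks(11) // blocks 10 and 11 are now safe
    fmt.Println(m.blocks, m.cursor) // [12 13] 1
}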
+func (m *channelManager) CheckExpectedProgress(syncStatus eth.SyncStatus) error { + for _, ch := range m.channelQueue { + if ch.isFullySubmitted() && // This implies a number of l1 confirmations has passed, depending on how the txmgr was configured + !ch.isTimedOut() && + syncStatus.CurrentL1.Number > ch.maxInclusionBlock && + syncStatus.SafeL2.Number < ch.LatestL2().Number { + return errors.New("safe head did not make expected progress") + } + } + return nil +} + +func (m *channelManager) LastStoredBlock() eth.BlockID { + if m.blocks.Len() == 0 { + return eth.BlockID{} + } + return eth.ToBlockID(m.blocks[m.blocks.Len()-1]) +} diff --git a/op-batcher/batcher/channel_manager_test.go b/op-batcher/batcher/channel_manager_test.go index 8dcb0745c164..32aae1b06dd1 100644 --- a/op-batcher/batcher/channel_manager_test.go +++ b/op-batcher/batcher/channel_manager_test.go @@ -40,10 +40,6 @@ func TestChannelManagerBatchType(t *testing.T) { {"ChannelManagerReturnsErrReorgWhenDrained", ChannelManagerReturnsErrReorgWhenDrained}, {"ChannelManager_Clear", ChannelManager_Clear}, {"ChannelManager_TxResend", ChannelManager_TxResend}, - {"ChannelManagerCloseBeforeFirstUse", ChannelManagerCloseBeforeFirstUse}, - {"ChannelManagerCloseNoPendingChannel", ChannelManagerCloseNoPendingChannel}, - {"ChannelManagerClosePendingChannel", ChannelManagerClosePendingChannel}, - {"ChannelManagerCloseAllTxsFailed", ChannelManagerCloseAllTxsFailed}, } for _, test := range tests { test := test @@ -130,7 +126,7 @@ func ChannelManager_Clear(t *testing.T, batchType uint) { // Channel Manager state should be empty by default require.Empty(m.blocks) - require.Equal(eth.BlockID{}, m.l1OriginLastClosedChannel) + require.Equal(eth.BlockID{}, m.l1OriginLastSubmittedChannel) require.Equal(common.Hash{}, m.tip) require.Nil(m.currentChannel) require.Empty(m.channelQueue) @@ -154,15 +150,14 @@ func ChannelManager_Clear(t *testing.T, batchType uint) { // Process the blocks // We should have a pending channel with 1 frame - // and no more blocks since processBlocks consumes - // the list + require.NoError(m.processBlocks()) require.NoError(m.currentChannel.channelBuilder.co.Flush()) require.NoError(m.outputFrames()) _, err := m.nextTxData(m.currentChannel) require.NoError(err) - require.NotNil(m.l1OriginLastClosedChannel) - require.Len(m.blocks, 0) + require.Equal(m.blockCursor, len(m.blocks)) + require.NotNil(m.l1OriginLastSubmittedChannel) require.Equal(newL1Tip, m.tip) require.Len(m.currentChannel.pendingTransactions, 1) @@ -173,7 +168,7 @@ func ChannelManager_Clear(t *testing.T, batchType uint) { ParentHash: a.Hash(), }, nil, nil, nil) require.NoError(m.AddL2Block(b)) - require.Len(m.blocks, 1) + require.Equal(m.blockCursor, len(m.blocks)-1) require.Equal(b.Hash(), m.tip) safeL1Origin := eth.BlockID{ @@ -184,7 +179,7 @@ func ChannelManager_Clear(t *testing.T, batchType uint) { // Check that the entire channel manager state cleared require.Empty(m.blocks) - require.Equal(uint64(123), m.l1OriginLastClosedChannel.Number) + require.Equal(uint64(123), m.l1OriginLastSubmittedChannel.Number) require.Equal(common.Hash{}, m.tip) require.Nil(m.currentChannel) require.Empty(m.channelQueue) @@ -228,220 +223,6 @@ func ChannelManager_TxResend(t *testing.T, batchType uint) { require.Len(fs, 1) } -// ChannelManagerCloseBeforeFirstUse ensures that the channel manager -// will not produce any frames if closed immediately. 
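CheckExpectedProgress, added above, errors out when the node has already seen the L1 block that included a fully submitted channel yet its safe L2 head still sits below that channel's last block. A minimal standalone sketch of the same comparison with plain numbers in place of eth.SyncStatus (submittedChannel and checkExpectedProgress are illustrative names):

package main

import (
    "errors"
    "fmt"
)

// submittedChannel summarizes what the check needs from a fully submitted channel:
// the L1 block that included its last frame and the highest L2 block it carries.
type submittedChannel struct {
    maxInclusionBlock uint64 // L1 block of the last confirmed frame tx
    latestL2          uint64 // highest L2 block number in the channel
    timedOut          bool
}

// checkExpectedProgress returns an error if the node's current L1 block is past
// a channel's inclusion block (so it should have derived it) while its safe L2
// head is still below the channel's last block.
func checkExpectedProgress(currentL1, safeL2 uint64, channels []submittedChannel) error {
    for _, ch := range channels {
        if !ch.timedOut && currentL1 > ch.maxInclusionBlock && safeL2 < ch.latestL2 {
            return errors.New("safe head did not make expected progress")
        }
    }
    return nil
}

func main() {
    chs := []submittedChannel{{maxInclusionBlock: 3, latestL2: 11}}
    fmt.Println(checkExpectedProgress(4, 11, chs)) // <nil>
    fmt.Println(checkExpectedProgress(4, 10, chs)) // safe head did not make expected progress
}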
-func ChannelManagerCloseBeforeFirstUse(t *testing.T, batchType uint) { - require := require.New(t) - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - log := testlog.Logger(t, log.LevelCrit) - m := NewChannelManager(log, metrics.NoopMetrics, - channelManagerTestConfig(10000, batchType), - defaultTestRollupConfig, - ) - m.Clear(eth.BlockID{}) - - a := derivetest.RandomL2BlockWithChainId(rng, 4, defaultTestRollupConfig.L2ChainID) - - require.NoError(m.Close(), "Expected to close channel manager gracefully") - - err := m.AddL2Block(a) - require.NoError(err, "Failed to add L2 block") - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected closed channel manager to contain no tx data") -} - -// ChannelManagerCloseNoPendingChannel ensures that the channel manager -// can gracefully close with no pending channels, and will not emit any new -// channel frames. -func ChannelManagerCloseNoPendingChannel(t *testing.T, batchType uint) { - require := require.New(t) - log := testlog.Logger(t, log.LevelCrit) - cfg := channelManagerTestConfig(10000, batchType) - cfg.CompressorConfig.TargetOutputSize = 1 // full on first block - cfg.ChannelTimeout = 1000 - m := NewChannelManager(log, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - m.Clear(eth.BlockID{}) - a := newMiniL2Block(0) - b := newMiniL2BlockWithNumberParent(0, big.NewInt(1), a.Hash()) - - err := m.AddL2Block(a) - require.NoError(err, "Failed to add L2 block") - - txdata, err := m.TxData(eth.BlockID{}) - require.NoError(err, "Expected channel manager to return valid tx data") - - m.TxConfirmed(txdata.ID(), eth.BlockID{}) - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected channel manager to EOF") - - require.NoError(m.Close(), "Expected to close channel manager gracefully") - - err = m.AddL2Block(b) - require.NoError(err, "Failed to add L2 block") - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected closed channel manager to return no new tx data") -} - -// ChannelManagerClosePendingChannel ensures that the channel manager -// can gracefully close with a pending channel, and will not produce any -// new channel frames after this point. -func ChannelManagerClosePendingChannel(t *testing.T, batchType uint) { - require := require.New(t) - // The number of batch txs depends on compression of the random data, hence the static test RNG seed. 
- // Example of different RNG seed that creates less than 2 frames: 1698700588902821588 - rng := rand.New(rand.NewSource(123)) - log := testlog.Logger(t, log.LevelError) - cfg := channelManagerTestConfig(10_000, batchType) - cfg.ChannelTimeout = 1000 - m := NewChannelManager(log, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - m.Clear(eth.BlockID{}) - - numTx := 20 // Adjust number of txs to make 2 frames - a := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) - - err := m.AddL2Block(a) - require.NoError(err, "Failed to add L2 block") - - txdata, err := m.TxData(eth.BlockID{}) - require.NoError(err, "Expected channel manager to produce valid tx data") - log.Info("generated first tx data", "len", txdata.Len()) - - m.TxConfirmed(txdata.ID(), eth.BlockID{}) - - require.ErrorIs(m.Close(), ErrPendingAfterClose, "Expected channel manager to error on close because of pending tx data") - - txdata, err = m.TxData(eth.BlockID{}) - require.NoError(err, "Expected channel manager to produce tx data from remaining L2 block data") - log.Info("generated more tx data", "len", txdata.Len()) - - m.TxConfirmed(txdata.ID(), eth.BlockID{}) - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected channel manager to have no more tx data") - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected closed channel manager to produce no more tx data") -} - -// ChannelManager_Close_PartiallyPendingChannel ensures that the channel manager -// can gracefully close with a pending channel, where a block is still waiting -// inside the compressor to be flushed. -// -// This test runs only for singular batches on purpose. -// The SpanChannelOut writes full span batches to the compressor for -// every new block that's added, so NonCompressor cannot be used to -// set up a scenario where data is only partially flushed. -// Couldn't get the test to work even with modifying NonCompressor -// to flush half-way through writing to the compressor... -func TestChannelManager_Close_PartiallyPendingChannel(t *testing.T) { - require := require.New(t) - // The number of batch txs depends on compression of the random data, hence the static test RNG seed. - // Example of different RNG seed that creates less than 2 frames: 1698700588902821588 - rng := rand.New(rand.NewSource(123)) - log := testlog.Logger(t, log.LevelError) - cfg := ChannelConfig{ - MaxFrameSize: 2200, - ChannelTimeout: 1000, - TargetNumFrames: 100, - } - cfg.InitNoneCompressor() - m := NewChannelManager(log, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - m.Clear(eth.BlockID{}) - - numTx := 3 // Adjust number of txs to make 2 frames - a := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) - b := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) - bHeader := b.Header() - bHeader.Number = new(big.Int).Add(a.Number(), big.NewInt(1)) - bHeader.ParentHash = a.Hash() - b = b.WithSeal(bHeader) - - require.NoError(m.AddL2Block(a), "adding 1st L2 block") - require.NoError(m.AddL2Block(b), "adding 2nd L2 block") - - // Inside TxData, the two blocks queued above are written to the compressor. - // The NonCompressor will flush the first, but not the second block, when - // adding the second block, setting up the test with a partially flushed - // compressor. 
- txdata, err := m.TxData(eth.BlockID{}) - require.NoError(err, "Expected channel manager to produce valid tx data") - log.Info("generated first tx data", "len", txdata.Len()) - - m.TxConfirmed(txdata.ID(), eth.BlockID{}) - - // ensure no new ready data before closing - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected unclosed channel manager to only return a single frame") - - require.ErrorIs(m.Close(), ErrPendingAfterClose, "Expected channel manager to error on close because of pending tx data") - require.NotNil(m.currentChannel) - require.ErrorIs(m.currentChannel.FullErr(), ErrTerminated, "Expected current channel to be terminated by Close") - - txdata, err = m.TxData(eth.BlockID{}) - require.NoError(err, "Expected channel manager to produce tx data from remaining L2 block data") - log.Info("generated more tx data", "len", txdata.Len()) - - m.TxConfirmed(txdata.ID(), eth.BlockID{}) - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected closed channel manager to produce no more tx data") -} - -// ChannelManagerCloseAllTxsFailed ensures that the channel manager -// can gracefully close after producing transaction frames if none of these -// have successfully landed on chain. -func ChannelManagerCloseAllTxsFailed(t *testing.T, batchType uint) { - require := require.New(t) - rng := rand.New(rand.NewSource(1357)) - log := testlog.Logger(t, log.LevelCrit) - cfg := channelManagerTestConfig(100, batchType) - cfg.TargetNumFrames = 1000 - cfg.InitNoneCompressor() - m := NewChannelManager(log, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - m.Clear(eth.BlockID{}) - - a := derivetest.RandomL2BlockWithChainId(rng, 1000, defaultTestRollupConfig.L2ChainID) - - err := m.AddL2Block(a) - require.NoError(err, "Failed to add L2 block") - - drainTxData := func() (txdatas []txData) { - for { - txdata, err := m.TxData(eth.BlockID{}) - if err == io.EOF { - return - } - require.NoError(err, "Expected channel manager to produce valid tx data") - txdatas = append(txdatas, txdata) - } - } - - txdatas := drainTxData() - require.NotEmpty(txdatas) - - for _, txdata := range txdatas { - m.TxFailed(txdata.ID()) - } - - // Show that this data will continue to be emitted as long as the transaction - // fails and the channel manager is not closed - txdatas1 := drainTxData() - require.NotEmpty(txdatas) - require.ElementsMatch(txdatas, txdatas1, "expected same txdatas on re-attempt") - - for _, txdata := range txdatas1 { - m.TxFailed(txdata.ID()) - } - - require.NoError(m.Close(), "Expected to close channel manager gracefully") - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected closed channel manager to produce no more tx data") -} - func TestChannelManager_ChannelCreation(t *testing.T) { l := testlog.Logger(t, log.LevelCrit) const maxChannelDuration = 15 @@ -475,7 +256,7 @@ func TestChannelManager_ChannelCreation(t *testing.T) { t.Run(test.name, func(t *testing.T) { m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - m.l1OriginLastClosedChannel = test.safeL1Block + m.l1OriginLastSubmittedChannel = test.safeL1Block require.Nil(t, m.currentChannel) require.NoError(t, m.ensureChannelWithSpace(eth.BlockID{})) @@ -543,10 +324,12 @@ func TestChannelManager_TxData(t *testing.T) { // * One when the channelManager was created // * One when the channel is about to be submitted - // * Potentially one more if the replacement channel is about to be submitted, - // this only happens when going from calldata->blobs because - // the 
channel is no longer ready to send until more data - // is added. + // * Potentially one more when the replacement channel + // is not immediately ready to be submitted, but later + // becomes ready after more data is added. + // This only happens when going from calldata->blobs because + // the channel is not immediately ready to send until more data + // is added due to blob channels having greater capacity. numExpectedAssessments int } @@ -591,7 +374,7 @@ func TestChannelManager_TxData(t *testing.T) { // we get some data to submit var data txData for { - m.blocks = []*types.Block{blockA} + m.blocks = append(m.blocks, blockA) data, err = m.TxData(eth.BlockID{}) if err == nil && data.Len() > 0 { break @@ -609,16 +392,15 @@ func TestChannelManager_TxData(t *testing.T) { } -// TestChannelManager_Requeue seeds the channel manager with blocks, +// TestChannelManager_handleChannelInvalidated seeds the channel manager with blocks, // takes a state snapshot, triggers the blocks->channels pipeline, -// and then calls Requeue. Finally, it asserts the channel manager's -// state is equal to the snapshot. It repeats this for a channel -// which has a pending transaction and verifies that Requeue is then -// a noop. -func TestChannelManager_Requeue(t *testing.T) { +// and then calls handleChannelInvalidated. It asserts on the final state of +// the channel manager. +func TestChannelManager_handleChannelInvalidated(t *testing.T) { l := testlog.Logger(t, log.LevelCrit) cfg := channelManagerTestConfig(100, derive.SingularBatchType) - m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) + metrics := new(metrics.TestMetrics) + m := NewChannelManager(l, metrics, cfg, defaultTestRollupConfig) // Seed channel manager with blocks rng := rand.New(rand.NewSource(99)) @@ -631,42 +413,197 @@ func TestChannelManager_Requeue(t *testing.T) { m.blocks = stateSnapshot require.Empty(t, m.channelQueue) + // Place an old channel in the queue. + // This channel should not be affected by + // a requeue or a later channel timing out. + oldChannel := newChannel(l, nil, m.defaultCfg, defaultTestRollupConfig, 0, nil) + oldChannel.Close() + m.channelQueue = []*channel{oldChannel} + require.Len(t, m.channelQueue, 1) + + // Setup initial metrics + metrics.RecordL2BlockInPendingQueue(blockA) + metrics.RecordL2BlockInPendingQueue(blockB) + pendingBytesBefore := metrics.PendingBlocksBytesCurrent + // Trigger the blocks -> channelQueue data pipelining require.NoError(t, m.ensureChannelWithSpace(eth.BlockID{})) - require.NotEmpty(t, m.channelQueue) + require.Len(t, m.channelQueue, 2) require.NoError(t, m.processBlocks()) // Assert that at least one block was processed into the channel - require.NotContains(t, m.blocks, blockA) + require.Equal(t, 1, m.blockCursor) + + // Check metric decreased + metricsDelta := metrics.PendingBlocksBytesCurrent - pendingBytesBefore + require.Negative(t, metricsDelta) + + l1OriginBefore := m.l1OriginLastSubmittedChannel - // Call the function we are testing - m.Requeue(m.defaultCfg) + m.handleChannelInvalidated(m.currentChannel) // Ensure we got back to the state above require.Equal(t, m.blocks, stateSnapshot) - require.Empty(t, m.channelQueue) + require.Contains(t, m.channelQueue, oldChannel) + require.Len(t, m.channelQueue, 1) + + // Check metric came back up to previous value + require.Equal(t, pendingBytesBefore, metrics.PendingBlocksBytesCurrent) + + // Ensure the l1OridingLastSubmittedChannel was + // not changed. 
This ensures the next channel + // has its duration timeout deadline computed + // properly. + require.Equal(t, l1OriginBefore, m.l1OriginLastSubmittedChannel) // Trigger the blocks -> channelQueue data pipelining again require.NoError(t, m.ensureChannelWithSpace(eth.BlockID{})) require.NotEmpty(t, m.channelQueue) require.NoError(t, m.processBlocks()) +} - // Assert that at least one block was processed into the channel - require.NotContains(t, m.blocks, blockA) +func TestChannelManager_PruneBlocks(t *testing.T) { + l := testlog.Logger(t, log.LevelDebug) + cfg := channelManagerTestConfig(100, derive.SingularBatchType) + m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - // Now mark the 0th channel in the queue as already - // starting to send on chain - channel0 := m.channelQueue[0] - channel0.pendingTransactions["foo"] = txData{} - require.False(t, channel0.NoneSubmitted()) + a := types.NewBlock(&types.Header{ + Number: big.NewInt(0), + }, nil, nil, nil) + b := types.NewBlock(&types.Header{ // This will shortly become the safe head + Number: big.NewInt(1), + ParentHash: a.Hash(), + }, nil, nil, nil) + c := types.NewBlock(&types.Header{ + Number: big.NewInt(2), + ParentHash: b.Hash(), + }, nil, nil, nil) + + require.NoError(t, m.AddL2Block(a)) + m.blockCursor += 1 + require.NoError(t, m.AddL2Block(b)) + m.blockCursor += 1 + require.NoError(t, m.AddL2Block(c)) + m.blockCursor += 1 + + // Normal path + m.pruneSafeBlocks(eth.L2BlockRef{ + Hash: b.Hash(), + Number: b.NumberU64(), + }) + require.Equal(t, queue.Queue[*types.Block]{c}, m.blocks) + + // Safe chain didn't move, nothing to prune + m.pruneSafeBlocks(eth.L2BlockRef{ + Hash: b.Hash(), + Number: b.NumberU64(), + }) + require.Equal(t, queue.Queue[*types.Block]{c}, m.blocks) - // Call the function we are testing - m.Requeue(m.defaultCfg) + // Safe chain moved beyond the blocks we had + // state should be cleared + m.pruneSafeBlocks(eth.L2BlockRef{ + Hash: c.Hash(), + Number: uint64(99), + }) + require.Equal(t, queue.Queue[*types.Block]{}, m.blocks) + + // No blocks to prune, NOOP + m.pruneSafeBlocks(eth.L2BlockRef{ + Hash: c.Hash(), + Number: c.NumberU64(), + }) + require.Equal(t, queue.Queue[*types.Block]{}, m.blocks) - // The requeue shouldn't affect the pending channel - require.Contains(t, m.channelQueue, channel0) + // Put another block in + d := types.NewBlock(&types.Header{ + Number: big.NewInt(3), + ParentHash: c.Hash(), + }, nil, nil, nil) + require.NoError(t, m.AddL2Block(d)) + m.blockCursor += 1 + + // Safe chain reorg + // state should be cleared + m.pruneSafeBlocks(eth.L2BlockRef{ + Hash: a.Hash(), + Number: uint64(3), + }) + require.Equal(t, queue.Queue[*types.Block]{}, m.blocks) + + // Put another block in + require.NoError(t, m.AddL2Block(d)) + m.blockCursor += 1 + + // Safe chain reversed + // state should be cleared + m.pruneSafeBlocks(eth.L2BlockRef{ + Hash: a.Hash(), // unused + Number: uint64(1), + }) + require.Equal(t, queue.Queue[*types.Block]{}, m.blocks) + +} + +func TestChannelManager_PruneChannels(t *testing.T) { + l := testlog.Logger(t, log.LevelCrit) + cfg := channelManagerTestConfig(100, derive.SingularBatchType) + cfg.InitNoneCompressor() + m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) + + A, err := newChannelWithChannelOut(l, metrics.NoopMetrics, cfg, m.rollupCfg, 0) + require.NoError(t, err) + B, err := newChannelWithChannelOut(l, metrics.NoopMetrics, cfg, m.rollupCfg, 0) + require.NoError(t, err) + C, err := newChannelWithChannelOut(l, 
metrics.NoopMetrics, cfg, m.rollupCfg, 0) + require.NoError(t, err) + + m.channelQueue = []*channel{A, B, C} + + numTx := 1 + rng := rand.New(rand.NewSource(123)) + a0 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) + a0 = a0.WithSeal(&types.Header{Number: big.NewInt(0)}) + a1 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) + a1 = a1.WithSeal(&types.Header{Number: big.NewInt(1)}) + b2 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) + b2 = b2.WithSeal(&types.Header{Number: big.NewInt(2)}) + b3 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) + b3 = b3.WithSeal(&types.Header{Number: big.NewInt(3)}) + c4 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) + c4 = c4.WithSeal(&types.Header{Number: big.NewInt(4)}) + + _, err = A.AddBlock(a0) + require.NoError(t, err) + _, err = A.AddBlock(a1) + require.NoError(t, err) + + _, err = B.AddBlock(b2) + require.NoError(t, err) + _, err = B.AddBlock(b3) + require.NoError(t, err) + + _, err = C.AddBlock(c4) + require.NoError(t, err) + + m.pruneChannels(eth.L2BlockRef{ + Number: uint64(3), + }) + + require.Equal(t, []*channel{C}, m.channelQueue) + + m.pruneChannels(eth.L2BlockRef{ + Number: uint64(4), + }) + + require.Equal(t, []*channel{}, m.channelQueue) + + m.pruneChannels(eth.L2BlockRef{ + Number: uint64(4), + }) + + require.Equal(t, []*channel{}, m.channelQueue) - require.NotContains(t, m.blocks, blockA) } func TestChannelManager_ChannelOutFactory(t *testing.T) { type ChannelOutWrapper struct { @@ -690,3 +627,57 @@ func TestChannelManager_ChannelOutFactory(t *testing.T) { require.IsType(t, &ChannelOutWrapper{}, m.currentChannel.channelBuilder.co) } + +func TestChannelManager_CheckExpectedProgress(t *testing.T) { + l := testlog.Logger(t, log.LevelCrit) + cfg := channelManagerTestConfig(100, derive.SingularBatchType) + cfg.InitNoneCompressor() + m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) + + channelMaxInclusionBlockNumber := uint64(3) + channelLatestSafeBlockNumber := uint64(11) + + // Prepare a (dummy) fully submitted channel + // with + // maxInclusionBlock and latest safe block number as above + A, err := newChannelWithChannelOut(l, metrics.NoopMetrics, cfg, m.rollupCfg, 0) + require.NoError(t, err) + rng := rand.New(rand.NewSource(123)) + a0 := derivetest.RandomL2BlockWithChainId(rng, 1, defaultTestRollupConfig.L2ChainID) + a0 = a0.WithSeal(&types.Header{Number: big.NewInt(int64(channelLatestSafeBlockNumber))}) + _, err = A.AddBlock(a0) + require.NoError(t, err) + A.maxInclusionBlock = channelMaxInclusionBlockNumber + A.Close() + A.channelBuilder.frames = nil + A.channelBuilder.frameCursor = 0 + require.True(t, A.isFullySubmitted()) + + m.channelQueue = append(m.channelQueue, A) + + // The current L1 number implies that + // channel A above should have been derived + // from, so we expect safe head to progress to + // the channelLatestSafeBlockNumber. 
+ // Since the safe head moved to 11, there is no error: + ss := eth.SyncStatus{ + CurrentL1: eth.L1BlockRef{Number: channelMaxInclusionBlockNumber + 1}, + SafeL2: eth.L2BlockRef{Number: channelLatestSafeBlockNumber}, + } + err = m.CheckExpectedProgress(ss) + require.NoError(t, err) + + // If the currentL1 is as above but the + // safe head is less than channelLatestSafeBlockNumber, + // the method should return an error: + ss.SafeL2 = eth.L2BlockRef{Number: channelLatestSafeBlockNumber - 1} + err = m.CheckExpectedProgress(ss) + require.Error(t, err) + + // If the safe head is still less than channelLatestSafeBlockNumber + // but the currentL1 is _equal_ to the channelMaxInclusionBlockNumber + // there should be no error as that block is still being derived from: + ss.CurrentL1 = eth.L1BlockRef{Number: channelMaxInclusionBlockNumber} + err = m.CheckExpectedProgress(ss) + require.NoError(t, err) +} diff --git a/op-batcher/batcher/channel_test.go b/op-batcher/batcher/channel_test.go index 0aad780131c7..b36ce9311bce 100644 --- a/op-batcher/batcher/channel_test.go +++ b/op-batcher/batcher/channel_test.go @@ -113,7 +113,7 @@ func TestChannelManager_NextTxData(t *testing.T) { frameNumber: uint16(0), }, } - channel.channelBuilder.PushFrames(frame) + channel.channelBuilder.frames = append(channel.channelBuilder.frames, frame) require.Equal(t, 1, channel.PendingFrames()) // Now the nextTxData function should return the frame @@ -142,7 +142,7 @@ func TestChannel_NextTxData_singleFrameTx(t *testing.T) { mockframes := makeMockFrameDatas(chID, n+1) // put multiple frames into channel, but less than target - ch.channelBuilder.PushFrames(mockframes[:n-1]...) + ch.channelBuilder.frames = mockframes[:n-1] requireTxData := func(i int) { require.True(ch.HasTxData(), "expected tx data %d", i) @@ -160,7 +160,7 @@ func TestChannel_NextTxData_singleFrameTx(t *testing.T) { require.False(ch.HasTxData()) // put in last two - ch.channelBuilder.PushFrames(mockframes[n-1 : n+1]...) + ch.channelBuilder.frames = append(ch.channelBuilder.frames, mockframes[n-1:n+1]...) for i := n - 1; i < n+1; i++ { requireTxData(i) } @@ -183,11 +183,11 @@ func TestChannel_NextTxData_multiFrameTx(t *testing.T) { mockframes := makeMockFrameDatas(chID, n+1) // put multiple frames into channel, but less than target - ch.channelBuilder.PushFrames(mockframes[:n-1]...) + ch.channelBuilder.frames = append(ch.channelBuilder.frames, mockframes[:n-1]...) require.False(ch.HasTxData()) // put in last two - ch.channelBuilder.PushFrames(mockframes[n-1 : n+1]...) + ch.channelBuilder.frames = append(ch.channelBuilder.frames, mockframes[n-1:n+1]...) 
require.True(ch.HasTxData()) txdata := ch.NextTxData() require.Len(txdata.frames, n) @@ -240,7 +240,8 @@ func TestChannelTxConfirmed(t *testing.T) { frameNumber: uint16(0), }, } - m.currentChannel.channelBuilder.PushFrames(frame) + m.currentChannel.channelBuilder.frames = append(m.currentChannel.channelBuilder.frames, frame) + require.Equal(t, 1, m.currentChannel.PendingFrames()) returnedTxData, err := m.nextTxData(m.currentChannel) expectedTxData := singleFrameTxData(frame) @@ -291,7 +292,7 @@ func TestChannelTxFailed(t *testing.T) { frameNumber: uint16(0), }, } - m.currentChannel.channelBuilder.PushFrames(frame) + m.currentChannel.channelBuilder.frames = append(m.currentChannel.channelBuilder.frames, frame) require.Equal(t, 1, m.currentChannel.PendingFrames()) returnedTxData, err := m.nextTxData(m.currentChannel) expectedTxData := singleFrameTxData(frame) diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index 6d8b6eb9da1f..729626cd946c 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -111,13 +111,9 @@ type BatchSubmitter struct { running bool txpoolMutex sync.Mutex // guards txpoolState and txpoolBlockedBlob - txpoolState int + txpoolState TxPoolState txpoolBlockedBlob bool - // lastStoredBlock is the last block loaded into `state`. If it is empty it should be set to the l2 safe head. - lastStoredBlock eth.BlockID - lastL1Tip eth.L1BlockRef - state *channelManager } @@ -147,7 +143,6 @@ func (l *BatchSubmitter) StartBatchSubmitting() error { l.shutdownCtx, l.cancelShutdownCtx = context.WithCancel(context.Background()) l.killCtx, l.cancelKillCtx = context.WithCancel(context.Background()) l.clearState(l.shutdownCtx) - l.lastStoredBlock = eth.BlockID{} if err := l.waitForL2Genesis(); err != nil { return fmt.Errorf("error waiting for L2 genesis: %w", err) @@ -160,8 +155,20 @@ func (l *BatchSubmitter) StartBatchSubmitting() error { } } - l.wg.Add(1) - go l.loop() + receiptsCh := make(chan txmgr.TxReceipt[txRef]) + receiptsLoopCtx, cancelReceiptsLoopCtx := context.WithCancel(context.Background()) + throttlingLoopCtx, cancelThrottlingLoopCtx := context.WithCancel(context.Background()) + + // DA throttling loop should always be started except for testing (indicated by ThrottleInterval == 0) + if l.Config.ThrottleInterval > 0 { + l.wg.Add(1) + go l.throttlingLoop(throttlingLoopCtx) + } else { + l.Log.Warn("Throttling loop is DISABLED due to 0 throttle-interval. This should not be disabled in prod.") + } + l.wg.Add(2) + go l.processReceiptsLoop(receiptsLoopCtx, receiptsCh) // receives from receiptsCh + go l.mainLoop(l.shutdownCtx, receiptsCh, cancelReceiptsLoopCtx, cancelThrottlingLoopCtx) // sends on receiptsCh l.Log.Info("Batch Submitter started") return nil @@ -240,11 +247,12 @@ func (l *BatchSubmitter) StopBatchSubmitting(ctx context.Context) error { // 2. Check if the sync status is valid or if we are all the way up to date // 3. Check if it needs to initialize state OR it is lagging (todo: lagging just means race condition?) // 4. Load all new blocks into the local state. +// 5. Dequeue blocks from local state which are now safe. // // If there is a reorg, it will reset the last stored block but not clear the internal state so // the state can be flushed to L1. 
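StartBatchSubmitting now gives the receipts and throttling loops their own cancelable contexts and passes the cancel functions into the main loop, so the auxiliary goroutines stop when the main loop returns. A stripped-down sketch of that shutdown pattern with placeholder loop bodies (not the real batcher logic):

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

func main() {
    var wg sync.WaitGroup
    results := make(chan int)

    receiptsCtx, cancelReceipts := context.WithCancel(context.Background())
    throttleCtx, cancelThrottle := context.WithCancel(context.Background())

    wg.Add(3)
    go func() { // stands in for processReceiptsLoop: drains results until canceled
        defer wg.Done()
        for {
            select {
            case r := <-results:
                fmt.Println("receipt:", r)
            case <-receiptsCtx.Done():
                return
            }
        }
    }()
    go func() { // stands in for throttlingLoop
        defer wg.Done()
        <-throttleCtx.Done()
    }()
    go func() { // stands in for mainLoop: owns the other loops' lifetimes
        defer wg.Done()
        defer cancelReceipts()
        defer cancelThrottle()
        results <- 42
        time.Sleep(10 * time.Millisecond) // give the receipt loop time to print
    }()

    wg.Wait()
}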
-func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context) error { - start, end, err := l.calculateL2BlockRangeToStore(ctx) +func (l *BatchSubmitter) loadBlocksIntoState(syncStatus eth.SyncStatus, ctx context.Context) error { + start, end, err := l.calculateL2BlockRangeToStore(syncStatus) if err != nil { l.Log.Warn("Error calculating L2 block range", "err", err) return err @@ -258,13 +266,11 @@ func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context) error { block, err := l.loadBlockIntoState(ctx, i) if errors.Is(err, ErrReorg) { l.Log.Warn("Found L2 reorg", "block_number", i) - l.lastStoredBlock = eth.BlockID{} return err } else if err != nil { l.Log.Warn("Failed to load block into state", "err", err) return err } - l.lastStoredBlock = eth.ToBlockID(block) latestBlock = block } @@ -307,12 +313,10 @@ func (l *BatchSubmitter) loadBlockIntoState(ctx context.Context, blockNumber uin return block, nil } -// calculateL2BlockRangeToStore determines the range (start,end] that should be loaded into the local state. -// It also takes care of initializing some local state (i.e. will modify l.lastStoredBlock in certain conditions) -func (l *BatchSubmitter) calculateL2BlockRangeToStore(ctx context.Context) (eth.BlockID, eth.BlockID, error) { +func (l *BatchSubmitter) getSyncStatus(ctx context.Context) (*eth.SyncStatus, error) { rollupClient, err := l.EndpointProvider.RollupClient(ctx) if err != nil { - return eth.BlockID{}, eth.BlockID{}, fmt.Errorf("getting rollup client: %w", err) + return nil, fmt.Errorf("getting rollup client: %w", err) } var ( @@ -330,7 +334,7 @@ func (l *BatchSubmitter) calculateL2BlockRangeToStore(ctx context.Context) (eth. // Ensure that we have the sync status if err != nil { - return eth.BlockID{}, eth.BlockID{}, fmt.Errorf("failed to get sync status: %w", err) + return nil, fmt.Errorf("failed to get sync status: %w", err) } // If we have a head, break out of the loop @@ -347,26 +351,39 @@ func (l *BatchSubmitter) calculateL2BlockRangeToStore(ctx context.Context) (eth. // Reset timer to tick of the new backoff time again timer.Reset(backoff) case <-ctx.Done(): - return eth.BlockID{}, eth.BlockID{}, ctx.Err() + return nil, ctx.Err() } } - // Check last stored to see if it needs to be set on startup OR set if is lagged behind. - // It lagging implies that the op-node processed some batches that were submitted prior to the current instance of the batcher being alive. - if l.lastStoredBlock == (eth.BlockID{}) { - l.Log.Info("Starting batch-submitter work at safe-head", "safe", syncStatus.SafeL2) - l.lastStoredBlock = syncStatus.SafeL2.ID() - } else if l.lastStoredBlock.Number < syncStatus.SafeL2.Number { - l.Log.Warn("Last submitted block lagged behind L2 safe head: batch submission will continue from the safe head now", "last", l.lastStoredBlock, "safe", syncStatus.SafeL2) - l.lastStoredBlock = syncStatus.SafeL2.ID() - } + return syncStatus, nil +} +// calculateL2BlockRangeToStore determines the range (start,end] that should be loaded into the local state. +func (l *BatchSubmitter) calculateL2BlockRangeToStore(syncStatus eth.SyncStatus) (eth.BlockID, eth.BlockID, error) { + if syncStatus.HeadL1 == (eth.L1BlockRef{}) { + return eth.BlockID{}, eth.BlockID{}, errors.New("empty sync status") + } // Check if we should even attempt to load any blocks. 
TODO: May not need this check if syncStatus.SafeL2.Number >= syncStatus.UnsafeL2.Number { - return eth.BlockID{}, eth.BlockID{}, fmt.Errorf("L2 safe head(%d) ahead of L2 unsafe head(%d)", syncStatus.SafeL2.Number, syncStatus.UnsafeL2.Number) + return eth.BlockID{}, eth.BlockID{}, fmt.Errorf("L2 safe head(%d) >= L2 unsafe head(%d)", syncStatus.SafeL2.Number, syncStatus.UnsafeL2.Number) } - return l.lastStoredBlock, syncStatus.UnsafeL2.ID(), nil + lastStoredBlock := l.state.LastStoredBlock() + start := lastStoredBlock + end := syncStatus.UnsafeL2.ID() + + // Check last stored block to see if it is empty or has lagged behind. + // It lagging implies that the op-node processed some batches that + // were submitted prior to the current instance of the batcher being alive. + if lastStoredBlock == (eth.BlockID{}) { + l.Log.Info("Resuming batch-submitter work at safe-head", "safe", syncStatus.SafeL2) + start = syncStatus.SafeL2.ID() + } else if lastStoredBlock.Number < syncStatus.SafeL2.Number { + l.Log.Warn("Last stored block lagged behind L2 safe head: batch submission will continue from the safe head now", "last", lastStoredBlock, "safe", syncStatus.SafeL2) + start = syncStatus.SafeL2.ID() + } + + return start, end, nil } // The following things occur: @@ -380,6 +397,8 @@ func (l *BatchSubmitter) calculateL2BlockRangeToStore(ctx context.Context) (eth. // Submitted batch, but it is not valid // Missed L2 block somehow. +type TxPoolState int + const ( // Txpool states. Possible state transitions: // TxpoolGood -> TxpoolBlocked: @@ -389,13 +408,29 @@ const ( // send a cancellation transaction. // TxpoolCancelPending -> TxpoolGood: // happens once the cancel transaction completes, whether successfully or in error. - TxpoolGood int = iota + TxpoolGood TxPoolState = iota TxpoolBlocked TxpoolCancelPending ) -func (l *BatchSubmitter) loop() { +// setTxPoolState locks the mutex, sets the parameters to the supplied ones, and release the mutex. +func (l *BatchSubmitter) setTxPoolState(txPoolState TxPoolState, txPoolBlockedBlob bool) { + l.txpoolMutex.Lock() + l.txpoolState = txPoolState + l.txpoolBlockedBlob = txPoolBlockedBlob + l.txpoolMutex.Unlock() +} + +// mainLoop periodically: +// - polls the sequencer, +// - prunes the channel manager state (i.e. safe blocks) +// - loads unsafe blocks from the sequencer +// - drives the creation of channels and frames +// - sends transactions to the DA layer +func (l *BatchSubmitter) mainLoop(ctx context.Context, receiptsCh chan txmgr.TxReceipt[txRef], receiptsLoopCancel, throttlingLoopCancel context.CancelFunc) { defer l.wg.Done() + defer receiptsLoopCancel() + defer throttlingLoopCancel() queue := txmgr.NewQueue[txRef](l.killCtx, l.Txmgr, l.Config.MaxPendingTransactions) daGroup := &errgroup.Group{} @@ -409,111 +444,73 @@ func (l *BatchSubmitter) loop() { l.txpoolState = TxpoolGood l.txpoolMutex.Unlock() - // start the receipt/result processing loop - receiptsLoopDone := make(chan struct{}) - defer close(receiptsLoopDone) // shut down receipt loop l.l2BlockAdded = make(chan struct{}) defer close(l.l2BlockAdded) - receiptsCh := make(chan txmgr.TxReceipt[txRef]) - go l.processReceiptsLoop(receiptsCh, receiptsLoopDone) - - // DA throttling loop should always be started except for testing (indicated by ThrottleInterval == 0) - if l.Config.ThrottleInterval > 0 { - throttlingLoopDone := make(chan struct{}) - defer close(throttlingLoopDone) - go l.throttlingLoop(throttlingLoopDone) - } else { - l.Log.Warn("Throttling loop is DISABLED due to 0 throttle-interval. 
This should not be disabled in prod.") - } ticker := time.NewTicker(l.Config.PollInterval) defer ticker.Stop() - publishAndWait := func() { - l.publishStateToL1(queue, receiptsCh, daGroup) - if !l.Txmgr.IsClosed() { - if l.Config.UseAltDA { - l.Log.Info("Waiting for altDA writes to complete...") - err := daGroup.Wait() - if err != nil { - l.Log.Error("Error returned by one of the altda goroutines waited on", "err", err) - } - } - l.Log.Info("Waiting for L1 txs to be confirmed...") - err := queue.Wait() - if err != nil { - l.Log.Error("Error returned by one of the txmgr goroutines waited on", "err", err) - } - } else { - l.Log.Info("Txmgr is closed, remaining channel data won't be sent") - } - } - for { select { case <-ticker.C: + if !l.checkTxpool(queue, receiptsCh) { continue } - if err := l.loadBlocksIntoState(l.shutdownCtx); errors.Is(err, ErrReorg) { - err := l.state.Close() - if err != nil { - if errors.Is(err, ErrPendingAfterClose) { - l.Log.Warn("Closed channel manager to handle L2 reorg with pending channel(s) remaining - submitting") - } else { - l.Log.Error("Error closing the channel manager to handle a L2 reorg", "err", err) - } - } - // on reorg we want to publish all pending state then wait until each result clears before resetting - // the state. - publishAndWait() - l.clearState(l.shutdownCtx) + + syncStatus, err := l.getSyncStatus(l.shutdownCtx) + if err != nil { + l.Log.Warn("could not get sync status", "err", err) continue } - l.publishStateToL1(queue, receiptsCh, daGroup) - case <-l.shutdownCtx.Done(): - if l.Txmgr.IsClosed() { - l.Log.Info("Txmgr is closed, remaining channel data won't be sent") - return - } - // This removes any never-submitted pending channels, so these do not have to be drained with transactions. - // Any remaining unfinished channel is terminated, so its data gets submitted. 
- err := l.state.Close() + + l.state.pruneSafeBlocks(syncStatus.SafeL2) + l.state.pruneChannels(syncStatus.SafeL2) + + err = l.state.CheckExpectedProgress(*syncStatus) if err != nil { - if errors.Is(err, ErrPendingAfterClose) { - l.Log.Warn("Closed channel manager on shutdown with pending channel(s) remaining - submitting") - } else { - l.Log.Error("Error closing the channel manager on shutdown", "err", err) - } + l.Log.Warn("error checking expected progress, clearing state and waiting for node sync", "err", err) + l.waitNodeSyncAndClearState() + continue + } + + if err := l.loadBlocksIntoState(*syncStatus, l.shutdownCtx); errors.Is(err, ErrReorg) { + l.Log.Warn("error loading blocks, clearing state and waiting for node sync", "err", err) + l.waitNodeSyncAndClearState() + continue } - publishAndWait() - l.Log.Info("Finished publishing all remaining channel data") + + l.publishStateToL1(queue, receiptsCh, daGroup, l.Config.PollInterval) + case <-ctx.Done(): + if err := queue.Wait(); err != nil { + l.Log.Error("error waiting for transactions to complete", "err", err) + } + l.Log.Warn("main loop returning") return } } } -func (l *BatchSubmitter) processReceiptsLoop(receiptsCh chan txmgr.TxReceipt[txRef], receiptsLoopDone chan struct{}) { +// processReceiptsLoop handles transaction receipts from the DA layer +func (l *BatchSubmitter) processReceiptsLoop(ctx context.Context, receiptsCh chan txmgr.TxReceipt[txRef]) { + defer l.wg.Done() l.Log.Info("Starting receipts processing loop") for { select { case r := <-receiptsCh: - l.txpoolMutex.Lock() if errors.Is(r.Err, txpool.ErrAlreadyReserved) && l.txpoolState == TxpoolGood { - l.txpoolState = TxpoolBlocked - l.txpoolBlockedBlob = r.ID.isBlob - l.Log.Info("incompatible tx in txpool", "is_blob", r.ID.isBlob) + l.setTxPoolState(TxpoolBlocked, r.ID.isBlob) + l.Log.Warn("incompatible tx in txpool", "id", r.ID, "is_blob", r.ID.isBlob) } else if r.ID.isCancel && l.txpoolState == TxpoolCancelPending { // Set state to TxpoolGood even if the cancellation transaction ended in error // since the stuck transaction could have cleared while we were waiting. - l.txpoolState = TxpoolGood + l.setTxPoolState(TxpoolGood, l.txpoolBlockedBlob) l.Log.Info("txpool may no longer be blocked", "err", r.Err) } - l.txpoolMutex.Unlock() l.Log.Info("Handling receipt", "id", r.ID) l.handleReceipt(r) - case <-receiptsLoopDone: - l.Log.Info("Receipts processing loop done") + case <-ctx.Done(): + l.Log.Info("Receipt processing loop done") return } } @@ -523,7 +520,8 @@ func (l *BatchSubmitter) processReceiptsLoop(receiptsCh chan txmgr.TxReceipt[txR // throttling of incoming data prevent the backlog from growing too large. By looping & calling the miner API setter // continuously, we ensure the engine currently in use is always going to be reset to the proper throttling settings // even in the event of sequencer failover. -func (l *BatchSubmitter) throttlingLoop(throttlingLoopDone chan struct{}) { +func (l *BatchSubmitter) throttlingLoop(ctx context.Context) { + defer l.wg.Done() l.Log.Info("Starting DA throttling loop") ticker := time.NewTicker(l.Config.ThrottleInterval) defer ticker.Stop() @@ -557,8 +555,11 @@ func (l *BatchSubmitter) throttlingLoop(throttlingLoopDone chan struct{}) { // We'd probably hit this error right after startup, so a short shutdown duration should suffice. ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - // Always returns nil. An error is only returned to expose this function as an RPC. 
- _ = l.StopBatchSubmitting(ctx) + // Call StopBatchSubmitting in another goroutine to avoid deadlock. + go func() { + // Always returns nil. An error is only returned to expose this function as an RPC. + _ = l.StopBatchSubmitting(ctx) + }() return } else if err != nil { l.Log.Error("SetMaxDASize rpc failed, retrying.", "err", err) @@ -575,13 +576,24 @@ func (l *BatchSubmitter) throttlingLoop(throttlingLoopDone chan struct{}) { updateParams() case <-ticker.C: updateParams() - case <-throttlingLoopDone: + case <-ctx.Done(): l.Log.Info("DA throttling loop done") return } } } +func (l *BatchSubmitter) waitNodeSyncAndClearState() { + // Wait for any in flight transactions + // to be ingested by the node before + // we start loading blocks again. + err := l.waitNodeSync() + if err != nil { + l.Log.Warn("error waiting for node sync", "err", err) + } + l.clearState(l.shutdownCtx) +} + // waitNodeSync Check to see if there was a batcher tx sent recently that // still needs more block confirmations before being considered finalized func (l *BatchSubmitter) waitNodeSync() error { @@ -614,9 +626,11 @@ func (l *BatchSubmitter) waitNodeSync() error { return dial.WaitRollupSync(l.shutdownCtx, l.Log, rollupClient, l1TargetBlock, time.Second*12) } -// publishStateToL1 queues up all pending TxData to be published to the L1, returning when there is -// no more data to queue for publishing or if there was an error queing the data. -func (l *BatchSubmitter) publishStateToL1(queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) { +// publishStateToL1 queues up all pending TxData to be published to the L1, returning when there is no more data to +// queue for publishing or if there was an error queing the data. maxDuration tells this function to return from state +// publishing after this amount of time has been exceeded even if there is more data remaining. +func (l *BatchSubmitter) publishStateToL1(queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group, maxDuration time.Duration) { + start := time.Now() for { // if the txmgr is closed, we stop the transaction sending if l.Txmgr.IsClosed() { @@ -634,6 +648,10 @@ func (l *BatchSubmitter) publishStateToL1(queue *txmgr.Queue[txRef], receiptsCh } return } + if time.Since(start) > maxDuration { + l.Log.Warn("Aborting state publishing, max duration exceeded") + return + } } } @@ -684,7 +702,7 @@ func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[t l.Log.Error("Failed to query L1 tip", "err", err) return err } - l.recordL1Tip(l1tip) + l.Metr.RecordLatestL1Block(l1tip) // Collect next transaction data. This pulls data out of the channel, so we need to make sure // to put it back if ever da or txmgr requests fail, by calling l.recordFailedDARequest/recordFailedTx. @@ -762,10 +780,16 @@ func (l *BatchSubmitter) publishToAltDAAndL1(txdata txData, queue *txmgr.Queue[t // So we prefer to mimic the behavior of txmgr and cancel all pending DA/txmgr requests when the batcher is stopped. 
comm, err := l.AltDA.SetInput(l.shutdownCtx, txdata.CallData()) if err != nil { - l.Log.Error("Failed to post input to Alt DA", "error", err) - // requeue frame if we fail to post to the DA Provider so it can be retried - // note: this assumes that the da server caches requests, otherwise it might lead to resubmissions of the blobs - l.recordFailedDARequest(txdata.ID(), err) + // Don't log context cancelled events because they are expected, + // and can happen after tests complete which causes a panic. + if errors.Is(err, context.Canceled) { + l.recordFailedDARequest(txdata.ID(), nil) + } else { + l.Log.Error("Failed to post input to Alt DA", "error", err) + // requeue frame if we fail to post to the DA Provider so it can be retried + // note: this assumes that the da server caches requests, otherwise it might lead to resubmissions of the blobs + l.recordFailedDARequest(txdata.ID(), err) + } return nil } l.Log.Info("Set altda input", "commitment", comm, "tx", txdata.ID()) @@ -862,14 +886,6 @@ func (l *BatchSubmitter) handleReceipt(r txmgr.TxReceipt[txRef]) { } } -func (l *BatchSubmitter) recordL1Tip(l1tip eth.L1BlockRef) { - if l.lastL1Tip == l1tip { - return - } - l.lastL1Tip = l1tip - l.Metr.RecordLatestL1Block(l1tip) -} - func (l *BatchSubmitter) recordFailedDARequest(id txID, err error) { if err != nil { l.Log.Warn("DA request failed", logFields(id, err)...) diff --git a/op-batcher/cmd/main.go b/op-batcher/cmd/main.go index 82472006da27..39ca58b193c3 100644 --- a/op-batcher/cmd/main.go +++ b/op-batcher/cmd/main.go @@ -18,7 +18,7 @@ import ( ) var ( - Version = "v0.10.14" + Version = "v0.0.0" GitCommit = "" GitDate = "" ) diff --git a/op-batcher/justfile b/op-batcher/justfile new file mode 100644 index 000000000000..a0c671ebd28a --- /dev/null +++ b/op-batcher/justfile @@ -0,0 +1,36 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X main.Version=" + VERSION + " " + \ + "") + "'" + +BINARY := "./bin/op-batcher" + +# Build op-batcher binary +op-batcher: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) + +# Clean build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") + +[private] +batcher_fuzz_task FUZZ TIME='10s': (go_fuzz FUZZ TIME "./batcher") + +# Run fuzzing tests +fuzz: + printf "%s\n" \ + "FuzzChannelConfig_CheckTimeout" \ + "FuzzDurationZero" \ + "FuzzDurationTimeoutMaxChannelDuration" \ + "FuzzDurationTimeoutZeroMaxChannelDuration" \ + "FuzzChannelCloseTimeout" \ + "FuzzChannelZeroCloseTimeout" \ + "FuzzSeqWindowClose" \ + "FuzzSeqWindowZeroTimeoutClose" \ + | parallel -j {{PARALLEL_JOBS}} {{just_executable()}} batcher_fuzz_task {} diff --git a/op-batcher/metrics/test.go b/op-batcher/metrics/test.go new file mode 100644 index 000000000000..76c365ea7e2b --- /dev/null +++ b/op-batcher/metrics/test.go @@ -0,0 +1,22 @@ +package metrics + +import ( + "github.com/ethereum/go-ethereum/core/types" +) + +type TestMetrics struct { + noopMetrics + PendingBlocksBytesCurrent float64 +} + +var _ Metricer = new(TestMetrics) + +func (m *TestMetrics) RecordL2BlockInPendingQueue(block *types.Block) { + _, rawSize := estimateBatchSize(block) + m.PendingBlocksBytesCurrent += float64(rawSize) + +} +func (m *TestMetrics) RecordL2BlockInChannel(block *types.Block) { + _, rawSize := estimateBatchSize(block) + m.PendingBlocksBytesCurrent -= float64(rawSize) +} diff --git a/op-batcher/readme.md b/op-batcher/readme.md new file mode 100644 index 
000000000000..ba547845f099 --- /dev/null +++ b/op-batcher/readme.md @@ -0,0 +1,88 @@ +# op-batcher + +The `op-batcher` is responsible for ensuring data availability. See the [specs](https://specs.optimism.io/protocol/batcher.html). + + +## Interactions & Dependencies +The `op-batcher` works together with the [sequencer](../op-node/) (which it reads unsafe blocks from), the data availability layer (e.g. Layer 1 or an [Alt DA](../op-alt-da/) layer, which it posts data to), and the [derivation pipeline](../op-node/) (which reads the data from the DA layer and progresses the safe chain). + +It depends directly on some code shared with the derivation pipeline, namely the [`ChannelOut`](../op-node/rollup/derive/channel_out.go) implementation(s). It also depends directly on the shared [txmgr](../op-service/txmgr/) module. + +## Testing +The batcher has a suite of unit tests which can be triggered by running +``` +go test ./... +``` +from this directory. There are also end-to-end tests in [`op-e2e`](../op-e2e/) which integrate the batcher. + +## Architecture + +The architecture of this batcher implementation is shown on the left side of the following diagram: + +![architecture](./architecture.png) + +Batch submitting (writing to the DA layer, in the middle of the diagram) works together with the derivation pipeline (on the right side of the diagram, reading from the DA layer) to progress the safe chain. + +The philosophy behind the current architecture is: +* Blocks, channels and frames are kept around for as long as they might be needed, and discarded as soon as they are not needed. They are not moved from one part of state to another. +* We retain block data in a strict order for as long as necessary. We only garbage collect frames, channels and blocks when the safe head moves sufficiently and those structures have done their job. +* When something goes wrong, we rewind the state cursors by the minimal amount we need to get going again. + + +### Happy path + +In the happy path, the batcher periodically: +1. Enqueues unsafe blocks and dequeues safe blocks from the sequencer to its internal state. +2. Enqueues a new channel, if necessary. +3. Processes some unprocessed blocks into the current channel, triggers the compression of the block data and the creation of frames. +4. Sends frames from the channel queue to the DA layer (e.g. to Ethereum L1 as calldata or blob transactions). +5. If there is more transaction data to send, go to 2. Else wait for a tick and go to 1. + + +The `blockCursor` state variable tracks the next unprocessed block. +In each channel, the `frameCursor` tracks the next unsent frame. + + +### Reorgs +When an L2 unsafe reorg is detected, the batch submitter will reset its state, and wait for any in-flight transactions to be ingested by the verifier nodes before starting work again. + +### Tx Failed +When a Tx fails, an asynchronous receipts handler is triggered. The channel from which the Tx's frames came has its `frameCursor` rewound, so that all the frames can be resubmitted in order. + +### Channel Times Out +When a Tx is confirmed, an asynchronous receipts handler is triggered. We only update the batcher's state if the channel timed out on chain. In that case, the `blockCursor` is rewound to the first block added to that channel, and the channel queue is cleared out. This allows the batcher to start building a fresh channel from the same block -- it does not need to refetch blocks from the sequencer.
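To make the cursor behaviour described above concrete, here is a minimal, hypothetical Go sketch of the rewind rules. The names `blockCursor` and `frameCursor` come from the readme; the types and helper functions are purely illustrative and are not the actual op-batcher implementation:

```go
// Hypothetical sketch of the rewind rules: a failed tx rewinds the channel's
// frameCursor; an on-chain channel timeout rewinds the blockCursor to the
// channel's first block and clears the channel queue.
package main

import "fmt"

type channel struct {
	frames      []string // built frames, in order
	frameCursor int      // index of the next unsent frame
	firstBlock  uint64   // first L2 block number added to this channel
}

type batcherState struct {
	blocks      []uint64 // unsafe L2 block numbers held in state
	blockCursor int      // index of the next unprocessed block
	channels    []*channel
}

// onTxFailed rewinds the frame cursor so the channel's frames are resubmitted in order.
func (s *batcherState) onTxFailed(ch *channel) {
	ch.frameCursor = 0
}

// onChannelTimedOut rewinds the block cursor to the channel's first block and
// drops the channel queue, so a fresh channel is rebuilt from the same blocks.
func (s *batcherState) onChannelTimedOut(ch *channel) {
	for i, b := range s.blocks {
		if b == ch.firstBlock {
			s.blockCursor = i
			break
		}
	}
	s.channels = nil
}

func main() {
	st := &batcherState{blocks: []uint64{100, 101, 102}, blockCursor: 3}
	ch := &channel{frames: []string{"f0", "f1"}, frameCursor: 2, firstBlock: 101}
	st.channels = []*channel{ch}

	st.onTxFailed(ch)
	fmt.Println("frameCursor after failed tx:", ch.frameCursor) // 0: resend all frames

	st.onChannelTimedOut(ch)
	fmt.Println("blockCursor after channel timeout:", st.blockCursor) // 1: back at block 101
}
```

This mirrors the "minimal rewind" philosophy stated above: only the cursors move backwards, while blocks and frames stay in place until the safe head makes them unnecessary.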
+ +## Design Principles and Optimization Targets +At the current time, the batcher should be optimized for correctness, simplicity and robustness. It is considered preferable to prioritize these properties, even at the expense of other potentially desirable properties such as frugality. For example, it is preferable to have the batcher resubmit some data from time to time ("wasting" money on data availability costs) instead of avoiding that by e.g. adding some persistent state to the batcher. + +The batcher can almost always recover from unforeseen situations by being restarted. + + +Some complexity is permitted, however, for handling data availability switching, so that the batcher is not wasting money for longer periods of time. + +### Data Availability Backlog + +A chain can potentially experience an influx of large transactions whose data availability requirements exceed the total +throughput of the data availability layer. While this situation might resolve on its own in the long term through the +data availability pricing mechanism, in practice this feedback loop is too slow to prevent a very large backlog of data +from being produced, even at a relatively low cost to whoever is submitting the large transactions. In such +circumstances, the safe head can fall significantly behind the unsafe head, and the time between seeing a transaction +(and charging it a given L1 data fee) and actually posting the transaction to the data availability layer grows larger +and larger. Because DA costs can rise quickly during such an event, the batcher can end up paying far more to post the +transaction to the DA layer than what can be recovered from the transaction's data fee. + +To prevent a significant DA backlog, the batcher can instruct the block builder (via op-geth's miner RPC API) to impose +thresholds on the total DA requirements of a single block, and/or the maximum DA requirement of any single +transaction. In the happy case, the batcher instructs the block builder to impose a block-level DA limit of +OP_BATCHER_THROTTLE_ALWAYS_BLOCK_SIZE, and imposes no additional limit on the DA requirements of a single +transaction. But in the case of a DA backlog (as defined by OP_BATCHER_THROTTLE_THRESHOLD), the batcher instructs the +block builder to instead impose a (tighter) block-level limit of OP_BATCHER_THROTTLE_BLOCK_SIZE, and a single +transaction limit of OP_BATCHER_THROTTLE_TRANSACTION_SIZE. + +## Known issues and future work + +Link to [open issues with the `op-batcher` tag](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aopen+is%3Aissue+label%3AA-op-batcher). + +The batcher launches L1 transactions in parallel so that it can achieve higher throughput, particularly in situations where there is a large backlog of data which needs to be posted. Sometimes, transactions can get stuck in the L1 mempool. The batcher does have functionality to clear these stuck transactions, but it is not completely reliable. + +The automatic data availability switching behavior is a somewhat new feature which may still have some bugs in it. diff --git a/op-chain-ops/Makefile b/op-chain-ops/Makefile index ad595b65e367..4dac7c1b6bb7 100644 --- a/op-chain-ops/Makefile +++ b/op-chain-ops/Makefile @@ -1,54 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') +DEPRECATED_TARGETS := ecotone-scalar receipt-reference-builder test op-deployer fuzz sync-standard-version -# Find the github tag that points to this commit.
If none are found, set the version string to "untagged" -# Prioritizes release tag, if one exists, over tags suffixed with "-rc" -VERSION ?= $(shell tags=$$(git tag --points-at $(GITCOMMIT) | grep '^op-deployer/' | sed 's/op-deployer\///' | sort -V); \ - preferred_tag=$$(echo "$$tags" | grep -v -- '-rc' | tail -n 1); \ - if [ -z "$$preferred_tag" ]; then \ - if [ -z "$$tags" ]; then \ - echo "untagged"; \ - else \ - echo "$$tags" | tail -n 1; \ - fi \ - else \ - echo $$preferred_tag; \ - fi) - -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-chain-ops/deployer/version.Version=$(VERSION) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-chain-ops/deployer/version.Meta=$(VERSION_META) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169 -ifeq ($(shell uname),Darwin) - FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic -endif - -ecotone-scalar: - go build -o ./bin/ecotone-scalar ./cmd/ecotone-scalar/main.go - -receipt-reference-builder: - go build -o ./bin/receipt-reference-builder ./cmd/receipt-reference-builder/*.go - -test: - go test ./... - -op-deployer: - GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-deployer ../op-deployer/cmd/op-deployer/main.go - -fuzz: - printf "%s\n" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzEncodeDecodeWithdrawal ./crossdomain" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzEncodeDecodeLegacyWithdrawal ./crossdomain" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzAliasing ./crossdomain" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzVersionedNonce ./crossdomain" \ - | parallel -j 8 {} - - -sync-standard-version: - curl -Lo ./deployer/opcm/standard-versions-mainnet.toml https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions-mainnet.toml - curl -Lo ./deployer/opcm/standard-versions-sepolia.toml https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions-sepolia.toml - -.PHONY: test fuzz op-deployer sync-standard-version +include ../just/deprecated.mk diff --git a/op-chain-ops/README.md b/op-chain-ops/README.md new file mode 100644 index 000000000000..98d0d974722d --- /dev/null +++ b/op-chain-ops/README.md @@ -0,0 +1,80 @@ +# `op-chain-ops` + +Issues: [monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-chain-ops) + +Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-chain-ops) + +This is an OP Stack utils package for chain operations, +ranging from EVM tooling to chain generation. + +Packages: +- `clients`: utils for chain checker tools. +- `cmd`: upgrade validation tools, debug tools, attributes formatting tools. +- `crossdomain`: utils to interact with L1 <> L2 cross-domain messages. +- `devkeys`: generate OP-Stack development keys from a common source. +- `foundry`: utils to read foundry artifacts. +- `genesis`: OP Stack genesis-configs generation, pre OPCM. +- `interopgen`: interop test-chain genesis config generation. +- `script`: foundry-like solidity scripting environment in Go. +- `solc`: utils to read solidity compiler artifacts data. 
+- `srcmap`: utils for solidity source-maps loaded from foundry-artifacts. + +## Usage + +Upgrade checks and chain utilities can be found in `./cmd`: +these are not officially published in OP-Stack monorepo releases, +but can be built from source. + +Utils: +```text +cmd/ +├── check-canyon - Checks for Canyon network upgrade +├── check-delta - Checks for Delta network upgrade +├── check-deploy-config - Checks of the (legacy) Deploy Config +├── check-derivation - Check that transactions can be confirmed and safety can be consolidated +├── check-ecotone - Checks for Ecotone network upgrade +├── check-fjord - Checks for Fjord network upgrade +├── deposit-hash - Determine the L2 deposit tx hash, based on log event(s) emitted by an L1 tx. +├── ecotone-scalar - Translate between serialized and human-readable L1 fee scalars (introduced in Ecotone upgrade). +├── op-simulate - Simulate a remote transaction in a local Geth EVM for block-processing debugging. +├── protocol-version - Translate between serialized and human-readable protocol versions. +├── receipt-reference-builder - Receipt data collector for pre-Canyon deposit-nonce metadata. +└── unclaimed-credits - Utility to inspect credits of resolved fault-proof games. +``` + +## Product + +### Optimization target + +Provide chain-setup and inspection tools for deployment, upgrades, and testing. +This includes `op-deployer`, OP-Contracts-Manager (OPCM), upgrade-check scripts, and `op-e2e` testing. + +### Vision + +- Upgrade checking scripts should become more extensible, and maybe be bundled in a single check-script CLI tool. +- Serve chain inspection/processing building-blocks for test setups and tooling like op-deployer. +- `interopgen` is meant to be temporary, and to consolidate with `op-deployer`. + This change depends largely on the future of `op-e2e`, + where system tests may be replaced in favor of tests set up by `op-e2e`. +- `script` is a Go version of `forge` script, with hooks and customization options, + for better integration into tooling such as `op-deployer`. + This package should evolve to serve testing and `op-deployer` as best as possible; + it is not a full `forge` replacement. +- `genesis` will shrink over time, as more of the genesis responsibilities are automated away into + the protocol through system-transactions, and tooling such as `op-deployer` and OPCM. + +## Design principles + +- Provide high-quality bindings to accelerate testing and tooling development. +- Minimal introspection into fragile solidity details. + +There is a trade-off here in how minimal the tooling is: +generally we aim to provide dedicated functionality in Go for better integration, +if the target tool is a significant Go service of its own. +If not, then `op-chain-ops` should not be extended, and the design of the target tool should be adjusted instead. + +## Testing + +- Upgrade checks are tested against live devnet/testnet upgrades, before testing against mainnet. + Testing here aims to expand to end-to-end testing, for better integrated test feedback of these tools. +- Utils have unit-test coverage of their own, and are used widely in end-to-end testing itself.
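As a small illustration of the kind of data the `foundry`/`solc` helper packages mentioned above work with, the sketch below reads a Forge build artifact using only the standard library. The artifact path and the two fields shown are assumptions based on Forge's usual output layout; this does not use or represent the op-chain-ops API itself:

```go
// Minimal, self-contained sketch of reading a Forge artifact JSON.
// The path and field layout are assumptions; op-chain-ops provides richer typed readers.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type forgeArtifact struct {
	ABI      json.RawMessage `json:"abi"`
	Bytecode struct {
		Object string `json:"object"` // 0x-prefixed creation bytecode
	} `json:"bytecode"`
}

func main() {
	// Illustrative path; Forge typically writes artifacts under <out>/<File>.sol/<Contract>.json.
	data, err := os.ReadFile("out/Counter.sol/Counter.json")
	if err != nil {
		fmt.Println("read artifact:", err)
		return
	}
	var art forgeArtifact
	if err := json.Unmarshal(data, &art); err != nil {
		fmt.Println("decode artifact:", err)
		return
	}
	fmt.Println("creation bytecode length:", len(art.Bytecode.Object))
}
```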
diff --git a/op-chain-ops/contracts/common.go b/op-chain-ops/contracts/common.go deleted file mode 100644 index a1eb5b471dbb..000000000000 --- a/op-chain-ops/contracts/common.go +++ /dev/null @@ -1,21 +0,0 @@ -package contracts - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/urfave/cli/v2" -) - -// parseAddress will parse a [common.Address] from a [cli.Context] and return -// an error if the configured address is not correct. -func parseAddress(ctx *cli.Context, name string) (common.Address, error) { - value := ctx.String(name) - if value == "" { - return common.Address{}, nil - } - if !common.IsHexAddress(value) { - return common.Address{}, fmt.Errorf("invalid address: %s", value) - } - return common.HexToAddress(value), nil -} diff --git a/op-chain-ops/contracts/contracts.go b/op-chain-ops/contracts/contracts.go deleted file mode 100644 index def9c3746201..000000000000 --- a/op-chain-ops/contracts/contracts.go +++ /dev/null @@ -1,50 +0,0 @@ -package contracts - -import ( - "github.com/ethereum/go-ethereum/common" - "github.com/urfave/cli/v2" -) - -// Addresses represents the address values of various contracts. The values can -// be easily populated via a [cli.Context]. -type Addresses struct { - AddressManager common.Address - OptimismPortal common.Address - L1StandardBridge common.Address - L1CrossDomainMessenger common.Address - CanonicalTransactionChain common.Address - StateCommitmentChain common.Address -} - -// NewAddresses populates an Addresses struct given a [cli.Context]. -// This is useful for writing scripts that interact with smart contracts. -func NewAddresses(ctx *cli.Context) (*Addresses, error) { - var addresses Addresses - var err error - - addresses.AddressManager, err = parseAddress(ctx, "address-manager-address") - if err != nil { - return nil, err - } - addresses.OptimismPortal, err = parseAddress(ctx, "optimism-portal-address") - if err != nil { - return nil, err - } - addresses.L1StandardBridge, err = parseAddress(ctx, "l1-standard-bridge-address") - if err != nil { - return nil, err - } - addresses.L1CrossDomainMessenger, err = parseAddress(ctx, "l1-crossdomain-messenger-address") - if err != nil { - return nil, err - } - addresses.CanonicalTransactionChain, err = parseAddress(ctx, "canonical-transaction-chain-address") - if err != nil { - return nil, err - } - addresses.StateCommitmentChain, err = parseAddress(ctx, "state-commitment-chain-address") - if err != nil { - return nil, err - } - return &addresses, nil -} diff --git a/op-chain-ops/genesis/config.go b/op-chain-ops/genesis/config.go index 7ed0a6e2f682..69d8999880bb 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -854,12 +854,6 @@ func (d *L1DependenciesConfig) CheckAddresses(dependencyContext DependencyContex // The genesis generation may log warnings, do a best-effort support attempt, // or ignore these attributes completely. type LegacyDeployConfig struct { - // CliqueSignerAddress represents the signer address for the clique consensus engine. - // It is used in the multi-process devnet to sign blocks. - CliqueSignerAddress common.Address `json:"cliqueSignerAddress"` - // L1UseClique represents whether or not to use the clique consensus engine. - L1UseClique bool `json:"l1UseClique"` - // DeploymentWaitConfirmations is the number of confirmations to wait during // deployment. This is DEPRECATED and should be removed in a future PR. 
DeploymentWaitConfirmations int `json:"deploymentWaitConfirmations"` diff --git a/op-chain-ops/genesis/genesis.go b/op-chain-ops/genesis/genesis.go index c852fa0fc100..ee7836852358 100644 --- a/op-chain-ops/genesis/genesis.go +++ b/op-chain-ops/genesis/genesis.go @@ -11,7 +11,6 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" ) @@ -141,25 +140,12 @@ func NewL1Genesis(config *DeployConfig) (*core.Genesis, error) { LondonBlock: big.NewInt(0), ArrowGlacierBlock: big.NewInt(0), GrayGlacierBlock: big.NewInt(0), - ShanghaiTime: nil, - CancunTime: nil, - } - - extraData := make([]byte, 0) - if config.L1UseClique { - // warning: clique has an overly strict block header timestamp check against the system wallclock, - // causing blocks to get scheduled as "future block" and not get mined instantly when produced. - chainConfig.Clique = ¶ms.CliqueConfig{ - Period: config.L1BlockTime, - Epoch: 30000, - } - extraData = append(append(make([]byte, 32), config.CliqueSignerAddress[:]...), make([]byte, crypto.SignatureLength)...) - } else { - chainConfig.MergeNetsplitBlock = big.NewInt(0) - chainConfig.TerminalTotalDifficulty = big.NewInt(0) - chainConfig.TerminalTotalDifficultyPassed = true - chainConfig.ShanghaiTime = u64ptr(0) - chainConfig.CancunTime = u64ptr(0) + ShanghaiTime: u64ptr(0), + CancunTime: u64ptr(0), + // To enable post-Merge consensus at genesis + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + TerminalTotalDifficultyPassed: true, } gasLimit := config.L1GenesisBlockGasLimit @@ -178,7 +164,7 @@ func NewL1Genesis(config *DeployConfig) (*core.Genesis, error) { if timestamp == 0 { timestamp = hexutil.Uint64(time.Now().Unix()) } - if !config.L1UseClique && config.L1CancunTimeOffset != nil { + if config.L1CancunTimeOffset != nil { cancunTime := uint64(timestamp) + uint64(*config.L1CancunTimeOffset) chainConfig.CancunTime = &cancunTime } @@ -187,7 +173,7 @@ func NewL1Genesis(config *DeployConfig) (*core.Genesis, error) { Config: &chainConfig, Nonce: uint64(config.L1GenesisBlockNonce), Timestamp: uint64(timestamp), - ExtraData: extraData, + ExtraData: make([]byte, 0), GasLimit: uint64(gasLimit), Difficulty: difficulty.ToInt(), Mixhash: config.L1GenesisBlockMixHash, diff --git a/op-chain-ops/genesis/testdata/test-deploy-config-full.json b/op-chain-ops/genesis/testdata/test-deploy-config-full.json index 814fff245b0d..7fe9a78e7154 100644 --- a/op-chain-ops/genesis/testdata/test-deploy-config-full.json +++ b/op-chain-ops/genesis/testdata/test-deploy-config-full.json @@ -6,8 +6,6 @@ "maxSequencerDrift": 20, "sequencerWindowSize": 100, "channelTimeout": 30, - "l1UseClique": false, - "cliqueSignerAddress": "0x0000000000000000000000000000000000000000", "customGasTokenAddress": "0x0000000000000000000000000000000000000000", "p2pSequencerAddress": "0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc", "batchInboxAddress": "0x42000000000000000000000000000000000000ff", diff --git a/op-chain-ops/interopgen/configs.go b/op-chain-ops/interopgen/configs.go index 942588a9929c..948d9daa3053 100644 --- a/op-chain-ops/interopgen/configs.go +++ b/op-chain-ops/interopgen/configs.go @@ -35,7 +35,7 @@ type SuperFaultProofConfig struct { } type OPCMImplementationsConfig struct { - Release string + L1ContractsRelease string FaultProof SuperFaultProofConfig diff --git a/op-chain-ops/interopgen/deploy.go 
b/op-chain-ops/interopgen/deploy.go index 476406821ea5..e915b724e10b 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -170,10 +170,9 @@ func DeploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*Sup ProofMaturityDelaySeconds: superCfg.Implementations.FaultProof.ProofMaturityDelaySeconds, DisputeGameFinalityDelaySeconds: superCfg.Implementations.FaultProof.DisputeGameFinalityDelaySeconds, MipsVersion: superCfg.Implementations.FaultProof.MipsVersion, - Release: superCfg.Implementations.Release, + L1ContractsRelease: superCfg.Implementations.L1ContractsRelease, SuperchainConfigProxy: superDeployment.SuperchainConfigProxy, ProtocolVersionsProxy: superDeployment.ProtocolVersionsProxy, - OpcmProxyOwner: superDeployment.SuperchainProxyAdmin, UseInterop: superCfg.Implementations.UseInterop, StandardVersionsToml: standard.VersionsMainnetData, }) @@ -210,7 +209,7 @@ func DeployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployme BasefeeScalar: cfg.GasPriceOracleBaseFeeScalar, BlobBaseFeeScalar: cfg.GasPriceOracleBlobBaseFeeScalar, L2ChainId: new(big.Int).SetUint64(cfg.L2ChainID), - OpcmProxy: superDeployment.OpcmProxy, + Opcm: superDeployment.Opcm, SaltMixer: cfg.SaltMixer, GasLimit: cfg.GasLimit, DisputeGameType: cfg.DisputeGameType, diff --git a/op-chain-ops/interopgen/deployments.go b/op-chain-ops/interopgen/deployments.go index ba18fbfdf9bd..f98a0554d870 100644 --- a/op-chain-ops/interopgen/deployments.go +++ b/op-chain-ops/interopgen/deployments.go @@ -9,8 +9,7 @@ type L1Deployment struct { } type Implementations struct { - OpcmProxy common.Address `json:"OPCMProxy"` - OpcmImpl common.Address `json:"OPCMImpl"` + Opcm common.Address `json:"OPCM"` DelayedWETHImpl common.Address `json:"DelayedWETHImpl"` OptimismPortalImpl common.Address `json:"OptimismPortalImpl"` PreimageOracleSingleton common.Address `json:"PreimageOracleSingleton"` diff --git a/op-chain-ops/interopgen/recipe.go b/op-chain-ops/interopgen/recipe.go index 8983ee72da8e..e70c69e9f481 100644 --- a/op-chain-ops/interopgen/recipe.go +++ b/op-chain-ops/interopgen/recipe.go @@ -69,7 +69,7 @@ func (r *InteropDevRecipe) Build(addrs devkeys.Addresses) (*WorldConfig, error) ProtocolVersionsOwner: superchainProtocolVersionsOwner, Deployer: superchainDeployer, Implementations: OPCMImplementationsConfig{ - Release: "dev", + L1ContractsRelease: "dev", FaultProof: SuperFaultProofConfig{ WithdrawalDelaySeconds: big.NewInt(604800), MinProposalSizeBytes: big.NewInt(10000), diff --git a/op-chain-ops/justfile b/op-chain-ops/justfile new file mode 100644 index 000000000000..a9c2bcad62da --- /dev/null +++ b/op-chain-ops/justfile @@ -0,0 +1,41 @@ +import '../just/go.just' + +# Build ldflags string +_VERSION_META_STR := if VERSION_META != "" { "+" + VERSION_META } else { "" } +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X github.com/ethereum-optimism/optimism/op-chain-ops/deployer/version.Version=" + VERSION + " " + \ + "-X github.com/ethereum-optimism/optimism/op-chain-ops/deployer/version.Meta=" + _VERSION_META_STR + " " + \ + "") + "'" + +# Build ecotone-scalar binary +ecotone-scalar: (go_build "./bin/ecotone-scalar" "./cmd/ecotone-scalar" "-ldflags" _LDFLAGSSTRING) + +# Build receipt-reference-builder binary +receipt-reference-builder: (go_build "./bin/receipt-reference-builder" "./cmd/receipt-reference-builder" "-ldflags" _LDFLAGSSTRING) + +# Run tests +test: (go_test "./...") + +# Build op-deployer 
binary +op-deployer: + just ../op-deployer/build + mkdir -p ./bin && ln -f ../op-deployer/bin/op-deployer ./bin/op-deployer + +# Run fuzzing tests +[private] +fuzz_task FUZZ TIME='10s': (go_fuzz FUZZ TIME "./crossdomain") + +fuzz: + printf "%s\n" \ + "FuzzEncodeDecodeWithdrawal" \ + "FuzzEncodeDecodeLegacyWithdrawal" \ + "FuzzAliasing" \ + "FuzzVersionedNonce" \ + | parallel -j {{PARALLEL_JOBS}} {{just_executable()}} fuzz_task {} + +# Sync standard versions +sync-standard-version: + curl -Lo ./deployer/opcm/standard-versions-mainnet.toml https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions-mainnet.toml + curl -Lo ./deployer/opcm/standard-versions-sepolia.toml https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions-sepolia.toml diff --git a/op-chain-ops/script/script.go b/op-chain-ops/script/script.go index 3ce493487f77..9a3d9ae80201 100644 --- a/op-chain-ops/script/script.go +++ b/op-chain-ops/script/script.go @@ -609,7 +609,7 @@ func (h *Host) onOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpCo }) } // Sanity check that top of the call-stack matches the scope context now - if len(h.callStack) == 0 || h.callStack[len(h.callStack)-1].Ctx != scopeCtx { + if h.callStack[len(h.callStack)-1].Ctx != scopeCtx { panic("scope context changed without call-frame pop/push") } cf := h.callStack[len(h.callStack)-1] diff --git a/op-chain-ops/solc/types.go b/op-chain-ops/solc/types.go index de6edb90d231..56ea47a9ad57 100644 --- a/op-chain-ops/solc/types.go +++ b/op-chain-ops/solc/types.go @@ -1,7 +1,6 @@ package solc import ( - "encoding/json" "fmt" "github.com/ethereum/go-ethereum/accounts/abi" @@ -129,5 +128,158 @@ type Ast struct { Id uint `json:"id"` License string `json:"license"` NodeType string `json:"nodeType"` - Nodes json.RawMessage `json:"nodes"` + Nodes []AstNode `json:"nodes"` + Src string `json:"src"` +} + +type AstNode struct { + Id int `json:"id"` + NodeType string `json:"nodeType"` + Src string `json:"src"` + Nodes []AstNode `json:"nodes,omitempty"` + Abstract bool `json:"abstract,omitempty"` + BaseContracts []AstBaseContract `json:"baseContracts,omitempty"` + CanonicalName string `json:"canonicalName,omitempty"` + ContractDependencies []int `json:"contractDependencies,omitempty"` + ContractKind string `json:"contractKind,omitempty"` + Documentation interface{} `json:"documentation,omitempty"` + FullyImplemented bool `json:"fullyImplemented,omitempty"` + LinearizedBaseContracts []int `json:"linearizedBaseContracts,omitempty"` + Name string `json:"name,omitempty"` + NameLocation string `json:"nameLocation,omitempty"` + Scope int `json:"scope,omitempty"` + UsedErrors []int `json:"usedErrors,omitempty"` + UsedEvents []int `json:"usedEvents,omitempty"` + + // Function specific + Body *AstBlock `json:"body,omitempty"` + Parameters *AstParameterList `json:"parameters,omitempty"` + ReturnParameters *AstParameterList `json:"returnParameters,omitempty"` + StateMutability string `json:"stateMutability,omitempty"` + Virtual bool `json:"virtual,omitempty"` + Visibility string `json:"visibility,omitempty"` + + // Variable specific + Constant bool `json:"constant,omitempty"` + Mutability string `json:"mutability,omitempty"` + StateVariable bool `json:"stateVariable,omitempty"` + StorageLocation string `json:"storageLocation,omitempty"` + TypeDescriptions *AstTypeDescriptions `json:"typeDescriptions,omitempty"` + TypeName *AstTypeName 
`json:"typeName,omitempty"` + + // Expression specific + Expression *Expression `json:"expression,omitempty"` + IsConstant bool `json:"isConstant,omitempty"` + IsLValue bool `json:"isLValue,omitempty"` + IsPure bool `json:"isPure,omitempty"` + LValueRequested bool `json:"lValueRequested,omitempty"` + + // Literal specific + HexValue string `json:"hexValue,omitempty"` + Kind string `json:"kind,omitempty"` + Value interface{} `json:"value,omitempty"` + + // Other fields + Arguments []Expression `json:"arguments,omitempty"` + Condition *Expression `json:"condition,omitempty"` + TrueBody *AstBlock `json:"trueBody,omitempty"` + FalseBody *AstBlock `json:"falseBody,omitempty"` + Operator string `json:"operator,omitempty"` +} + +type AstBaseContract struct { + BaseName *AstTypeName `json:"baseName"` + Id int `json:"id"` + NodeType string `json:"nodeType"` + Src string `json:"src"` +} + +type AstDocumentation struct { + Id int `json:"id"` + NodeType string `json:"nodeType"` + Src string `json:"src"` + Text string `json:"text"` +} + +type AstBlock struct { + Id int `json:"id"` + NodeType string `json:"nodeType"` + Src string `json:"src"` + Statements []AstNode `json:"statements"` +} + +type AstParameterList struct { + Id int `json:"id"` + NodeType string `json:"nodeType"` + Parameters []AstNode `json:"parameters"` + Src string `json:"src"` +} + +type AstTypeDescriptions struct { + TypeIdentifier string `json:"typeIdentifier"` + TypeString string `json:"typeString"` +} + +type AstTypeName struct { + Id int `json:"id"` + Name string `json:"name"` + NodeType string `json:"nodeType"` + Src string `json:"src"` + StateMutability string `json:"stateMutability,omitempty"` + TypeDescriptions *AstTypeDescriptions `json:"typeDescriptions,omitempty"` +} + +type Expression struct { + Id int `json:"id"` + NodeType string `json:"nodeType"` + Src string `json:"src"` + TypeDescriptions *AstTypeDescriptions `json:"typeDescriptions,omitempty"` + Name string `json:"name,omitempty"` + OverloadedDeclarations []int `json:"overloadedDeclarations,omitempty"` + ReferencedDeclaration int `json:"referencedDeclaration,omitempty"` + ArgumentTypes []AstTypeDescriptions `json:"argumentTypes,omitempty"` +} + +type ForgeArtifact struct { + Abi abi.ABI `json:"abi"` + Bytecode CompilerOutputBytecode `json:"bytecode"` + DeployedBytecode CompilerOutputBytecode `json:"deployedBytecode"` + MethodIdentifiers map[string]string `json:"methodIdentifiers"` + RawMetadata string `json:"rawMetadata"` + Metadata ForgeCompilerMetadata `json:"metadata"` + StorageLayout *StorageLayout `json:"storageLayout,omitempty"` + Ast Ast `json:"ast"` + Id int `json:"id"` +} + +type ForgeCompilerMetadata struct { + Compiler ForgeCompilerInfo `json:"compiler"` + Language string `json:"language"` + Output ForgeMetadataOutput `json:"output"` + Settings CompilerSettings `json:"settings"` + Sources map[string]ForgeSourceInfo `json:"sources"` + Version int `json:"version"` +} + +type ForgeCompilerInfo struct { + Version string `json:"version"` +} + +type ForgeMetadataOutput struct { + Abi abi.ABI `json:"abi"` + DevDoc ForgeDocObject `json:"devdoc"` + UserDoc ForgeDocObject `json:"userdoc"` +} + +type ForgeSourceInfo struct { + Keccak256 string `json:"keccak256"` + License string `json:"license"` + Urls []string `json:"urls"` +} + +type ForgeDocObject struct { + Kind string `json:"kind"` + Methods map[string]interface{} `json:"methods"` + Notice string `json:"notice,omitempty"` + Version int `json:"version"` } diff --git a/op-challenger/Makefile 
b/op-challenger/Makefile index 1a7422a4c879..35a202b99c4c 100644 --- a/op-challenger/Makefile +++ b/op-challenger/Makefile @@ -1,35 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -VERSION ?= v0.0.0 +DEPRECATED_TARGETS := op-challenger clean test fuzz visualize -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-challenger/version.Version=$(VERSION) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-challenger/version.Meta=$(VERSION_META) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169 -ifeq ($(shell uname),Darwin) - FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic -endif - -op-challenger: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-challenger ./cmd - -fuzz: - go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzKeccak ./game/keccak/matrix - -clean: - rm bin/op-challenger - -test: - go test -v ./... - -visualize: - ./scripts/visualize.sh - -.PHONY: \ - op-challenger \ - clean \ - test \ - visualize +include ../just/deprecated.mk diff --git a/op-challenger/README.md b/op-challenger/README.md index 97fafab8e6f7..5efbbd85b3d3 100644 --- a/op-challenger/README.md +++ b/op-challenger/README.md @@ -1,7 +1,7 @@ # op-challenger The `op-challenger` is a modular **op-stack** challenge agent written in -golang for dispute games including, but not limited to,attestation games, +golang for dispute games including, but not limited to, attestation games, fault games, and validity games. To learn more about dispute games, visit the [fault proof specs][proof-specs]. diff --git a/op-challenger/cmd/main_test.go b/op-challenger/cmd/main_test.go index 1de92468306c..52c9c6c4d9f0 100644 --- a/op-challenger/cmd/main_test.go +++ b/op-challenger/cmd/main_test.go @@ -90,9 +90,9 @@ func TestL1Beacon(t *testing.T) { func TestTraceType(t *testing.T) { t.Run("Default", func(t *testing.T) { - expectedDefault := types.TraceTypeCannon - cfg := configForArgs(t, addRequiredArgsExcept(expectedDefault, "--trace-type")) - require.Equal(t, []types.TraceType{expectedDefault}, cfg.TraceTypes) + expectedDefault := []types.TraceType{types.TraceTypeCannon, types.TraceTypeAsteriscKona} + cfg := configForArgs(t, addRequiredArgsForMultipleTracesExcept(expectedDefault, "--trace-type")) + require.Equal(t, expectedDefault, cfg.TraceTypes) }) for _, traceType := range types.TraceTypes { @@ -177,6 +177,13 @@ func TestNetwork(t *testing.T) { t.Run("UnknownNetwork", func(t *testing.T) { verifyArgsInvalid(t, "unknown chain: not-a-network", addRequiredArgsExcept(types.TraceTypeAlphabet, "--game-factory-address", "--network=not-a-network")) }) + + t.Run("ChainIDAllowedWhenGameFactoryAddressSupplied", func(t *testing.T) { + addr := common.Address{0xbb, 0xcc, 0xdd} + cfg := configForArgs(t, addRequiredArgsExcept(types.TraceTypeAlphabet, "--game-factory-address", "--network=1234", "--game-factory-address="+addr.Hex())) + require.Equal(t, addr, cfg.GameFactoryAddress) + require.Equal(t, "1234", cfg.Cannon.Network) + }) } func TestGameAllowlist(t *testing.T) { @@ -988,6 +995,12 @@ func addRequiredArgsExcept(traceType types.TraceType, name string, optionalArgs return append(toArgList(req), optionalArgs...) 
} +func addRequiredArgsForMultipleTracesExcept(traceType []types.TraceType, name string, optionalArgs ...string) []string { + req := requiredArgsMultiple(traceType) + delete(req, name) + return append(toArgList(req), optionalArgs...) +} + func addRequiredArgsExceptArr(traceType types.TraceType, names []string, optionalArgs ...string) []string { req := requiredArgs(traceType) for _, name := range names { @@ -996,6 +1009,16 @@ func addRequiredArgsExceptArr(traceType types.TraceType, names []string, optiona return append(toArgList(req), optionalArgs...) } +func requiredArgsMultiple(traceType []types.TraceType) map[string]string { + args := make(map[string]string) + for _, t := range traceType { + for name, value := range requiredArgs(t) { + args[name] = value + } + } + return args +} + func requiredArgs(traceType types.TraceType) map[string]string { args := map[string]string{ "--l1-eth-rpc": l1EthRpc, diff --git a/op-challenger/config/config.go b/op-challenger/config/config.go index 9f4a5f5f375b..6faf7c9fdca3 100644 --- a/op-challenger/config/config.go +++ b/op-challenger/config/config.go @@ -6,6 +6,7 @@ import ( "net/url" "runtime" "slices" + "strconv" "time" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" @@ -230,7 +231,10 @@ func (c Config) Check() error { return ErrCannonNetworkAndL2Genesis } if ch := chaincfg.ChainByName(c.Cannon.Network); ch == nil { - return fmt.Errorf("%w: %v", ErrCannonNetworkUnknown, c.Cannon.Network) + // Check if this looks like a chain ID that could be a custom chain configuration. + if _, err := strconv.ParseUint(c.Cannon.Network, 10, 32); err != nil { + return fmt.Errorf("%w: %v", ErrCannonNetworkUnknown, c.Cannon.Network) + } } } if c.CannonAbsolutePreState == "" && c.CannonAbsolutePreStateBaseURL == nil { diff --git a/op-challenger/config/config_test.go b/op-challenger/config/config_test.go index 0d922b77fe9b..bf910ab83e85 100644 --- a/op-challenger/config/config_test.go +++ b/op-challenger/config/config_test.go @@ -232,6 +232,18 @@ func TestCannonRequiredArgs(t *testing.T) { require.ErrorIs(t, cfg.Check(), ErrCannonNetworkUnknown) }) + t.Run(fmt.Sprintf("TestNetworkMayBeAnyChainID-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) + cfg.Cannon.Network = "467294" + require.NoError(t, cfg.Check()) + }) + + t.Run(fmt.Sprintf("TestNetworkInvalidWhenNotEntirelyNumeric-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) + cfg.Cannon.Network = "467294a" + require.ErrorIs(t, cfg.Check(), ErrCannonNetworkUnknown) + }) + t.Run(fmt.Sprintf("TestDebugInfoEnabled-%v", traceType), func(t *testing.T) { cfg := validConfig(traceType) require.True(t, cfg.Cannon.DebugInfo) diff --git a/op-challenger/flags/flags.go b/op-challenger/flags/flags.go index 041ee3601d29..19d6bc79042e 100644 --- a/op-challenger/flags/flags.go +++ b/op-challenger/flags/flags.go @@ -65,7 +65,7 @@ var ( Name: "trace-type", Usage: "The trace types to support. 
Valid options: " + openum.EnumString(types.TraceTypes), EnvVars: prefixEnvVars("TRACE_TYPE"), - Value: cli.NewStringSlice(types.TraceTypeCannon.String()), + Value: cli.NewStringSlice(types.TraceTypeCannon.String(), types.TraceTypeAsteriscKona.String()), } DatadirFlag = &cli.StringFlag{ Name: "datadir", diff --git a/op-challenger/game/fault/trace/vm/kona_server_executor.go b/op-challenger/game/fault/trace/vm/kona_server_executor.go index baaa253088fc..ffb74d9a31b8 100644 --- a/op-challenger/game/fault/trace/vm/kona_server_executor.go +++ b/op-challenger/game/fault/trace/vm/kona_server_executor.go @@ -9,8 +9,7 @@ import ( ) type KonaExecutor struct { - nativeMode bool - clientBinPath string + nativeMode bool } var _ OracleServerExecutor = (*KonaExecutor)(nil) @@ -19,8 +18,8 @@ func NewKonaExecutor() *KonaExecutor { return &KonaExecutor{nativeMode: false} } -func NewNativeKonaExecutor(clientBinPath string) *KonaExecutor { - return &KonaExecutor{nativeMode: true, clientBinPath: clientBinPath} +func NewNativeKonaExecutor() *KonaExecutor { + return &KonaExecutor{nativeMode: true} } func (s *KonaExecutor) OracleCommand(cfg Config, dataDir string, inputs utils.LocalGameInputs) ([]string, error) { @@ -37,7 +36,7 @@ func (s *KonaExecutor) OracleCommand(cfg Config, dataDir string, inputs utils.Lo } if s.nativeMode { - args = append(args, "--exec", s.clientBinPath) + args = append(args, "--native") } else { args = append(args, "--server") args = append(args, "--data-dir", dataDir) diff --git a/op-challenger/justfile b/op-challenger/justfile new file mode 100644 index 000000000000..ccb2b5fa7e91 --- /dev/null +++ b/op-challenger/justfile @@ -0,0 +1,25 @@ +import '../just/go.just' + +# Build ldflags string +_VERSION_META_STR := if VERSION_META != "" { "+" + VERSION_META } else { "" } +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X github.com/ethereum-optimism/optimism/op-challenger/version.Version=" + VERSION + " " + \ + "-X github.com/ethereum-optimism/optimism/op-challenger/version.Meta=" + _VERSION_META_STR + " " + \ + "") + "'" + +BINARY := "./bin/op-challenger" + +# Build op-challenger binary +op-challenger: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) + +# Run fuzzing tests +fuzz: (go_fuzz "FuzzKeccak" "10s" "./game/keccak/matrix") + +# Clean build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") diff --git a/op-challenger/sender/sender_test.go b/op-challenger/sender/sender_test.go index 5169ae67192a..1d4e17047b74 100644 --- a/op-challenger/sender/sender_test.go +++ b/op-challenger/sender/sender_test.go @@ -129,8 +129,14 @@ func (s *stubTxMgr) Send(ctx context.Context, candidate txmgr.TxCandidate) (*typ return <-ch, nil } +// SendAsync simply wraps Send to make it non blocking. It does not guarantee transaction nonce ordering, +// unlike the production txMgr. 
func (s *stubTxMgr) SendAsync(ctx context.Context, candidate txmgr.TxCandidate, ch chan txmgr.SendResponse) { - panic("unimplemented") + go func() { + receipt, err := s.Send(ctx, candidate) + resp := txmgr.SendResponse{Receipt: receipt, Err: err} + ch <- resp + }() } func (s *stubTxMgr) recordTx(candidate txmgr.TxCandidate) chan *types.Receipt { diff --git a/op-conductor/Makefile b/op-conductor/Makefile index 6360df3da0c8..c76af2d563c6 100644 --- a/op-conductor/Makefile +++ b/op-conductor/Makefile @@ -1,26 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -VERSION ?= v0.0.0 +DEPRECATED_TARGETS := op-conductor clean test generate-mocks -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X main.Version=$(VERSION) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -op-conductor: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-conductor ./cmd - -clean: - rm bin/op-conductor - -test: - go test -v ./... - -generate-mocks: - go generate ./... - -.PHONY: \ - op-conductor \ - clean \ - test \ - generate-mocks +include ../just/deprecated.mk diff --git a/op-conductor/README.md b/op-conductor/README.md index 497156b8eee8..c436d0248ace 100644 --- a/op-conductor/README.md +++ b/op-conductor/README.md @@ -1,3 +1,4 @@ + # op-conductor op-conductor is an auxiliary service designed to enhance the reliability and availability of a sequencer in @@ -20,7 +21,7 @@ For configuration and runbook, please refer to [RUNBOOK.md](./RUNBOOK.md) ### Architecture Typically you can setup a 3 nodes sequencer cluster, each one with op-conductor running alongside the sequencer in different regions / AZs. -Below diagram showcaes how conductor interacts with relevant op-stack components. +Below diagram showcases how conductor interacts with relevant op-stack components. ![op-conductor setup](./assets/setup.svg) @@ -93,6 +94,6 @@ There are 2 situations we need to consider. 1. Leadership transfer triggered by raft consensus protocol (network partition, etc) 1. In this case, a new leader will be elected regardless of its sync status, it could be behind for a few blocks - 2. The solution is to simple, wait until the elected leader catch up to tip (same as the FSM tip) + 2. The solution is to simply wait until the elected leader catches up to tip (same as the FSM tip) 2. Leadership transfer triggered by us (Conductor detected unhealthy sequencer) 1. In this case, we have the choice to determine which node to transfer leadership to, we can simply query the latest block from candidates within the network and transfer directly to the one with the most up to date blocks. 
diff --git a/op-conductor/RUNBOOK.md b/op-conductor/RUNBOOK.md index 8e80fde6b6c0..00b8757a338e 100644 --- a/op-conductor/RUNBOOK.md +++ b/op-conductor/RUNBOOK.md @@ -28,7 +28,7 @@ OP_CONDUCTOR_NODE_RPC= # for example, http://op-node:8545 OP_CONDUCTOR_EXECUTION_RPC= # for example, http://op-geth:8545 OP_CONDUCTOR_NETWORK= # for example, base-mainnet, op-mainnet, etc, should be same as OP_NODE_NETWORK OP_CONDUCTOR_HEALTHCHECK_INTERVAL= # in seconds -OP_CONDUCTOR_HEALTHCHECK_UNSAFE_INTERVAL= # Interval allowed between unsafe head and now measured in seconds in seconds +OP_CONDUCTOR_HEALTHCHECK_UNSAFE_INTERVAL= # Interval allowed between unsafe head and now measured in seconds OP_CONDUCTOR_HEALTHCHECK_MIN_PEER_COUNT= # minimum number of peers required to be considered healthy OP_CONDUCTOR_RAFT_BOOTSTRAP=true/false # set to true if you want to bootstrap the raft cluster ``` diff --git a/op-conductor/consensus/iface.go b/op-conductor/consensus/iface.go index e0dcb6efd5a7..2de955c1201e 100644 --- a/op-conductor/consensus/iface.go +++ b/op-conductor/consensus/iface.go @@ -72,9 +72,9 @@ type Consensus interface { // ClusterMembership returns the current cluster membership configuration and associated version. ClusterMembership() (*ClusterMembership, error) - // CommitPayload commits latest unsafe payload to the FSM in a strongly consistent fashion. + // CommitUnsafePayload commits latest unsafe payload to the FSM in a strongly consistent fashion. CommitUnsafePayload(payload *eth.ExecutionPayloadEnvelope) error - // LatestUnsafeBlock returns the latest unsafe payload from FSM in a strongly consistent fashion. + // LatestUnsafePayload returns the latest unsafe payload from FSM in a strongly consistent fashion. LatestUnsafePayload() (*eth.ExecutionPayloadEnvelope, error) // Shutdown shuts down the consensus protocol client. 
diff --git a/op-conductor/justfile b/op-conductor/justfile new file mode 100644 index 000000000000..7ee6ef39bcfa --- /dev/null +++ b/op-conductor/justfile @@ -0,0 +1,23 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X main.Version=" + VERSION + " " + \ + "") + "'" + +BINARY := "./bin/op-conductor" + +# Build op-conductor binary +op-conductor: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) + +# Clean build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") + +# Generate mocks +generate-mocks: (go_generate "./...") \ No newline at end of file diff --git a/op-deployer/Dockerfile.default b/op-deployer/Dockerfile.default index cc5ca8d95e4f..0821bc0c48ee 100644 --- a/op-deployer/Dockerfile.default +++ b/op-deployer/Dockerfile.default @@ -1,3 +1,9 @@ FROM debian:bookworm-20240812-slim ENTRYPOINT ["/op-deployer"] -COPY op-deployer /op-deployer \ No newline at end of file +COPY op-deployer /op-deployer + +# Install ca-certificates so that HTTPS requests work +RUN apt-get update && apt-get install -y ca-certificates + +# Symlink onto the PATH +RUN ln -s /op-deployer /usr/local/bin/op-deployer \ No newline at end of file diff --git a/op-deployer/pkg/deployer/apply.go b/op-deployer/pkg/deployer/apply.go index f02716c655a5..b4ff343de351 100644 --- a/op-deployer/pkg/deployer/apply.go +++ b/op-deployer/pkg/deployer/apply.go @@ -141,6 +141,9 @@ func ApplyPipeline( opts ApplyPipelineOpts, ) error { intent := opts.Intent + if err := intent.Check(); err != nil { + return err + } st := opts.State progressor := func(curr, total int64) { diff --git a/op-deployer/pkg/deployer/artifacts/downloader.go b/op-deployer/pkg/deployer/artifacts/downloader.go index 1303adbe86aa..7e566952e09a 100644 --- a/op-deployer/pkg/deployer/artifacts/downloader.go +++ b/op-deployer/pkg/deployer/artifacts/downloader.go @@ -3,8 +3,10 @@ package artifacts import ( "archive/tar" "bufio" + "bytes" "compress/gzip" "context" + "crypto/sha256" "errors" "fmt" "io" @@ -15,6 +17,8 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" "github.com/ethereum/go-ethereum/log" @@ -41,15 +45,50 @@ func LogProgressor(lgr log.Logger) DownloadProgressor { func Download(ctx context.Context, loc *Locator, progress DownloadProgressor) (foundry.StatDirFs, CleanupFunc, error) { var u *url.URL var err error + var checker integrityChecker if loc.IsTag() { u, err = standard.ArtifactsURLForTag(loc.Tag) if err != nil { return nil, nil, fmt.Errorf("failed to get standard artifacts URL for tag %s: %w", loc.Tag, err) } + + hash, err := standard.ArtifactsHashForTag(loc.Tag) + if err != nil { + return nil, nil, fmt.Errorf("failed to get standard artifacts hash for tag %s: %w", loc.Tag, err) + } + + checker = &hashIntegrityChecker{hash: hash} } else { u = loc.URL + checker = &noopIntegrityChecker{} } + return downloadURL(ctx, u, progress, checker) +} + +type integrityChecker interface { + CheckIntegrity(data []byte) error +} + +type hashIntegrityChecker struct { + hash common.Hash +} + +func (h *hashIntegrityChecker) CheckIntegrity(data []byte) error { + hash := sha256.Sum256(data) + if hash != h.hash { + return fmt.Errorf("integrity check failed - expected: %x, got: %x", h.hash, hash) + } + return nil +} + +type noopIntegrityChecker struct{} + +func (noopIntegrityChecker) CheckIntegrity(data []byte) error { + return nil 
+} + +func downloadURL(ctx context.Context, u *url.URL, progress DownloadProgressor, checker integrityChecker) (foundry.StatDirFs, CleanupFunc, error) { switch u.Scheme { case "http", "https": req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) @@ -78,7 +117,16 @@ func Download(ctx context.Context, loc *Locator, progress DownloadProgressor) (f total: resp.ContentLength, } - gr, err := gzip.NewReader(pr) + data, err := io.ReadAll(pr) + if err != nil { + return nil, nil, fmt.Errorf("failed to read response body: %w", err) + } + + if err := checker.CheckIntegrity(data); err != nil { + return nil, nil, fmt.Errorf("failed to check integrity: %w", err) + } + + gr, err := gzip.NewReader(bytes.NewReader(data)) if err != nil { return nil, nil, fmt.Errorf("failed to create gzip reader: %w", err) } @@ -111,7 +159,6 @@ type progressReader struct { } func (pr *progressReader) Read(p []byte) (int, error) { - n, err := pr.r.Read(p) pr.curr += int64(n) if pr.progress != nil && time.Since(pr.lastPrint) > 1*time.Second { diff --git a/op-deployer/pkg/deployer/artifacts/downloader_test.go b/op-deployer/pkg/deployer/artifacts/downloader_test.go index e66b41f96a81..cf4ef4742c94 100644 --- a/op-deployer/pkg/deployer/artifacts/downloader_test.go +++ b/op-deployer/pkg/deployer/artifacts/downloader_test.go @@ -9,10 +9,12 @@ import ( "os" "testing" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" ) -func TestDownloadArtifacts(t *testing.T) { +func TestDownloadArtifacts_MockArtifacts(t *testing.T) { f, err := os.OpenFile("testdata/artifacts.tar.gz", os.O_RDONLY, 0o644) require.NoError(t, err) defer f.Close() @@ -21,6 +23,9 @@ func TestDownloadArtifacts(t *testing.T) { w.WriteHeader(http.StatusOK) _, err := io.Copy(w, f) require.NoError(t, err) + // Seek to beginning of file for next request + _, err = f.Seek(0, 0) + require.NoError(t, err) })) defer ts.Close() @@ -31,14 +36,50 @@ func TestDownloadArtifacts(t *testing.T) { URL: artifactsURL, } - fs, cleanup, err := Download(ctx, loc, nil) - require.NoError(t, err) - require.NotNil(t, fs) - defer func() { - require.NoError(t, cleanup()) - }() + t.Run("success", func(t *testing.T) { + fs, cleanup, err := Download(ctx, loc, nil) + require.NoError(t, err) + require.NotNil(t, fs) + defer func() { + require.NoError(t, cleanup()) + }() - info, err := fs.Stat("WETH98.sol/WETH98.json") - require.NoError(t, err) - require.Greater(t, info.Size(), int64(0)) + info, err := fs.Stat("WETH98.sol/WETH98.json") + require.NoError(t, err) + require.Greater(t, info.Size(), int64(0)) + }) + + t.Run("bad integrity", func(t *testing.T) { + _, _, err := downloadURL(ctx, loc.URL, nil, &hashIntegrityChecker{ + hash: common.Hash{'B', 'A', 'D'}, + }) + require.Error(t, err) + require.ErrorContains(t, err, "integrity check failed") + }) + + t.Run("ok integrity", func(t *testing.T) { + _, _, err := downloadURL(ctx, loc.URL, nil, &hashIntegrityChecker{ + hash: common.HexToHash("0x0f814df0c4293aaaadd468ac37e6c92f0b40fd21df848076835cb2c21d2a516f"), + }) + require.NoError(t, err) + }) +} + +func TestDownloadArtifacts_TaggedVersions(t *testing.T) { + tags := []string{ + "op-contracts/v1.6.0", + "op-contracts/v1.7.0-beta.1+l2-contracts", + } + for _, tag := range tags { + t.Run(tag, func(t *testing.T) { + t.Parallel() + + loc := MustNewLocatorFromTag(tag) + _, cleanup, err := Download(context.Background(), loc, nil) + t.Cleanup(func() { + require.NoError(t, cleanup()) + }) + require.NoError(t, err) + }) + } } diff --git 
a/op-deployer/pkg/deployer/artifacts/locator.go b/op-deployer/pkg/deployer/artifacts/locator.go index 160e8790420b..42b838f66a7c 100644 --- a/op-deployer/pkg/deployer/artifacts/locator.go +++ b/op-deployer/pkg/deployer/artifacts/locator.go @@ -24,6 +24,22 @@ var DefaultL2ContractsLocator = &Locator{ Tag: standard.DefaultL2ContractsTag, } +func NewLocatorFromTag(tag string) (*Locator, error) { + loc := new(Locator) + if err := loc.UnmarshalText([]byte("tag://" + tag)); err != nil { + return nil, fmt.Errorf("failed to unmarshal tag: %w", err) + } + return loc, nil +} + +func MustNewLocatorFromTag(tag string) *Locator { + loc, err := NewLocatorFromTag(tag) + if err != nil { + panic(err) + } + return loc +} + type Locator struct { URL *url.URL Tag string @@ -54,11 +70,7 @@ func (a *Locator) MarshalText() ([]byte, error) { return []byte(a.URL.String()), nil } - if a.Tag != "" { - return []byte("tag://" + a.Tag), nil - } - - return nil, fmt.Errorf("no URL, path or tag set") + return []byte("tag://" + a.Tag), nil } func (a *Locator) IsTag() bool { diff --git a/op-deployer/pkg/deployer/bootstrap/asterisc.go b/op-deployer/pkg/deployer/bootstrap/asterisc.go new file mode 100644 index 000000000000..b091f8c4d6f3 --- /dev/null +++ b/op-deployer/pkg/deployer/bootstrap/asterisc.go @@ -0,0 +1,197 @@ +package bootstrap + +import ( + "context" + "crypto/ecdsa" + "fmt" + "strings" + + "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum-optimism/optimism/op-chain-ops/script/forking" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/opcm" + opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/jsonutil" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" + "github.com/urfave/cli/v2" +) + +type AsteriscConfig struct { + L1RPCUrl string + PrivateKey string + Logger log.Logger + ArtifactsLocator *artifacts.Locator + + privateKeyECDSA *ecdsa.PrivateKey + + PreimageOracle common.Address +} + +func (c *AsteriscConfig) Check() error { + if c.L1RPCUrl == "" { + return fmt.Errorf("l1RPCUrl must be specified") + } + + if c.PrivateKey == "" { + return fmt.Errorf("private key must be specified") + } + + privECDSA, err := crypto.HexToECDSA(strings.TrimPrefix(c.PrivateKey, "0x")) + if err != nil { + return fmt.Errorf("failed to parse private key: %w", err) + } + c.privateKeyECDSA = privECDSA + + if c.Logger == nil { + return fmt.Errorf("logger must be specified") + } + + if c.ArtifactsLocator == nil { + return fmt.Errorf("artifacts locator must be specified") + } + + if c.PreimageOracle == (common.Address{}) { + return fmt.Errorf("preimage oracle must be specified") + } + + return nil +} + +func AsteriscCLI(cliCtx *cli.Context) error { + logCfg := oplog.ReadCLIConfig(cliCtx) + l := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg) + oplog.SetGlobalLogHandler(l.Handler()) + + l1RPCUrl := 
cliCtx.String(deployer.L1RPCURLFlagName) + privateKey := cliCtx.String(deployer.PrivateKeyFlagName) + artifactsURLStr := cliCtx.String(ArtifactsLocatorFlagName) + artifactsLocator := new(artifacts.Locator) + if err := artifactsLocator.UnmarshalText([]byte(artifactsURLStr)); err != nil { + return fmt.Errorf("failed to parse artifacts URL: %w", err) + } + + preimageOracle := common.HexToAddress(cliCtx.String(PreimageOracleFlagName)) + + ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) + + return Asterisc(ctx, AsteriscConfig{ + L1RPCUrl: l1RPCUrl, + PrivateKey: privateKey, + Logger: l, + ArtifactsLocator: artifactsLocator, + PreimageOracle: preimageOracle, + }) +} + +func Asterisc(ctx context.Context, cfg AsteriscConfig) error { + if err := cfg.Check(); err != nil { + return fmt.Errorf("invalid config for Asterisc: %w", err) + } + + lgr := cfg.Logger + progressor := func(curr, total int64) { + lgr.Info("artifacts download progress", "current", curr, "total", total) + } + + artifactsFS, cleanup, err := artifacts.Download(ctx, cfg.ArtifactsLocator, progressor) + if err != nil { + return fmt.Errorf("failed to download artifacts: %w", err) + } + defer func() { + if err := cleanup(); err != nil { + lgr.Warn("failed to clean up artifacts", "err", err) + } + }() + + l1Client, err := ethclient.Dial(cfg.L1RPCUrl) + if err != nil { + return fmt.Errorf("failed to connect to L1 RPC: %w", err) + } + + chainID, err := l1Client.ChainID(ctx) + if err != nil { + return fmt.Errorf("failed to get chain ID: %w", err) + } + + signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) + chainDeployer := crypto.PubkeyToAddress(cfg.privateKeyECDSA.PublicKey) + + bcaster, err := broadcaster.NewKeyedBroadcaster(broadcaster.KeyedBroadcasterOpts{ + Logger: lgr, + ChainID: chainID, + Client: l1Client, + Signer: signer, + From: chainDeployer, + }) + if err != nil { + return fmt.Errorf("failed to create broadcaster: %w", err) + } + + l1RPC, err := rpc.Dial(cfg.L1RPCUrl) + if err != nil { + return fmt.Errorf("failed to connect to L1 RPC: %w", err) + } + + l1Host, err := env.DefaultScriptHost( + bcaster, + lgr, + chainDeployer, + artifactsFS, + script.WithForkHook(func(cfg *script.ForkConfig) (forking.ForkSource, error) { + src, err := forking.RPCSourceByNumber(cfg.URLOrAlias, l1RPC, *cfg.BlockNumber) + if err != nil { + return nil, fmt.Errorf("failed to create RPC fork source: %w", err) + } + return forking.Cache(src), nil + }), + ) + if err != nil { + return fmt.Errorf("failed to create script host: %w", err) + } + + latest, err := l1Client.HeaderByNumber(ctx, nil) + if err != nil { + return fmt.Errorf("failed to get latest block: %w", err) + } + + if _, err := l1Host.CreateSelectFork( + script.ForkWithURLOrAlias("main"), + script.ForkWithBlockNumberU256(latest.Number), + ); err != nil { + return fmt.Errorf("failed to select fork: %w", err) + } + + dgo, err := opcm.DeployAsterisc( + l1Host, + opcm.DeployAsteriscInput{ + PreimageOracle: cfg.PreimageOracle, + }, + ) + if err != nil { + return fmt.Errorf("error deploying asterisc VM: %w", err) + } + + if _, err := bcaster.Broadcast(ctx); err != nil { + return fmt.Errorf("failed to broadcast: %w", err) + } + + lgr.Info("deployed asterisc VM") + + if err := jsonutil.WriteJSON(dgo, ioutil.ToStdOut()); err != nil { + return fmt.Errorf("failed to write output: %w", err) + } + return nil +} diff --git a/op-deployer/pkg/deployer/bootstrap/delayed_weth.go b/op-deployer/pkg/deployer/bootstrap/delayed_weth.go index 451b0741f245..1209043d31d7 
100644 --- a/op-deployer/pkg/deployer/bootstrap/delayed_weth.go +++ b/op-deployer/pkg/deployer/bootstrap/delayed_weth.go @@ -7,6 +7,8 @@ import ( "math/big" "strings" + "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum-optimism/optimism/op-chain-ops/script/forking" artifacts2 "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" @@ -26,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" "github.com/urfave/cli/v2" ) @@ -69,22 +72,31 @@ func DelayedWETHCLI(cliCtx *cli.Context) error { l := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg) oplog.SetGlobalLogHandler(l.Handler()) + config, err := NewDelayedWETHConfigFromClI(cliCtx, l) + if err != nil { + return err + } + + ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) + + return DelayedWETH(ctx, config) +} + +func NewDelayedWETHConfigFromClI(cliCtx *cli.Context, l log.Logger) (DelayedWETHConfig, error) { l1RPCUrl := cliCtx.String(deployer.L1RPCURLFlagName) privateKey := cliCtx.String(deployer.PrivateKeyFlagName) artifactsURLStr := cliCtx.String(ArtifactsLocatorFlagName) artifactsLocator := new(artifacts2.Locator) if err := artifactsLocator.UnmarshalText([]byte(artifactsURLStr)); err != nil { - return fmt.Errorf("failed to parse artifacts URL: %w", err) + return DelayedWETHConfig{}, fmt.Errorf("failed to parse artifacts URL: %w", err) } - - ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) - - return DelayedWETH(ctx, DelayedWETHConfig{ + config := DelayedWETHConfig{ L1RPCUrl: l1RPCUrl, PrivateKey: privateKey, Logger: l, ArtifactsLocator: artifactsLocator, - }) + } + return config, nil } func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { @@ -149,9 +161,9 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { return fmt.Errorf("failed to create broadcaster: %w", err) } - nonce, err := l1Client.NonceAt(ctx, chainDeployer, nil) + l1RPC, err := rpc.Dial(cfg.L1RPCUrl) if err != nil { - return fmt.Errorf("failed to get starting nonce: %w", err) + return fmt.Errorf("failed to connect to L1 RPC: %w", err) } host, err := env.DefaultScriptHost( @@ -159,11 +171,29 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { lgr, chainDeployer, artifactsFS, + script.WithForkHook(func(cfg *script.ForkConfig) (forking.ForkSource, error) { + src, err := forking.RPCSourceByNumber(cfg.URLOrAlias, l1RPC, *cfg.BlockNumber) + if err != nil { + return nil, fmt.Errorf("failed to create RPC fork source: %w", err) + } + return forking.Cache(src), nil + }), ) if err != nil { return fmt.Errorf("failed to create script host: %w", err) } - host.SetNonce(chainDeployer, nonce) + + latest, err := l1Client.HeaderByNumber(ctx, nil) + if err != nil { + return fmt.Errorf("failed to get latest block: %w", err) + } + + if _, err := host.CreateSelectFork( + script.ForkWithURLOrAlias("main"), + script.ForkWithBlockNumberU256(latest.Number), + ); err != nil { + return fmt.Errorf("failed to select fork: %w", err) + } var release string if cfg.ArtifactsLocator.IsTag() { diff --git a/op-deployer/pkg/deployer/bootstrap/delayed_weth_test.go b/op-deployer/pkg/deployer/bootstrap/delayed_weth_test.go new file mode 100644 index 000000000000..5e3b667b9b2a --- /dev/null +++ b/op-deployer/pkg/deployer/bootstrap/delayed_weth_test.go @@ -0,0 +1,45 @@ +package bootstrap + +import ( + "testing" + + 
"github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" + "github.com/urfave/cli/v2" +) + +func TestNewDelayedWETHConfigFromCLI(t *testing.T) { + ctx, err := parseCLIArgs(DelayedWETHFlags, + "--artifacts-locator", "tag://op-contracts/v1.6.0", + "--l1-rpc-url", "http://foo", + "--private-key", "0x123456") + require.NoError(t, err) + + logger := testlog.Logger(t, log.LvlInfo) + cfg, err := NewDelayedWETHConfigFromClI(ctx, logger) + require.NoError(t, err) + require.Same(t, logger, cfg.Logger) + require.Equal(t, "op-contracts/v1.6.0", cfg.ArtifactsLocator.Tag) + require.True(t, cfg.ArtifactsLocator.IsTag()) + require.Equal(t, "0x123456", cfg.PrivateKey) +} + +func parseCLIArgs(flags []cli.Flag, args ...string) (*cli.Context, error) { + app := cli.NewApp() + app.Flags = cliapp.ProtectFlags(flags) + var ctx *cli.Context + app.Action = func(c *cli.Context) error { + ctx = c + return nil + } + argsWithCmd := make([]string, len(args)+1) + argsWithCmd[0] = "bootstrap" + copy(argsWithCmd[1:], args) + err := app.Run(argsWithCmd) + if err != nil { + return nil, err + } + return ctx, nil +} diff --git a/op-deployer/pkg/deployer/bootstrap/dispute_game.go b/op-deployer/pkg/deployer/bootstrap/dispute_game.go index d3efe36f9ca8..441fd73d9ea2 100644 --- a/op-deployer/pkg/deployer/bootstrap/dispute_game.go +++ b/op-deployer/pkg/deployer/bootstrap/dispute_game.go @@ -6,7 +6,10 @@ import ( "fmt" "strings" + "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum-optimism/optimism/op-chain-ops/script/forking" artifacts2 "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" @@ -36,9 +39,7 @@ type DisputeGameConfig struct { privateKeyECDSA *ecdsa.PrivateKey - MinProposalSizeBytes uint64 - ChallengePeriodSeconds uint64 - MipsVersion uint64 + Vm common.Address GameKind string GameType uint32 AbsolutePrestate common.Hash @@ -84,22 +85,44 @@ func DisputeGameCLI(cliCtx *cli.Context) error { l := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg) oplog.SetGlobalLogHandler(l.Handler()) + cfg, err := NewDisputeGameConfigFromCLI(cliCtx, l) + if err != nil { + return err + } + ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) + return DisputeGame(ctx, cfg) +} + +func NewDisputeGameConfigFromCLI(cliCtx *cli.Context, l log.Logger) (DisputeGameConfig, error) { l1RPCUrl := cliCtx.String(deployer.L1RPCURLFlagName) privateKey := cliCtx.String(deployer.PrivateKeyFlagName) artifactsURLStr := cliCtx.String(ArtifactsLocatorFlagName) artifactsLocator := new(artifacts2.Locator) if err := artifactsLocator.UnmarshalText([]byte(artifactsURLStr)); err != nil { - return fmt.Errorf("failed to parse artifacts URL: %w", err) + return DisputeGameConfig{}, fmt.Errorf("failed to parse artifacts URL: %w", err) } - ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) - - return DisputeGame(ctx, DisputeGameConfig{ + cfg := DisputeGameConfig{ L1RPCUrl: l1RPCUrl, PrivateKey: privateKey, Logger: l, ArtifactsLocator: artifactsLocator, - }) + + Vm: common.HexToAddress(cliCtx.String(VmFlagName)), + GameKind: cliCtx.String(GameKindFlagName), + GameType: uint32(cliCtx.Uint64(GameTypeFlagName)), + AbsolutePrestate: common.HexToHash(cliCtx.String(AbsolutePrestateFlagName)), + MaxGameDepth: cliCtx.Uint64(MaxGameDepthFlagName), + SplitDepth: 
cliCtx.Uint64(SplitDepthFlagName), + ClockExtension: cliCtx.Uint64(ClockExtensionFlagName), + MaxClockDuration: cliCtx.Uint64(MaxClockDurationFlagName), + DelayedWethProxy: common.HexToAddress(cliCtx.String(DelayedWethProxyFlagName)), + AnchorStateRegistryProxy: common.HexToAddress(cliCtx.String(AnchorStateRegistryProxyFlagName)), + L2ChainId: cliCtx.Uint64(L2ChainIdFlagName), + Proposer: common.HexToAddress(cliCtx.String(ProposerFlagName)), + Challenger: common.HexToAddress(cliCtx.String(ChallengerFlagName)), + } + return cfg, nil } func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { @@ -126,6 +149,10 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { if err != nil { return fmt.Errorf("failed to connect to L1 RPC: %w", err) } + l1Rpc, err := rpc.Dial(cfg.L1RPCUrl) + if err != nil { + return fmt.Errorf("failed to connect to L1 RPC: %w", err) + } chainID, err := l1Client.ChainID(ctx) if err != nil { @@ -152,21 +179,34 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { return fmt.Errorf("failed to create broadcaster: %w", err) } - nonce, err := l1Client.NonceAt(ctx, chainDeployer, nil) - if err != nil { - return fmt.Errorf("failed to get starting nonce: %w", err) - } - host, err := env.DefaultScriptHost( bcaster, lgr, chainDeployer, artifactsFS, + script.WithForkHook(func(forkCfg *script.ForkConfig) (forking.ForkSource, error) { + src, err := forking.RPCSourceByNumber(forkCfg.URLOrAlias, l1Rpc, *forkCfg.BlockNumber) + if err != nil { + return nil, fmt.Errorf("failed to create RPC fork source: %w", err) + } + return forking.Cache(src), nil + }), ) if err != nil { - return fmt.Errorf("failed to create script host: %w", err) + return fmt.Errorf("failed to create L1 script host: %w", err) + } + + latest, err := l1Client.HeaderByNumber(ctx, nil) + if err != nil { + return fmt.Errorf("failed to get latest block: %w", err) + } + + if _, err := host.CreateSelectFork( + script.ForkWithURLOrAlias("main"), + script.ForkWithBlockNumberU256(latest.Number), + ); err != nil { + return fmt.Errorf("failed to select fork: %w", err) } - host.SetNonce(chainDeployer, nonce) var release string if cfg.ArtifactsLocator.IsTag() { @@ -176,15 +216,12 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { } lgr.Info("deploying dispute game", "release", release) - dgo, err := opcm.DeployDisputeGame( host, opcm.DeployDisputeGameInput{ Release: release, StandardVersionsToml: standardVersionsTOML, - MipsVersion: cfg.MipsVersion, - MinProposalSizeBytes: cfg.MinProposalSizeBytes, - ChallengePeriodSeconds: cfg.ChallengePeriodSeconds, + VmAddress: cfg.Vm, GameKind: cfg.GameKind, GameType: cfg.GameType, AbsolutePrestate: cfg.AbsolutePrestate, diff --git a/op-deployer/pkg/deployer/bootstrap/dispute_game_test.go b/op-deployer/pkg/deployer/bootstrap/dispute_game_test.go new file mode 100644 index 000000000000..e24ae3754dfe --- /dev/null +++ b/op-deployer/pkg/deployer/bootstrap/dispute_game_test.go @@ -0,0 +1,66 @@ +package bootstrap + +import ( + "reflect" + "testing" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func TestNewDisputeGameConfigFromCLI(t *testing.T) { + ctx, err := parseCLIArgs(DisputeGameFlags, + "--artifacts-locator", "tag://op-contracts/v1.6.0", + "--l1-rpc-url", "http://foo", + "--private-key", "0x123456", + + 
"--game-type", "2", + "--delayed-weth-proxy", common.Address{0xaa}.Hex(), + "--anchor-state-registry-proxy", common.Address{0xbb}.Hex(), + "--l2-chain-id", "901", + "--proposer", common.Address{0xcc}.Hex(), + "--challenger", common.Address{0xdd}.Hex(), + "--vm", common.Address{0xee}.Hex(), + ) + require.NoError(t, err) + + logger := testlog.Logger(t, log.LvlInfo) + cfg, err := NewDisputeGameConfigFromCLI(ctx, logger) + require.NoError(t, err) + require.Same(t, logger, cfg.Logger) + require.Equal(t, "op-contracts/v1.6.0", cfg.ArtifactsLocator.Tag) + require.True(t, cfg.ArtifactsLocator.IsTag()) + require.Equal(t, "0x123456", cfg.PrivateKey) + require.Equal(t, "FaultDisputeGame", cfg.GameKind) + require.Equal(t, uint32(2), cfg.GameType) + require.Equal(t, standard.DisputeAbsolutePrestate, cfg.AbsolutePrestate) + require.Equal(t, standard.DisputeMaxGameDepth, cfg.MaxGameDepth) + require.Equal(t, standard.DisputeSplitDepth, cfg.SplitDepth) + require.Equal(t, standard.DisputeClockExtension, cfg.ClockExtension) + require.Equal(t, standard.DisputeMaxClockDuration, cfg.MaxClockDuration) + require.Equal(t, common.Address{0xaa}, cfg.DelayedWethProxy) + require.Equal(t, common.Address{0xbb}, cfg.AnchorStateRegistryProxy) + require.Equal(t, common.Address{0xcc}, cfg.Proposer) + require.Equal(t, common.Address{0xdd}, cfg.Challenger) + require.Equal(t, common.Address{0xee}, cfg.Vm) + require.Equal(t, uint64(901), cfg.L2ChainId) + + // Check all fields are set to ensure any newly added fields don't get missed. + cfgRef := reflect.ValueOf(cfg) + cfgType := reflect.TypeOf(cfg) + var unsetFields []string + for i := 0; i < cfgRef.NumField(); i++ { + field := cfgType.Field(i) + if field.Type == reflect.TypeOf(cfg.privateKeyECDSA) { + // privateKeyECDSA is only set when Check() is called so skip it. 
+ continue + } + if cfgRef.Field(i).IsZero() { + unsetFields = append(unsetFields, field.Name) + } + } + require.Empty(t, unsetFields, "Found unset fields in config") +} diff --git a/op-deployer/pkg/deployer/bootstrap/flags.go b/op-deployer/pkg/deployer/bootstrap/flags.go index 73f99e7b4325..58fb7fc14368 100644 --- a/op-deployer/pkg/deployer/bootstrap/flags.go +++ b/op-deployer/pkg/deployer/bootstrap/flags.go @@ -16,6 +16,7 @@ const ( ProofMaturityDelaySecondsFlagName = "proof-maturity-delay-seconds" DisputeGameFinalityDelaySecondsFlagName = "dispute-game-finality-delay-seconds" MIPSVersionFlagName = "mips-version" + VmFlagName = "vm" GameKindFlagName = "game-kind" GameTypeFlagName = "game-type" AbsolutePrestateFlagName = "absolute-prestate" @@ -73,6 +74,11 @@ var ( EnvVars: deployer.PrefixEnvVar("MIPS_VERSION"), Value: standard.MIPSVersion, } + VmFlag = &cli.StringFlag{ + Name: VmFlagName, + Usage: "VM contract address.", + EnvVars: deployer.PrefixEnvVar("VM"), + } GameKindFlag = &cli.StringFlag{ Name: GameKindFlagName, Usage: "Game kind (FaultDisputeGame or PermissionedDisputeGame).", @@ -173,7 +179,7 @@ var DisputeGameFlags = []cli.Flag{ ArtifactsLocatorFlag, MinProposalSizeBytesFlag, ChallengePeriodSecondsFlag, - MIPSVersionFlag, + VmFlag, GameKindFlag, GameTypeFlag, AbsolutePrestateFlag, @@ -188,14 +194,17 @@ var DisputeGameFlags = []cli.Flag{ ChallengerFlag, } -var MIPSFlags = []cli.Flag{ +var BaseFPVMFlags = []cli.Flag{ deployer.L1RPCURLFlag, deployer.PrivateKeyFlag, ArtifactsLocatorFlag, PreimageOracleFlag, - MIPSVersionFlag, } +var MIPSFlags = append(BaseFPVMFlags, MIPSVersionFlag) + +var AsteriscFlags = BaseFPVMFlags + var Commands = []*cli.Command{ { Name: "opcm", @@ -221,4 +230,10 @@ var Commands = []*cli.Command{ Flags: cliapp.ProtectFlags(MIPSFlags), Action: MIPSCLI, }, + { + Name: "asterisc", + Usage: "Bootstrap an instance of Asterisc.", + Flags: cliapp.ProtectFlags(AsteriscFlags), + Action: AsteriscCLI, + }, } diff --git a/op-deployer/pkg/deployer/bootstrap/opcm.go b/op-deployer/pkg/deployer/bootstrap/opcm.go index 2f5976f304ad..89a8c3df5123 100644 --- a/op-deployer/pkg/deployer/bootstrap/opcm.go +++ b/op-deployer/pkg/deployer/bootstrap/opcm.go @@ -164,10 +164,6 @@ func OPCM(ctx context.Context, cfg OPCMConfig) error { if err != nil { return fmt.Errorf("error getting standard versions TOML: %w", err) } - opcmProxyOwnerAddr, err := standard.ManagerOwnerAddrFor(chainIDU64) - if err != nil { - return fmt.Errorf("error getting superchain proxy admin: %w", err) - } signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) chainDeployer := crypto.PubkeyToAddress(cfg.privateKeyECDSA.PublicKey) @@ -199,14 +195,14 @@ func OPCM(ctx context.Context, cfg OPCMConfig) error { } host.SetNonce(chainDeployer, nonce) - var release string + var l1ContractsRelease string if cfg.ArtifactsLocator.IsTag() { - release = cfg.ArtifactsLocator.Tag + l1ContractsRelease = cfg.ArtifactsLocator.Tag } else { - release = "dev" + l1ContractsRelease = "dev" } - lgr.Info("deploying OPCM", "release", release) + lgr.Info("deploying OPCM", "l1ContractsRelease", l1ContractsRelease) // We need to etch the Superchain addresses so that they have nonzero code // and the checks in the OPCM constructor pass. 
@@ -238,10 +234,9 @@ func OPCM(ctx context.Context, cfg OPCMConfig) error { ProofMaturityDelaySeconds: new(big.Int).SetUint64(cfg.ProofMaturityDelaySeconds), DisputeGameFinalityDelaySeconds: new(big.Int).SetUint64(cfg.DisputeGameFinalityDelaySeconds), MipsVersion: new(big.Int).SetUint64(cfg.MIPSVersion), - Release: release, + L1ContractsRelease: l1ContractsRelease, SuperchainConfigProxy: superchainConfigAddr, ProtocolVersionsProxy: protocolVersionsAddr, - OpcmProxyOwner: opcmProxyOwnerAddr, StandardVersionsToml: standardVersionsTOML, UseInterop: false, }, diff --git a/op-deployer/pkg/deployer/broadcaster/gas_estimator.go b/op-deployer/pkg/deployer/broadcaster/gas_estimator.go index abe76d027ec4..b04390fc8aa7 100644 --- a/op-deployer/pkg/deployer/broadcaster/gas_estimator.go +++ b/op-deployer/pkg/deployer/broadcaster/gas_estimator.go @@ -11,15 +11,20 @@ import ( var ( // baseFeePadFactor = 50% as a divisor baseFeePadFactor = big.NewInt(2) - // tipMulFactor = 20 as a multiplier - tipMulFactor = big.NewInt(20) + // tipMulFactor = 5 as a multiplier + tipMulFactor = big.NewInt(5) // dummyBlobFee is a dummy value for the blob fee. Since this gas estimator will never // post blobs, it's just set to 1. dummyBlobFee = big.NewInt(1) + // maxTip is the maximum tip that can be suggested by this estimator. + maxTip = big.NewInt(50 * 1e9) + // minTip is the minimum tip that can be suggested by this estimator. + minTip = big.NewInt(1 * 1e9) ) // DeployerGasPriceEstimator is a custom gas price estimator for use with op-deployer. -// It pads the base fee by 50% and multiplies the suggested tip by 20. +// It pads the base fee by 50% and multiplies the suggested tip by 5 up to a max of +// 50 gwei. func DeployerGasPriceEstimator(ctx context.Context, client txmgr.ETHBackend) (*big.Int, *big.Int, *big.Int, error) { chainHead, err := client.HeaderByNumber(ctx, nil) if err != nil { @@ -34,5 +39,14 @@ func DeployerGasPriceEstimator(ctx context.Context, client txmgr.ETHBackend) (*b baseFeePad := new(big.Int).Div(chainHead.BaseFee, baseFeePadFactor) paddedBaseFee := new(big.Int).Add(chainHead.BaseFee, baseFeePad) paddedTip := new(big.Int).Mul(tip, tipMulFactor) + + if paddedTip.Cmp(minTip) < 0 { + paddedTip.Set(minTip) + } + + if paddedTip.Cmp(maxTip) > 0 { + paddedTip.Set(maxTip) + } + return paddedTip, paddedBaseFee, dummyBlobFee, nil } diff --git a/op-deployer/pkg/deployer/broadcaster/keyed.go b/op-deployer/pkg/deployer/broadcaster/keyed.go index f5797d939156..c9bb27fcf0ce 100644 --- a/op-deployer/pkg/deployer/broadcaster/keyed.go +++ b/op-deployer/pkg/deployer/broadcaster/keyed.go @@ -90,6 +90,9 @@ func NewKeyedBroadcaster(cfg KeyedBroadcasterOpts) (*KeyedBroadcaster, error) { } func (t *KeyedBroadcaster) Hook(bcast script.Broadcast) { + if bcast.Type != script.BroadcastCreate2 && bcast.From != t.mgr.From() { + panic(fmt.Sprintf("invalid from for broadcast:%v, expected:%v", bcast.From, t.mgr.From())) + } t.mtx.Lock() t.bcasts = append(t.bcasts, bcast) t.mtx.Unlock() diff --git a/op-deployer/pkg/deployer/flags.go b/op-deployer/pkg/deployer/flags.go index 2611957f0ad3..a58d35bbbb8d 100644 --- a/op-deployer/pkg/deployer/flags.go +++ b/op-deployer/pkg/deployer/flags.go @@ -20,6 +20,7 @@ const ( OutdirFlagName = "outdir" PrivateKeyFlagName = "private-key" DeploymentStrategyFlagName = "deployment-strategy" + IntentConfigTypeFlagName = "intent-config-type" ) var ( @@ -35,7 +36,7 @@ var ( Name: L1ChainIDFlagName, Usage: "Chain ID of the L1 chain.", EnvVars: PrefixEnvVar("L1_CHAIN_ID"), - Value: 900, + Value: 11155111, 
} L2ChainIDsFlag = &cli.StringFlag{ Name: L2ChainIDsFlagName, @@ -62,6 +63,17 @@ var ( EnvVars: PrefixEnvVar("DEPLOYMENT_STRATEGY"), Value: string(state.DeploymentStrategyLive), } + IntentConfigTypeFlag = &cli.StringFlag{ + Name: IntentConfigTypeFlagName, + Usage: fmt.Sprintf("Intent config type to use. Options: %s (default), %s, %s, %s, %s", + state.IntentConfigTypeStandard, + state.IntentConfigTypeCustom, + state.IntentConfigTypeStrict, + state.IntentConfigTypeStandardOverrides, + state.IntentConfigTypeStrictOverrides), + EnvVars: PrefixEnvVar("INTENT_CONFIG_TYPE"), + Value: string(state.IntentConfigTypeStandard), + } ) var GlobalFlags = append([]cli.Flag{}, oplog.CLIFlags(EnvVarPrefix)...) @@ -71,6 +83,7 @@ var InitFlags = []cli.Flag{ L2ChainIDsFlag, WorkdirFlag, DeploymentStrategyFlag, + IntentConfigTypeFlag, } var ApplyFlags = []cli.Flag{ diff --git a/op-deployer/pkg/deployer/init.go b/op-deployer/pkg/deployer/init.go index 3a8ae27d2ae7..deca136558cb 100644 --- a/op-deployer/pkg/deployer/init.go +++ b/op-deployer/pkg/deployer/init.go @@ -7,19 +7,17 @@ import ( "path" "strings" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" op_service "github.com/ethereum-optimism/optimism/op-service" - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum/go-ethereum/common" "github.com/urfave/cli/v2" ) type InitConfig struct { DeploymentStrategy state.DeploymentStrategy + IntentConfigType state.IntentConfigType L1ChainID uint64 Outdir string L2ChainIDs []common.Hash @@ -51,6 +49,7 @@ func InitCLI() func(ctx *cli.Context) error { l1ChainID := ctx.Uint64(L1ChainIDFlagName) outdir := ctx.String(OutdirFlagName) l2ChainIDsRaw := ctx.String(L2ChainIDsFlagName) + intentConfigType := ctx.String(IntentConfigTypeFlagName) if len(l2ChainIDsRaw) == 0 { return fmt.Errorf("must specify at least one L2 chain ID") @@ -68,6 +67,7 @@ func InitCLI() func(ctx *cli.Context) error { err := Init(InitConfig{ DeploymentStrategy: state.DeploymentStrategy(deploymentStrategy), + IntentConfigType: state.IntentConfigType(intentConfigType), L1ChainID: l1ChainID, Outdir: outdir, L2ChainIDs: l2ChainIDs, @@ -86,55 +86,12 @@ func Init(cfg InitConfig) error { return fmt.Errorf("invalid config for init: %w", err) } - intent := &state.Intent{ - DeploymentStrategy: cfg.DeploymentStrategy, - L1ChainID: cfg.L1ChainID, - FundDevAccounts: true, - L1ContractsLocator: artifacts.DefaultL1ContractsLocator, - L2ContractsLocator: artifacts.DefaultL2ContractsLocator, - } - - l1ChainIDBig := intent.L1ChainIDBig() - - dk, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + intent, err := state.NewIntent(cfg.IntentConfigType, cfg.DeploymentStrategy, cfg.L1ChainID, cfg.L2ChainIDs) if err != nil { - return fmt.Errorf("failed to create dev keys: %w", err) - } - - addrFor := func(key devkeys.Key) common.Address { - // The error below should never happen, so panic if it does. 
- addr, err := dk.Address(key) - if err != nil { - panic(err) - } - return addr - } - intent.SuperchainRoles = &state.SuperchainRoles{ - ProxyAdminOwner: addrFor(devkeys.L1ProxyAdminOwnerRole.Key(l1ChainIDBig)), - ProtocolVersionsOwner: addrFor(devkeys.SuperchainProtocolVersionsOwner.Key(l1ChainIDBig)), - Guardian: addrFor(devkeys.SuperchainConfigGuardianKey.Key(l1ChainIDBig)), - } - - for _, l2ChainID := range cfg.L2ChainIDs { - l2ChainIDBig := l2ChainID.Big() - intent.Chains = append(intent.Chains, &state.ChainIntent{ - ID: l2ChainID, - BaseFeeVaultRecipient: addrFor(devkeys.BaseFeeVaultRecipientRole.Key(l2ChainIDBig)), - L1FeeVaultRecipient: addrFor(devkeys.L1FeeVaultRecipientRole.Key(l2ChainIDBig)), - SequencerFeeVaultRecipient: addrFor(devkeys.SequencerFeeVaultRecipientRole.Key(l2ChainIDBig)), - Eip1559Denominator: 50, - Eip1559Elasticity: 6, - Roles: state.ChainRoles{ - L1ProxyAdminOwner: addrFor(devkeys.L1ProxyAdminOwnerRole.Key(l2ChainIDBig)), - L2ProxyAdminOwner: addrFor(devkeys.L2ProxyAdminOwnerRole.Key(l2ChainIDBig)), - SystemConfigOwner: addrFor(devkeys.SystemConfigOwner.Key(l2ChainIDBig)), - UnsafeBlockSigner: addrFor(devkeys.SequencerP2PRole.Key(l2ChainIDBig)), - Batcher: addrFor(devkeys.BatcherRole.Key(l2ChainIDBig)), - Proposer: addrFor(devkeys.ProposerRole.Key(l2ChainIDBig)), - Challenger: addrFor(devkeys.ChallengerRole.Key(l2ChainIDBig)), - }, - }) + return err } + intent.DeploymentStrategy = cfg.DeploymentStrategy + intent.ConfigType = cfg.IntentConfigType st := &state.State{ Version: 1, diff --git a/op-deployer/pkg/deployer/inspect/l1.go b/op-deployer/pkg/deployer/inspect/l1.go index d0a4f88172d1..4883e83486c3 100644 --- a/op-deployer/pkg/deployer/inspect/l1.go +++ b/op-deployer/pkg/deployer/inspect/l1.go @@ -45,7 +45,7 @@ type OpChainDeployment struct { } type ImplementationsDeployment struct { - OpcmProxyAddress common.Address `json:"opcmProxyAddress"` + OpcmAddress common.Address `json:"opcmAddress"` DelayedWETHImplAddress common.Address `json:"delayedWETHImplAddress"` OptimismPortalImplAddress common.Address `json:"optimismPortalImplAddress"` PreimageOracleSingletonAddress common.Address `json:"preimageOracleSingletonAddress"` @@ -113,7 +113,7 @@ func L1(globalState *state.State, chainID common.Hash) (*L1Contracts, error) { // DelayedWETHPermissionlessGameProxyAddress: chainState.DelayedWETHPermissionlessGameProxyAddress, }, ImplementationsDeployment: ImplementationsDeployment{ - OpcmProxyAddress: globalState.ImplementationsDeployment.OpcmProxyAddress, + OpcmAddress: globalState.ImplementationsDeployment.OpcmAddress, DelayedWETHImplAddress: globalState.ImplementationsDeployment.DelayedWETHImplAddress, OptimismPortalImplAddress: globalState.ImplementationsDeployment.OptimismPortalImplAddress, PreimageOracleSingletonAddress: globalState.ImplementationsDeployment.PreimageOracleSingletonAddress, diff --git a/op-deployer/pkg/deployer/inspect/semvers.go b/op-deployer/pkg/deployer/inspect/semvers.go index da666096ee18..48e16d21dbc4 100644 --- a/op-deployer/pkg/deployer/inspect/semvers.go +++ b/op-deployer/pkg/deployer/inspect/semvers.go @@ -8,6 +8,10 @@ import ( "regexp" "time" + "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" @@ -25,8 +29,6 @@ import ( "github.com/urfave/cli/v2" ) -var versionSelector = 
[]byte{0x54, 0xfd, 0x4d, 0x50} - func L2SemversCLI(cliCtx *cli.Context) error { cliCfg, err := readConfig(cliCtx) if err != nil { @@ -67,6 +69,60 @@ func L2SemversCLI(cliCtx *cli.Context) error { } }() + ps, err := L2Semvers(L2SemversConfig{ + Lgr: l, + Artifacts: artifactsFS, + ChainState: chainState, + }) + if err != nil { + return fmt.Errorf("failed to get L2 semvers: %w", err) + } + + if err := jsonutil.WriteJSON(ps, ioutil.ToStdOutOrFileOrNoop(cliCfg.Outfile, 0o666)); err != nil { + return fmt.Errorf("failed to write rollup config: %w", err) + } + + return nil +} + +type L2SemversConfig struct { + Lgr log.Logger + Artifacts foundry.StatDirFs + ChainState *state.ChainState +} + +type L2PredeploySemvers struct { + L2ToL1MessagePasser string + DeployerWhitelist string + WETH string + L2CrossDomainMessenger string + L2StandardBridge string + SequencerFeeVault string + OptimismMintableERC20Factory string + L1BlockNumber string + GasPriceOracle string + L1Block string + LegacyMessagePasser string + L2ERC721Bridge string + OptimismMintableERC721Factory string + BaseFeeVault string + L1FeeVault string + SchemaRegistry string + EAS string + CrossL2Inbox string + L2toL2CrossDomainMessenger string + SuperchainWETH string + ETHLiquidity string + SuperchainTokenBridge string + OptimismMintableERC20 string + OptimismMintableERC721 string +} + +func L2Semvers(cfg L2SemversConfig) (*L2PredeploySemvers, error) { + l := cfg.Lgr + artifactsFS := cfg.Artifacts + chainState := cfg.ChainState + host, err := env.DefaultScriptHost( broadcaster.NoopBroadcaster(), l, @@ -74,85 +130,89 @@ func L2SemversCLI(cliCtx *cli.Context) error { artifactsFS, ) if err != nil { - return fmt.Errorf("failed to create script host: %w", err) + return nil, fmt.Errorf("failed to create script host: %w", err) } host.ImportState(chainState.Allocs.Data) - addr := common.Address{19: 0x01} - type contractToCheck struct { - Address common.Address - Name string + Address common.Address + FieldPtr *string + Name string } - contractsOutput := make(map[string]string) + var ps L2PredeploySemvers - // The gov token and the proxy admin do not have semvers. 
contracts := []contractToCheck{ - {predeploys.L2ToL1MessagePasserAddr, "L2ToL1MessagePasser"}, - {predeploys.DeployerWhitelistAddr, "DeployerWhitelist"}, - {predeploys.WETHAddr, "WETH"}, - {predeploys.L2CrossDomainMessengerAddr, "L2CrossDomainMessenger"}, - {predeploys.L2StandardBridgeAddr, "L2StandardBridge"}, - {predeploys.SequencerFeeVaultAddr, "SequencerFeeVault"}, - {predeploys.OptimismMintableERC20FactoryAddr, "OptimismMintableERC20Factory"}, - {predeploys.L1BlockNumberAddr, "L1BlockNumber"}, - {predeploys.GasPriceOracleAddr, "GasPriceOracle"}, - {predeploys.L1BlockAddr, "L1Block"}, - {predeploys.LegacyMessagePasserAddr, "LegacyMessagePasser"}, - {predeploys.L2ERC721BridgeAddr, "L2ERC721Bridge"}, - {predeploys.OptimismMintableERC721FactoryAddr, "OptimismMintableERC721Factory"}, - {predeploys.BaseFeeVaultAddr, "BaseFeeVault"}, - {predeploys.L1FeeVaultAddr, "L1FeeVault"}, - {predeploys.SchemaRegistryAddr, "SchemaRegistry"}, - {predeploys.EASAddr, "EAS"}, - {predeploys.WETHAddr, "WETH"}, + {predeploys.L2ToL1MessagePasserAddr, &ps.L2ToL1MessagePasser, "L2ToL1MessagePasser"}, + {predeploys.DeployerWhitelistAddr, &ps.DeployerWhitelist, "DeployerWhitelist"}, + {predeploys.WETHAddr, &ps.WETH, "WETH"}, + {predeploys.L2CrossDomainMessengerAddr, &ps.L2CrossDomainMessenger, "L2CrossDomainMessenger"}, + {predeploys.L2StandardBridgeAddr, &ps.L2StandardBridge, "L2StandardBridge"}, + {predeploys.SequencerFeeVaultAddr, &ps.SequencerFeeVault, "SequencerFeeVault"}, + {predeploys.OptimismMintableERC20FactoryAddr, &ps.OptimismMintableERC20Factory, "OptimismMintableERC20Factory"}, + {predeploys.L1BlockNumberAddr, &ps.L1BlockNumber, "L1BlockNumber"}, + {predeploys.GasPriceOracleAddr, &ps.GasPriceOracle, "GasPriceOracle"}, + {predeploys.L1BlockAddr, &ps.L1Block, "L1Block"}, + {predeploys.LegacyMessagePasserAddr, &ps.LegacyMessagePasser, "LegacyMessagePasser"}, + {predeploys.L2ERC721BridgeAddr, &ps.L2ERC721Bridge, "L2ERC721Bridge"}, + {predeploys.OptimismMintableERC721FactoryAddr, &ps.OptimismMintableERC721Factory, "OptimismMintableERC721Factory"}, + {predeploys.BaseFeeVaultAddr, &ps.BaseFeeVault, "BaseFeeVault"}, + {predeploys.L1FeeVaultAddr, &ps.L1FeeVault, "L1FeeVault"}, + {predeploys.SchemaRegistryAddr, &ps.SchemaRegistry, "SchemaRegistry"}, + {predeploys.EASAddr, &ps.EAS, "EAS"}, } for _, contract := range contracts { - data, _, err := host.Call( - addr, - contract.Address, - bytes.Clone(versionSelector), - 1_000_000_000, - uint256.NewInt(0), - ) + semver, err := ReadSemver(host, contract.Address) if err != nil { - return fmt.Errorf("failed to call version on %s: %w", contract.Name, err) - } - - // The second 32 bytes contain the length of the string - length := new(big.Int).SetBytes(data[32:64]).Int64() - // Start of the string data (after offset and length) - stringStart := 64 - stringEnd := int64(stringStart) + length - - // Bounds check - if stringEnd > int64(len(data)) { - return fmt.Errorf("string data out of bounds") + return nil, fmt.Errorf("failed to read semver for %s: %w", contract.Name, err) } - contractsOutput[contract.Name] = string(data[stringStart:stringEnd]) + *contract.FieldPtr = semver } erc20Semver, err := findSemverBytecode(host, predeploys.OptimismMintableERC20FactoryAddr) if err == nil { - contractsOutput["OptimismMintableERC20"] = erc20Semver + ps.OptimismMintableERC20 = erc20Semver } else { l.Warn("failed to find semver for OptimismMintableERC20", "err", err) } erc721Semver, err := findSemverBytecode(host, predeploys.OptimismMintableERC721FactoryAddr) if err == nil { - 
contractsOutput["OptimismMintableERC721"] = erc721Semver + ps.OptimismMintableERC721 = erc721Semver } else { l.Warn("failed to find semver for OptimismMintableERC721", "err", err) } - if err := jsonutil.WriteJSON(contractsOutput, ioutil.ToStdOutOrFileOrNoop(cliCfg.Outfile, 0o666)); err != nil { - return fmt.Errorf("failed to write rollup config: %w", err) + return &ps, nil +} + +var versionSelector = []byte{0x54, 0xfd, 0x4d, 0x50} + +func ReadSemver(host *script.Host, addr common.Address) (string, error) { + data, _, err := host.Call( + common.Address{19: 0x01}, + addr, + bytes.Clone(versionSelector), + 1_000_000_000, + uint256.NewInt(0), + ) + if err != nil { + return "", fmt.Errorf("failed to call version on %s: %w", addr, err) } - return nil + // The second 32 bytes contain the length of the string + length := new(big.Int).SetBytes(data[32:64]).Int64() + // Start of the string data (after offset and length) + stringStart := 64 + stringEnd := int64(stringStart) + length + + // Bounds check + if stringEnd > int64(len(data)) { + return "", fmt.Errorf("string data out of bounds") + } + + return string(data[stringStart:stringEnd]), nil } const patternLen = 24 diff --git a/op-deployer/pkg/deployer/inspect/superchain_registry.go b/op-deployer/pkg/deployer/inspect/superchain_registry.go index 7a6fe384db64..c8d57907d9fd 100644 --- a/op-deployer/pkg/deployer/inspect/superchain_registry.go +++ b/op-deployer/pkg/deployer/inspect/superchain_registry.go @@ -28,6 +28,10 @@ func SuperchainRegistryCLI(cliCtx *cli.Context) error { return fmt.Errorf("failed to read intent: %w", err) } + if err := globalIntent.Check(); err != nil { + return fmt.Errorf("intent check failed: %w", err) + } + envVars := map[string]string{} envVars["SCR_CHAIN_NAME"] = "" envVars["SCR_CHAIN_SHORT_NAME"] = "" @@ -161,7 +165,7 @@ func createAddressList(l1Contracts *L1Contracts, appliedIntent *state.Intent, ch // Fault proof contracts AnchorStateRegistryProxy: superchain.Address(l1Contracts.OpChainDeployment.AnchorStateRegistryProxyAddress), - DelayedWETHProxy: superchain.Address(l1Contracts.OpChainDeployment.L1CrossDomainMessengerProxyAddress), + DelayedWETHProxy: superchain.Address(l1Contracts.OpChainDeployment.DelayedWETHPermissionedGameProxyAddress), DisputeGameFactoryProxy: superchain.Address(l1Contracts.OpChainDeployment.DisputeGameFactoryProxyAddress), FaultDisputeGame: superchain.Address(l1Contracts.OpChainDeployment.FaultDisputeGameAddress), MIPS: superchain.Address(l1Contracts.ImplementationsDeployment.MipsSingletonAddress), diff --git a/op-deployer/pkg/deployer/integration_test/apply_test.go b/op-deployer/pkg/deployer/integration_test/apply_test.go index 215687c5cfc0..a6ae4dab3ad8 100644 --- a/op-deployer/pkg/deployer/integration_test/apply_test.go +++ b/op-deployer/pkg/deployer/integration_test/apply_test.go @@ -1,17 +1,23 @@ package integration_test import ( + "bufio" "bytes" + "compress/gzip" "context" "crypto/rand" "encoding/hex" + "encoding/json" "fmt" "log/slog" + "maps" "math/big" "os" "testing" "time" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/retryproxy" + altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/inspect" "github.com/ethereum-optimism/optimism/op-node/rollup" @@ -64,6 +70,8 @@ network_params: genesis_delay: 0 ` +const defaultL1ChainID uint64 = 77799777 + type deployerKey struct{} func (d *deployerKey) HDPath() string { @@ -94,21 +102,22 @@ func TestEndToEndApply(t *testing.T) { l1Client, err := ethclient.Dial(rpcURL) 
require.NoError(t, err) - depKey := new(deployerKey) - l1ChainID := big.NewInt(77799777) - dk, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + pk, err := crypto.HexToECDSA("ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80") require.NoError(t, err) - pk, err := dk.Secret(depKey) + + l1ChainID := new(big.Int).SetUint64(defaultL1ChainID) + dk, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) require.NoError(t, err) l2ChainID1 := uint256.NewInt(1) l2ChainID2 := uint256.NewInt(2) loc, _ := testutil.LocalArtifacts(t) - intent, st := newIntent(t, l1ChainID, dk, l2ChainID1, loc, loc) - cg := ethClientCodeGetter(ctx, l1Client) - t.Run("initial chain", func(t *testing.T) { + t.Run("two chains one after another", func(t *testing.T) { + intent, st := newIntent(t, l1ChainID, dk, l2ChainID1, loc, loc) + cg := ethClientCodeGetter(ctx, l1Client) + require.NoError(t, deployer.ApplyPipeline( ctx, deployer.ApplyPipelineOpts{ @@ -121,11 +130,6 @@ func TestEndToEndApply(t *testing.T) { }, )) - validateSuperchainDeployment(t, st, cg) - validateOPChainDeployment(t, cg, st, intent) - }) - - t.Run("subsequent chain", func(t *testing.T) { // create a new environment with wiped state to ensure we can continue using the // state from the previous deployment intent.Chains = append(intent.Chains, newChainIntent(t, dk, l1ChainID, l2ChainID2)) @@ -142,14 +146,43 @@ func TestEndToEndApply(t *testing.T) { }, )) + validateSuperchainDeployment(t, st, cg) validateOPChainDeployment(t, cg, st, intent) }) + + t.Run("chain with tagged artifacts", func(t *testing.T) { + intent, st := newIntent(t, l1ChainID, dk, l2ChainID1, loc, loc) + intent.L1ContractsLocator = artifacts.DefaultL1ContractsLocator + intent.L2ContractsLocator = artifacts.DefaultL2ContractsLocator + + require.ErrorIs(t, deployer.ApplyPipeline( + ctx, + deployer.ApplyPipelineOpts{ + L1RPCUrl: rpcURL, + DeployerPrivateKey: pk, + Intent: intent, + State: st, + Logger: lgr, + StateWriter: pipeline.NoopStateWriter(), + }, + ), pipeline.ErrRefusingToDeployTaggedReleaseWithoutOPCM) + }) } func TestApplyExistingOPCM(t *testing.T) { + t.Run("mainnet", func(t *testing.T) { + testApplyExistingOPCM(t, 1, os.Getenv("MAINNET_RPC_URL"), standard.L1VersionsMainnet) + }) + t.Run("sepolia", func(t *testing.T) { + testApplyExistingOPCM(t, 11155111, os.Getenv("SEPOLIA_RPC_URL"), standard.L1VersionsSepolia) + }) +} + +func testApplyExistingOPCM(t *testing.T, l1ChainID uint64, forkRPCUrl string, versions standard.L1Versions) { + op_e2e.InitParallel(t) + anvil.Test(t) - forkRPCUrl := os.Getenv("SEPOLIA_RPC_URL") if forkRPCUrl == "" { t.Skip("no fork RPC URL provided") } @@ -159,8 +192,14 @@ func TestApplyExistingOPCM(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() + retryProxy := retryproxy.New(lgr, forkRPCUrl) + require.NoError(t, retryProxy.Start()) + t.Cleanup(func() { + require.NoError(t, retryProxy.Stop()) + }) + runner, err := anvil.New( - forkRPCUrl, + retryProxy.Endpoint(), lgr, ) require.NoError(t, err) @@ -173,22 +212,24 @@ func TestApplyExistingOPCM(t *testing.T) { l1Client, err := ethclient.Dial(runner.RPCUrl()) require.NoError(t, err) - l1ChainID := big.NewInt(11155111) + l1ChainIDBig := new(big.Int).SetUint64(l1ChainID) dk, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) require.NoError(t, err) // index 0 from Anvil's test set pk, err := crypto.HexToECDSA("ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80") require.NoError(t, err) - l2ChainID := uint256.NewInt(1) + 
l2ChainID := uint256.NewInt(777)
+	// Hardcode the tags below to ensure the test is validating the correct
+	// version even if the underlying tag changes
 	intent, st := newIntent(
 		t,
-		l1ChainID,
+		l1ChainIDBig,
 		dk,
 		l2ChainID,
-		artifacts.DefaultL1ContractsLocator,
-		artifacts.DefaultL2ContractsLocator,
+		artifacts.MustNewLocatorFromTag("op-contracts/v1.6.0"),
+		artifacts.MustNewLocatorFromTag("op-contracts/v1.7.0-beta.1+l2-contracts"),
 	)
 	// Define a new create2 salt to avoid contract address collisions
 	_, err = rand.Read(st.Create2Salt[:])
@@ -207,6 +248,171 @@ func TestApplyExistingOPCM(t *testing.T) {
 	))
 	validateOPChainDeployment(t, ethClientCodeGetter(ctx, l1Client), st, intent)
+
+	releases := versions.Releases["op-contracts/v1.6.0"]
+
+	implTests := []struct {
+		name    string
+		expAddr common.Address
+		actAddr common.Address
+	}{
+		{"OptimismPortal", releases.OptimismPortal.ImplementationAddress, st.ImplementationsDeployment.OptimismPortalImplAddress},
+		{"SystemConfig", releases.SystemConfig.ImplementationAddress, st.ImplementationsDeployment.SystemConfigImplAddress},
+		{"L1CrossDomainMessenger", releases.L1CrossDomainMessenger.ImplementationAddress, st.ImplementationsDeployment.L1CrossDomainMessengerImplAddress},
+		{"L1ERC721Bridge", releases.L1ERC721Bridge.ImplementationAddress, st.ImplementationsDeployment.L1ERC721BridgeImplAddress},
+		{"L1StandardBridge", releases.L1StandardBridge.ImplementationAddress, st.ImplementationsDeployment.L1StandardBridgeImplAddress},
+		{"OptimismMintableERC20Factory", releases.OptimismMintableERC20Factory.ImplementationAddress, st.ImplementationsDeployment.OptimismMintableERC20FactoryImplAddress},
+		{"DisputeGameFactory", releases.DisputeGameFactory.ImplementationAddress, st.ImplementationsDeployment.DisputeGameFactoryImplAddress},
+		{"MIPS", releases.MIPS.Address, st.ImplementationsDeployment.MipsSingletonAddress},
+		{"PreimageOracle", releases.PreimageOracle.Address, st.ImplementationsDeployment.PreimageOracleSingletonAddress},
+		{"DelayedWETH", releases.DelayedWETH.ImplementationAddress, st.ImplementationsDeployment.DelayedWETHImplAddress},
+	}
+	for _, tt := range implTests {
+		require.Equal(t, tt.expAddr, tt.actAddr, "unexpected address for %s", tt.name)
+	}
+
+	superchain, err := standard.SuperchainFor(l1ChainIDBig.Uint64())
+	require.NoError(t, err)
+
+	managerOwner, err := standard.ManagerOwnerAddrFor(l1ChainIDBig.Uint64())
+	require.NoError(t, err)
+
+	superchainTests := []struct {
+		name    string
+		expAddr common.Address
+		actAddr common.Address
+	}{
+		{"ProxyAdmin", managerOwner, st.SuperchainDeployment.ProxyAdminAddress},
+		{"SuperchainConfig", common.Address(*superchain.Config.SuperchainConfigAddr), st.SuperchainDeployment.SuperchainConfigProxyAddress},
+		{"ProtocolVersions", common.Address(*superchain.Config.ProtocolVersionsAddr), st.SuperchainDeployment.ProtocolVersionsProxyAddress},
+	}
+	for _, tt := range superchainTests {
+		require.Equal(t, tt.expAddr, tt.actAddr, "unexpected address for %s", tt.name)
+	}
+
+	artifactsFSL2, cleanupL2, err := artifacts.Download(
+		ctx,
+		intent.L2ContractsLocator,
+		artifacts.LogProgressor(lgr),
+	)
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		require.NoError(t, cleanupL2())
+	})
+
+	chainState := st.Chains[0]
+	chainIntent := intent.Chains[0]
+
+	semvers, err := inspect.L2Semvers(inspect.L2SemversConfig{
+		Lgr:        lgr,
+		Artifacts:  artifactsFSL2,
+		ChainState: chainState,
+	})
+	require.NoError(t, err)
+
+	expectedSemversL2 := &inspect.L2PredeploySemvers{
+		L2ToL1MessagePasser: "1.1.1-beta.1",
+
DeployerWhitelist: "1.1.1-beta.1", + WETH: "1.0.0-beta.1", + L2CrossDomainMessenger: "2.1.1-beta.1", + L2StandardBridge: "1.11.1-beta.1", + SequencerFeeVault: "1.5.0-beta.2", + OptimismMintableERC20Factory: "1.10.1-beta.2", + L1BlockNumber: "1.1.1-beta.1", + GasPriceOracle: "1.3.1-beta.1", + L1Block: "1.5.1-beta.1", + LegacyMessagePasser: "1.1.1-beta.1", + L2ERC721Bridge: "1.7.1-beta.2", + OptimismMintableERC721Factory: "1.4.1-beta.1", + BaseFeeVault: "1.5.0-beta.2", + L1FeeVault: "1.5.0-beta.2", + SchemaRegistry: "1.3.1-beta.1", + EAS: "1.4.1-beta.1", + CrossL2Inbox: "", + L2toL2CrossDomainMessenger: "", + SuperchainWETH: "", + ETHLiquidity: "", + SuperchainTokenBridge: "", + OptimismMintableERC20: "1.4.0-beta.1", + OptimismMintableERC721: "1.3.1-beta.1", + } + + require.EqualValues(t, expectedSemversL2, semvers) + + f, err := os.Open(fmt.Sprintf("./testdata/allocs-l2-v160-%d.json.gz", l1ChainID)) + require.NoError(t, err) + defer f.Close() + gzr, err := gzip.NewReader(f) + require.NoError(t, err) + defer gzr.Close() + dec := json.NewDecoder(bufio.NewReader(gzr)) + var expAllocs types.GenesisAlloc + require.NoError(t, dec.Decode(&expAllocs)) + + type storageCheckerFunc func(addr common.Address, actStorage map[common.Hash]common.Hash) + + storageDiff := func(addr common.Address, expStorage, actStorage map[common.Hash]common.Hash) { + require.EqualValues(t, expStorage, actStorage, "storage for %s differs", addr) + } + + defaultStorageChecker := func(addr common.Address, actStorage map[common.Hash]common.Hash) { + storageDiff(addr, expAllocs[addr].Storage, actStorage) + } + + overrideStorageChecker := func(addr common.Address, actStorage, overrides map[common.Hash]common.Hash) { + expStorage := make(map[common.Hash]common.Hash) + maps.Copy(expStorage, expAllocs[addr].Storage) + maps.Copy(expStorage, overrides) + storageDiff(addr, expStorage, actStorage) + } + + storageCheckers := map[common.Address]storageCheckerFunc{ + predeploys.L2CrossDomainMessengerAddr: func(addr common.Address, actStorage map[common.Hash]common.Hash) { + overrideStorageChecker(addr, actStorage, map[common.Hash]common.Hash{ + {31: 0xcf}: common.BytesToHash(chainState.L1CrossDomainMessengerProxyAddress.Bytes()), + }) + }, + predeploys.L2StandardBridgeAddr: func(addr common.Address, actStorage map[common.Hash]common.Hash) { + overrideStorageChecker(addr, actStorage, map[common.Hash]common.Hash{ + {31: 0x04}: common.BytesToHash(chainState.L1StandardBridgeProxyAddress.Bytes()), + }) + }, + predeploys.L2ERC721BridgeAddr: func(addr common.Address, actStorage map[common.Hash]common.Hash) { + overrideStorageChecker(addr, actStorage, map[common.Hash]common.Hash{ + {31: 0x02}: common.BytesToHash(chainState.L1ERC721BridgeProxyAddress.Bytes()), + }) + }, + predeploys.ProxyAdminAddr: func(addr common.Address, actStorage map[common.Hash]common.Hash) { + overrideStorageChecker(addr, actStorage, map[common.Hash]common.Hash{ + {}: common.BytesToHash(intent.Chains[0].Roles.L2ProxyAdminOwner.Bytes()), + }) + }, + // The ProxyAdmin owner is also set on the ProxyAdmin contract's implementation address, see + // L2Genesis.s.sol line 292. 
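+		// (The key 0xc0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d30018 below is that implementation address:
+		// predeploy implementations live in the 0xc0d3c0d3... code namespace, keyed by the last two
+		// bytes of the corresponding 0x42...00XX proxy, and 0x...0018 is the ProxyAdmin predeploy.)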
+ common.HexToAddress("0xc0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d30018"): func(addr common.Address, actStorage map[common.Hash]common.Hash) { + overrideStorageChecker(addr, actStorage, map[common.Hash]common.Hash{ + {}: common.BytesToHash(chainIntent.Roles.L2ProxyAdminOwner.Bytes()), + }) + }, + } + + //Use a custom equality function to compare the genesis allocs + //because the reflect-based one is really slow + actAllocs := st.Chains[0].Allocs.Data.Accounts + require.Equal(t, len(expAllocs), len(actAllocs)) + for addr, expAcc := range expAllocs { + actAcc, ok := actAllocs[addr] + require.True(t, ok) + require.True(t, expAcc.Balance.Cmp(actAcc.Balance) == 0, "balance for %s differs", addr) + require.Equal(t, expAcc.Nonce, actAcc.Nonce, "nonce for %s differs", addr) + require.Equal(t, hex.EncodeToString(expAllocs[addr].Code), hex.EncodeToString(actAcc.Code), "code for %s differs", addr) + + storageChecker, ok := storageCheckers[addr] + if !ok { + storageChecker = defaultStorageChecker + } + storageChecker(addr, actAcc.Storage) + } } func TestL2BlockTimeOverride(t *testing.T) { @@ -216,7 +422,7 @@ func TestL2BlockTimeOverride(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opts, intent, st := setupGenesisChain(t) + opts, intent, st := setupGenesisChain(t, defaultL1ChainID) intent.GlobalDeployOverrides = map[string]interface{}{ "l2BlockTime": float64(3), } @@ -234,7 +440,7 @@ func TestApplyGenesisStrategy(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opts, intent, st := setupGenesisChain(t) + opts, intent, st := setupGenesisChain(t, defaultL1ChainID) require.NoError(t, deployer.ApplyPipeline(ctx, opts)) @@ -254,7 +460,7 @@ func TestProofParamOverrides(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opts, intent, st := setupGenesisChain(t) + opts, intent, st := setupGenesisChain(t, defaultL1ChainID) intent.GlobalDeployOverrides = map[string]any{ "withdrawalDelaySeconds": standard.WithdrawalDelaySeconds + 1, "minProposalSizeBytes": standard.MinProposalSizeBytes + 1, @@ -351,7 +557,7 @@ func TestInteropDeployment(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opts, intent, st := setupGenesisChain(t) + opts, intent, st := setupGenesisChain(t, defaultL1ChainID) intent.UseInterop = true require.NoError(t, deployer.ApplyPipeline(ctx, opts)) @@ -369,7 +575,7 @@ func TestAltDADeployment(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opts, intent, st := setupGenesisChain(t) + opts, intent, st := setupGenesisChain(t, defaultL1ChainID) altDACfg := genesis.AltDADeployConfig{ UseAltDA: true, DACommitmentType: altda.KeccakCommitmentString, @@ -447,7 +653,7 @@ func TestInvalidL2Genesis(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - opts, intent, _ := setupGenesisChain(t) + opts, intent, _ := setupGenesisChain(t, defaultL1ChainID) intent.DeploymentStrategy = state.DeploymentStrategyGenesis intent.GlobalDeployOverrides = tt.overrides @@ -458,11 +664,11 @@ func TestInvalidL2Genesis(t *testing.T) { } } -func setupGenesisChain(t *testing.T) (deployer.ApplyPipelineOpts, *state.Intent, *state.State) { +func setupGenesisChain(t *testing.T, l1ChainID uint64) (deployer.ApplyPipelineOpts, *state.Intent, *state.State) { lgr := testlog.Logger(t, slog.LevelDebug) depKey := new(deployerKey) - l1ChainID := big.NewInt(77799777) + l1ChainIDBig := new(big.Int).SetUint64(l1ChainID) dk, err 
:= devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) require.NoError(t, err) @@ -473,8 +679,8 @@ func setupGenesisChain(t *testing.T) (deployer.ApplyPipelineOpts, *state.Intent, loc, _ := testutil.LocalArtifacts(t) - intent, st := newIntent(t, l1ChainID, dk, l2ChainID1, loc, loc) - intent.Chains = append(intent.Chains, newChainIntent(t, dk, l1ChainID, l2ChainID1)) + intent, st := newIntent(t, l1ChainIDBig, dk, l2ChainID1, loc, loc) + intent.Chains = append(intent.Chains, newChainIntent(t, dk, l1ChainIDBig, l2ChainID1)) intent.DeploymentStrategy = state.DeploymentStrategyGenesis opts := deployer.ApplyPipelineOpts{ @@ -503,6 +709,7 @@ func newIntent( l2Loc *artifacts.Locator, ) (*state.Intent, *state.State) { intent := &state.Intent{ + ConfigType: state.IntentConfigTypeCustom, DeploymentStrategy: state.DeploymentStrategyLive, L1ChainID: l1ChainID.Uint64(), SuperchainRoles: &state.SuperchainRoles{ @@ -529,8 +736,9 @@ func newChainIntent(t *testing.T, dk *devkeys.MnemonicDevKeys, l1ChainID *big.In BaseFeeVaultRecipient: addrFor(t, dk, devkeys.BaseFeeVaultRecipientRole.Key(l1ChainID)), L1FeeVaultRecipient: addrFor(t, dk, devkeys.L1FeeVaultRecipientRole.Key(l1ChainID)), SequencerFeeVaultRecipient: addrFor(t, dk, devkeys.SequencerFeeVaultRecipientRole.Key(l1ChainID)), - Eip1559Denominator: 50, - Eip1559Elasticity: 6, + Eip1559DenominatorCanyon: standard.Eip1559DenominatorCanyon, + Eip1559Denominator: standard.Eip1559Denominator, + Eip1559Elasticity: standard.Eip1559Elasticity, Roles: state.ChainRoles{ L1ProxyAdminOwner: addrFor(t, dk, devkeys.L2ProxyAdminOwnerRole.Key(l1ChainID)), L2ProxyAdminOwner: addrFor(t, dk, devkeys.L2ProxyAdminOwnerRole.Key(l1ChainID)), @@ -571,7 +779,7 @@ func validateSuperchainDeployment(t *testing.T, st *state.State, cg codeGetter) {"SuperchainConfigImpl", st.SuperchainDeployment.SuperchainConfigImplAddress}, {"ProtocolVersionsProxy", st.SuperchainDeployment.ProtocolVersionsProxyAddress}, {"ProtocolVersionsImpl", st.SuperchainDeployment.ProtocolVersionsImplAddress}, - {"OpcmProxy", st.ImplementationsDeployment.OpcmProxyAddress}, + {"Opcm", st.ImplementationsDeployment.OpcmAddress}, {"PreimageOracleSingleton", st.ImplementationsDeployment.PreimageOracleSingletonAddress}, {"MipsSingleton", st.ImplementationsDeployment.MipsSingletonAddress}, } diff --git a/op-deployer/pkg/deployer/integration_test/testdata/allocs-l2-v160-1.json.gz b/op-deployer/pkg/deployer/integration_test/testdata/allocs-l2-v160-1.json.gz new file mode 100644 index 000000000000..7a5450d64191 Binary files /dev/null and b/op-deployer/pkg/deployer/integration_test/testdata/allocs-l2-v160-1.json.gz differ diff --git a/op-deployer/pkg/deployer/integration_test/testdata/allocs-l2-v160-11155111.json.gz b/op-deployer/pkg/deployer/integration_test/testdata/allocs-l2-v160-11155111.json.gz new file mode 100644 index 000000000000..2fed12ccec47 Binary files /dev/null and b/op-deployer/pkg/deployer/integration_test/testdata/allocs-l2-v160-11155111.json.gz differ diff --git a/op-deployer/pkg/deployer/opcm/asterisc.go b/op-deployer/pkg/deployer/opcm/asterisc.go new file mode 100644 index 000000000000..9ba8959e3543 --- /dev/null +++ b/op-deployer/pkg/deployer/opcm/asterisc.go @@ -0,0 +1,64 @@ +package opcm + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-chain-ops/script" +) + +type DeployAsteriscInput struct { + PreimageOracle common.Address +} + +func (input *DeployAsteriscInput) InputSet() bool { + return true +} + +type DeployAsteriscOutput struct { + 
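// AsteriscSingleton is the address of the freshly deployed Asterisc RISCV contract.
+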
AsteriscSingleton common.Address +} + +func (output *DeployAsteriscOutput) CheckOutput(input common.Address) error { + return nil +} + +type DeployAsteriscScript struct { + Run func(input, output common.Address) error +} + +func DeployAsterisc( + host *script.Host, + input DeployAsteriscInput, +) (DeployAsteriscOutput, error) { + var output DeployAsteriscOutput + inputAddr := host.NewScriptAddress() + outputAddr := host.NewScriptAddress() + + cleanupInput, err := script.WithPrecompileAtAddress[*DeployAsteriscInput](host, inputAddr, &input) + if err != nil { + return output, fmt.Errorf("failed to insert DeployAsteriscInput precompile: %w", err) + } + defer cleanupInput() + + cleanupOutput, err := script.WithPrecompileAtAddress[*DeployAsteriscOutput](host, outputAddr, &output, + script.WithFieldSetter[*DeployAsteriscOutput]) + if err != nil { + return output, fmt.Errorf("failed to insert DeployAsteriscOutput precompile: %w", err) + } + defer cleanupOutput() + + implContract := "DeployAsterisc" + deployScript, cleanupDeploy, err := script.WithScript[DeployAsteriscScript](host, "DeployAsterisc.s.sol", implContract) + if err != nil { + return output, fmt.Errorf("failed to load %s script: %w", implContract, err) + } + defer cleanupDeploy() + + if err := deployScript.Run(inputAddr, outputAddr); err != nil { + return output, fmt.Errorf("failed to run %s script: %w", implContract, err) + } + + return output, nil +} diff --git a/op-deployer/pkg/deployer/opcm/asterisc_test.go b/op-deployer/pkg/deployer/opcm/asterisc_test.go new file mode 100644 index 000000000000..0ed68f7aeaff --- /dev/null +++ b/op-deployer/pkg/deployer/opcm/asterisc_test.go @@ -0,0 +1,34 @@ +package opcm + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/testutil" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func TestDeployAsterisc(t *testing.T) { + _, artifacts := testutil.LocalArtifacts(t) + + host, err := env.DefaultScriptHost( + broadcaster.NoopBroadcaster(), + testlog.Logger(t, log.LevelInfo), + common.Address{'D'}, + artifacts, + ) + require.NoError(t, err) + + input := DeployAsteriscInput{ + PreimageOracle: common.Address{0xab}, + } + + output, err := DeployAsterisc(host, input) + require.NoError(t, err) + + require.NotEmpty(t, output.AsteriscSingleton) +} diff --git a/op-deployer/pkg/deployer/opcm/contract.go b/op-deployer/pkg/deployer/opcm/contract.go index b90db5814192..8d02f77e8e0a 100644 --- a/op-deployer/pkg/deployer/opcm/contract.go +++ b/op-deployer/pkg/deployer/opcm/contract.go @@ -48,57 +48,6 @@ func (c *Contract) GenericAddressGetter(ctx context.Context, functionName string return c.callContractMethod(ctx, functionName, abi.Arguments{}) } -// GetImplementation retrieves the Implementation struct for a given release and contract name. 
-func (c *Contract) GetOPCMImplementationAddress(ctx context.Context, release, contractName string) (common.Address, error) { - methodName := "implementations" - method := abi.NewMethod( - methodName, - methodName, - abi.Function, - "view", - true, - false, - abi.Arguments{ - {Name: "release", Type: mustType("string")}, - {Name: "contractName", Type: mustType("string")}, - }, - abi.Arguments{ - {Name: "logic", Type: mustType("address")}, - {Name: "initializer", Type: mustType("bytes4")}, - }, - ) - - calldata, err := method.Inputs.Pack(release, contractName) - if err != nil { - return common.Address{}, fmt.Errorf("failed to pack inputs: %w", err) - } - - msg := ethereum.CallMsg{ - To: &c.addr, - Data: append(bytes.Clone(method.ID), calldata...), - } - - result, err := c.client.CallContract(ctx, msg, nil) - if err != nil { - return common.Address{}, fmt.Errorf("failed to call contract: %w", err) - } - - out, err := method.Outputs.Unpack(result) - if err != nil { - return common.Address{}, fmt.Errorf("failed to unpack result: %w", err) - } - if len(out) != 2 { - return common.Address{}, fmt.Errorf("unexpected output length: %d", len(out)) - } - - logic, ok := out[0].(common.Address) - if !ok { - return common.Address{}, fmt.Errorf("unexpected type for logic: %T", out[0]) - } - - return logic, nil -} - func (c *Contract) callContractMethod(ctx context.Context, methodName string, inputs abi.Arguments, args ...interface{}) (common.Address, error) { method := abi.NewMethod( methodName, diff --git a/op-deployer/pkg/deployer/opcm/dispute_game.go b/op-deployer/pkg/deployer/opcm/dispute_game.go index 0e117c040d83..481a401d07db 100644 --- a/op-deployer/pkg/deployer/opcm/dispute_game.go +++ b/op-deployer/pkg/deployer/opcm/dispute_game.go @@ -11,9 +11,7 @@ import ( type DeployDisputeGameInput struct { Release string StandardVersionsToml string - MipsVersion uint64 - MinProposalSizeBytes uint64 - ChallengePeriodSeconds uint64 + VmAddress common.Address GameKind string GameType uint32 AbsolutePrestate common.Hash @@ -33,9 +31,7 @@ func (input *DeployDisputeGameInput) InputSet() bool { } type DeployDisputeGameOutput struct { - DisputeGameImpl common.Address - MipsSingleton common.Address - PreimageOracleSingleton common.Address + DisputeGameImpl common.Address } func (output *DeployDisputeGameOutput) CheckOutput(input common.Address) error { diff --git a/op-deployer/pkg/deployer/opcm/dispute_game_test.go b/op-deployer/pkg/deployer/opcm/dispute_game_test.go index f39849be4403..2b2d33f9efe0 100644 --- a/op-deployer/pkg/deployer/opcm/dispute_game_test.go +++ b/op-deployer/pkg/deployer/opcm/dispute_game_test.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" ) @@ -26,13 +27,15 @@ func TestDeployDisputeGame(t *testing.T) { standardVersionsTOML, err := standard.L1VersionsDataFor(11155111) require.NoError(t, err) + vmAddr := common.Address{'V'} + host.ImportAccount(vmAddr, types.Account{Code: vmCode}) + // Address has to match the one returned by vmCode for oracle()(address) + host.ImportAccount(common.HexToAddress("0x92240135b46fc1142dA181f550aE8f595B858854"), types.Account{Code: oracleCode}) input := DeployDisputeGameInput{ Release: "dev", StandardVersionsToml: standardVersionsTOML, - MipsVersion: 1, - MinProposalSizeBytes: standard.MinProposalSizeBytes, 
- ChallengePeriodSeconds: standard.ChallengePeriodSeconds, + VmAddress: vmAddr, GameKind: "PermissionedDisputeGame", GameType: 1, AbsolutePrestate: common.Hash{'A'}, @@ -51,6 +54,8 @@ func TestDeployDisputeGame(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, output.DisputeGameImpl) - require.NotEmpty(t, output.MipsSingleton) - require.NotEmpty(t, output.PreimageOracleSingleton) } + +// Code to etch so that the VM oracle() method and the oracle challengePeriod() methods work (they return immutables) +var vmCode = common.FromHex("0x608060405234801561001057600080fd5b50600436106100415760003560e01c806354fd4d50146100465780637dc0d1d014610098578063e14ced32146100dc575b600080fd5b6100826040518060400160405280600c81526020017f312e322e312d626574612e37000000000000000000000000000000000000000081525081565b60405161008f919061269c565b60405180910390f35b60405173ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000092240135b46fc1142da181f550ae8f595b85885416815260200161008f565b6100ef6100ea366004612751565b6100fd565b60405190815260200161008f565b6000610107612612565b6080811461011457600080fd5b6040516106001461012457600080fd5b6084871461013157600080fd5b6101a4851461013f57600080fd5b8635608052602087013560a052604087013560e090811c60c09081526044890135821c82526048890135821c61010052604c890135821c610120526050890135821c61014052605489013590911c61016052605888013560f890811c610180526059890135901c6101a0819052605a89013590911c6101c05260628801906101e09060018111156101f5576040517f0136cc76000000000000000000000000000000000000000000000000000000008152600481fd5b506020810181511461020657600080fd5b60200160005b602081101561023057823560e01c825260049092019160209091019060010161020c565b5050508061012001511561024e57610246610408565b9150506103ff565b6101408101805160010167ffffffffffffffff16905260006101a4905060008060006102838560600151866000015186610559565b9250925092508163ffffffff1660001480156102a557508063ffffffff16600c145b156102bf576102b387610583565b955050505050506103ff565b63ffffffff8216603014806102da575063ffffffff82166038145b156102ea576102b385848461095d565b6000610364866040805160808101825260008082526020820181905291810182905260608101919091526040518060800160405280836060015163ffffffff168152602001836080015163ffffffff1681526020018360a0015163ffffffff1681526020018360c0015163ffffffff168152509050919050565b6040805160e0810182528281526101608901516020820152885191810191909152610524606082015263ffffffff808716608083015285811660a0830152841660c08201529091506103b581610b78565b50508752815163ffffffff9081166060808a01919091526020840151821660808a01526040840151821660a08a01528301511660c08801526103f5610408565b9750505050505050505b95945050505050565b60408051608051815260a051602082015260dc519181019190915260fc51604482015261011c51604882015261013c51604c82015261015c51605082015261017c5160548201526101805161019f5160588301526101a0516101bf5160598401526101d851605a84015260009261020092909160628301919060018111156104b5576040517f0136cc76000000000000000000000000000000000000000000000000000000008152600481fd5b60005b60208110156104dc57601c86015184526020909501946004909301926001016104b8565b506000835283830384a06000945080600181146104fc5760039550610524565b828015610514576001811461051d5760029650610522565b60009650610522565b600196505b505b50505081900390207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1660f89190911b17919050565b6000806000610569858786611001565b925050603f601a83901c8116915082165b93509350939050565b600061058d612612565b608090506000806000806105c48561016001516040810151608082015160a083015160c084015160e0909401519294919390929091565b509350935093509350600080610ffa63ffffffff168663fffffff
f1603610609576105f485858960e00151611053565b63ffffffff1660e08a0152909250905061086c565b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff03363ffffffff871601610642576340000000915061086c565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefe863ffffffff871601610678576001915061086c565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffef6a63ffffffff8716016106cc57600161012088015260ff85166101008801526106bf610408565b9998505050505050505050565b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff05d63ffffffff8716016107ca5760006040518061012001604052808763ffffffff1681526020018663ffffffff1681526020018563ffffffff16815260200189602001518152602001896040015163ffffffff1681526020018b81526020017f00000000000000000000000092240135b46fc1142da181f550ae8f595b85885473ffffffffffffffffffffffffffffffffffffffff16815260200161079a6101a4600160ff16610380020190565b8152895160209091015290506107af816110e7565b50508b5263ffffffff1660408b0152909350915061086c9050565b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff05c63ffffffff87160161082f5760208701516040880151610815918791879187916105248d51611367565b63ffffffff1660408b015260208a0152909250905061086c565b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff02963ffffffff87160161086c57610866858561145d565b90925090505b60006108e6886040805160808101825260008082526020820181905291810182905260608101919091526040518060800160405280836060015163ffffffff168152602001836080015163ffffffff1681526020018360a0015163ffffffff1681526020018360c0015163ffffffff168152509050919050565b61016089015163ffffffff85811660408084019190915285821660e0909301929092526020830180518083168086526004909101831682526060808e01919091529051821660808d015291830151811660a08c0152908201511660c08a0152905061094f610408565b9a9950505050505050505050565b610160830151600090601f601585901c169082908260208110610982576109826127c5565b60200201519050601f601086901c16600061099c8761159c565b905082810163fffffffc166000610524905060006109bf8b600001518484611001565b905060007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd063ffffffff8b16016109f7575080610a93565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc863ffffffff8b1601610a615760008c61016001518763ffffffff1660208110610a4357610a436127c5565b60200201519050610a558585836115b3565b8d525060019050610a93565b6040517fecf79d0d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000610b0d8d6040805160808101825260008082526020820181905291810182905260608101919091526040518060800160405280836060015163ffffffff168152602001836080015163ffffffff1681526020018360a0015163ffffffff1681526020018360c0015163ffffffff168152509050919050565b9050610b22818e610160015189856001611655565b610b5f8d82805163ffffffff9081166060808501919091526020830151821660808501526040830151821660a0850152909101511660c090910152565b610b67610408565b9d9c50505050505050505050505050565b604081015160a0820151600090819063ffffffff1660021480610ba557508360a0015163ffffffff166003145b15610bfe57608084015184516020808201519087015160a088015163f0000000909216630ffffffc600295861b161793610bf8939263ffffffff1614610bec57601f610bef565b60005b60ff168461172a565b50610ffa565b60808401516020808601516000928392601f601083901c8116939260151c16908110610c2c57610c2c6127c5565b602002015160a0880151909350819063ffffffff161580610c5757508760a0015163ffffffff16601c145b15610c985787602001518263ffffffff1660208110610c7857610c786127c5565b60200201519250600b886080015163ffffffff16901c601f169050610d77565b60208860a0015163ffffffff161015610d12578760a0015163ffffffff16600c1480610cce57508760a0015163ffffffff16600d145b8061
0ce357508760a0015163ffffffff16600e145b15610cf857876080015161ffff169250610d77565b610d0b886080015161ffff1660106117fd565b9250610d77565b60288860a0015163ffffffff16101580610d3657508760a0015163ffffffff166022145b80610d4b57508760a0015163ffffffff166026145b15610d775787602001518263ffffffff1660208110610d6c57610d6c6127c5565b602002015192508190505b60048860a0015163ffffffff1610158015610d9c575060088860a0015163ffffffff16105b80610db157508760a0015163ffffffff166001145b15610ddd57610dd4886000015189602001518a60a001518b608001518689611870565b50505050610ffa565b600063ffffffff9050600060208a60a0015163ffffffff1610610e4d57610e0d8a6080015161ffff1660106117fd565b8601955060008663fffffffc169050610e2f8b60400151828d60600151611001565b915060288b60a0015163ffffffff1610610e4b57809250600093505b505b6000610e698b608001518c60a001518d60c001518a8a87611ab3565b63ffffffff1690508a60a0015163ffffffff166000148015610e96575060088b60c0015163ffffffff1610155b8015610eac5750601c8b60c0015163ffffffff16105b15610fb2578a60c0015163ffffffff1660081480610ed457508a60c0015163ffffffff166009145b15610f1357610f078b600001518c602001518d60c0015163ffffffff16600814610efe5786610f01565b60005b8a61172a565b50505050505050610ffa565b8a60c0015163ffffffff16600a03610f40578a5160208c0151610f079190868a63ffffffff8b1615611655565b8a60c0015163ffffffff16600b03610f6e578a5160208c0151610f079190868a63ffffffff8b161515611655565b60108b60c0015163ffffffff1610158015610f935750601c8b60c0015163ffffffff16105b15610fb257610f078b600001518c602001518d60c001518a8a89612121565b8263ffffffff1663ffffffff14610fdc57610fd2838c60600151836115b3565b9950600198508297505b610ff28b600001518c6020015186846001611655565b505050505050505b9193909250565b60008061100f8585856123da565b90925090508061104b576040517f8e77b2b700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b509392505050565b6000808284610fff81161561106d57610fff811661100003015b8663ffffffff166000036110d95784935090810190636000000063ffffffff831611806110a557508463ffffffff168263ffffffff16105b806110bb57508563ffffffff168163ffffffff16105b156110d4575063ffffffff92506016915083905061057a565b6110dd565b8693505b5093509350939050565b610100810151608082015182516000928392918390819063ffffffff161561135e57865163ffffffff167ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb01611318576000876020015163fffffffc169050600061115c896101000151838b60e00151611001565b60608a015190915060001a6001036111de576111d889606001518a60a0015160408051600093845233602052918152606090922091527effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f01000000000000000000000000000000000000000000000000000000000000001790565b60608a01525b6000808a60c0015173ffffffffffffffffffffffffffffffffffffffff1663e03110e18c606001518d608001516040518363ffffffff1660e01b815260040161123792919091825263ffffffff16602082015260400190565b6040805180830381865afa158015611253573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061127791906127f4565b60208d015160408e0151929450909250906003821660048190038481101561129d578094505b50838210156112aa578193505b8460088502610100031c9450846008828660040303021b9450600180600883600403021b036001806008878560040303021b039150811981169050858119881617965050506112fe868e60e00151876115b3565b929b5050509689019695506001945091925061135e915050565b865163ffffffff167ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd01611352578660400151955061135e565b63ffffffff9550600994505b91939550919395565b600080858563ffffffff8b1660011480611387575063ffffffff8b166002145b80611398575063ffffffff8b166004145b156113a55788935061144f565b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffa63ffffffff8c16016114435760006113e5868c63fffffffc1689611001565b90508860038c166004038b8110156113fb57809b505b8b965086900360089081029290921c7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600193880293841b0116911b1791506000905061144f565b63ffffffff9350600992505b975097509750979350505050565b60008063ffffffff83166001036114f95763ffffffff84161580611487575063ffffffff84166001145b80611498575063ffffffff84166002145b806114a9575063ffffffff84166005145b806114ba575063ffffffff84166003145b806114cb575063ffffffff84166006145b806114dc575063ffffffff84166004145b156114ea5760009150611595565b5063ffffffff90506009611595565b8263ffffffff1660030361158a5763ffffffff84161580611520575063ffffffff84166005145b80611531575063ffffffff84166003145b1561153f5760009150611595565b63ffffffff84166001148061155a575063ffffffff84166002145b8061156b575063ffffffff84166006145b8061157c575063ffffffff84166004145b156114ea5760019150611595565b5063ffffffff905060165b9250929050565b60006115ad8261ffff1660106117fd565b92915050565b60006115be83612485565b60038416156115cc57600080fd5b6020830192601f8516601c0360031b83811b913563ffffffff90911b1916178460051c60005b601b81101561164a5760208601953582821c600116801561161a576001811461162f57611640565b60008581526020839052604090209450611640565b600082815260208690526040902094505b50506001016115f2565b509095945050505050565b60208363ffffffff16106116ca576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600e60248201527f76616c696420726567697374657200000000000000000000000000000000000060448201526064015b60405180910390fd5b63ffffffff8316158015906116dc5750805b1561170b5781848463ffffffff16602081106116fa576116fa6127c5565b63ffffffff90921660209290920201525b5050505060208101805163ffffffff8082169093526004019091169052565b836000015160040163ffffffff16846020015163ffffffff16146117aa576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6a756d7020696e2064656c617920736c6f74000000000000000000000000000060448201526064016116c1565b835160208501805163ffffffff90811687528381169091528316156117f65780600801848463ffffffff16602081106117e5576117e56127c5565b63ffffffff90921660209290920201525b5050505050565b600063ffffffff8381167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80850183169190911c821615159160016020869003821681901b830191861691821b92911b018261185a57600061185c565b815b90861663ffffffff16179250505092915050565b6000866000015160040163ffffffff16876020015163ffffffff16146118f2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f6272616e636820696e2064656c617920736c6f7400000000000000000000000060448201526064016116c1565b8463ffffffff166004148061190d57508463ffffffff166005145b15611984576000868463ffffffff166020811061192c5761192c6127c5565b602002015190508063ffffffff168363ffffffff1614801561195457508563ffffffff166004145b8061197c57508063ffffffff168363ffffffff161415801561197c57508563ffffffff166005145b915050611a56565b8463ffffffff166006036119a15760008260030b13159050611a56565b8463ffffffff166007036119bd5760008260030b139050611a56565b8463ffffffff16600103611a5657601f601085901c1660008190036119e65760008360030b1291505b8063ffffffff16601003611a1057875160080163ffffffff166103e08801526000600384900b1291505b8063ffffffff16600103611a295760008360030b121591505b8063ffffffff16601103611a5457875160080163ffffffff166103e08801526000600384900b121591505b505b8651602088015163ffffffff1688528115611a97576002611a7c8661ffff1660106117fd565b63ffffffff90811690911b8201600401166020890152611aa9565b60208801805160040163ffffffff1690525b5050505050505050565b
600063ffffffff86161580611ae0575060088663ffffffff1610158015611ae05750600f8663ffffffff16105b15611ee0578560088114611b235760098114611b2c57600a8114611b3557600b8114611b3e57600c8114611b4757600d8114611b5057600e8114611b5957611b5e565b60209550611b5e565b60219550611b5e565b602a9550611b5e565b602b9550611b5e565b60249550611b5e565b60259550611b5e565b602695505b508463ffffffff16600003611b83575063ffffffff8216601f600688901c161b612117565b8463ffffffff16600203611ba7575063ffffffff8216601f600688901c161c612117565b8463ffffffff16600303611bdb57601f600688901c16611bd363ffffffff8516821c60208390036117fd565b915050612117565b8463ffffffff16600403611bfb575063ffffffff8216601f84161b612117565b8463ffffffff16600603611c1b575063ffffffff8216601f84161c612117565b8463ffffffff16600703611c4357601f8416611bd363ffffffff8516821c60208390036117fd565b8463ffffffff16600803611c58575082612117565b8463ffffffff16600903611c6d575082612117565b8463ffffffff16600a03611c82575082612117565b8463ffffffff16600b03611c97575082612117565b8463ffffffff16600c03611cac575082612117565b8463ffffffff16600f03611cc1575082612117565b8463ffffffff16601003611cd6575082612117565b8463ffffffff16601103611ceb575082612117565b8463ffffffff16601203611d00575082612117565b8463ffffffff16601303611d15575082612117565b8463ffffffff16601803611d2a575082612117565b8463ffffffff16601903611d3f575082612117565b8463ffffffff16601a03611d54575082612117565b8463ffffffff16601b03611d69575082612117565b8463ffffffff16602003611d805750828201612117565b8463ffffffff16602103611d975750828201612117565b8463ffffffff16602203611dae5750818303612117565b8463ffffffff16602303611dc55750818303612117565b8463ffffffff16602403611ddc5750828216612117565b8463ffffffff16602503611df35750828217612117565b8463ffffffff16602603611e0a5750828218612117565b8463ffffffff16602703611e22575082821719612117565b8463ffffffff16602a03611e51578260030b8460030b12611e44576000611e47565b60015b60ff169050612117565b8463ffffffff16602b03611e79578263ffffffff168463ffffffff1610611e44576000611e47565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f696e76616c696420696e737472756374696f6e0000000000000000000000000060448201526064016116c1565b611e79565b8563ffffffff16601c03611f60578463ffffffff16600203611f055750828202612117565b8463ffffffff1660201480611f2057508463ffffffff166021145b15611edb578463ffffffff16602003611f37579219925b60005b6380000000851615611f59576401fffffffe600195861b169401611f3a565b9050612117565b8563ffffffff16600f03611f81575065ffffffff0000601083901b16612117565b8563ffffffff16602003611f9c57611f59848360018061251e565b8563ffffffff16602103611fb857611f5984836002600161251e565b8563ffffffff16602203611fe6575063ffffffff60086003851602811681811b198416918316901b17612117565b8563ffffffff1660230361200257611f5984836004600161251e565b8563ffffffff1660240361201e57611f5984836001600061251e565b8563ffffffff1660250361203a57611f5984836002600061251e565b8563ffffffff1660260361206b575063ffffffff60086003851602601803811681811c198416918316901c17612117565b8563ffffffff1660280361208657611f598483600186612566565b8563ffffffff166029036120a157611f598483600286612566565b8563ffffffff16602a036120cf575063ffffffff60086003851602811681811c198316918416901c17612117565b8563ffffffff16602b036120ea57611f598483600486612566565b8563ffffffff16602e03611e79575063ffffffff60086003851602601803811681811b198316918416901b175b9695505050505050565b60008463ffffffff1660100361213c57506060860151612382565b8463ffffffff1660110361215b5763ffffffff84166060880152612382565b8463ffffffff1660120361217457506040860151612382565b8463ffffffff166013036121935763ffffffff84166040880152612382565b8463ffffffff166018036121c75763fffff
fff600385810b9085900b02602081901c821660608a0152166040880152612382565b8463ffffffff166019036121f85763ffffffff84811681851602602081901c821660608a0152166040880152612382565b8463ffffffff16601a036122bb578260030b600003612273576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d4950533a206469766973696f6e206279207a65726f0000000000000000000060448201526064016116c1565b8260030b8460030b8161228857612288612818565b0763ffffffff166060880152600383810b9085900b816122aa576122aa612818565b0563ffffffff166040880152612382565b8463ffffffff16601b03612382578263ffffffff16600003612339576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d4950533a206469766973696f6e206279207a65726f0000000000000000000060448201526064016116c1565b8263ffffffff168463ffffffff168161235457612354612818565b0663ffffffff90811660608901528381169085168161237557612375612818565b0463ffffffff1660408801525b63ffffffff8216156123b85780868363ffffffff16602081106123a7576123a76127c5565b63ffffffff90921660209290920201525b50505060208401805163ffffffff808216909652600401909416909352505050565b6000806123e683612485565b60038416156123f457600080fd5b6020830192358460051c8160005b601b81101561245a5760208701963583821c600116801561242a576001811461243f57612450565b60008481526020839052604090209350612450565b600082815260208590526040902093505b5050600101612402565b508714925050811561247c57601f8516601c0360031b81901c63ffffffff1692505b50935093915050565b36610380820181101561251a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f636865636b207468617420746865726520697320656e6f7567682063616c6c6460448201527f617461000000000000000000000000000000000000000000000000000000000060648201526084016116c1565b5050565b60008060008061252e888761259a565b925092509250828263ffffffff168863ffffffff16901c169350841561255b5761255884826117fd565b93505b505050949350505050565b6000806000612575878661259a565b5063ffffffff868316811691811691821b9216901b1987161792505050949350505050565b600080806407fffffff8600385901b16816125b6826020612847565b63ffffffff9081161c905060006125ce600188612847565b198816600316905060006125e3886004612847565b9050888216600060036125f68385612847565b959c63ffffffff909616901b9a50949850929650505050505050565b6040805161018081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e0810182905261010081018290526101208101829052610140810191909152610160810161267861267d565b905290565b6040518061040001604052806020906020820280368337509192915050565b600060208083528351808285015260005b818110156126c9578581018301518582016040015282016126ad565b818111156126db576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b60008083601f84011261272157600080fd5b50813567ffffffffffffffff81111561273957600080fd5b60208301915083602082850101111561159557600080fd5b60008060008060006060868803121561276957600080fd5b853567ffffffffffffffff8082111561278157600080fd5b61278d89838a0161270f565b909750955060208801359150808211156127a657600080fd5b506127b38882890161270f565b96999598509660400135949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6000806040838503121561280757600080fd5b505080516020909101519092909150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b600063ffffffff8381169083168181101561288b577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b03939250505056fea16473
6f6c634300080f000a") +var oracleCode = common.FromHex("0x6080604052600436106101d85760003560e01c80639d53a64811610102578063ddcd58de11610095578063ec5efcbc11610064578063ec5efcbc14610681578063f3f480d9146106a1578063faf37bc7146106d4578063fef2b4ed146106e757600080fd5b8063ddcd58de146105d4578063e03110e11461060c578063e159261114610641578063ea7139501461066157600080fd5b8063b5e7154c116100d1578063b5e7154c14610555578063d18534b51461056c578063da35c6641461058c578063dd24f9bf146105a157600080fd5b80639d53a6481461048e5780639d7e8769146104dd578063b2e67ba8146104fd578063b4801e611461053557600080fd5b806361238bde1161017a5780637ac54767116101495780637ac54767146103ca5780638542cf50146103ea578063882856ef146104355780638dc4be111461046e57600080fd5b806361238bde1461031e5780636551927b146103565780637051472e1461038e5780637917de1d146103aa57600080fd5b80633909af5c116101b65780633909af5c146102715780634d52b4c91461029357806352f0f3ad146102a857806354fd4d50146102c857600080fd5b8063013cf08b146101dd5780630359a5631461022e5780632055b36b1461025c575b600080fd5b3480156101e957600080fd5b506101fd6101f8366004612e29565b610714565b6040805173ffffffffffffffffffffffffffffffffffffffff90931683526020830191909152015b60405180910390f35b34801561023a57600080fd5b5061024e610249366004612e6b565b610759565b604051908152602001610225565b34801561026857600080fd5b5061024e601081565b34801561027d57600080fd5b5061029161028c366004613073565b610891565b005b34801561029f57600080fd5b5061024e610ae8565b3480156102b457600080fd5b5061024e6102c336600461315f565b610b03565b3480156102d457600080fd5b506103116040518060400160405280600581526020017f312e312e3200000000000000000000000000000000000000000000000000000081525081565b60405161022591906131c6565b34801561032a57600080fd5b5061024e610339366004613217565b600160209081526000928352604080842090915290825290205481565b34801561036257600080fd5b5061024e610371366004612e6b565b601560209081526000928352604080842090915290825290205481565b34801561039a57600080fd5b5061024e6703782dace9d9000081565b3480156103b657600080fd5b506102916103c536600461327b565b610bd9565b3480156103d657600080fd5b5061024e6103e5366004612e29565b6110dc565b3480156103f657600080fd5b50610425610405366004613217565b600260209081526000928352604080842090915290825290205460ff1681565b6040519015158152602001610225565b34801561044157600080fd5b50610455610450366004613317565b6110f3565b60405167ffffffffffffffff9091168152602001610225565b34801561047a57600080fd5b5061029161048936600461334a565b61114d565b34801561049a57600080fd5b5061024e6104a9366004612e6b565b73ffffffffffffffffffffffffffffffffffffffff9091166000908152601860209081526040808320938352929052205490565b3480156104e957600080fd5b506102916104f8366004613396565b611248565b34801561050957600080fd5b5061024e610518366004612e6b565b601760209081526000928352604080842090915290825290205481565b34801561054157600080fd5b5061024e610550366004613317565b6113ff565b34801561056157600080fd5b5061024e620186a081565b34801561057857600080fd5b50610291610587366004613073565b611431565b34801561059857600080fd5b5060135461024e565b3480156105ad57600080fd5b507f000000000000000000000000000000000000000000000000000000000001ec3061024e565b3480156105e057600080fd5b5061024e6105ef366004612e6b565b601660209081526000928352604080842090915290825290205481565b34801561061857600080fd5b5061062c610627366004613217565b611840565b60408051928352602083019190915201610225565b34801561064d57600080fd5b5061029161065c36600461334a565b611931565b34801561066d57600080fd5b5061029161067c366004613422565b611a39565b34801561068d57600080fd5b5061029161069c366004613491565b611b98565b3480156106ad57600080fd5b507f000000000000000000000000000000000000000000000000000000000001518061024e565b
6102916106e2366004613519565b611d1e565b3480156106f357600080fd5b5061024e610702366004612e29565b60006020819052908152604090205481565b6013818154811061072457600080fd5b60009182526020909120600290910201805460019091015473ffffffffffffffffffffffffffffffffffffffff909116915082565b73ffffffffffffffffffffffffffffffffffffffff82166000908152601560209081526040808320848452909152812054819061079c9060601c63ffffffff1690565b63ffffffff16905060005b6010811015610889578160011660010361082f5773ffffffffffffffffffffffffffffffffffffffff85166000908152601460209081526040808320878452909152902081601081106107fc576107fc613555565b01546040805160208101929092528101849052606001604051602081830303815290604052805190602001209250610870565b826003826010811061084357610843613555565b01546040805160208101939093528201526060016040516020818303038152906040528051906020012092505b60019190911c9080610881816135b3565b9150506107a7565b505092915050565b600061089d8a8a610759565b90506108c086868360208b01356108bb6108b68d6135eb565b611fea565b61202a565b80156108de57506108de83838360208801356108bb6108b68a6135eb565b610914576040517f09bde33900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b86604001358860405160200161092a91906136ba565b6040516020818303038152906040528051906020012014610977576040517f1968a90200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b83602001358760200135600161098d91906136f8565b146109c4576040517f9a3b119900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610a0c886109d28680613710565b8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061208b92505050565b610a15886121e6565b836040013588604051602001610a2b91906136ba565b6040516020818303038152906040528051906020012003610a78576040517f9843145b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8a1660009081526015602090815260408083208c8452909152902080547fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000166001179055610adc8a8a3361298e565b50505050505050505050565b6001610af660106002613897565b610b0091906138a3565b81565b6000610b0f8686612a47565b9050610b1c8360086136f8565b82101580610b2a5750602083115b15610b61576040517ffe25498700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000602081815260c085901b82526008959095528251828252600286526040808320858452875280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660019081179091558484528752808320948352938652838220558181529384905292205592915050565b60608115610bf257610beb8686612af4565b9050610c2c565b85858080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509293505050505b3360009081526014602090815260408083208b845290915280822081516102008101928390529160109082845b815481526020019060010190808311610c5957505050505090506000601560003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008b81526020019081526020016000205490506000610cda8260601c63ffffffff1690565b63ffffffff169050333214610d1b576040517fba092d1600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610d2b8260801c63ffffffff1690565b63ffffffff16600003610d6a576040517f87138d5c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610d748260c01c90565b67ffffffffffffffff1615610db5576040517f475a253500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b898114610dee576040517f60f95d5a000
00000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610dfb89898d8886612b6d565b83516020850160888204881415608883061715610e20576307b1daf16000526004601cfd5b60405160c8810160405260005b83811015610ed0578083018051835260208101516020840152604081015160408401526060810151606084015260808101516080840152508460888301526088810460051b8b013560a883015260c882206001860195508560005b610200811015610ec5576001821615610ea55782818b0152610ec5565b8981015160009081526020938452604090209260019290921c9101610e88565b505050608801610e2d565b50505050600160106002610ee49190613897565b610eee91906138a3565b811115610f27576040517f6229572300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610f9c610f3a8360401c63ffffffff1690565b610f4a9063ffffffff168a6136f8565b60401b7fffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffff606084901b167fffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff8516171790565b915084156110295777ffffffffffffffffffffffffffffffffffffffffffffffff82164260c01b179150610fd68260801c63ffffffff1690565b63ffffffff16610fec8360401c63ffffffff1690565b63ffffffff1614611029576040517f7b1dafd100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3360009081526014602090815260408083208e8452909152902061104f90846010612d9f565b503360008181526018602090815260408083208f8452825280832080546001810182559084528284206004820401805460039092166008026101000a67ffffffffffffffff818102199093164390931602919091179055838352601582528083208f8452909152812084905560609190911b81523690601437366014016000a05050505050505050505050565b600381601081106110ec57600080fd5b0154905081565b6018602052826000526040600020602052816000526040600020818154811061111b57600080fd5b906000526020600020906004918282040191900660080292509250509054906101000a900467ffffffffffffffff1681565b60443560008060088301861061116b5763fe2549876000526004601cfd5b60c083901b60805260888386823786600882030151915060206000858360025afa90508061119857600080fd5b50600080517effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f0400000000000000000000000000000000000000000000000000000000000000178082526002602090815260408084208a8552825280842080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660019081179091558385528252808420998452988152888320939093558152908190529490942055505050565b600080603087600037602060006030600060025afa806112705763f91129696000526004601cfd5b6000517effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f010000000000000000000000000000000000000000000000000000000000000017608081815260a08c905260c08b905260308a60e037603088609083013760008060c083600a5afa9250826112f2576309bde3396000526004601cfd5b602886106113085763fe2549876000526004601cfd5b6000602882015278200000000000000000000000000000000000000000000000008152600881018b905285810151935060308a8237603081019b909b52505060509098207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f0500000000000000000000000000000000000000000000000000000000000000176000818152600260209081526040808320868452825280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600190811790915584845282528083209583529481528482209a909a559081528089529190912096909655505050505050565b6014602052826000526040600020602052816000526040600020816010811061142757600080fd5b0154925083915050565b73ffffffffffffffffffffffffffffffffffffffff891660009081526015602090815260408083208b845290915290205467ffffffffffffffff8116156114a4576040517fc334f06900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6114ae8160c01c90565b67ffffffffffffffff
166000036114f1576040517f55d4cbf900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000001518061151c8260c01c90565b6115309067ffffffffffffffff16426138a3565b11611567576040517f55d4cbf900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006115738b8b610759565b905061158c87878360208c01356108bb6108b68e6135eb565b80156115aa57506115aa84848360208901356108bb6108b68b6135eb565b6115e0576040517f09bde33900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8760400135896040516020016115f691906136ba565b6040516020818303038152906040528051906020012014611643576040517f1968a90200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b84602001358860200135600161165991906136f8565b14158061168b575060016116738360601c63ffffffff1690565b61167d91906138ba565b63ffffffff16856020013514155b156116c2576040517f9a3b119900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6116d0896109d28780613710565b6116d9896121e6565b60006116e48a612cc0565b7effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f0200000000000000000000000000000000000000000000000000000000000000179050600061173b8460a01c63ffffffff1690565b67ffffffffffffffff169050600160026000848152602001908152602001600020600083815260200190815260200160002060006101000a81548160ff021916908315150217905550601760008e73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008d8152602001908152602001600020546001600084815260200190815260200160002060008381526020019081526020016000208190555061180d8460801c63ffffffff1690565b600083815260208190526040902063ffffffff9190911690556118318d8d8161298e565b50505050505050505050505050565b6000828152600260209081526040808320848452909152812054819060ff166118c9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f7072652d696d616765206d757374206578697374000000000000000000000000604482015260640160405180910390fd5b50600083815260208181526040909120546118e58160086136f8565b6118f08560206136f8565b1061190e57836119018260086136f8565b61190b91906138a3565b91505b506000938452600160209081526040808620948652939052919092205492909150565b60443560008060088301861061194f5763fe2549876000526004601cfd5b60c083901b6080526088838682378087017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80151908490207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f02000000000000000000000000000000000000000000000000000000000000001760008181526002602090815260408083208b8452825280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600190811790915584845282528083209a83529981528982209390935590815290819052959095209190915550505050565b60008060008060808860601b81528760c01b6014820152858782601c0137601c860181207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f0600000000000000000000000000000000000000000000000000000000000000179350604088026260216001603f5a021015611ac35763dd629f866000526004601cfd5b6000808783601c018c5afa94503d6001019150600882018a10611aee5763fe2549876000526004601cfd5b60c082901b81526008018481533d6000600183013e89017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8015160008481526002602090815260408083208d8452825280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600190811790915587845282528083209c83529b81528b8220929092559384528390529790912096909655505050505050565b6000611ba48686610759565b9050611bb
d83838360208801356108bb6108b68a6135eb565b611bf3576040517f09bde33900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b602084013515611c2f576040517f9a3b119900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b611c37612ddd565b611c45816109d28780613710565b611c4e816121e6565b846040013581604051602001611c6491906136ba565b6040516020818303038152906040528051906020012003611cb1576040517f9843145b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff87166000908152601560209081526040808320898452909152902080547fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000166001179055611d1587873361298e565b50505050505050565b6703782dace9d90000341015611d60576040517fe92c469f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b333214611d99576040517fba092d1600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b611da48160086138df565b63ffffffff168263ffffffff1610611de8576040517ffe25498700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000001ec308163ffffffff161015611e48576040517f7b1dafd100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336000908152601560209081526040808320868452909152902054611e738160801c63ffffffff1690565b63ffffffff1615611eb0576040517f0dc149f000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b608082901b7fffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffffff60a085901b167fffffffffffffffff0000000000000000ffffffffffffffffffffffffffffffff83161717336000818152601560209081526040808320898452825280832094909455835180850185528381528082018981526013805460018101825590855291517f66de8ffda797e3de9c05e8fc57b3bf0ec28a930d40b0d285d93c06501cf6a090600290930292830180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909216919091179055517f66de8ffda797e3de9c05e8fc57b3bf0ec28a930d40b0d285d93c06501cf6a0919091015591815260168252828120968152959052909320349055505050565b600081600001518260200151836040015160405160200161200d93929190613907565b604051602081830303815290604052805190602001209050919050565b60008160005b601081101561207e578060051b880135600186831c16600181146120635760008481526020839052604090209350612074565b600082815260208590526040902093505b5050600101612030565b5090931495945050505050565b608881511461209957600080fd5b602081016020830161211a565b8260031b8201518060001a8160011a60081b178160021a60101b8260031a60181b17178160041a60201b8260051a60281b178260061a60301b8360071a60381b1717179050612114816120ff868560059190911b015190565b1867ffffffffffffffff16600586901b840152565b50505050565b612126600083836120a6565b612132600183836120a6565b61213e600283836120a6565b61214a600383836120a6565b612156600483836120a6565b612162600583836120a6565b61216e600683836120a6565b61217a600783836120a6565b612186600883836120a6565b612192600983836120a6565b61219e600a83836120a6565b6121aa600b83836120a6565b6121b6600c83836120a6565b6121c2600d83836120a6565b6121ce600e83836120a6565b6121da600f83836120a6565b612114601083836120a6565b6040805178010000000000008082800000000000808a8000000080008000602082015279808b00000000800000018000000080008081800000000000800991810191909152788a00000000000000880000000080008009000000008000000a60608201527b8000808b800000000000008b8000000000008089800000000000800360808201527f80000000000080028000000000000080000000000000800a800000008000000a60a08201527f80000000800080
8180000000000080800000000080000001800000008000800860c082015260009060e0016040516020818303038152906040529050602082016020820161286e565b6102808101516101e082015161014083015160a0840151845118189118186102a082015161020083015161016084015160c0850151602086015118189118186102c083015161022084015161018085015160e0860151604087015118189118186102e08401516102408501516101a0860151610100870151606088015118189118186103008501516102608601516101c0870151610120880151608089015118189118188084603f1c6123998660011b67ffffffffffffffff1690565b18188584603f1c6123b48660011b67ffffffffffffffff1690565b18188584603f1c6123cf8660011b67ffffffffffffffff1690565b181895508483603f1c6123ec8560011b67ffffffffffffffff1690565b181894508387603f1c6124098960011b67ffffffffffffffff1690565b60208b01518b51861867ffffffffffffffff168c5291189190911897508118600181901b603f9190911c18935060c08801518118601481901c602c9190911b1867ffffffffffffffff1660208901526101208801518718602c81901c60149190911b1867ffffffffffffffff1660c08901526102c08801518618600381901c603d9190911b1867ffffffffffffffff166101208901526101c08801518718601981901c60279190911b1867ffffffffffffffff166102c08901526102808801518218602e81901c60129190911b1867ffffffffffffffff166101c089015260408801518618600281901c603e9190911b1867ffffffffffffffff166102808901526101808801518618601581901c602b9190911b1867ffffffffffffffff1660408901526101a08801518518602781901c60199190911b1867ffffffffffffffff166101808901526102608801518718603881901c60089190911b1867ffffffffffffffff166101a08901526102e08801518518600881901c60389190911b1867ffffffffffffffff166102608901526101e08801518218601781901c60299190911b1867ffffffffffffffff166102e089015260808801518718602581901c601b9190911b1867ffffffffffffffff166101e08901526103008801518718603281901c600e9190911b1867ffffffffffffffff1660808901526102a08801518118603e81901c60029190911b1867ffffffffffffffff166103008901526101008801518518600981901c60379190911b1867ffffffffffffffff166102a08901526102008801518118601381901c602d9190911b1867ffffffffffffffff1661010089015260a08801518218601c81901c60249190911b1867ffffffffffffffff1661020089015260608801518518602481901c601c9190911b1867ffffffffffffffff1660a08901526102408801518518602b81901c60159190911b1867ffffffffffffffff1660608901526102208801518618603181901c600f9190911b1867ffffffffffffffff166102408901526101608801518118603681901c600a9190911b1867ffffffffffffffff166102208901525060e08701518518603a81901c60069190911b1867ffffffffffffffff166101608801526101408701518118603d81901c60039190911b1867ffffffffffffffff1660e0880152505067ffffffffffffffff81166101408601525b5050505050565b600582811b8201805160018501831b8401805160028701851b8601805160038901871b8801805160048b0190981b8901805167ffffffffffffffff861985168918811690995283198a16861889169096528819861683188816909352841986168818871690528419831684189095169052919391929190611d15565b612808600082612781565b612813600582612781565b61281e600a82612781565b612829600f82612781565b612834601482612781565b50565b612840816122dc565b612849816127fd565b600383901b820151815160c09190911c9061211490821867ffffffffffffffff168352565b61287a60008284612837565b61288660018284612837565b61289260028284612837565b61289e60038284612837565b6128aa60048284612837565b6128b660058284612837565b6128c260068284612837565b6128ce60078284612837565b6128da60088284612837565b6128e660098284612837565b6128f2600a8284612837565b6128fe600b8284612837565b61290a600c8284612837565b612916600d8284612837565b612922600e8284612837565b61292e600f8284612837565b61293a60108284612837565b61294660118284612837565b61295260128284612837565b61295e60138284612837565b61296a60148284612837565b61297660158284612837565b61298260168284612837565b6121146017828461283
7565b73ffffffffffffffffffffffffffffffffffffffff83811660009081526016602090815260408083208684529091528082208054908390559051909284169083908381818185875af1925050503d8060008114612a07576040519150601f19603f3d011682016040523d82523d6000602084013e612a0c565b606091505b505090508061277a576040517f83e6cc6b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f01000000000000000000000000000000000000000000000000000000000000007effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff831617612aed818360408051600093845233602052918152606090922091527effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f01000000000000000000000000000000000000000000000000000000000000001790565b9392505050565b6060604051905081602082018181018286833760888306808015612b3d5760888290038501848101848103803687375060806001820353506001845160001a1784538652612b54565b608836843760018353608060878401536088850186525b5050505050601f19603f82510116810160405292915050565b6000612b7f8260a01c63ffffffff1690565b67ffffffffffffffff1690506000612b9d8360801c63ffffffff1690565b63ffffffff1690506000612bb78460401c63ffffffff1690565b63ffffffff169050600883108015612bcd575080155b15612c015760c082901b6000908152883560085283513382526017602090815260408084208a855290915290912055612cb6565b60088310158015612c1f575080612c196008856138a3565b93508310155b8015612c335750612c3087826136f8565b83105b15612cb6576000612c4482856138a3565b905087612c528260206136f8565b10158015612c5e575085155b15612c95576040517ffe25498700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3360009081526017602090815260408083208a845290915290209089013590555b5050505050505050565b6000612d43565b66ff00ff00ff00ff8160081c1667ff00ff00ff00ff00612cf18360081b67ffffffffffffffff1690565b1617905065ffff0000ffff8160101c1667ffff0000ffff0000612d1e8360101b67ffffffffffffffff1690565b1617905060008160201c612d3c8360201b67ffffffffffffffff1690565b1792915050565b60808201516020830190612d5b90612cc7565b612cc7565b6040820151612d6990612cc7565b60401b17612d81612d5660018460059190911b015190565b825160809190911b90612d9390612cc7565b60c01b17179392505050565b8260108101928215612dcd579160200282015b82811115612dcd578251825591602001919060010190612db2565b50612dd9929150612df5565b5090565b6040518060200160405280612df0612e0a565b905290565b5b80821115612dd95760008155600101612df6565b6040518061032001604052806019906020820280368337509192915050565b600060208284031215612e3b57600080fd5b5035919050565b803573ffffffffffffffffffffffffffffffffffffffff81168114612e6657600080fd5b919050565b60008060408385031215612e7e57600080fd5b612e8783612e42565b946020939093013593505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051610320810167ffffffffffffffff81118282101715612ee857612ee8612e95565b60405290565b6040516060810167ffffffffffffffff81118282101715612ee857612ee8612e95565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715612f5857612f58612e95565b604052919050565b803567ffffffffffffffff81168114612e6657600080fd5b6000610320808385031215612f8c57600080fd5b604051602080820182811067ffffffffffffffff82111715612fb057612fb0612e95565b806040525081935085601f860112612fc757600080fd5b612fcf612ec4565b928501928087851115612fe157600080fd5b865b8581101561300157612ff481612f60565b8352918301918301612fe3565b509092525091949350505050565b60006060828403121561302157600080fd5b50919050565b60008083601f84011261303957600080fd5b50813567ffffffffffffffff81111561305157600080fd5b6020830191508360208260051b850101111561306c57600080fd5b9250929050565b60008060008060008060
008060006103e08a8c03121561309257600080fd5b61309b8a612e42565b985060208a013597506130b18b60408c01612f78565b96506103608a013567ffffffffffffffff808211156130cf57600080fd5b6130db8d838e0161300f565b97506103808c01359150808211156130f257600080fd5b6130fe8d838e01613027565b90975095506103a08c013591508082111561311857600080fd5b6131248d838e0161300f565b94506103c08c013591508082111561313b57600080fd5b506131488c828d01613027565b915080935050809150509295985092959850929598565b600080600080600060a0868803121561317757600080fd5b505083359560208501359550604085013594606081013594506080013592509050565b60005b838110156131b557818101518382015260200161319d565b838111156121145750506000910152565b60208152600082518060208401526131e581604085016020870161319a565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b6000806040838503121561322a57600080fd5b50508035926020909101359150565b60008083601f84011261324b57600080fd5b50813567ffffffffffffffff81111561326357600080fd5b60208301915083602082850101111561306c57600080fd5b600080600080600080600060a0888a03121561329657600080fd5b8735965060208801359550604088013567ffffffffffffffff808211156132bc57600080fd5b6132c88b838c01613239565b909750955060608a01359150808211156132e157600080fd5b506132ee8a828b01613027565b9094509250506080880135801515811461330757600080fd5b8091505092959891949750929550565b60008060006060848603121561332c57600080fd5b61333584612e42565b95602085013595506040909401359392505050565b60008060006040848603121561335f57600080fd5b83359250602084013567ffffffffffffffff81111561337d57600080fd5b61338986828701613239565b9497909650939450505050565b600080600080600080600060a0888a0312156133b157600080fd5b8735965060208801359550604088013567ffffffffffffffff808211156133d757600080fd5b6133e38b838c01613239565b909750955060608a01359150808211156133fc57600080fd5b506134098a828b01613239565b989b979a50959894979596608090950135949350505050565b60008060008060006080868803121561343a57600080fd5b8535945061344a60208701612e42565b935061345860408701612f60565b9250606086013567ffffffffffffffff81111561347457600080fd5b61348088828901613239565b969995985093965092949392505050565b6000806000806000608086880312156134a957600080fd5b6134b286612e42565b945060208601359350604086013567ffffffffffffffff808211156134d657600080fd5b6134e289838a0161300f565b945060608801359150808211156134f857600080fd5b5061348088828901613027565b803563ffffffff81168114612e6657600080fd5b60008060006060848603121561352e57600080fd5b8335925061353e60208501613505565b915061354c60408501613505565b90509250925092565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036135e4576135e4613584565b5060010190565b6000606082360312156135fd57600080fd5b613605612eee565b823567ffffffffffffffff8082111561361d57600080fd5b9084019036601f83011261363057600080fd5b813560208282111561364457613644612e95565b613674817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f85011601612f11565b9250818352368183860101111561368a57600080fd5b81818501828501376000918301810191909152908352848101359083015250604092830135928101929092525090565b81516103208201908260005b60198110156136ef57825167ffffffffffffffff168252602092830192909101906001016136c6565b50505092915050565b6000821982111561370b5761370b613584565b500190565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe184360301811261374557600080fd5b83018035915067ffffffffffffffff82111561376057600080fd5b60200191503681900382131561306
c57600080fd5b600181815b808511156137ce57817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff048211156137b4576137b4613584565b808516156137c157918102915b93841c939080029061377a565b509250929050565b6000826137e557506001613891565b816137f257506000613891565b816001811461380857600281146138125761382e565b6001915050613891565b60ff84111561382357613823613584565b50506001821b613891565b5060208310610133831016604e8410600b8410161715613851575081810a613891565b61385b8383613775565b807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0482111561388d5761388d613584565b0290505b92915050565b6000612aed83836137d6565b6000828210156138b5576138b5613584565b500390565b600063ffffffff838116908316818110156138d7576138d7613584565b039392505050565b600063ffffffff8083168185168083038211156138fe576138fe613584565b01949350505050565b6000845161391981846020890161319a565b9190910192835250602082015260400191905056fea164736f6c634300080f000a") diff --git a/op-deployer/pkg/deployer/opcm/implementations.go b/op-deployer/pkg/deployer/opcm/implementations.go index 8dd072eef246..413452b1d348 100644 --- a/op-deployer/pkg/deployer/opcm/implementations.go +++ b/op-deployer/pkg/deployer/opcm/implementations.go @@ -18,12 +18,11 @@ type DeployImplementationsInput struct { DisputeGameFinalityDelaySeconds *big.Int MipsVersion *big.Int // Release version to set OPCM implementations for, of the format `op-contracts/vX.Y.Z`. - Release string + L1ContractsRelease string SuperchainConfigProxy common.Address ProtocolVersionsProxy common.Address UseInterop bool // if true, deploy Interop implementations - OpcmProxyOwner common.Address StandardVersionsToml string // contents of 'standard-versions-mainnet.toml' or 'standard-versions-sepolia.toml' file } @@ -32,8 +31,7 @@ func (input *DeployImplementationsInput) InputSet() bool { } type DeployImplementationsOutput struct { - OpcmProxy common.Address - OpcmImpl common.Address + Opcm common.Address DelayedWETHImpl common.Address OptimismPortalImpl common.Address PreimageOracleSingleton common.Address diff --git a/op-deployer/pkg/deployer/opcm/opchain.go b/op-deployer/pkg/deployer/opcm/opchain.go index 1e1b468a417f..8c7e60fec4d2 100644 --- a/op-deployer/pkg/deployer/opcm/opchain.go +++ b/op-deployer/pkg/deployer/opcm/opchain.go @@ -26,7 +26,7 @@ type DeployOPChainInputV160 struct { BasefeeScalar uint32 BlobBaseFeeScalar uint32 L2ChainId *big.Int - OpcmProxy common.Address + Opcm common.Address SaltMixer string GasLimit uint64 @@ -122,8 +122,8 @@ func deployOPChain[T any](host *script.Host, input T) (DeployOPChainOutput, erro type ReadImplementationAddressesInput struct { DeployOPChainOutput - OpcmProxy common.Address - Release string + Opcm common.Address + Release string } type ReadImplementationAddressesOutput struct { diff --git a/op-deployer/pkg/deployer/pipeline/alt_da.go b/op-deployer/pkg/deployer/pipeline/alt_da.go index 62796832c93c..b36412620945 100644 --- a/op-deployer/pkg/deployer/pipeline/alt_da.go +++ b/op-deployer/pkg/deployer/pipeline/alt_da.go @@ -31,7 +31,7 @@ func DeployAltDA(env *Env, intent *state.Intent, st *state.State, chainID common lgr.Info("deploying alt-da contracts") dao, err = opcm.DeployAltDA(env.L1ScriptHost, opcm.DeployAltDAInput{ Salt: st.Create2Salt, - ProxyAdmin: st.ImplementationsDeployment.OpcmProxyAddress, + ProxyAdmin: chainState.ProxyAdminAddress, ChallengeContractOwner: chainIntent.Roles.L1ProxyAdminOwner, ChallengeWindow: new(big.Int).SetUint64(chainIntent.DangerousAltDAConfig.DAChallengeWindow), ResolveWindow: 
new(big.Int).SetUint64(chainIntent.DangerousAltDAConfig.DAResolveWindow), diff --git a/op-deployer/pkg/deployer/pipeline/implementations.go b/op-deployer/pkg/deployer/pipeline/implementations.go index 47ea91fbe963..c2d409b5c3a4 100644 --- a/op-deployer/pkg/deployer/pipeline/implementations.go +++ b/op-deployer/pkg/deployer/pipeline/implementations.go @@ -35,10 +35,12 @@ func DeployImplementations(env *Env, intent *state.Intent, st *state.State) erro var err error if intent.L1ContractsLocator.IsTag() && intent.DeploymentStrategy == state.DeploymentStrategyLive { standardVersionsTOML, err = standard.L1VersionsDataFor(intent.L1ChainID) - if err != nil { - return fmt.Errorf("error getting standard versions TOML: %w", err) + if err == nil { + contractsRelease = intent.L1ContractsLocator.Tag + } else { + contractsRelease = "dev" } - contractsRelease = intent.L1ContractsLocator.Tag + } else { contractsRelease = "dev" } @@ -68,10 +70,9 @@ func DeployImplementations(env *Env, intent *state.Intent, st *state.State) erro ProofMaturityDelaySeconds: new(big.Int).SetUint64(proofParams.ProofMaturityDelaySeconds), DisputeGameFinalityDelaySeconds: new(big.Int).SetUint64(proofParams.DisputeGameFinalityDelaySeconds), MipsVersion: new(big.Int).SetUint64(proofParams.MIPSVersion), - Release: contractsRelease, + L1ContractsRelease: contractsRelease, SuperchainConfigProxy: st.SuperchainDeployment.SuperchainConfigProxyAddress, ProtocolVersionsProxy: st.SuperchainDeployment.ProtocolVersionsProxyAddress, - OpcmProxyOwner: st.SuperchainDeployment.ProxyAdminAddress, StandardVersionsToml: standardVersionsTOML, UseInterop: intent.UseInterop, }, @@ -81,7 +82,7 @@ func DeployImplementations(env *Env, intent *state.Intent, st *state.State) erro } st.ImplementationsDeployment = &state.ImplementationsDeployment{ - OpcmProxyAddress: dio.OpcmProxy, + OpcmAddress: dio.Opcm, DelayedWETHImplAddress: dio.DelayedWETHImpl, OptimismPortalImplAddress: dio.OptimismPortalImpl, PreimageOracleSingletonAddress: dio.PreimageOracleSingleton, diff --git a/op-deployer/pkg/deployer/pipeline/init.go b/op-deployer/pkg/deployer/pipeline/init.go index f8b12e36a431..2b9f1cbd45fc 100644 --- a/op-deployer/pkg/deployer/pipeline/init.go +++ b/op-deployer/pkg/deployer/pipeline/init.go @@ -3,10 +3,12 @@ package pipeline import ( "context" "crypto/rand" + "errors" "fmt" + "os" + "strings" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" "github.com/ethereum-optimism/optimism/op-chain-ops/script" @@ -14,6 +16,8 @@ import ( "github.com/ethereum/go-ethereum/common" ) +var ErrRefusingToDeployTaggedReleaseWithoutOPCM = errors.New("refusing to deploy tagged release without OPCM") + func IsSupportedStateVersion(version int) bool { return version == 1 } @@ -26,7 +30,11 @@ func InitLiveStrategy(ctx context.Context, env *Env, intent *state.Intent, st *s return err } - if intent.L1ContractsLocator.IsTag() { + opcmAddress, opcmAddrErr := standard.ManagerImplementationAddrFor(intent.L1ChainID) + hasPredeployedOPCM := opcmAddrErr == nil + isTag := intent.L1ContractsLocator.IsTag() + + if isTag && hasPredeployedOPCM { superCfg, err := standard.SuperchainFor(intent.L1ChainID) if err != nil { return fmt.Errorf("error getting superchain config: %w", err) @@ -45,12 +53,12 @@ func InitLiveStrategy(ctx context.Context, env *Env, intent *state.Intent, st *s SuperchainConfigProxyAddress: common.Address(*superCfg.Config.SuperchainConfigAddr), } - opcmProxy, err := 
standard.ManagerImplementationAddrFor(intent.L1ChainID) - if err != nil { - return fmt.Errorf("error getting OPCM proxy address: %w", err) - } st.ImplementationsDeployment = &state.ImplementationsDeployment{ - OpcmProxyAddress: opcmProxy, + OpcmAddress: opcmAddress, + } + } else if isTag && !hasPredeployedOPCM { + if err := displayWarning(); err != nil { + return err } } @@ -127,3 +135,23 @@ func InitGenesisStrategy(env *Env, intent *state.Intent, st *state.State) error func immutableErr(field string, was, is any) error { return fmt.Errorf("%s is immutable: was %v, is %v", field, was, is) } + +func displayWarning() error { + warning := strings.TrimPrefix(` +####################### WARNING! WARNING WARNING! ####################### + +You are deploying a tagged release to a chain with no pre-deployed OPCM. +Due to a quirk of our contract version system, this can lead to deploying +contracts containing unaudited or untested code. As a result, this +functionality is currently disabled. + +We will fix this in an upcoming release. + +This process will now exit. + +####################### WARNING! WARNING WARNING! ####################### +`, "\n") + + _, _ = fmt.Fprint(os.Stderr, warning) + return ErrRefusingToDeployTaggedReleaseWithoutOPCM +} diff --git a/op-deployer/pkg/deployer/pipeline/opchain.go b/op-deployer/pkg/deployer/pipeline/opchain.go index a49848883cea..90cc665e077d 100644 --- a/op-deployer/pkg/deployer/pipeline/opchain.go +++ b/op-deployer/pkg/deployer/pipeline/opchain.go @@ -34,7 +34,7 @@ func DeployOPChain(env *Env, intent *state.Intent, st *state.State, chainID comm return opcm.DeployOPChainOutput{}, fmt.Errorf("error making deploy OP chain input: %w", err) } - opcmAddr = input.OpcmProxy + opcmAddr = input.Opcm return opcm.DeployOPChainV160(env.L1ScriptHost, input) } default: @@ -44,7 +44,7 @@ func DeployOPChain(env *Env, intent *state.Intent, st *state.State, chainID comm return opcm.DeployOPChainOutput{}, fmt.Errorf("error making deploy OP chain input: %w", err) } - opcmAddr = input.OpcmProxy + opcmAddr = input.Opcm return opcm.DeployOPChainIsthmus(env.L1ScriptHost, input) } } @@ -67,7 +67,7 @@ func DeployOPChain(env *Env, intent *state.Intent, st *state.State, chainID comm readInput := opcm.ReadImplementationAddressesInput{ DeployOPChainOutput: dco, - OpcmProxy: opcmAddr, + Opcm: opcmAddr, Release: release, } impls, err := opcm.ReadImplementationAddresses(env.L1ScriptHost, readInput) @@ -126,7 +126,7 @@ func makeDCIV160(intent *state.Intent, thisIntent *state.ChainIntent, chainID co BasefeeScalar: standard.BasefeeScalar, BlobBaseFeeScalar: standard.BlobBaseFeeScalar, L2ChainId: chainID.Big(), - OpcmProxy: st.ImplementationsDeployment.OpcmProxyAddress, + Opcm: st.ImplementationsDeployment.OpcmAddress, SaltMixer: st.Create2Salt.String(), // passing through salt generated at state initialization GasLimit: standard.GasLimit, DisputeGameType: proofParams.DisputeGameType, diff --git a/op-deployer/pkg/deployer/standard/standard.go b/op-deployer/pkg/deployer/standard/standard.go index 2fb6de8c5bf7..d2fe5e5d942b 100644 --- a/op-deployer/pkg/deployer/standard/standard.go +++ b/op-deployer/pkg/deployer/standard/standard.go @@ -26,6 +26,9 @@ const ( DisputeSplitDepth uint64 = 30 DisputeClockExtension uint64 = 10800 DisputeMaxClockDuration uint64 = 302400 + Eip1559DenominatorCanyon uint64 = 250 + Eip1559Denominator uint64 = 50 + Eip1559Elasticity uint64 = 6 ContractsV160Tag = "op-contracts/v1.6.0" ContractsV170Beta1L2Tag = "op-contracts/v1.7.0-beta.1+l2-contracts" @@ -97,6 +100,28 @@ func 
L1VersionsFor(chainID uint64) (L1Versions, error) { } } +func GuardianAddressFor(chainID uint64) (common.Address, error) { + switch chainID { + case 1: + return common.HexToAddress("0x09f7150D8c019BeF34450d6920f6B3608ceFdAf2"), nil + case 11155111: + return common.HexToAddress("0x7a50f00e8D05b95F98fE38d8BeE366a7324dCf7E"), nil + default: + return common.Address{}, fmt.Errorf("unsupported chain ID: %d", chainID) + } +} + +func ChallengerAddressFor(chainID uint64) (common.Address, error) { + switch chainID { + case 1: + return common.HexToAddress("0x9BA6e03D8B90dE867373Db8cF1A58d2F7F006b3A"), nil + case 11155111: + return common.HexToAddress("0xfd1D2e729aE8eEe2E146c033bf4400fE75284301"), nil + default: + return common.Address{}, fmt.Errorf("unsupported chain ID: %d", chainID) + } +} + func SuperchainFor(chainID uint64) (*superchain.Superchain, error) { switch chainID { case 1: @@ -115,7 +140,7 @@ func ChainNameFor(chainID uint64) (string, error) { case 11155111: return "sepolia", nil default: - return "", fmt.Errorf("unrecognized chain ID: %d", chainID) + return "", fmt.Errorf("unrecognized l1 chain ID: %d", chainID) } } @@ -133,11 +158,15 @@ func CommitForDeployTag(tag string) (string, error) { func ManagerImplementationAddrFor(chainID uint64) (common.Address, error) { switch chainID { case 1: - // Generated using the bootstrap command on 10/18/2024. - return common.HexToAddress("0x18cec91779995ad14c880e4095456b9147160790"), nil + // Generated using the bootstrap command on 11/18/2024. + // Verified against compiled bytecode at: + // https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts-v160-artifacts-opcm-redesign-backport + return common.HexToAddress("0x9BC0A1eD534BFb31a6Be69e5b767Cba332f14347"), nil case 11155111: - // Generated using the bootstrap command on 10/18/2024. - return common.HexToAddress("0xf564eea7960ea244bfebcbbb17858748606147bf"), nil + // Generated using the bootstrap command on 11/18/2024. 
+ // Verified against compiled bytecode at: + // https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts-v160-artifacts-opcm-redesign-backport + return common.HexToAddress("0x760B1d2Dc68DC51fb6E8B2b8722B8ed08903540c"), nil default: return common.Address{}, fmt.Errorf("unsupported chain ID: %d", chainID) } @@ -169,10 +198,21 @@ func SystemOwnerAddrFor(chainID uint64) (common.Address, error) { } } +func L1ProxyAdminOwner(chainID uint64) (common.Address, error) { + switch chainID { + case 1: + return common.HexToAddress("0x5a0Aae59D09fccBdDb6C6CcEB07B7279367C3d2A"), nil + case 11155111: + return common.HexToAddress("0x1Eb2fFc903729a0F03966B917003800b145F56E2"), nil + default: + return common.Address{}, fmt.Errorf("unsupported chain ID: %d", chainID) + } +} + func ArtifactsURLForTag(tag string) (*url.URL, error) { switch tag { case "op-contracts/v1.6.0": - return url.Parse(standardArtifactsURL("3a27c6dc0cb61b36feaac26def98c64b4a48ec8f5c5ba6965e8ae3157606043c")) + return url.Parse(standardArtifactsURL("e1f0c4020618c4a98972e7124c39686cab2e31d5d7846f9ce5e0d5eed0f5ff32")) case "op-contracts/v1.7.0-beta.1+l2-contracts": return url.Parse(standardArtifactsURL("b0fb1f6f674519d637cff39a22187a5993d7f81a6d7b7be6507a0b50a5e38597")) default: @@ -180,6 +220,17 @@ func ArtifactsURLForTag(tag string) (*url.URL, error) { } } +func ArtifactsHashForTag(tag string) (common.Hash, error) { + switch tag { + case "op-contracts/v1.6.0": + return common.HexToHash("d20a930cc0ff204c2d93b7aa60755ec7859ba4f328b881f5090c6a6a2a86dcba"), nil + case "op-contracts/v1.7.0-beta.1+l2-contracts": + return common.HexToHash("9e3ad322ec9b2775d59143ce6874892f9b04781742c603ad59165159e90b00b9"), nil + default: + return common.Hash{}, fmt.Errorf("unsupported tag: %s", tag) + } +} + func standardArtifactsURL(checksum string) string { return fmt.Sprintf("https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-%s.tar.gz", checksum) } diff --git a/op-deployer/pkg/deployer/state/chain_intent.go b/op-deployer/pkg/deployer/state/chain_intent.go new file mode 100644 index 000000000000..bb6693f56b88 --- /dev/null +++ b/op-deployer/pkg/deployer/state/chain_intent.go @@ -0,0 +1,82 @@ +package state + +import ( + "fmt" + "reflect" + + "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" + "github.com/ethereum/go-ethereum/common" +) + +type ChainIntent struct { + ID common.Hash `json:"id" toml:"id"` + BaseFeeVaultRecipient common.Address `json:"baseFeeVaultRecipient" toml:"baseFeeVaultRecipient"` + L1FeeVaultRecipient common.Address `json:"l1FeeVaultRecipient" toml:"l1FeeVaultRecipient"` + SequencerFeeVaultRecipient common.Address `json:"sequencerFeeVaultRecipient" toml:"sequencerFeeVaultRecipient"` + Eip1559DenominatorCanyon uint64 `json:"eip1559DenominatorCanyon" toml:"eip1559DenominatorCanyon"` + Eip1559Denominator uint64 `json:"eip1559Denominator" toml:"eip1559Denominator"` + Eip1559Elasticity uint64 `json:"eip1559Elasticity" toml:"eip1559Elasticity"` + Roles ChainRoles `json:"roles" toml:"roles"` + DeployOverrides map[string]any `json:"deployOverrides" toml:"deployOverrides"` + DangerousAltDAConfig genesis.AltDADeployConfig `json:"dangerousAltDAConfig,omitempty" toml:"dangerousAltDAConfig,omitempty"` +} + +type ChainRoles struct { + L1ProxyAdminOwner common.Address `json:"l1ProxyAdminOwner" toml:"l1ProxyAdminOwner"` + L2ProxyAdminOwner common.Address `json:"l2ProxyAdminOwner" toml:"l2ProxyAdminOwner"` + SystemConfigOwner common.Address `json:"systemConfigOwner" toml:"systemConfigOwner"` + UnsafeBlockSigner 
common.Address `json:"unsafeBlockSigner" toml:"unsafeBlockSigner"` + Batcher common.Address `json:"batcher" toml:"batcher"` + Proposer common.Address `json:"proposer" toml:"proposer"` + Challenger common.Address `json:"challenger" toml:"challenger"` +} + +var ErrChainRoleZeroAddress = fmt.Errorf("ChainRole is set to zero address") +var ErrFeeVaultZeroAddress = fmt.Errorf("chain has a fee vault set to zero address") +var ErrNonStandardValue = fmt.Errorf("chain contains non-standard config value") +var ErrEip1559ZeroValue = fmt.Errorf("eip1559 param is set to zero value") + +func (c *ChainIntent) Check() error { + if c.ID == emptyHash { + return fmt.Errorf("id must be set") + } + + if err := c.Roles.CheckNoZeroAddresses(); err != nil { + return err + } + + if c.Eip1559DenominatorCanyon == 0 || + c.Eip1559Denominator == 0 || + c.Eip1559Elasticity == 0 { + return fmt.Errorf("%w: chainId=%s", ErrEip1559ZeroValue, c.ID) + } + if c.BaseFeeVaultRecipient == emptyAddress || + c.L1FeeVaultRecipient == emptyAddress || + c.SequencerFeeVaultRecipient == emptyAddress { + return fmt.Errorf("%w: chainId=%s", ErrFeeVaultZeroAddress, c.ID) + } + + if c.DangerousAltDAConfig.UseAltDA { + return c.DangerousAltDAConfig.Check(nil) + } + + return nil +} + +// Returns an error if any fields in ChainRoles is set to common.Address{} +func (cr *ChainRoles) CheckNoZeroAddresses() error { + val := reflect.ValueOf(*cr) + typ := reflect.TypeOf(*cr) + + // Iterate through all the fields + for i := 0; i < val.NumField(); i++ { + fieldValue := val.Field(i) + fieldName := typ.Field(i).Name + + if fieldValue.Interface() == (common.Address{}) { + return fmt.Errorf("%w: %s", ErrChainRoleZeroAddress, fieldName) + } + } + + return nil +} diff --git a/op-deployer/pkg/deployer/state/deploy_config.go b/op-deployer/pkg/deployer/state/deploy_config.go index 1a03c21d7e94..11445c2fb698 100644 --- a/op-deployer/pkg/deployer/state/deploy_config.go +++ b/op-deployer/pkg/deployer/state/deploy_config.go @@ -63,6 +63,11 @@ func CombineDeployConfig(intent *Intent, chainIntent *ChainIntent, state *State, EIP1559DenominatorCanyon: 250, EIP1559Elasticity: chainIntent.Eip1559Elasticity, }, + + // STOP! This struct sets the _default_ upgrade schedule for all chains. + // Any upgrades you enable here will be enabled for all new deployments. + // In-development hardforks should never be activated here. Instead, they + // should be specified as overrides. 
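+	// Illustration only (keys taken from deploy_config_test.go in this change; the
+	// mechanism is assumed from that test, not stated here): an in-development fork
+	// is enabled for a single chain through its intent's DeployOverrides rather than
+	// by editing these defaults, e.g.
+	//
+	//	chainIntent.DeployOverrides = map[string]any{
+	//		"l2GenesisHoloceneTimeOffset": "0x10", // hex-encoded activation offset
+	//	}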
UpgradeScheduleDeployConfig: genesis.UpgradeScheduleDeployConfig{ L2GenesisRegolithTimeOffset: u64UtilPtr(0), L2GenesisCanyonTimeOffset: u64UtilPtr(0), diff --git a/op-deployer/pkg/deployer/state/deploy_config_test.go b/op-deployer/pkg/deployer/state/deploy_config_test.go new file mode 100644 index 000000000000..c0381507d169 --- /dev/null +++ b/op-deployer/pkg/deployer/state/deploy_config_test.go @@ -0,0 +1,47 @@ +package state + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/require" +) + +func TestCombineDeployConfig(t *testing.T) { + intent := Intent{ + L1ChainID: 1, + } + chainState := ChainState{ + ID: common.HexToHash("0x123"), + } + chainIntent := ChainIntent{ + Eip1559Denominator: 1, + Eip1559Elasticity: 2, + BaseFeeVaultRecipient: common.HexToAddress("0x123"), + L1FeeVaultRecipient: common.HexToAddress("0x456"), + SequencerFeeVaultRecipient: common.HexToAddress("0x789"), + Roles: ChainRoles{ + SystemConfigOwner: common.HexToAddress("0x123"), + L1ProxyAdminOwner: common.HexToAddress("0x456"), + L2ProxyAdminOwner: common.HexToAddress("0x789"), + UnsafeBlockSigner: common.HexToAddress("0xabc"), + Batcher: common.HexToAddress("0xdef"), + }, + } + state := State{ + SuperchainDeployment: &SuperchainDeployment{ProtocolVersionsProxyAddress: common.HexToAddress("0x123")}, + } + + // apply hard fork overrides + chainIntent.DeployOverrides = map[string]any{ + "l2GenesisGraniteTimeOffset": "0x8", + "l2GenesisHoloceneTimeOffset": "0x10", + } + + out, err := CombineDeployConfig(&intent, &chainIntent, &state, &chainState) + require.NoError(t, err) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisFjordTimeOffset, hexutil.Uint64(0)) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisGraniteTimeOffset, hexutil.Uint64(8)) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisHoloceneTimeOffset, hexutil.Uint64(16)) +} diff --git a/op-deployer/pkg/deployer/state/intent.go b/op-deployer/pkg/deployer/state/intent.go index 860944666e9e..5b1e4222b3e0 100644 --- a/op-deployer/pkg/deployer/state/intent.go +++ b/op-deployer/pkg/deployer/state/intent.go @@ -1,10 +1,11 @@ package state import ( + "errors" "fmt" "math/big" - - "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" + "net/url" + "reflect" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" @@ -31,63 +32,200 @@ func (d DeploymentStrategy) Check() error { } } +type IntentConfigType string + +const ( + IntentConfigTypeStandard IntentConfigType = "standard" + IntentConfigTypeCustom IntentConfigType = "custom" + IntentConfigTypeStrict IntentConfigType = "strict" + IntentConfigTypeStandardOverrides IntentConfigType = "standard-overrides" + IntentConfigTypeStrictOverrides IntentConfigType = "strict-overrides" +) + var emptyAddress common.Address +var emptyHash common.Hash type Intent struct { - DeploymentStrategy DeploymentStrategy `json:"deploymentStrategy" toml:"deploymentStrategy"` + DeploymentStrategy DeploymentStrategy `json:"deploymentStrategy" toml:"deploymentStrategy"` + ConfigType IntentConfigType `json:"configType" toml:"configType"` + L1ChainID uint64 `json:"l1ChainID" toml:"l1ChainID"` + SuperchainRoles *SuperchainRoles `json:"superchainRoles" toml:"superchainRoles,omitempty"` + FundDevAccounts bool `json:"fundDevAccounts" toml:"fundDevAccounts"` + UseInterop bool `json:"useInterop" toml:"useInterop"` + 
L1ContractsLocator *artifacts.Locator `json:"l1ContractsLocator" toml:"l1ContractsLocator"` + L2ContractsLocator *artifacts.Locator `json:"l2ContractsLocator" toml:"l2ContractsLocator"` + Chains []*ChainIntent `json:"chains" toml:"chains"` + GlobalDeployOverrides map[string]any `json:"globalDeployOverrides" toml:"globalDeployOverrides"` +} - L1ChainID uint64 `json:"l1ChainID" toml:"l1ChainID"` +type SuperchainRoles struct { + ProxyAdminOwner common.Address `json:"proxyAdminOwner" toml:"proxyAdminOwner"` + ProtocolVersionsOwner common.Address `json:"protocolVersionsOwner" toml:"protocolVersionsOwner"` + Guardian common.Address `json:"guardian" toml:"guardian"` +} - SuperchainRoles *SuperchainRoles `json:"superchainRoles" toml:"superchainRoles,omitempty"` +var ErrSuperchainRoleZeroAddress = errors.New("SuperchainRole is set to zero address") +var ErrL1ContractsLocatorUndefined = errors.New("L1ContractsLocator undefined") +var ErrL2ContractsLocatorUndefined = errors.New("L2ContractsLocator undefined") - FundDevAccounts bool `json:"fundDevAccounts" toml:"fundDevAccounts"` +func (s *SuperchainRoles) CheckNoZeroAddresses() error { + val := reflect.ValueOf(*s) + typ := reflect.TypeOf(*s) - UseInterop bool `json:"useInterop" toml:"useInterop"` + // Iterate through all the fields + for i := 0; i < val.NumField(); i++ { + fieldValue := val.Field(i) + fieldName := typ.Field(i).Name - L1ContractsLocator *artifacts.Locator `json:"l1ContractsLocator" toml:"l1ContractsLocator"` + if fieldValue.Interface() == (common.Address{}) { + return fmt.Errorf("%w: %s", ErrSuperchainRoleZeroAddress, fieldName) + } + } + return nil +} - L2ContractsLocator *artifacts.Locator `json:"l2ContractsLocator" toml:"l2ContractsLocator"` +func (c *Intent) L1ChainIDBig() *big.Int { + return big.NewInt(int64(c.L1ChainID)) +} + +func (c *Intent) validateCustomConfig() error { + if c.L1ContractsLocator == nil || + (c.L1ContractsLocator.Tag == "" && c.L1ContractsLocator.URL == &url.URL{}) { + return ErrL1ContractsLocatorUndefined + } + if c.L2ContractsLocator == nil || + (c.L2ContractsLocator.Tag == "" && c.L2ContractsLocator.URL == &url.URL{}) { + return ErrL2ContractsLocatorUndefined + } + + if c.SuperchainRoles == nil { + return errors.New("SuperchainRoles is set to nil") + } + if err := c.SuperchainRoles.CheckNoZeroAddresses(); err != nil { + return err + } - Chains []*ChainIntent `json:"chains" toml:"chains"` + if len(c.Chains) == 0 { + return errors.New("must define at least one l2 chain") + } - GlobalDeployOverrides map[string]any `json:"globalDeployOverrides" toml:"globalDeployOverrides"` + for _, chain := range c.Chains { + if err := chain.Check(); err != nil { + return err + } + } + + return nil } -func (c *Intent) L1ChainIDBig() *big.Int { - return big.NewInt(int64(c.L1ChainID)) +func (c *Intent) validateStrictConfig() error { + if err := c.validateStandardValues(); err != nil { + return err + } + + challenger, _ := standard.ChallengerAddressFor(c.L1ChainID) + l1ProxyAdminOwner, _ := standard.L1ProxyAdminOwner(c.L1ChainID) + for chainIndex := range c.Chains { + if c.Chains[chainIndex].Roles.Challenger != challenger { + return fmt.Errorf("invalid challenger address for chain: %s", c.Chains[chainIndex].ID) + } + if c.Chains[chainIndex].Roles.L1ProxyAdminOwner != l1ProxyAdminOwner { + return fmt.Errorf("invalid l1ProxyAdminOwner address for chain: %s", c.Chains[chainIndex].ID) + } + } + + return nil } -func (c *Intent) Check() error { - if c.DeploymentStrategy != DeploymentStrategyLive && c.DeploymentStrategy != 
DeploymentStrategyGenesis { - return fmt.Errorf("deploymentStrategy must be 'live' or 'local'") +// Ensures the following: +// 1. no zero-values for non-standard fields (user should have populated these) +// 2. no non-standard values for standard fields (user should not have changed these) +func (c *Intent) validateStandardValues() error { + if err := c.checkL1Prod(); err != nil { + return err + } + if err := c.checkL2Prod(); err != nil { + return err + } + + standardSuperchainRoles, err := getStandardSuperchainRoles(c.L1ChainID) + if err != nil { + return fmt.Errorf("error getting standard superchain roles: %w", err) + } + if c.SuperchainRoles == nil || *c.SuperchainRoles != *standardSuperchainRoles { + return fmt.Errorf("SuperchainRoles does not match standard value") + } + + for _, chain := range c.Chains { + if err := chain.Check(); err != nil { + return err + } + if chain.Eip1559DenominatorCanyon != standard.Eip1559DenominatorCanyon || + chain.Eip1559Denominator != standard.Eip1559Denominator || + chain.Eip1559Elasticity != standard.Eip1559Elasticity { + return fmt.Errorf("%w: chainId=%s", ErrNonStandardValue, chain.ID) + } + } + + return nil +} + +func getStandardSuperchainRoles(l1ChainId uint64) (*SuperchainRoles, error) { + superCfg, err := standard.SuperchainFor(l1ChainId) + if err != nil { + return nil, fmt.Errorf("error getting superchain config: %w", err) + } + + proxyAdminOwner, err := standard.L1ProxyAdminOwner(l1ChainId) + if err != nil { + return nil, fmt.Errorf("error getting L1ProxyAdminOwner: %w", err) + } + guardian, err := standard.GuardianAddressFor(l1ChainId) + if err != nil { + return nil, fmt.Errorf("error getting guardian address: %w", err) + } + + superchainRoles := &SuperchainRoles{ + ProxyAdminOwner: proxyAdminOwner, + ProtocolVersionsOwner: common.Address(*superCfg.Config.ProtocolVersionsAddr), + Guardian: guardian, } + return superchainRoles, nil +} + +func (c *Intent) Check() error { if c.L1ChainID == 0 { - return fmt.Errorf("l1ChainID must be set") + return fmt.Errorf("l1ChainID cannot be 0") + } + + if err := c.DeploymentStrategy.Check(); err != nil { + return err } if c.L1ContractsLocator == nil { - c.L1ContractsLocator = artifacts.DefaultL1ContractsLocator + return ErrL1ContractsLocatorUndefined } if c.L2ContractsLocator == nil { - c.L2ContractsLocator = artifacts.DefaultL2ContractsLocator + return ErrL2ContractsLocatorUndefined } var err error - if c.L1ContractsLocator.IsTag() { - err = c.checkL1Prod() - } else { - err = c.checkL1Dev() + switch c.ConfigType { + case IntentConfigTypeStandard: + err = c.validateStandardValues() + case IntentConfigTypeCustom: + err = c.validateCustomConfig() + case IntentConfigTypeStrict: + err = c.validateStrictConfig() + case IntentConfigTypeStandardOverrides, IntentConfigTypeStrictOverrides: + err = c.validateCustomConfig() + default: + return fmt.Errorf("intent-config-type unsupported: %s", c.ConfigType) } if err != nil { - return err - } - - if c.L2ContractsLocator.IsTag() { - if err := c.checkL2Prod(); err != nil { - return err - } + return fmt.Errorf("failed to validate intent-config-type=%s: %w", c.ConfigType, err) } return nil @@ -120,100 +258,113 @@ func (c *Intent) checkL1Prod() error { return nil } -func (c *Intent) checkL1Dev() error { - if c.SuperchainRoles.ProxyAdminOwner == emptyAddress { - return fmt.Errorf("proxyAdminOwner must be set") - } - - if c.SuperchainRoles.ProtocolVersionsOwner == emptyAddress { - c.SuperchainRoles.ProtocolVersionsOwner = c.SuperchainRoles.ProxyAdminOwner - } - - if 
c.SuperchainRoles.Guardian == emptyAddress { - c.SuperchainRoles.Guardian = c.SuperchainRoles.ProxyAdminOwner - } - - return nil -} - func (c *Intent) checkL2Prod() error { _, err := standard.ArtifactsURLForTag(c.L2ContractsLocator.Tag) return err } -type SuperchainRoles struct { - ProxyAdminOwner common.Address `json:"proxyAdminOwner" toml:"proxyAdminOwner"` - - ProtocolVersionsOwner common.Address `json:"protocolVersionsOwner" toml:"protocolVersionsOwner"` - - Guardian common.Address `json:"guardian" toml:"guardian"` -} - -type ChainIntent struct { - ID common.Hash `json:"id" toml:"id"` - - BaseFeeVaultRecipient common.Address `json:"baseFeeVaultRecipient" toml:"baseFeeVaultRecipient"` +func NewIntent(configType IntentConfigType, deploymentStrategy DeploymentStrategy, l1ChainId uint64, l2ChainIds []common.Hash) (Intent, error) { + switch configType { + case IntentConfigTypeCustom: + return NewIntentCustom(deploymentStrategy, l1ChainId, l2ChainIds) - L1FeeVaultRecipient common.Address `json:"l1FeeVaultRecipient" toml:"l1FeeVaultRecipient"` + case IntentConfigTypeStandard: + return NewIntentStandard(deploymentStrategy, l1ChainId, l2ChainIds) - SequencerFeeVaultRecipient common.Address `json:"sequencerFeeVaultRecipient" toml:"sequencerFeeVaultRecipient"` + case IntentConfigTypeStandardOverrides: + return NewIntentStandardOverrides(deploymentStrategy, l1ChainId, l2ChainIds) - Eip1559Denominator uint64 `json:"eip1559Denominator" toml:"eip1559Denominator"` + case IntentConfigTypeStrict: + return NewIntentStrict(deploymentStrategy, l1ChainId, l2ChainIds) - Eip1559Elasticity uint64 `json:"eip1559Elasticity" toml:"eip1559Elasticity"` + case IntentConfigTypeStrictOverrides: + return NewIntentStrictOverrides(deploymentStrategy, l1ChainId, l2ChainIds) - Roles ChainRoles `json:"roles" toml:"roles"` - - DeployOverrides map[string]any `json:"deployOverrides" toml:"deployOverrides"` - - DangerousAltDAConfig genesis.AltDADeployConfig `json:"dangerousAltDAConfig,omitempty" toml:"dangerousAltDAConfig,omitempty"` + default: + return Intent{}, fmt.Errorf("intent config type not supported") + } } -type ChainRoles struct { - L1ProxyAdminOwner common.Address `json:"l1ProxyAdminOwner" toml:"l1ProxyAdminOwner"` - - L2ProxyAdminOwner common.Address `json:"l2ProxyAdminOwner" toml:"l2ProxyAdminOwner"` - - SystemConfigOwner common.Address `json:"systemConfigOwner" toml:"systemConfigOwner"` - - UnsafeBlockSigner common.Address `json:"unsafeBlockSigner" toml:"unsafeBlockSigner"` - - Batcher common.Address `json:"batcher" toml:"batcher"` - - Proposer common.Address `json:"proposer" toml:"proposer"` +// Sets all Intent fields to their zero value with the expectation that the +// user will populate the values before running 'apply' +func NewIntentCustom(deploymentStrategy DeploymentStrategy, l1ChainId uint64, l2ChainIds []common.Hash) (Intent, error) { + intent := Intent{ + DeploymentStrategy: deploymentStrategy, + ConfigType: IntentConfigTypeCustom, + L1ChainID: l1ChainId, + L1ContractsLocator: &artifacts.Locator{URL: &url.URL{}}, + L2ContractsLocator: &artifacts.Locator{URL: &url.URL{}}, + SuperchainRoles: &SuperchainRoles{}, + } - Challenger common.Address `json:"challenger" toml:"challenger"` + for _, l2ChainID := range l2ChainIds { + intent.Chains = append(intent.Chains, &ChainIntent{ + ID: l2ChainID, + }) + } + return intent, nil } -func (c *ChainIntent) Check() error { - var emptyHash common.Hash - if c.ID == emptyHash { - return fmt.Errorf("id must be set") +func NewIntentStandard(deploymentStrategy 
DeploymentStrategy, l1ChainId uint64, l2ChainIds []common.Hash) (Intent, error) { + intent := Intent{ + DeploymentStrategy: deploymentStrategy, + ConfigType: IntentConfigTypeStandard, + L1ChainID: l1ChainId, + L1ContractsLocator: artifacts.DefaultL1ContractsLocator, + L2ContractsLocator: artifacts.DefaultL2ContractsLocator, } - if c.Roles.L1ProxyAdminOwner == emptyAddress { - return fmt.Errorf("proxyAdminOwner must be set") + superchainRoles, err := getStandardSuperchainRoles(l1ChainId) + if err != nil { + return Intent{}, fmt.Errorf("error getting standard superchain roles: %w", err) } - - if c.Roles.SystemConfigOwner == emptyAddress { - c.Roles.SystemConfigOwner = c.Roles.L1ProxyAdminOwner + intent.SuperchainRoles = superchainRoles + + for _, l2ChainID := range l2ChainIds { + intent.Chains = append(intent.Chains, &ChainIntent{ + ID: l2ChainID, + Eip1559DenominatorCanyon: standard.Eip1559DenominatorCanyon, + Eip1559Denominator: standard.Eip1559Denominator, + Eip1559Elasticity: standard.Eip1559Elasticity, + }) } + return intent, nil +} - if c.Roles.L2ProxyAdminOwner == emptyAddress { - return fmt.Errorf("l2ProxyAdminOwner must be set") +func NewIntentStandardOverrides(deploymentStrategy DeploymentStrategy, l1ChainId uint64, l2ChainIds []common.Hash) (Intent, error) { + intent, err := NewIntentStandard(deploymentStrategy, l1ChainId, l2ChainIds) + if err != nil { + return Intent{}, err } + intent.ConfigType = IntentConfigTypeStandardOverrides + + return intent, nil +} - if c.Roles.UnsafeBlockSigner == emptyAddress { - return fmt.Errorf("unsafeBlockSigner must be set") +// Same as NewIntentStandard, but also sets l2 Challenger and L1ProxyAdminOwner +// addresses to standard values +func NewIntentStrict(deploymentStrategy DeploymentStrategy, l1ChainId uint64, l2ChainIds []common.Hash) (Intent, error) { + intent, err := NewIntentStandard(deploymentStrategy, l1ChainId, l2ChainIds) + if err != nil { + return Intent{}, err } + intent.ConfigType = IntentConfigTypeStrict - if c.Roles.Batcher == emptyAddress { - return fmt.Errorf("batcher must be set") + challenger, _ := standard.ChallengerAddressFor(l1ChainId) + l1ProxyAdminOwner, _ := standard.ManagerOwnerAddrFor(l1ChainId) + for chainIndex := range intent.Chains { + intent.Chains[chainIndex].Roles.Challenger = challenger + intent.Chains[chainIndex].Roles.L1ProxyAdminOwner = l1ProxyAdminOwner } + return intent, nil +} - if c.DangerousAltDAConfig.UseAltDA { - return c.DangerousAltDAConfig.Check(nil) +func NewIntentStrictOverrides(deploymentStrategy DeploymentStrategy, l1ChainId uint64, l2ChainIds []common.Hash) (Intent, error) { + intent, err := NewIntentStrict(deploymentStrategy, l1ChainId, l2ChainIds) + if err != nil { + return Intent{}, err } + intent.ConfigType = IntentConfigTypeStrictOverrides - return nil + return intent, nil } diff --git a/op-deployer/pkg/deployer/state/intent_test.go b/op-deployer/pkg/deployer/state/intent_test.go new file mode 100644 index 000000000000..1d8c12375f88 --- /dev/null +++ b/op-deployer/pkg/deployer/state/intent_test.go @@ -0,0 +1,89 @@ +package state + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestValidateStandardValues(t *testing.T) { + intent, err := NewIntentStandard(DeploymentStrategyLive, 1, []common.Hash{common.HexToHash("0x336")}) + require.NoError(t, err) + + err = intent.Check() + require.Error(t, err) + require.ErrorIs(t, err, ErrChainRoleZeroAddress) + + setChainRoles(&intent) + err = intent.Check() + require.Error(t, err) + 
require.ErrorIs(t, err, ErrFeeVaultZeroAddress) + + setFeeAddresses(&intent) + err = intent.Check() + require.NoError(t, err) + + intent.Chains[0].Eip1559Denominator = 3 // set to non-standard value + err = intent.Check() + require.Error(t, err) + require.ErrorIs(t, err, ErrNonStandardValue) +} + +func TestValidateCustomValues(t *testing.T) { + intent, err := NewIntentCustom(DeploymentStrategyLive, 1, []common.Hash{common.HexToHash("0x336")}) + require.NoError(t, err) + + err = intent.Check() + require.Error(t, err) + require.ErrorIs(t, err, ErrSuperchainRoleZeroAddress) + + setSuperchainRoles(&intent) + err = intent.Check() + require.Error(t, err) + require.ErrorIs(t, err, ErrChainRoleZeroAddress) + + setChainRoles(&intent) + err = intent.Check() + require.Error(t, err) + require.ErrorIs(t, err, ErrEip1559ZeroValue) + + setEip1559Params(&intent) + err = intent.Check() + require.Error(t, err) + require.ErrorIs(t, err, ErrFeeVaultZeroAddress) + + setFeeAddresses(&intent) + err = intent.Check() + require.NoError(t, err) +} + +func setSuperchainRoles(intent *Intent) { + intent.SuperchainRoles = &SuperchainRoles{ + ProxyAdminOwner: common.HexToAddress("0xa"), + ProtocolVersionsOwner: common.HexToAddress("0xb"), + Guardian: common.HexToAddress("0xc"), + } +} + +func setEip1559Params(intent *Intent) { + intent.Chains[0].Eip1559Denominator = 5000 + intent.Chains[0].Eip1559DenominatorCanyon = 5000 + intent.Chains[0].Eip1559Elasticity = 5000 +} + +func setChainRoles(intent *Intent) { + intent.Chains[0].Roles.L1ProxyAdminOwner = common.HexToAddress("0x01") + intent.Chains[0].Roles.L2ProxyAdminOwner = common.HexToAddress("0x02") + intent.Chains[0].Roles.SystemConfigOwner = common.HexToAddress("0x03") + intent.Chains[0].Roles.UnsafeBlockSigner = common.HexToAddress("0x04") + intent.Chains[0].Roles.Batcher = common.HexToAddress("0x05") + intent.Chains[0].Roles.Proposer = common.HexToAddress("0x06") + intent.Chains[0].Roles.Challenger = common.HexToAddress("0x07") +} + +func setFeeAddresses(intent *Intent) { + intent.Chains[0].BaseFeeVaultRecipient = common.HexToAddress("0x08") + intent.Chains[0].L1FeeVaultRecipient = common.HexToAddress("0x09") + intent.Chains[0].SequencerFeeVaultRecipient = common.HexToAddress("0x0A") +} diff --git a/op-deployer/pkg/deployer/state/state.go b/op-deployer/pkg/deployer/state/state.go index e3974fa2a78c..e17ed6184c5d 100644 --- a/op-deployer/pkg/deployer/state/state.go +++ b/op-deployer/pkg/deployer/state/state.go @@ -64,7 +64,7 @@ type SuperchainDeployment struct { } type ImplementationsDeployment struct { - OpcmProxyAddress common.Address `json:"opcmProxyAddress"` + OpcmAddress common.Address `json:"opcmAddress"` DelayedWETHImplAddress common.Address `json:"delayedWETHImplAddress"` OptimismPortalImplAddress common.Address `json:"optimismPortalImplAddress"` PreimageOracleSingletonAddress common.Address `json:"preimageOracleSingletonAddress"` diff --git a/op-dispute-mon/Makefile b/op-dispute-mon/Makefile index d94a0fa95a96..6bb994650bfc 100644 --- a/op-dispute-mon/Makefile +++ b/op-dispute-mon/Makefile @@ -1,21 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -VERSION ?= v0.0.0 +DEPRECATED_TARGETS := op-dispute-mon clean test -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-dispute-mon/version.Version=$(VERSION) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-dispute-mon/version.Meta=$(VERSION_META) -LDFLAGS 
:= -ldflags "$(LDFLAGSSTRING)" - -op-dispute-mon: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-dispute-mon ./cmd -.PHONY: op-dispute-mon - -clean: - rm bin/op-dispute-mon -.PHONY: clean - -test: - go test -v ./... -.PHONY: test +include ../just/deprecated.mk diff --git a/op-dispute-mon/justfile b/op-dispute-mon/justfile new file mode 100644 index 000000000000..3788cbd2b14a --- /dev/null +++ b/op-dispute-mon/justfile @@ -0,0 +1,21 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X github.com/ethereum-optimism/optimism/op-dispute-mon/version.Version=" + VERSION + " " + \ + "-X github.com/ethereum-optimism/optimism/op-dispute-mon/version.Meta=" + VERSION_META + " " + \ + "") + "'" + +BINARY := "./bin/op-dispute-mon" + +# Build op-dispute-mon binary +op-dispute-mon: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) + +# Clean build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") diff --git a/op-dispute-mon/mon/extract/head_enricher.go b/op-dispute-mon/mon/extract/head_enricher.go index 943e648f771d..4162c9e896d6 100644 --- a/op-dispute-mon/mon/extract/head_enricher.go +++ b/op-dispute-mon/mon/extract/head_enricher.go @@ -5,15 +5,15 @@ import ( "fmt" monTypes "github.com/ethereum-optimism/optimism/op-dispute-mon/mon/types" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" ) var _ Enricher = (*L1HeadBlockNumEnricher)(nil) type BlockFetcher interface { - HeaderByHash(ctx context.Context, block common.Hash) (*types.Header, error) + L1BlockRefByHash(ctx context.Context, block common.Hash) (eth.L1BlockRef, error) } type L1HeadBlockNumEnricher struct { @@ -25,10 +25,10 @@ func NewL1HeadBlockNumEnricher(client BlockFetcher) *L1HeadBlockNumEnricher { } func (e *L1HeadBlockNumEnricher) Enrich(ctx context.Context, _ rpcblock.Block, _ GameCaller, game *monTypes.EnrichedGameData) error { - header, err := e.client.HeaderByHash(ctx, game.L1Head) + header, err := e.client.L1BlockRefByHash(ctx, game.L1Head) if err != nil { return fmt.Errorf("failed to retrieve header for L1 head block %v: %w", game.L1Head, err) } - game.L1HeadNum = header.Number.Uint64() + game.L1HeadNum = header.Number return nil } diff --git a/op-dispute-mon/mon/extract/head_enricher_test.go b/op-dispute-mon/mon/extract/head_enricher_test.go index c0cb03b86b7a..3c54c09516a7 100644 --- a/op-dispute-mon/mon/extract/head_enricher_test.go +++ b/op-dispute-mon/mon/extract/head_enricher_test.go @@ -3,13 +3,12 @@ package extract import ( "context" "errors" - "math/big" "testing" "github.com/ethereum-optimism/optimism/op-dispute-mon/mon/types" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" "github.com/ethereum/go-ethereum/common" - gethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/require" ) @@ -39,11 +38,11 @@ type stubBlockFetcher struct { err error } -func (s *stubBlockFetcher) HeaderByHash(_ context.Context, _ common.Hash) (*gethTypes.Header, error) { +func (s *stubBlockFetcher) L1BlockRefByHash(_ context.Context, _ common.Hash) (eth.L1BlockRef, error) { if s.err != nil { - return nil, s.err + return eth.L1BlockRef{}, s.err } - return 
&gethTypes.Header{ - Number: new(big.Int).SetUint64(s.num), + return eth.L1BlockRef{ + Number: s.num, }, nil } diff --git a/op-dispute-mon/mon/monitor.go b/op-dispute-mon/mon/monitor.go index 5f7764adccbd..3fb14525e24e 100644 --- a/op-dispute-mon/mon/monitor.go +++ b/op-dispute-mon/mon/monitor.go @@ -3,11 +3,11 @@ package mon import ( "context" "fmt" - "math/big" "time" "github.com/ethereum-optimism/optimism/op-dispute-mon/mon/types" "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -15,8 +15,7 @@ import ( type ForecastResolution func(games []*types.EnrichedGameData, ignoredCount, failedCount int) type Monitor func(games []*types.EnrichedGameData) -type BlockHashFetcher func(ctx context.Context, number *big.Int) (common.Hash, error) -type BlockNumberFetcher func(ctx context.Context) (uint64, error) +type HeadBlockFetcher func(ctx context.Context) (eth.L1BlockRef, error) type Extract func(ctx context.Context, blockHash common.Hash, minTimestamp uint64) ([]*types.EnrichedGameData, int, int, error) type MonitorMetrics interface { @@ -35,11 +34,10 @@ type gameMonitor struct { gameWindow time.Duration monitorInterval time.Duration - forecast ForecastResolution - monitors []Monitor - extract Extract - fetchBlockHash BlockHashFetcher - fetchBlockNumber BlockNumberFetcher + forecast ForecastResolution + monitors []Monitor + extract Extract + fetchHeadBlock HeadBlockFetcher } func newGameMonitor( @@ -49,40 +47,34 @@ func newGameMonitor( metrics MonitorMetrics, monitorInterval time.Duration, gameWindow time.Duration, - fetchBlockHash BlockHashFetcher, - fetchBlockNumber BlockNumberFetcher, + fetchHeadBlock HeadBlockFetcher, extract Extract, forecast ForecastResolution, monitors ...Monitor) *gameMonitor { return &gameMonitor{ - logger: logger, - clock: cl, - ctx: ctx, - done: make(chan struct{}), - metrics: metrics, - monitorInterval: monitorInterval, - gameWindow: gameWindow, - forecast: forecast, - monitors: monitors, - extract: extract, - fetchBlockNumber: fetchBlockNumber, - fetchBlockHash: fetchBlockHash, + logger: logger, + clock: cl, + ctx: ctx, + done: make(chan struct{}), + metrics: metrics, + monitorInterval: monitorInterval, + gameWindow: gameWindow, + forecast: forecast, + monitors: monitors, + extract: extract, + fetchHeadBlock: fetchHeadBlock, } } func (m *gameMonitor) monitorGames() error { start := m.clock.Now() - blockNumber, err := m.fetchBlockNumber(m.ctx) + headBlock, err := m.fetchHeadBlock(m.ctx) if err != nil { return fmt.Errorf("failed to fetch block number: %w", err) } - m.logger.Debug("Fetched block number", "blockNumber", blockNumber) - blockHash, err := m.fetchBlockHash(context.Background(), new(big.Int).SetUint64(blockNumber)) - if err != nil { - return fmt.Errorf("failed to fetch block hash: %w", err) - } + m.logger.Debug("Fetched current head block", "block", headBlock) minGameTimestamp := clock.MinCheckedTimestamp(m.clock, m.gameWindow) - enrichedGames, ignored, failed, err := m.extract(m.ctx, blockHash, minGameTimestamp) + enrichedGames, ignored, failed, err := m.extract(m.ctx, headBlock.Hash, minGameTimestamp) if err != nil { return fmt.Errorf("failed to load games: %w", err) } @@ -92,7 +84,13 @@ func (m *gameMonitor) monitorGames() error { } timeTaken := m.clock.Since(start) m.metrics.RecordMonitorDuration(timeTaken) - m.logger.Info("Completed monitoring update", "blockNumber", blockNumber, "blockHash", blockHash, 
"duration", timeTaken, "games", len(enrichedGames), "ignored", ignored, "failed", failed) + m.logger.Info("Completed monitoring update", + "blockNumber", headBlock.Number, + "blockHash", headBlock.Hash, + "duration", timeTaken, + "games", len(enrichedGames), + "ignored", ignored, + "failed", failed) return nil } diff --git a/op-dispute-mon/mon/monitor_test.go b/op-dispute-mon/mon/monitor_test.go index 1181c57b4ecb..2161e6af14ca 100644 --- a/op-dispute-mon/mon/monitor_test.go +++ b/op-dispute-mon/mon/monitor_test.go @@ -3,7 +3,6 @@ package mon import ( "context" "errors" - "math/big" "testing" "time" @@ -11,6 +10,7 @@ import ( "github.com/ethereum-optimism/optimism/op-dispute-mon/metrics" monTypes "github.com/ethereum-optimism/optimism/op-dispute-mon/mon/types" "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -24,21 +24,11 @@ var ( func TestMonitor_MonitorGames(t *testing.T) { t.Parallel() - t.Run("FailedFetchBlocknumber", func(t *testing.T) { + t.Run("FailedFetchHeadBlock", func(t *testing.T) { monitor, _, _, _ := setupMonitorTest(t) boom := errors.New("boom") - monitor.fetchBlockNumber = func(ctx context.Context) (uint64, error) { - return 0, boom - } - err := monitor.monitorGames() - require.ErrorIs(t, err, boom) - }) - - t.Run("FailedFetchBlockHash", func(t *testing.T) { - monitor, _, _, _ := setupMonitorTest(t) - boom := errors.New("boom") - monitor.fetchBlockHash = func(ctx context.Context, number *big.Int) (common.Hash, error) { - return common.Hash{}, boom + monitor.fetchHeadBlock = func(ctx context.Context) (eth.L1BlockRef, error) { + return eth.L1BlockRef{}, boom } err := monitor.monitorGames() require.ErrorIs(t, err, boom) @@ -108,11 +98,8 @@ func newEnrichedGameData(proxy common.Address, timestamp uint64) *monTypes.Enric func setupMonitorTest(t *testing.T) (*gameMonitor, *mockExtractor, *mockForecast, []*mockMonitor) { logger := testlog.Logger(t, log.LvlDebug) - fetchBlockNum := func(ctx context.Context) (uint64, error) { - return 1, nil - } - fetchBlockHash := func(ctx context.Context, number *big.Int) (common.Hash, error) { - return common.Hash{}, nil + fetchHeadBlock := func(ctx context.Context) (eth.L1BlockRef, error) { + return eth.L1BlockRef{Number: 1, Hash: common.Hash{0xaa}}, nil } monitorInterval := 100 * time.Millisecond cl := clock.NewAdvancingClock(10 * time.Millisecond) @@ -122,7 +109,7 @@ func setupMonitorTest(t *testing.T) (*gameMonitor, *mockExtractor, *mockForecast monitor1 := &mockMonitor{} monitor2 := &mockMonitor{} monitor3 := &mockMonitor{} - monitor := newGameMonitor(context.Background(), logger, cl, metrics.NoopMetrics, monitorInterval, 10*time.Second, fetchBlockHash, fetchBlockNum, + monitor := newGameMonitor(context.Background(), logger, cl, metrics.NoopMetrics, monitorInterval, 10*time.Second, fetchHeadBlock, extractor.Extract, forecast.Forecast, monitor1.Check, monitor2.Check, monitor3.Check) return monitor, extractor, forecast, []*mockMonitor{monitor1, monitor2, monitor3} } diff --git a/op-dispute-mon/mon/service.go b/op-dispute-mon/mon/service.go index 083e391b9111..c44f082d3a88 100644 --- a/op-dispute-mon/mon/service.go +++ b/op-dispute-mon/mon/service.go @@ -4,13 +4,13 @@ import ( "context" "errors" "fmt" - "math/big" "sync/atomic" + "time" "github.com/ethereum-optimism/optimism/op-dispute-mon/mon/bonds" 
"github.com/ethereum-optimism/optimism/op-dispute-mon/mon/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethclient" + rpcclient "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-dispute-mon/config" @@ -47,7 +47,9 @@ type Service struct { withdrawals *WithdrawalMonitor rollupClient *sources.RollupClient - l1Client *ethclient.Client + l1RPC rpcclient.RPC + l1Client *sources.L1Client + l1Caller *batching.MultiCaller pprofService *oppprof.Service metricsSrv *httputil.HTTPServer @@ -120,7 +122,7 @@ func (s *Service) initWithdrawalMonitor() { } func (s *Service) initGameCallerCreator() { - s.game = extract.NewGameCallerCreator(s.metrics, batching.NewMultiCaller(s.l1Client.Client(), batching.DefaultBatchSize)) + s.game = extract.NewGameCallerCreator(s.metrics, s.l1Caller) } func (s *Service) initExtractor(cfg *config.Config) { @@ -159,10 +161,20 @@ func (s *Service) initOutputRollupClient(ctx context.Context, cfg *config.Config } func (s *Service) initL1Client(ctx context.Context, cfg *config.Config) error { - l1Client, err := dial.DialEthClientWithTimeout(ctx, dial.DefaultDialTimeout, s.logger, cfg.L1EthRpc) + l1RPC, err := dial.DialRPCClientWithTimeout(ctx, dial.DefaultDialTimeout, s.logger, cfg.L1EthRpc) if err != nil { return fmt.Errorf("failed to dial L1: %w", err) } + s.l1RPC = rpcclient.NewBaseRPCClient(l1RPC, rpcclient.WithCallTimeout(30*time.Second)) + s.l1Caller = batching.NewMultiCaller(s.l1RPC, batching.DefaultBatchSize) + // The RPC is trusted because the majority of data comes from contract calls which are not verified even when the + // RPC is untrusted and also avoids needing to update op-dispute-mon for L1 hard forks that change the header. + // Note that receipts are never fetched so the RPCKind has no actual effect. 
+ clCfg := sources.L1ClientSimpleConfig(true, sources.RPCKindAny, 100) + l1Client, err := sources.NewL1Client(s.l1RPC, s.logger, s.metrics, clCfg) + if err != nil { + return fmt.Errorf("failed to init l1 client: %w", err) + } s.l1Client = l1Client return nil } @@ -203,24 +215,18 @@ func (s *Service) initMetricsServer(cfg *opmetrics.CLIConfig) error { } func (s *Service) initFactoryContract(cfg *config.Config) error { - factoryContract := contracts.NewDisputeGameFactoryContract(s.metrics, cfg.GameFactoryAddress, - batching.NewMultiCaller(s.l1Client.Client(), batching.DefaultBatchSize)) + factoryContract := contracts.NewDisputeGameFactoryContract(s.metrics, cfg.GameFactoryAddress, s.l1Caller) s.factoryContract = factoryContract return nil } func (s *Service) initMonitor(ctx context.Context, cfg *config.Config) { - blockHashFetcher := func(ctx context.Context, blockNumber *big.Int) (common.Hash, error) { - block, err := s.l1Client.BlockByNumber(ctx, blockNumber) - if err != nil { - return common.Hash{}, fmt.Errorf("failed to fetch block by number: %w", err) - } - return block.Hash(), nil + headBlockFetcher := func(ctx context.Context) (eth.L1BlockRef, error) { + return s.l1Client.L1BlockRefByLabel(ctx, "latest") } l2ChallengesMonitor := NewL2ChallengesMonitor(s.logger, s.metrics) updateTimeMonitor := NewUpdateTimeMonitor(s.cl, s.metrics) - s.monitor = newGameMonitor(ctx, s.logger, s.cl, s.metrics, cfg.MonitorInterval, cfg.GameWindow, blockHashFetcher, - s.l1Client.BlockNumber, + s.monitor = newGameMonitor(ctx, s.logger, s.cl, s.metrics, cfg.MonitorInterval, cfg.GameWindow, headBlockFetcher, s.extractor.Extract, s.forecast.Forecast, s.bonds.CheckBonds, diff --git a/op-dispute-mon/version/version.go b/op-dispute-mon/version/version.go index 834fc089b19e..31ad6f3582af 100644 --- a/op-dispute-mon/version/version.go +++ b/op-dispute-mon/version/version.go @@ -1,7 +1,7 @@ package version var ( - Version = "v0.1.0" + Version = "v0.0.0" Meta = "dev" ) diff --git a/op-e2e/README.md b/op-e2e/README.md index d6f096194224..ba3855474615 100644 --- a/op-e2e/README.md +++ b/op-e2e/README.md @@ -1,34 +1,105 @@ -# op-e2e +# `op-e2e` -The end to end tests in this repo depend on genesis state that is -created with the `bedrock-devnet` package. To create this state, -run the following commands from the root of the repository: +Issues: [monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-e2e) -```bash -make install-geth -make cannon-prestate -make devnet-allocs -``` +Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-e2e) + +Design docs: +- [test infra draft design-doc]: active discussion of end-to-end testing approach -This will leave artifacts in the `.devnet` directory that will be -read into `op-e2e` at runtime. The default deploy configuration -used for starting all `op-e2e` based tests can be found in -`packages/contracts-bedrock/deploy-config/devnetL1.json`. There -are some values that are safe to change in memory in `op-e2e` at -runtime, but others cannot be changed or else it will result in -broken tests. Any changes to `devnetL1.json` should result in -rebuilding the `.devnet` artifacts before the new values will -be present in the `op-e2e` tests. +[test infra draft design-doc](https://github.com/ethereum-optimism/design-docs/pull/165) -## Running tests -Consult the [Makefile](./Makefile) in this directory. Run, e.g.: +`op-e2e` is a collection of Go integration tests. 
+It is named `e2e` after end-to-end testing,
+for those tests where we integration-test the full system, rather than only specific services.
+
+
+## Quickstart
 ```bash
-make test-http
+make test-actions
+make test-ws
 ```
-### Troubleshooting
-If you encounter errors:
-* ensure you have the latest version of foundry installed: `just update-foundry`
-* try deleting the `packages/contracts-bedrock/forge-artifacts` directory
-* try `forge clean && rm -rf lib && forge install` within the `packages/contracts-bedrock` directory
+## Overview
+
+`op-e2e` can be categorized as follows:
+- `op-e2e/actions/`: imperative test style, more DSL-like, with a focus on the state-transition parts of services.
+  Parallel processing is actively avoided, and a mock clock is used.
+  - `op-e2e/actions/*`: sub-packages categorize specific domains to test.
+  - `op-e2e/actions/interop`: notable sub-package, where multiple L2s are attached together,
+    for integration-testing across multiple L2 chains.
+  - `op-e2e/actions/proofs`: notable sub-package, where proof-related state-transition testing is implemented,
+    with experimental support to cover alternative proof implementations.
+- `op-e2e/system`: integration tests with an L1 miner and an L2 with sequencer, verifier, batcher and proposer.
+  These tests run each service almost fully, including parallel background jobs and the real system clock.
+  These tests focus less on the onchain state-transition aspects, and more on the offchain integration aspects.
+  - `op-e2e/faultproofs`: system tests with the fault-proofs stack attached.
+  - `op-e2e/interop`: system tests with a distinct Interop "SuperSystem", to run multiple L2 chains.
+- `op-e2e/opgeth`: integration tests between test-mocks and the op-geth execution-engine.
+  - Also includes upgrade-tests, to ensure op-stack Go components are tested around a network upgrade.
+
+### `action`-tests
+
+Action tests are set up in a compositional way:
+each service is instantiated as an actor, and tests can choose to run just the relevant set of actors.
+E.g. a test about data-availability can instantiate the batcher, but omit the proposer.
+
+One action, across all services, runs at a time.
+No live background processing or system clock affects the actors:
+this enables individual actions to be deterministic and reproducible.
+
+With this synchronous processing, action tests can reliably navigate towards
+otherwise hard-to-reach edge-cases, and ensure that the state-transitions of each service,
+and the interactions between those states, are covered.
+
+Action-tests do not cover background processes or peripherals.
+E.g. P2P, CLI usage, and dynamic block building are not covered.
+
+### `system`-tests
+
+System tests are more complete than `action` tests, but also require a live system.
+This trade-off enables coverage of most of each Go service,
+at the cost of making it less reliable and reproducible to navigate to the known edge-cases.
+This test-type is thus used primarily to test the offchain service aspects.
+
+By running a more complete system, test-runners also hit resource-limits more quickly.
+This may result in lag or even stalled services.
+Improvements, as described in the [test infra draft design-doc],
+are in active development to make test execution more reliable.
+
+### `op-e2e/opgeth`
+
+Integration-testing with op-geth, to cover engine behavior without setting up a full test environment.
+These tests are limited in scope, and may be changed at a later stage to support alternative EL implementations.
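For illustration, a single sequencing step in an action test looks roughly like the following. The helper names (`env`, `ActL2StartBlock`, `ActMakeTx`, and so on) are taken from the action tests touched later in this diff; the setup of `env`, `t`, and the test accounts is omitted, so this is a sketch rather than a runnable test:

```go
// One action-test step: every call below is a single synchronous action,
// with no background processes or real clock running in between.
env.Sequencer.ActL2StartBlock(t)                  // open a new unsafe L2 block
env.Alice.L2.ActResetTxOpts(t)                    // prepare a fresh tx from the Alice test account
env.Alice.L2.ActMakeTx(t)                         // create the L2 tx
env.Engine.ActL2IncludeTx(env.Alice.Address())(t) // include it in the open block
env.Sequencer.ActL2EndBlock(t)                    // seal the block
```

Because each step is explicit and nothing runs concurrently, the same sequence replays identically across runs.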
+
+## Product
+
+### Optimization target
+
+Historically `op-e2e` has been optimized for test-coverage of the Go OP-Stack.
+This is changing with the advance of alternative OP-Stack client implementations.
+
+New test framework improvements should optimize for multi-client testing.
+
+### Vision
+
+Generally, design discussion and feedback from active test users converge on:
+- a need to share test-resources, to host more tests while reducing overhead.
+- a need for a DSL, to better express common test constructs.
+- less involved test prerequisites: the environment should be light and simple, welcoming new contributors.
+  E.g. no undocumented one-off makefile prerequisites.
+
+## Design principles
+
+- Interfaces first. We should not hardcode test-utilities against any specific client implementation;
+  doing so makes a test less parameterizable and less cross-client portable.
+- Abstract the setup, so that reduced resource usage is the default.
+  E.g. RPC transports can run in-process, and avoid websocket or HTTP costs,
+  and ideally the test-writer does not have to think about the difference.
+- Avoid one-off test chain-configurations. Tests with more realistic parameters are more comparable to production,
+  and more easily consolidated onto shared testing resources.
+- Write helpers and DSL utilities; avoid re-implementing common testing steps.
+  The better the test environment, the more inviting it is for someone new to help improve test coverage.
+- Use the right test-type. Do not spawn a full system for something of very limited scope,
+  e.g. when it fits better in a unit-test.
diff --git a/op-e2e/actions/batcher/l2_batcher_test.go b/op-e2e/actions/batcher/l2_batcher_test.go
index 8906dcbed4ea..3fec73db4f7c 100644
--- a/op-e2e/actions/batcher/l2_batcher_test.go
+++ b/op-e2e/actions/batcher/l2_batcher_test.go
@@ -408,7 +408,7 @@ func ExtendedTimeWithoutL1Batches(gt *testing.T, deltaTimeOffset *hexutil.Uint64
 // - Fill 40 L2 blocks to near max-capacity, with txs of 120 KB each
 // - Buffer the L2 blocks into channels together as much as possible, submit data-txs only when necessary
 //   (just before crossing the max RLP channel size)
-// - Limit the data-tx size to 40 KB, to force data to be split across multiple datat-txs
+// - Limit the data-tx size to 40 KB, to force data to be split across multiple data-txs
 // - Defer all data-tx inclusion till the end
 // - Fill L1 blocks with data-txs until we have processed them all
 // - Run the verifier, and check if it derives the same L2 chain as was created by the sequencer.
diff --git a/op-e2e/actions/proofs/helpers/kona.go b/op-e2e/actions/proofs/helpers/kona.go
index 9d34a98dda01..dc9b98cd8097 100644
--- a/op-e2e/actions/proofs/helpers/kona.go
+++ b/op-e2e/actions/proofs/helpers/kona.go
@@ -16,15 +16,14 @@ import (
 	"github.com/stretchr/testify/require"
 )
-var konaHostPath, konaClientPath string
+var konaHostPath string
 func init() {
 	konaHostPath = os.Getenv("KONA_HOST_PATH")
-	konaClientPath = os.Getenv("KONA_CLIENT_PATH")
 }
 func IsKonaConfigured() bool {
-	return konaHostPath != "" && konaClientPath != ""
+	return konaHostPath != ""
 }
 func RunKonaNative(
@@ -57,7 +56,7 @@ func RunKonaNative(
 		L2Claim:       fixtureInputs.L2Claim,
 		L2BlockNumber: big.NewInt(int64(fixtureInputs.L2BlockNumber)),
 	}
-	hostCmd, err := vm.NewNativeKonaExecutor(konaClientPath).OracleCommand(vmCfg, workDir, inputs)
+	hostCmd, err := vm.NewNativeKonaExecutor().OracleCommand(vmCfg, workDir, inputs)
 	require.NoError(t, err)
 	cmd := exec.Command(hostCmd[0], hostCmd[1:]...)
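With the `kona.go` change above, only `KONA_HOST_PATH` is consulted to decide whether the Kona proof tests run; `KONA_CLIENT_PATH` is no longer read. As a hedged sketch of how a caller might gate on this (the `t.Skip` pattern below is illustrative and not taken from this diff; only `IsKonaConfigured` comes from the change above):

```go
// Illustrative only: skip Kona-dependent proof tests unless KONA_HOST_PATH is set.
if !helpers.IsKonaConfigured() {
	t.Skip("KONA_HOST_PATH not set; skipping Kona proof test")
}
```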
diff --git a/op-e2e/actions/proofs/holocene_activation_test.go b/op-e2e/actions/proofs/holocene_activation_test.go
index 55b8c1162de3..1cdd3aba4573 100644
--- a/op-e2e/actions/proofs/holocene_activation_test.go
+++ b/op-e2e/actions/proofs/holocene_activation_test.go
@@ -26,12 +26,14 @@ func Test_ProgramAction_HoloceneActivation(gt *testing.T) {
 	env := helpers.NewL2FaultProofEnv(t, testCfg, helpers.NewTestParams(), helpers.NewBatcherCfg(), setHoloceneTime)
-	t.Log("HoloceneTime: ", env.Sequencer.RollupCfg.HoloceneTime)
-
-	// Build the L2 chain
-	blocks := []uint{1, 2}
-	targetHeadNumber := 2
-	for env.Engine.L2Chain().CurrentBlock().Number.Uint64() < uint64(targetHeadNumber) {
+	t.Logf("L2 Genesis Time: %d, HoloceneTime: %d", env.Sequencer.RollupCfg.Genesis.L2Time, *env.Sequencer.RollupCfg.HoloceneTime)
+
+	// Build the L2 chain until the Holocene activation time,
+	// which for the Execution Engine is an L2 block timestamp
+	// https://specs.optimism.io/protocol/holocene/exec-engine.html?highlight=holocene#timestamp-activation
+	for env.Engine.L2Chain().CurrentBlock().Time < *env.Sequencer.RollupCfg.HoloceneTime {
+		b := env.Engine.L2Chain().GetBlockByHash(env.Sequencer.L2Unsafe().Hash)
+		require.Equal(t, "", string(b.Extra()), "extra data should be empty before Holocene activation")
 		env.Sequencer.ActL2StartBlock(t)
 		// Send an L2 tx
 		env.Alice.L2.ActResetTxOpts(t)
@@ -39,15 +41,24 @@ func Test_ProgramAction_HoloceneActivation(gt *testing.T) {
 		env.Alice.L2.ActMakeTx(t)
 		env.Engine.ActL2IncludeTx(env.Alice.Address())(t)
 		env.Sequencer.ActL2EndBlock(t)
+		t.Logf("Unsafe block with timestamp %d", b.Time)
 	}
+	b := env.Engine.L2Chain().GetBlockByHash(env.Sequencer.L2Unsafe().Hash)
+	require.Len(t, b.Extra(), 9, "extra data should be 9 bytes after Holocene activation")
 	// Build up a local list of frames
-	orderedFrames := make([][]byte, 0, 2)
+	orderedFrames := make([][]byte, 0, 1)
+	// Submit the first two blocks; this will be enough to trigger Holocene _derivation_,
+	// which is activated by the L1 inclusion block timestamp
+	// https://specs.optimism.io/protocol/holocene/derivation.html?highlight=holoce#activation
+	// block 1 will be 12 seconds after genesis, and 2 seconds before Holocene activation
+	// block 2 will be 24 seconds after genesis, and 10 seconds after Holocene activation
+	blocksToSubmit := []uint{1, 2}
 	// Buffer the blocks in the batcher and populate orderedFrames list
 	env.Batcher.ActCreateChannel(t, false)
-	for i, blockNum := range blocks {
+	for i, blockNum := range blocksToSubmit {
 		env.Batcher.ActAddBlockByNumber(t, int64(blockNum), actionsHelpers.BlockLogger(t))
-		if i == len(blocks)-1 {
+		if i == len(blocksToSubmit)-1 {
 			env.Batcher.ActL2ChannelClose(t)
 		}
 		frame := env.Batcher.ReadNextOutputFrame(t)
@@ -64,7 +75,7 @@ func Test_ProgramAction_HoloceneActivation(gt *testing.T) {
 	// Submit first frame
 	env.Batcher.ActL2BatchSubmitRaw(t, orderedFrames[0])
-	includeBatchTx() // block should have a timestamp of 12s after genesis
+	includeBatchTx() // L1 block should have a timestamp of 12s after genesis
 	// Holocene should activate 14s after genesis, so that the previous l1 block
 	// was before HoloceneTime and the next l1 block is after it
@@ -78,8 +89,11 @@ func Test_ProgramAction_HoloceneActivation(gt *testing.T) {
 	env.Sequencer.ActL2PipelineFull(t)
 	l2SafeHead := env.Sequencer.L2Safe()
+	t.Log(l2SafeHead.Time)
 	require.EqualValues(t, uint64(0), l2SafeHead.Number) // channel should be dropped, so no safe head progression
-	t.Log("Safe head progressed as expected", "l2SafeHeadNumber", 
l2SafeHead.Number) + if uint64(0) == l2SafeHead.Number { + t.Log("Safe head progressed as expected", "l2SafeHeadNumber", l2SafeHead.Number) + } // Log assertions filters := []string{ @@ -92,7 +106,6 @@ func Test_ProgramAction_HoloceneActivation(gt *testing.T) { recs := env.Logs.FindLogs(testlog.NewMessageContainsFilter(filter), testlog.NewAttributesFilter("role", "sequencer")) require.Len(t, recs, 1, "searching for %d instances of '%s' in logs from role %s", 1, filter, "sequencer") } - env.RunFaultProofProgram(t, l2SafeHead.Number, testCfg.CheckResult, testCfg.InputParams...) } diff --git a/op-e2e/bindings/delayedvetoable.go b/op-e2e/bindings/delayedvetoable.go deleted file mode 100644 index 989bb0278d20..000000000000 --- a/op-e2e/bindings/delayedvetoable.go +++ /dev/null @@ -1,928 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package bindings - -import ( - "errors" - "math/big" - "strings" - - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = errors.New - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription -) - -// DelayedVetoableMetaData contains all meta data concerning the DelayedVetoable contract. -var DelayedVetoableMetaData = &bind.MetaData{ - ABI: "[{\"type\":\"constructor\",\"inputs\":[{\"name\":\"vetoer_\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"initiator_\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"target_\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"operatingDelay_\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"fallback\",\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"delay\",\"inputs\":[],\"outputs\":[{\"name\":\"delay_\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"initiator\",\"inputs\":[],\"outputs\":[{\"name\":\"initiator_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"queuedAt\",\"inputs\":[{\"name\":\"callHash\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"outputs\":[{\"name\":\"queuedAt_\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"target\",\"inputs\":[],\"outputs\":[{\"name\":\"target_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"version\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"vetoer\",\"inputs\":[],\"outputs\":[{\"name\":\"vetoer_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"event\",\"name\":\"DelayActivated\",\"inputs\":[{\"name\":\"delay\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Forwarded\",\"inputs\":[{\"name\":\"callHash\",\"type\":\"bytes32\",\"indexed\":true,\"internalType\":\"
bytes32\"},{\"name\":\"data\",\"type\":\"bytes\",\"indexed\":false,\"internalType\":\"bytes\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Initiated\",\"inputs\":[{\"name\":\"callHash\",\"type\":\"bytes32\",\"indexed\":true,\"internalType\":\"bytes32\"},{\"name\":\"data\",\"type\":\"bytes\",\"indexed\":false,\"internalType\":\"bytes\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Vetoed\",\"inputs\":[{\"name\":\"callHash\",\"type\":\"bytes32\",\"indexed\":true,\"internalType\":\"bytes32\"},{\"name\":\"data\",\"type\":\"bytes\",\"indexed\":false,\"internalType\":\"bytes\"}],\"anonymous\":false},{\"type\":\"error\",\"name\":\"ForwardingEarly\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"Unauthorized\",\"inputs\":[{\"name\":\"expected\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"actual\",\"type\":\"address\",\"internalType\":\"address\"}]}]", - Bin: "0x61010060405234801561001157600080fd5b506040516108ff3803806108ff8339810160408190526100309161006e565b6001600160a01b0393841660a05291831660c05290911660805260e0526100b9565b80516001600160a01b038116811461006957600080fd5b919050565b6000806000806080858703121561008457600080fd5b61008d85610052565b935061009b60208601610052565b92506100a960408601610052565b6060959095015193969295505050565b60805160a05160c05160e0516107dc610123600039600061023f01526000818161015f01528181610205015281816102cd0152818161045801526105050152600081816101a001528181610384015261059d01526000818161057101526105ff01526107dc6000f3fe608060405234801561001057600080fd5b50600436106100725760003560e01c8063b912de5d11610050578063b912de5d14610111578063d4b8399214610124578063d8bff4401461012c57610072565b806354fd4d501461007c5780635c39fcc1146100ce5780636a42b8f8146100fb575b61007a610134565b005b6100b86040518060400160405280600581526020017f312e302e3000000000000000000000000000000000000000000000000000000081525081565b6040516100c591906106a7565b60405180910390f35b6100d66104fb565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100c5565b610103610532565b6040519081526020016100c5565b61010361011f36600461071a565b610540565b6100d6610567565b6100d6610593565b361580156101425750600054155b15610298573373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016148015906101c357503373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614155b1561023d576040517f295a81c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660048201523360248201526044015b60405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000060008190556040519081527febf28bfb587e28dfffd9173cf71c32ba5d3f0544a0117b5539c9b274a5bba2a89060200160405180910390a1565b600080366040516102aa929190610733565b60405190819003902090503373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161480156103065750600081815260016020526040902054155b1561036c5760005460000361031e5761031e816105bf565b6000818152600160205260408082204290555182917f87a332a414acbc7da074543639ce7ae02ff1ea72e88379da9f261b080beb5a139161036191903690610743565b60405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161480156103be575060008181526001602052604090205415155b15610406576000818152600160205260408082208290555182917fbede6852c1d97d93ff557f676de76670cd0dec861e7fe8beb13aa0ba2b0ab0409161036191903690610743565b6000818152
60016020526040812054900361048b576040517f295a81c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000166004820152336024820152604401610234565b60008054828252600160205260409091205442916104a891610790565b11156104e0576040517f43dc986d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000818152600160205260408120556104f8816105bf565b50565b60003361052757507f000000000000000000000000000000000000000000000000000000000000000090565b61052f610134565b90565b600033610527575060005490565b60003361055a575060009081526001602052604090205490565b610562610134565b919050565b60003361052757507f000000000000000000000000000000000000000000000000000000000000000090565b60003361052757507f000000000000000000000000000000000000000000000000000000000000000090565b807f4c109d85bcd0bb5c735b4be850953d652afe4cd9aa2e0b1426a65a4dcb2e12296000366040516105f2929190610743565b60405180910390a26000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16600036604051610645929190610733565b6000604051808303816000865af19150503d8060008114610682576040519150601f19603f3d011682016040523d82523d6000602084013e610687565b606091505b50909250905081151560010361069f57805160208201f35b805160208201fd5b600060208083528351808285015260005b818110156106d4578581018301518582016040015282016106b8565b818111156106e6576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b60006020828403121561072c57600080fd5b5035919050565b8183823760009101908152919050565b60208152816020820152818360408301376000818301604090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160101919050565b600082198211156107ca577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b50019056fea164736f6c634300080f000a", -} - -// DelayedVetoableABI is the input ABI used to generate the binding from. -// Deprecated: Use DelayedVetoableMetaData.ABI instead. -var DelayedVetoableABI = DelayedVetoableMetaData.ABI - -// DelayedVetoableBin is the compiled bytecode used for deploying new contracts. -// Deprecated: Use DelayedVetoableMetaData.Bin instead. -var DelayedVetoableBin = DelayedVetoableMetaData.Bin - -// DeployDelayedVetoable deploys a new Ethereum contract, binding an instance of DelayedVetoable to it. -func DeployDelayedVetoable(auth *bind.TransactOpts, backend bind.ContractBackend, vetoer_ common.Address, initiator_ common.Address, target_ common.Address, operatingDelay_ *big.Int) (common.Address, *types.Transaction, *DelayedVetoable, error) { - parsed, err := DelayedVetoableMetaData.GetAbi() - if err != nil { - return common.Address{}, nil, nil, err - } - if parsed == nil { - return common.Address{}, nil, nil, errors.New("GetABI returned nil") - } - - address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(DelayedVetoableBin), backend, vetoer_, initiator_, target_, operatingDelay_) - if err != nil { - return common.Address{}, nil, nil, err - } - return address, tx, &DelayedVetoable{DelayedVetoableCaller: DelayedVetoableCaller{contract: contract}, DelayedVetoableTransactor: DelayedVetoableTransactor{contract: contract}, DelayedVetoableFilterer: DelayedVetoableFilterer{contract: contract}}, nil -} - -// DelayedVetoable is an auto generated Go binding around an Ethereum contract. 
-type DelayedVetoable struct { - DelayedVetoableCaller // Read-only binding to the contract - DelayedVetoableTransactor // Write-only binding to the contract - DelayedVetoableFilterer // Log filterer for contract events -} - -// DelayedVetoableCaller is an auto generated read-only Go binding around an Ethereum contract. -type DelayedVetoableCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// DelayedVetoableTransactor is an auto generated write-only Go binding around an Ethereum contract. -type DelayedVetoableTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// DelayedVetoableFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type DelayedVetoableFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// DelayedVetoableSession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. -type DelayedVetoableSession struct { - Contract *DelayedVetoable // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// DelayedVetoableCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type DelayedVetoableCallerSession struct { - Contract *DelayedVetoableCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// DelayedVetoableTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type DelayedVetoableTransactorSession struct { - Contract *DelayedVetoableTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// DelayedVetoableRaw is an auto generated low-level Go binding around an Ethereum contract. -type DelayedVetoableRaw struct { - Contract *DelayedVetoable // Generic contract binding to access the raw methods on -} - -// DelayedVetoableCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type DelayedVetoableCallerRaw struct { - Contract *DelayedVetoableCaller // Generic read-only contract binding to access the raw methods on -} - -// DelayedVetoableTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. -type DelayedVetoableTransactorRaw struct { - Contract *DelayedVetoableTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewDelayedVetoable creates a new instance of DelayedVetoable, bound to a specific deployed contract. -func NewDelayedVetoable(address common.Address, backend bind.ContractBackend) (*DelayedVetoable, error) { - contract, err := bindDelayedVetoable(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &DelayedVetoable{DelayedVetoableCaller: DelayedVetoableCaller{contract: contract}, DelayedVetoableTransactor: DelayedVetoableTransactor{contract: contract}, DelayedVetoableFilterer: DelayedVetoableFilterer{contract: contract}}, nil -} - -// NewDelayedVetoableCaller creates a new read-only instance of DelayedVetoable, bound to a specific deployed contract. 
-func NewDelayedVetoableCaller(address common.Address, caller bind.ContractCaller) (*DelayedVetoableCaller, error) { - contract, err := bindDelayedVetoable(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &DelayedVetoableCaller{contract: contract}, nil -} - -// NewDelayedVetoableTransactor creates a new write-only instance of DelayedVetoable, bound to a specific deployed contract. -func NewDelayedVetoableTransactor(address common.Address, transactor bind.ContractTransactor) (*DelayedVetoableTransactor, error) { - contract, err := bindDelayedVetoable(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &DelayedVetoableTransactor{contract: contract}, nil -} - -// NewDelayedVetoableFilterer creates a new log filterer instance of DelayedVetoable, bound to a specific deployed contract. -func NewDelayedVetoableFilterer(address common.Address, filterer bind.ContractFilterer) (*DelayedVetoableFilterer, error) { - contract, err := bindDelayedVetoable(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &DelayedVetoableFilterer{contract: contract}, nil -} - -// bindDelayedVetoable binds a generic wrapper to an already deployed contract. -func bindDelayedVetoable(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(DelayedVetoableABI)) - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_DelayedVetoable *DelayedVetoableRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _DelayedVetoable.Contract.DelayedVetoableCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_DelayedVetoable *DelayedVetoableRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DelayedVetoable.Contract.DelayedVetoableTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_DelayedVetoable *DelayedVetoableRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _DelayedVetoable.Contract.DelayedVetoableTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_DelayedVetoable *DelayedVetoableCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _DelayedVetoable.Contract.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. 
-func (_DelayedVetoable *DelayedVetoableTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DelayedVetoable.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_DelayedVetoable *DelayedVetoableTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _DelayedVetoable.Contract.contract.Transact(opts, method, params...) -} - -// Version is a free data retrieval call binding the contract method 0x54fd4d50. -// -// Solidity: function version() view returns(string) -func (_DelayedVetoable *DelayedVetoableCaller) Version(opts *bind.CallOpts) (string, error) { - var out []interface{} - err := _DelayedVetoable.contract.Call(opts, &out, "version") - - if err != nil { - return *new(string), err - } - - out0 := *abi.ConvertType(out[0], new(string)).(*string) - - return out0, err - -} - -// Version is a free data retrieval call binding the contract method 0x54fd4d50. -// -// Solidity: function version() view returns(string) -func (_DelayedVetoable *DelayedVetoableSession) Version() (string, error) { - return _DelayedVetoable.Contract.Version(&_DelayedVetoable.CallOpts) -} - -// Version is a free data retrieval call binding the contract method 0x54fd4d50. -// -// Solidity: function version() view returns(string) -func (_DelayedVetoable *DelayedVetoableCallerSession) Version() (string, error) { - return _DelayedVetoable.Contract.Version(&_DelayedVetoable.CallOpts) -} - -// Delay is a paid mutator transaction binding the contract method 0x6a42b8f8. -// -// Solidity: function delay() returns(uint256 delay_) -func (_DelayedVetoable *DelayedVetoableTransactor) Delay(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DelayedVetoable.contract.Transact(opts, "delay") -} - -// Delay is a paid mutator transaction binding the contract method 0x6a42b8f8. -// -// Solidity: function delay() returns(uint256 delay_) -func (_DelayedVetoable *DelayedVetoableSession) Delay() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Delay(&_DelayedVetoable.TransactOpts) -} - -// Delay is a paid mutator transaction binding the contract method 0x6a42b8f8. -// -// Solidity: function delay() returns(uint256 delay_) -func (_DelayedVetoable *DelayedVetoableTransactorSession) Delay() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Delay(&_DelayedVetoable.TransactOpts) -} - -// Initiator is a paid mutator transaction binding the contract method 0x5c39fcc1. -// -// Solidity: function initiator() returns(address initiator_) -func (_DelayedVetoable *DelayedVetoableTransactor) Initiator(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DelayedVetoable.contract.Transact(opts, "initiator") -} - -// Initiator is a paid mutator transaction binding the contract method 0x5c39fcc1. -// -// Solidity: function initiator() returns(address initiator_) -func (_DelayedVetoable *DelayedVetoableSession) Initiator() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Initiator(&_DelayedVetoable.TransactOpts) -} - -// Initiator is a paid mutator transaction binding the contract method 0x5c39fcc1. 
-// -// Solidity: function initiator() returns(address initiator_) -func (_DelayedVetoable *DelayedVetoableTransactorSession) Initiator() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Initiator(&_DelayedVetoable.TransactOpts) -} - -// QueuedAt is a paid mutator transaction binding the contract method 0xb912de5d. -// -// Solidity: function queuedAt(bytes32 callHash) returns(uint256 queuedAt_) -func (_DelayedVetoable *DelayedVetoableTransactor) QueuedAt(opts *bind.TransactOpts, callHash [32]byte) (*types.Transaction, error) { - return _DelayedVetoable.contract.Transact(opts, "queuedAt", callHash) -} - -// QueuedAt is a paid mutator transaction binding the contract method 0xb912de5d. -// -// Solidity: function queuedAt(bytes32 callHash) returns(uint256 queuedAt_) -func (_DelayedVetoable *DelayedVetoableSession) QueuedAt(callHash [32]byte) (*types.Transaction, error) { - return _DelayedVetoable.Contract.QueuedAt(&_DelayedVetoable.TransactOpts, callHash) -} - -// QueuedAt is a paid mutator transaction binding the contract method 0xb912de5d. -// -// Solidity: function queuedAt(bytes32 callHash) returns(uint256 queuedAt_) -func (_DelayedVetoable *DelayedVetoableTransactorSession) QueuedAt(callHash [32]byte) (*types.Transaction, error) { - return _DelayedVetoable.Contract.QueuedAt(&_DelayedVetoable.TransactOpts, callHash) -} - -// Target is a paid mutator transaction binding the contract method 0xd4b83992. -// -// Solidity: function target() returns(address target_) -func (_DelayedVetoable *DelayedVetoableTransactor) Target(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DelayedVetoable.contract.Transact(opts, "target") -} - -// Target is a paid mutator transaction binding the contract method 0xd4b83992. -// -// Solidity: function target() returns(address target_) -func (_DelayedVetoable *DelayedVetoableSession) Target() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Target(&_DelayedVetoable.TransactOpts) -} - -// Target is a paid mutator transaction binding the contract method 0xd4b83992. -// -// Solidity: function target() returns(address target_) -func (_DelayedVetoable *DelayedVetoableTransactorSession) Target() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Target(&_DelayedVetoable.TransactOpts) -} - -// Vetoer is a paid mutator transaction binding the contract method 0xd8bff440. -// -// Solidity: function vetoer() returns(address vetoer_) -func (_DelayedVetoable *DelayedVetoableTransactor) Vetoer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DelayedVetoable.contract.Transact(opts, "vetoer") -} - -// Vetoer is a paid mutator transaction binding the contract method 0xd8bff440. -// -// Solidity: function vetoer() returns(address vetoer_) -func (_DelayedVetoable *DelayedVetoableSession) Vetoer() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Vetoer(&_DelayedVetoable.TransactOpts) -} - -// Vetoer is a paid mutator transaction binding the contract method 0xd8bff440. -// -// Solidity: function vetoer() returns(address vetoer_) -func (_DelayedVetoable *DelayedVetoableTransactorSession) Vetoer() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Vetoer(&_DelayedVetoable.TransactOpts) -} - -// Fallback is a paid mutator transaction binding the contract fallback function. 
-// -// Solidity: fallback() returns() -func (_DelayedVetoable *DelayedVetoableTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { - return _DelayedVetoable.contract.RawTransact(opts, calldata) -} - -// Fallback is a paid mutator transaction binding the contract fallback function. -// -// Solidity: fallback() returns() -func (_DelayedVetoable *DelayedVetoableSession) Fallback(calldata []byte) (*types.Transaction, error) { - return _DelayedVetoable.Contract.Fallback(&_DelayedVetoable.TransactOpts, calldata) -} - -// Fallback is a paid mutator transaction binding the contract fallback function. -// -// Solidity: fallback() returns() -func (_DelayedVetoable *DelayedVetoableTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { - return _DelayedVetoable.Contract.Fallback(&_DelayedVetoable.TransactOpts, calldata) -} - -// DelayedVetoableDelayActivatedIterator is returned from FilterDelayActivated and is used to iterate over the raw logs and unpacked data for DelayActivated events raised by the DelayedVetoable contract. -type DelayedVetoableDelayActivatedIterator struct { - Event *DelayedVetoableDelayActivated // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *DelayedVetoableDelayActivatedIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableDelayActivated) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableDelayActivated) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *DelayedVetoableDelayActivatedIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *DelayedVetoableDelayActivatedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// DelayedVetoableDelayActivated represents a DelayActivated event raised by the DelayedVetoable contract. -type DelayedVetoableDelayActivated struct { - Delay *big.Int - Raw types.Log // Blockchain specific contextual infos -} - -// FilterDelayActivated is a free log retrieval operation binding the contract event 0xebf28bfb587e28dfffd9173cf71c32ba5d3f0544a0117b5539c9b274a5bba2a8. 
-// -// Solidity: event DelayActivated(uint256 delay) -func (_DelayedVetoable *DelayedVetoableFilterer) FilterDelayActivated(opts *bind.FilterOpts) (*DelayedVetoableDelayActivatedIterator, error) { - - logs, sub, err := _DelayedVetoable.contract.FilterLogs(opts, "DelayActivated") - if err != nil { - return nil, err - } - return &DelayedVetoableDelayActivatedIterator{contract: _DelayedVetoable.contract, event: "DelayActivated", logs: logs, sub: sub}, nil -} - -// WatchDelayActivated is a free log subscription operation binding the contract event 0xebf28bfb587e28dfffd9173cf71c32ba5d3f0544a0117b5539c9b274a5bba2a8. -// -// Solidity: event DelayActivated(uint256 delay) -func (_DelayedVetoable *DelayedVetoableFilterer) WatchDelayActivated(opts *bind.WatchOpts, sink chan<- *DelayedVetoableDelayActivated) (event.Subscription, error) { - - logs, sub, err := _DelayedVetoable.contract.WatchLogs(opts, "DelayActivated") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(DelayedVetoableDelayActivated) - if err := _DelayedVetoable.contract.UnpackLog(event, "DelayActivated", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseDelayActivated is a log parse operation binding the contract event 0xebf28bfb587e28dfffd9173cf71c32ba5d3f0544a0117b5539c9b274a5bba2a8. -// -// Solidity: event DelayActivated(uint256 delay) -func (_DelayedVetoable *DelayedVetoableFilterer) ParseDelayActivated(log types.Log) (*DelayedVetoableDelayActivated, error) { - event := new(DelayedVetoableDelayActivated) - if err := _DelayedVetoable.contract.UnpackLog(event, "DelayActivated", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// DelayedVetoableForwardedIterator is returned from FilterForwarded and is used to iterate over the raw logs and unpacked data for Forwarded events raised by the DelayedVetoable contract. -type DelayedVetoableForwardedIterator struct { - Event *DelayedVetoableForwarded // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *DelayedVetoableForwardedIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableForwarded) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableForwarded) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *DelayedVetoableForwardedIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *DelayedVetoableForwardedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// DelayedVetoableForwarded represents a Forwarded event raised by the DelayedVetoable contract. -type DelayedVetoableForwarded struct { - CallHash [32]byte - Data []byte - Raw types.Log // Blockchain specific contextual infos -} - -// FilterForwarded is a free log retrieval operation binding the contract event 0x4c109d85bcd0bb5c735b4be850953d652afe4cd9aa2e0b1426a65a4dcb2e1229. -// -// Solidity: event Forwarded(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) FilterForwarded(opts *bind.FilterOpts, callHash [][32]byte) (*DelayedVetoableForwardedIterator, error) { - - var callHashRule []interface{} - for _, callHashItem := range callHash { - callHashRule = append(callHashRule, callHashItem) - } - - logs, sub, err := _DelayedVetoable.contract.FilterLogs(opts, "Forwarded", callHashRule) - if err != nil { - return nil, err - } - return &DelayedVetoableForwardedIterator{contract: _DelayedVetoable.contract, event: "Forwarded", logs: logs, sub: sub}, nil -} - -// WatchForwarded is a free log subscription operation binding the contract event 0x4c109d85bcd0bb5c735b4be850953d652afe4cd9aa2e0b1426a65a4dcb2e1229. 
-// -// Solidity: event Forwarded(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) WatchForwarded(opts *bind.WatchOpts, sink chan<- *DelayedVetoableForwarded, callHash [][32]byte) (event.Subscription, error) { - - var callHashRule []interface{} - for _, callHashItem := range callHash { - callHashRule = append(callHashRule, callHashItem) - } - - logs, sub, err := _DelayedVetoable.contract.WatchLogs(opts, "Forwarded", callHashRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(DelayedVetoableForwarded) - if err := _DelayedVetoable.contract.UnpackLog(event, "Forwarded", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseForwarded is a log parse operation binding the contract event 0x4c109d85bcd0bb5c735b4be850953d652afe4cd9aa2e0b1426a65a4dcb2e1229. -// -// Solidity: event Forwarded(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) ParseForwarded(log types.Log) (*DelayedVetoableForwarded, error) { - event := new(DelayedVetoableForwarded) - if err := _DelayedVetoable.contract.UnpackLog(event, "Forwarded", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// DelayedVetoableInitiatedIterator is returned from FilterInitiated and is used to iterate over the raw logs and unpacked data for Initiated events raised by the DelayedVetoable contract. -type DelayedVetoableInitiatedIterator struct { - Event *DelayedVetoableInitiated // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *DelayedVetoableInitiatedIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableInitiated) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableInitiated) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. 
-func (it *DelayedVetoableInitiatedIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *DelayedVetoableInitiatedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// DelayedVetoableInitiated represents a Initiated event raised by the DelayedVetoable contract. -type DelayedVetoableInitiated struct { - CallHash [32]byte - Data []byte - Raw types.Log // Blockchain specific contextual infos -} - -// FilterInitiated is a free log retrieval operation binding the contract event 0x87a332a414acbc7da074543639ce7ae02ff1ea72e88379da9f261b080beb5a13. -// -// Solidity: event Initiated(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) FilterInitiated(opts *bind.FilterOpts, callHash [][32]byte) (*DelayedVetoableInitiatedIterator, error) { - - var callHashRule []interface{} - for _, callHashItem := range callHash { - callHashRule = append(callHashRule, callHashItem) - } - - logs, sub, err := _DelayedVetoable.contract.FilterLogs(opts, "Initiated", callHashRule) - if err != nil { - return nil, err - } - return &DelayedVetoableInitiatedIterator{contract: _DelayedVetoable.contract, event: "Initiated", logs: logs, sub: sub}, nil -} - -// WatchInitiated is a free log subscription operation binding the contract event 0x87a332a414acbc7da074543639ce7ae02ff1ea72e88379da9f261b080beb5a13. -// -// Solidity: event Initiated(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) WatchInitiated(opts *bind.WatchOpts, sink chan<- *DelayedVetoableInitiated, callHash [][32]byte) (event.Subscription, error) { - - var callHashRule []interface{} - for _, callHashItem := range callHash { - callHashRule = append(callHashRule, callHashItem) - } - - logs, sub, err := _DelayedVetoable.contract.WatchLogs(opts, "Initiated", callHashRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(DelayedVetoableInitiated) - if err := _DelayedVetoable.contract.UnpackLog(event, "Initiated", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseInitiated is a log parse operation binding the contract event 0x87a332a414acbc7da074543639ce7ae02ff1ea72e88379da9f261b080beb5a13. -// -// Solidity: event Initiated(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) ParseInitiated(log types.Log) (*DelayedVetoableInitiated, error) { - event := new(DelayedVetoableInitiated) - if err := _DelayedVetoable.contract.UnpackLog(event, "Initiated", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// DelayedVetoableVetoedIterator is returned from FilterVetoed and is used to iterate over the raw logs and unpacked data for Vetoed events raised by the DelayedVetoable contract. 
-type DelayedVetoableVetoedIterator struct { - Event *DelayedVetoableVetoed // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *DelayedVetoableVetoedIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableVetoed) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableVetoed) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *DelayedVetoableVetoedIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *DelayedVetoableVetoedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// DelayedVetoableVetoed represents a Vetoed event raised by the DelayedVetoable contract. -type DelayedVetoableVetoed struct { - CallHash [32]byte - Data []byte - Raw types.Log // Blockchain specific contextual infos -} - -// FilterVetoed is a free log retrieval operation binding the contract event 0xbede6852c1d97d93ff557f676de76670cd0dec861e7fe8beb13aa0ba2b0ab040. -// -// Solidity: event Vetoed(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) FilterVetoed(opts *bind.FilterOpts, callHash [][32]byte) (*DelayedVetoableVetoedIterator, error) { - - var callHashRule []interface{} - for _, callHashItem := range callHash { - callHashRule = append(callHashRule, callHashItem) - } - - logs, sub, err := _DelayedVetoable.contract.FilterLogs(opts, "Vetoed", callHashRule) - if err != nil { - return nil, err - } - return &DelayedVetoableVetoedIterator{contract: _DelayedVetoable.contract, event: "Vetoed", logs: logs, sub: sub}, nil -} - -// WatchVetoed is a free log subscription operation binding the contract event 0xbede6852c1d97d93ff557f676de76670cd0dec861e7fe8beb13aa0ba2b0ab040. 
-// -// Solidity: event Vetoed(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) WatchVetoed(opts *bind.WatchOpts, sink chan<- *DelayedVetoableVetoed, callHash [][32]byte) (event.Subscription, error) { - - var callHashRule []interface{} - for _, callHashItem := range callHash { - callHashRule = append(callHashRule, callHashItem) - } - - logs, sub, err := _DelayedVetoable.contract.WatchLogs(opts, "Vetoed", callHashRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(DelayedVetoableVetoed) - if err := _DelayedVetoable.contract.UnpackLog(event, "Vetoed", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseVetoed is a log parse operation binding the contract event 0xbede6852c1d97d93ff557f676de76670cd0dec861e7fe8beb13aa0ba2b0ab040. -// -// Solidity: event Vetoed(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) ParseVetoed(log types.Log) (*DelayedVetoableVetoed, error) { - event := new(DelayedVetoableVetoed) - if err := _DelayedVetoable.contract.UnpackLog(event, "Vetoed", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} diff --git a/op-e2e/config/init.go b/op-e2e/config/init.go index 9419c1277060..032f70bf2a88 100644 --- a/op-e2e/config/init.go +++ b/op-e2e/config/init.go @@ -201,9 +201,6 @@ func initAllocType(root string, allocType AllocType) { panic(err) } - // Do not use clique in the in memory tests. Otherwise block building - // would be much more complex. 
- dc.L1UseClique = false // Set the L1 genesis block timestamp to now dc.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix()) dc.FundDevAccounts = true diff --git a/op-e2e/e2eutils/retryproxy/proxy.go b/op-e2e/e2eutils/retryproxy/proxy.go new file mode 100644 index 000000000000..a74d5d8c8699 --- /dev/null +++ b/op-e2e/e2eutils/retryproxy/proxy.go @@ -0,0 +1,160 @@ +package retryproxy + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "net/http" + "time" + + "github.com/ethereum-optimism/optimism/op-service/retry" + "github.com/ethereum/go-ethereum/log" +) + +var copyHeaders = []string{ + "Content-Type", +} + +type RetryProxy struct { + lgr log.Logger + upstream string + client *http.Client + strategy retry.Strategy + maxRetries int + srv *http.Server + listenPort int +} + +type Option func(*RetryProxy) + +func New(lgr log.Logger, upstream string, opts ...Option) *RetryProxy { + strategy := &retry.ExponentialStrategy{ + Min: 250 * time.Millisecond, + Max: 5 * time.Second, + MaxJitter: 250 * time.Millisecond, + } + + prox := &RetryProxy{ + lgr: lgr.New("module", "retryproxy"), + upstream: upstream, + client: &http.Client{}, + strategy: strategy, + maxRetries: 5, + } + + for _, opt := range opts { + opt(prox) + } + + return prox +} + +func (p *RetryProxy) Start() error { + errC := make(chan error, 1) + + go func() { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + errC <- fmt.Errorf("failed to listen: %w", err) + } + + p.listenPort = ln.Addr().(*net.TCPAddr).Port + + p.srv = &http.Server{ + Addr: "127.0.0.1:0", + Handler: p, + } + errC <- p.srv.Serve(ln) + }() + + timer := time.NewTimer(100 * time.Millisecond) + select { + case err := <-errC: + return fmt.Errorf("failed to start server: %w", err) + case <-timer.C: + p.lgr.Info("server started", "port", p.listenPort) + return nil + } +} + +func (p *RetryProxy) Stop() error { + if p.srv == nil { + return nil + } + + return p.srv.Shutdown(context.Background()) +} + +func (p *RetryProxy) Endpoint() string { + return fmt.Sprintf("http://127.0.0.1:%d", p.listenPort) +} + +func (p *RetryProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + defer r.Body.Close() + reqBody, err := io.ReadAll(r.Body) + if err != nil { + p.lgr.Error("failed to read request body", "err", err) + http.Error(w, "failed to read request body", http.StatusInternalServerError) + return + } + + //nolint:bodyClose + res, resBody, err := retry.Do2(r.Context(), p.maxRetries, p.strategy, func() (*http.Response, []byte, error) { + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + res, err := p.doProxyReq(ctx, reqBody) + if err != nil { + p.lgr.Warn("failed to proxy request", "err", err) + return nil, nil, err + } + + defer res.Body.Close() + resBody, err := io.ReadAll(res.Body) + if err != nil { + p.lgr.Warn("failed to read response body", "err", err) + return nil, nil, err + } + + return res, resBody, nil + }) + if err != nil { + p.lgr.Error("permanently failed to proxy request", "err", err) + http.Error(w, "failed to proxy request", http.StatusBadGateway) + return + } + + for _, h := range copyHeaders { + w.Header().Set(h, res.Header.Get(h)) + } + + w.WriteHeader(http.StatusOK) + if _, err := io.Copy(w, bytes.NewReader(resBody)); err != nil { + p.lgr.Error("failed to copy response", "err", err) + http.Error(w, "failed to copy response", http.StatusInternalServerError) + } +} + +func (p *RetryProxy) 
doProxyReq(ctx context.Context, body []byte) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, p.upstream, bytes.NewReader(body)) + if err != nil { + panic(fmt.Errorf("failed to create request: %w", err)) + } + res, err := p.client.Do(req) + if err != nil { + p.lgr.Warn("failed to proxy request", "err", err) + return nil, err + } + status := res.StatusCode + if status != 200 { + p.lgr.Warn("unexpected status code", "status", status) + return nil, fmt.Errorf("unexpected status code: %d", status) + } + return res, nil +} diff --git a/op-e2e/e2eutils/secrets.go b/op-e2e/e2eutils/secrets.go index cd4c91e1e09e..9cee2f25c4c1 100644 --- a/op-e2e/e2eutils/secrets.go +++ b/op-e2e/e2eutils/secrets.go @@ -19,8 +19,8 @@ const defaultHDPathPrefix = "m/44'/60'/0'/0/" // If these values are changed, it is subject to breaking tests. They // must be in sync with the values in the DeployConfig used to create the system. var DefaultMnemonicConfig = &MnemonicConfig{ - Mnemonic: "test test test test test test test test test test test junk", - CliqueSigner: "m/44'/60'/0'/0/0", + Mnemonic: "test test test test test test test test test test test junk", + // Note: "m/44'/60'/0'/0/0" is a legacy mnemonic path, used for the L1 clique signer. Proposer: "m/44'/60'/0'/0/1", Batcher: "m/44'/60'/0'/0/2", Deployer: "m/44'/60'/0'/0/3", @@ -36,9 +36,8 @@ var DefaultMnemonicConfig = &MnemonicConfig{ type MnemonicConfig struct { Mnemonic string - CliqueSigner string - Deployer string - SysCfgOwner string + Deployer string + SysCfgOwner string // rollup actors Proposer string @@ -66,10 +65,6 @@ func (m *MnemonicConfig) Secrets() (*Secrets, error) { if err != nil { return nil, err } - cliqueSigner, err := wallet.PrivateKey(account(m.CliqueSigner)) - if err != nil { - return nil, err - } sysCfgOwner, err := wallet.PrivateKey(account(m.SysCfgOwner)) if err != nil { return nil, err @@ -102,7 +97,6 @@ func (m *MnemonicConfig) Secrets() (*Secrets, error) { return &Secrets{ Deployer: deployer, SysCfgOwner: sysCfgOwner, - CliqueSigner: cliqueSigner, Proposer: proposer, Batcher: batcher, SequencerP2P: sequencerP2P, @@ -115,9 +109,8 @@ func (m *MnemonicConfig) Secrets() (*Secrets, error) { // Secrets bundles secp256k1 private keys for all common rollup actors for testing purposes. type Secrets struct { - Deployer *ecdsa.PrivateKey - CliqueSigner *ecdsa.PrivateKey - SysCfgOwner *ecdsa.PrivateKey + Deployer *ecdsa.PrivateKey + SysCfgOwner *ecdsa.PrivateKey // rollup actors Proposer *ecdsa.PrivateKey @@ -138,7 +131,6 @@ type Secrets struct { func (s *Secrets) Addresses() *Addresses { return &Addresses{ Deployer: crypto.PubkeyToAddress(s.Deployer.PublicKey), - CliqueSigner: crypto.PubkeyToAddress(s.CliqueSigner.PublicKey), SysCfgOwner: crypto.PubkeyToAddress(s.SysCfgOwner.PublicKey), Proposer: crypto.PubkeyToAddress(s.Proposer.PublicKey), Batcher: crypto.PubkeyToAddress(s.Batcher.PublicKey), @@ -151,9 +143,8 @@ func (s *Secrets) Addresses() *Addresses { // Addresses bundles the addresses for all common rollup addresses for testing purposes. 
type Addresses struct { - Deployer common.Address - CliqueSigner common.Address - SysCfgOwner common.Address + Deployer common.Address + SysCfgOwner common.Address // rollup actors Proposer common.Address @@ -169,7 +160,6 @@ type Addresses struct { func (a *Addresses) All() []common.Address { return []common.Address{ a.Deployer, - a.CliqueSigner, a.SysCfgOwner, a.Proposer, a.Batcher, diff --git a/op-e2e/faultproofs/precompile_test.go b/op-e2e/faultproofs/precompile_test.go index 6d116bfa0993..2beabfba54d6 100644 --- a/op-e2e/faultproofs/precompile_test.go +++ b/op-e2e/faultproofs/precompile_test.go @@ -1,9 +1,12 @@ package faultproofs import ( + "bytes" "context" + "encoding/json" "math" "math/big" + "os/exec" "path/filepath" "testing" @@ -12,7 +15,6 @@ import ( "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" @@ -276,9 +278,29 @@ func runCannon(t *testing.T, ctx context.Context, sys *e2esys.System, inputs uti err := executor.DoGenerateProof(ctx, proofsDir, math.MaxUint, math.MaxUint, extraVmArgs...) require.NoError(t, err, "failed to generate proof") - state, err := versions.LoadStateFromFile(vm.FinalStatePath(proofsDir, cfg.Cannon.BinarySnapshots)) - require.NoError(t, err, "failed to parse state") - require.True(t, state.GetExited(), "cannon did not exit") - require.Zero(t, state.GetExitCode(), "cannon failed with exit code %d", state.GetExitCode()) - t.Logf("Completed in %d steps", state.GetStep()) + stdOut, _, err := runCmd(ctx, cfg.Cannon.VmBin, "witness", "--input", vm.FinalStatePath(proofsDir, cfg.Cannon.BinarySnapshots)) + require.NoError(t, err, "failed to run witness cmd") + type stateData struct { + Step uint64 `json:"step"` + ExitCode uint8 `json:"exitCode"` + Exited bool `json:"exited"` + } + var data stateData + err = json.Unmarshal([]byte(stdOut), &data) + require.NoError(t, err, "failed to parse state data") + require.True(t, data.Exited, "cannon did not exit") + require.Zero(t, data.ExitCode, "cannon failed with exit code %d", data.ExitCode) + t.Logf("Completed in %d steps", data.Step) +} + +func runCmd(ctx context.Context, binary string, args ...string) (stdOut string, stdErr string, err error) { + var outBuf bytes.Buffer + var errBuf bytes.Buffer + cmd := exec.CommandContext(ctx, binary, args...) + cmd.Stdout = &outBuf + cmd.Stderr = &errBuf + err = cmd.Run() + stdOut = outBuf.String() + stdErr = errBuf.String() + return } diff --git a/op-e2e/interop/interop_test.go b/op-e2e/interop/interop_test.go index 302413dd98ed..81f09c125aa4 100644 --- a/op-e2e/interop/interop_test.go +++ b/op-e2e/interop/interop_test.go @@ -121,7 +121,9 @@ func TestInterop_EmitLogs(t *testing.T) { var emitParallel sync.WaitGroup emitOn := func(chainID string) { for i := 0; i < numEmits; i++ { - s2.EmitData(chainID, "Alice", payload1) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + s2.EmitData(ctx, chainID, "Alice", payload1) + cancel() } emitParallel.Done() } @@ -218,7 +220,9 @@ func TestInteropBlockBuilding(t *testing.T) { // Add chain A as dependency to chain B, // such that we can execute a message on B that was initiated on A. 
- depRec := s2.AddDependency(chainB, s2.ChainID(chainA)) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + depRec := s2.AddDependency(ctx, chainB, s2.ChainID(chainA)) + cancel() t.Logf("Dependency set in L1 block %d", depRec.BlockNumber) rollupClA, err := dial.DialRollupClientWithTimeout(context.Background(), time.Second*15, logger, s2.OpNode(chainA).UserRPC().RPC()) @@ -233,7 +237,9 @@ func TestInteropBlockBuilding(t *testing.T) { t.Log("Dependency information has been processed in L2 block") // emit log on chain A - emitRec := s2.EmitData(chainA, "Alice", "hello world") + ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + emitRec := s2.EmitData(ctx, chainA, "Alice", "hello world") + cancel() t.Logf("Emitted a log event in block %d", emitRec.BlockNumber.Uint64()) // Wait for initiating side to become cross-unsafe diff --git a/op-e2e/interop/supersystem.go b/op-e2e/interop/supersystem.go index de7e25bb9c7b..8fc663f377a0 100644 --- a/op-e2e/interop/supersystem.go +++ b/op-e2e/interop/supersystem.go @@ -103,9 +103,9 @@ type SuperSystem interface { // Deploy the Emitter Contract, which emits Event Logs DeployEmitterContract(network string, username string) common.Address // Use the Emitter Contract to emit an Event Log - EmitData(network string, username string, data string) *types.Receipt + EmitData(ctx context.Context, network string, username string, data string) *types.Receipt // AddDependency adds a dependency (by chain ID) to the given chain - AddDependency(network string, dep *big.Int) *types.Receipt + AddDependency(ctx context.Context, network string, dep *big.Int) *types.Receipt // ExecuteMessage calls the CrossL2Inbox executeMessage function ExecuteMessage( ctx context.Context, @@ -767,7 +767,7 @@ func (s *interopE2ESystem) ExecuteMessage( return bind.WaitMined(ctx, s.L2GethClient(id), tx) } -func (s *interopE2ESystem) AddDependency(id string, dep *big.Int) *types.Receipt { +func (s *interopE2ESystem) AddDependency(ctx context.Context, id string, dep *big.Int) *types.Receipt { // There is a note in OPContractsManagerInterop that the proxy-admin is used for now, // even though it should be a separate dependency-set-manager address. 
secret, err := s.hdWallet.Secret(devkeys.ChainOperatorKey{ @@ -779,7 +779,7 @@ func (s *interopE2ESystem) AddDependency(id string, dep *big.Int) *types.Receipt auth, err := bind.NewKeyedTransactorWithChainID(secret, s.worldOutput.L1.Genesis.Config.ChainID) require.NoError(s.t, err) - balance, err := s.l1GethClient.BalanceAt(context.Background(), crypto.PubkeyToAddress(secret.PublicKey), nil) + balance, err := s.l1GethClient.BalanceAt(ctx, crypto.PubkeyToAddress(secret.PublicKey), nil) require.NoError(s.t, err) require.False(s.t, balance.Sign() == 0, "system config owner needs a balance") @@ -790,7 +790,7 @@ func (s *interopE2ESystem) AddDependency(id string, dep *big.Int) *types.Receipt tx, err := contract.SystemconfigTransactor.AddDependency(auth, dep) require.NoError(s.t, err) - receipt, err := wait.ForReceiptOK(context.Background(), s.L1GethClient(), tx.Hash()) + receipt, err := wait.ForReceiptOK(ctx, s.L1GethClient(), tx.Hash()) require.NoError(s.t, err) return receipt } @@ -813,6 +813,7 @@ func (s *interopE2ESystem) DeployEmitterContract( } func (s *interopE2ESystem) EmitData( + ctx context.Context, id string, sender string, data string, @@ -828,7 +829,7 @@ func (s *interopE2ESystem) EmitData( contract := s.Contract(id, "emitter").(*emit.Emit) tx, err := contract.EmitTransactor.EmitData(auth, []byte(data)) require.NoError(s.t, err) - receipt, err := bind.WaitMined(context.Background(), s.L2GethClient(id), tx) + receipt, err := bind.WaitMined(ctx, s.L2GethClient(id), tx) require.NoError(s.t, err) return receipt } diff --git a/op-e2e/system/altda/concurrent_test.go b/op-e2e/system/altda/concurrent_test.go index 32506e5c4a10..e53c7f0f811b 100644 --- a/op-e2e/system/altda/concurrent_test.go +++ b/op-e2e/system/altda/concurrent_test.go @@ -34,7 +34,9 @@ func TestBatcherConcurrentAltDARequests(t *testing.T) { cfg.DisableBatcher = true sys, err := cfg.Start(t) require.NoError(t, err, "Error starting up system") - defer sys.Close() + t.Cleanup(func() { + sys.Close() + }) // make every request take 5 seconds, such that only concurrent requests will be able to make progress fast enough sys.FakeAltDAServer.SetPutRequestLatency(5 * time.Second) diff --git a/op-e2e/system/conductor/sequencer_failover_test.go b/op-e2e/system/conductor/sequencer_failover_test.go index 5722dc3b82e9..9004b528fc58 100644 --- a/op-e2e/system/conductor/sequencer_failover_test.go +++ b/op-e2e/system/conductor/sequencer_failover_test.go @@ -4,6 +4,7 @@ import ( "context" "sort" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rpc" @@ -28,7 +29,8 @@ func TestSequencerFailover_SetupCluster(t *testing.T) { // [Category: conductor rpc] // In this test, we test all rpcs exposed by conductor. 
func TestSequencerFailover_ConductorRPC(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() sys, conductors, cleanup := setupSequencerFailoverTest(t) defer cleanup() @@ -176,7 +178,8 @@ func TestSequencerFailover_ActiveSequencerDown(t *testing.T) { sys, conductors, cleanup := setupSequencerFailoverTest(t) defer cleanup() - ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() leaderId, leader := findLeader(t, conductors) err := sys.RollupNodes[leaderId].Stop(ctx) // Stop the current leader sequencer require.NoError(t, err) @@ -205,7 +208,8 @@ func TestSequencerFailover_DisasterRecovery_OverrideLeader(t *testing.T) { defer cleanup() // randomly stop 2 nodes in the cluster to simulate a disaster. - ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() err := conductors[Sequencer1Name].service.Stop(ctx) require.NoError(t, err) err = conductors[Sequencer2Name].service.Stop(ctx) diff --git a/op-e2e/system/da/brotli_batcher_test.go b/op-e2e/system/da/brotli_batcher_test.go index fe9b4a9fab97..fd44c6365eae 100644 --- a/op-e2e/system/da/brotli_batcher_test.go +++ b/op-e2e/system/da/brotli_batcher_test.go @@ -43,7 +43,7 @@ func setupAliceAccount(t *testing.T, cfg e2esys.SystemConfig, sys *e2esys.System require.NoError(t, err) mintAmount := big.NewInt(1_000_000_000_000) opts.Value = mintAmount - helpers.SendDepositTx(t, cfg, l1Client, l2Verif, opts, func(l2Opts *helpers.DepositTxOpts) {}) + helpers.SendDepositTx(t, cfg, l1Client, l2Verif, opts, nil) // Confirm balance ctx, cancel = context.WithTimeout(context.Background(), 15*time.Second) diff --git a/op-e2e/system/da/eip4844_test.go b/op-e2e/system/da/eip4844_test.go index f3cf8fc7f03f..e1b6468378e9 100644 --- a/op-e2e/system/da/eip4844_test.go +++ b/op-e2e/system/da/eip4844_test.go @@ -57,7 +57,6 @@ func testSystem4844E2E(t *testing.T, multiBlob bool, daType batcherFlags.DataAva cfg.BatcherBatchType = derive.SpanBatchType cfg.DeployConfig.L1GenesisBlockBaseFeePerGas = (*hexutil.Big)(big.NewInt(7000)) - const maxBlobs = eth.MaxBlobsPerBlobTx var maxL1TxSize int if multiBlob { cfg.BatcherTargetNumFrames = eth.MaxBlobsPerBlobTx @@ -120,7 +119,7 @@ func testSystem4844E2E(t *testing.T, multiBlob bool, daType batcherFlags.DataAva require.NoError(t, err) mintAmount := big.NewInt(1_000_000_000_000) opts.Value = mintAmount - helpers.SendDepositTx(t, cfg, l1Client, l2Verif, opts, func(l2Opts *helpers.DepositTxOpts) {}) + helpers.SendDepositTx(t, cfg, l1Client, l2Verif, opts, nil) // Confirm balance ctx2, cancel2 := context.WithTimeout(context.Background(), 20*time.Second) @@ -214,7 +213,8 @@ func testSystem4844E2E(t *testing.T, multiBlob bool, daType batcherFlags.DataAva if !multiBlob { require.NotZero(t, numBlobs, "single-blob: expected to find L1 blob tx") } else { - require.Equal(t, maxBlobs, numBlobs, fmt.Sprintf("multi-blob: expected to find L1 blob tx with %d blobs", eth.MaxBlobsPerBlobTx)) + const maxBlobs = eth.MaxBlobsPerBlobTx + require.Equal(t, maxBlobs, numBlobs, fmt.Sprintf("multi-blob: expected to find L1 blob tx with %d blobs", maxBlobs)) // blob tx should have filled up all but last blob bcl := sys.L1BeaconHTTPClient() hashes := toIndexedBlobHashes(blobTx.BlobHashes()...) 
diff --git a/op-e2e/system/gastoken/gastoken_test.go b/op-e2e/system/gastoken/gastoken_test.go index 7e03b19d3930..b5f3f56e2f6c 100644 --- a/op-e2e/system/gastoken/gastoken_test.go +++ b/op-e2e/system/gastoken/gastoken_test.go @@ -57,7 +57,7 @@ func testCustomGasToken(t *testing.T, allocType config.AllocType) { uint8(18), } - setup := func() gasTokenTestOpts { + setup := func(t *testing.T) gasTokenTestOpts { cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(allocType)) offset := hexutil.Uint64(0) cfg.DeployConfig.L2GenesisRegolithTimeOffset = &offset @@ -111,7 +111,7 @@ func testCustomGasToken(t *testing.T, allocType config.AllocType) { t.Run("deposit", func(t *testing.T) { op_e2e.InitParallel(t) - gto := setup() + gto := setup(t) checkDeposit(t, gto, false) setCustomGasToken(t, gto.cfg, gto.sys, gto.weth9Address) checkDeposit(t, gto, true) @@ -119,7 +119,7 @@ func testCustomGasToken(t *testing.T, allocType config.AllocType) { t.Run("withdrawal", func(t *testing.T) { op_e2e.InitParallel(t) - gto := setup() + gto := setup(t) setCustomGasToken(t, gto.cfg, gto.sys, gto.weth9Address) checkDeposit(t, gto, true) checkWithdrawal(t, gto) @@ -127,7 +127,7 @@ func testCustomGasToken(t *testing.T, allocType config.AllocType) { t.Run("fee withdrawal", func(t *testing.T) { op_e2e.InitParallel(t) - gto := setup() + gto := setup(t) setCustomGasToken(t, gto.cfg, gto.sys, gto.weth9Address) checkDeposit(t, gto, true) checkFeeWithdrawal(t, gto, true) @@ -135,7 +135,7 @@ func testCustomGasToken(t *testing.T, allocType config.AllocType) { t.Run("token name and symbol", func(t *testing.T) { op_e2e.InitParallel(t) - gto := setup() + gto := setup(t) checkL1TokenNameAndSymbol(t, gto, gto.disabledExpectations) checkL2TokenNameAndSymbol(t, gto, gto.disabledExpectations) checkWETHTokenNameAndSymbol(t, gto, gto.disabledExpectations) diff --git a/op-e2e/system/helpers/tx_helper.go b/op-e2e/system/helpers/tx_helper.go index f5cb11aa8a1d..10c16c0e7465 100644 --- a/op-e2e/system/helpers/tx_helper.go +++ b/op-e2e/system/helpers/tx_helper.go @@ -28,7 +28,9 @@ import ( // Returns the receipt of the L2 transaction func SendDepositTx(t *testing.T, cfg e2esys.SystemConfig, l1Client *ethclient.Client, l2Client *ethclient.Client, l1Opts *bind.TransactOpts, applyL2Opts DepositTxOptsFn) *types.Receipt { l2Opts := defaultDepositTxOpts(l1Opts) - applyL2Opts(l2Opts) + if applyL2Opts != nil { + applyL2Opts(l2Opts) + } // Find deposit contract depositContract, err := bindings.NewOptimismPortal(cfg.L1Deployments.OptimismPortalProxy, l1Client) diff --git a/op-e2e/system/proofs/build_helper.go b/op-e2e/system/proofs/build_helper.go index 42201279867b..9bad76b9dcb1 100644 --- a/op-e2e/system/proofs/build_helper.go +++ b/op-e2e/system/proofs/build_helper.go @@ -2,7 +2,9 @@ package proofs import ( "context" + "os" "os/exec" + "path/filepath" "strings" "testing" "time" @@ -12,6 +14,15 @@ import ( // BuildOpProgramClient builds the `op-program` client executable and returns the path to the resulting executable func BuildOpProgramClient(t *testing.T) string { + clientPath, err := filepath.Abs("../../../op-program/bin/op-program-client") + require.NoError(t, err) + + _, err = os.Stat(clientPath) + if err == nil { + return clientPath + } + require.ErrorIs(t, err, os.ErrNotExist) + t.Log("Building op-program-client") ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() diff --git a/op-e2e/system/verifier/basic_test.go b/op-e2e/system/verifier/basic_test.go index effe1f412146..d0791d5ec483 100644 --- 
a/op-e2e/system/verifier/basic_test.go +++ b/op-e2e/system/verifier/basic_test.go @@ -71,7 +71,7 @@ func runE2ESystemTest(t *testing.T, sys *e2esys.System) { require.Nil(t, err) mintAmount := big.NewInt(1_000_000_000_000) opts.Value = mintAmount - helpers.SendDepositTx(t, sys.Cfg, l1Client, l2Verif, opts, func(l2Opts *helpers.DepositTxOpts) {}) + helpers.SendDepositTx(t, sys.Cfg, l1Client, l2Verif, opts, nil) // Confirm balance ctx, cancel = context.WithTimeout(context.Background(), 15*time.Second) diff --git a/op-node/Makefile b/op-node/Makefile index c1d480d9b71a..b63f489925ac 100644 --- a/op-node/Makefile +++ b/op-node/Makefile @@ -1,65 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -# Find the github tag that points to this commit. If none are found, set the version string to "untagged" -# Prioritizes release tag, if one exists, over tags suffixed with "-rc" -VERSION ?= $(shell tags=$$(git tag --points-at $(GITCOMMIT) | grep '^op-node/' | sed 's/op-node\///' | sort -V); \ - preferred_tag=$$(echo "$$tags" | grep -v -- '-rc' | tail -n 1); \ - if [ -z "$$preferred_tag" ]; then \ - if [ -z "$$tags" ]; then \ - echo "untagged"; \ - else \ - echo "$$tags" | tail -n 1; \ - fi \ - else \ - echo $$preferred_tag; \ - fi) +DEPRECATED_TARGETS := op-node clean test fuzz generate-mocks readme -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-node/version.Version=$(VERSION) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-node/version.Meta=$(VERSION_META) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169 -ifeq ($(shell uname),Darwin) - FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic -endif - -op-node: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-node ./cmd/main.go - -clean: - rm bin/op-node - -test: - go test -v ./... 
- -fuzz: - printf "%s\n" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzL1InfoBedrockRoundTrip ./rollup/derive" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzL1InfoEcotoneRoundTrip ./rollup/derive" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzL1InfoAgainstContract ./rollup/derive" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzUnmarshallLogEvent ./rollup/derive" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzParseFrames ./rollup/derive" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzFrameUnmarshalBinary ./rollup/derive" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzBatchRoundTrip ./rollup/derive" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDeriveDepositsRoundTrip ./rollup/derive" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDeriveDepositsBadVersion ./rollup/derive" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzParseL1InfoDepositTxDataValid ./rollup/derive" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzParseL1InfoDepositTxDataBadLength ./rollup/derive" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzRejectCreateBlockBadTimestamp ./rollup/driver" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDecodeDepositTxDataToL1Info ./rollup/driver" \ - | parallel -j 8 {} - -generate-mocks: - go generate ./... - -readme: - doctoc README.md - -.PHONY: \ - op-node \ - clean \ - test \ - fuzz \ - readme +include ../just/deprecated.mk diff --git a/op-node/README.md b/op-node/README.md index cfc3793052f1..b9b28fa05f15 100644 --- a/op-node/README.md +++ b/op-node/README.md @@ -1,105 +1,252 @@ - - -**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* +# `op-node` -- [op-node](#op-node) - - [Compiling](#compiling) - - [Testing](#testing) - - [Running](#running) - - [L2 Genesis Generation](#l2-genesis-generation) - - [L1 Devnet Genesis Generation](#l1-devnet-genesis-generation) +Issues: +[monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-node) - +Pull requests: +[monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-node) -# op-node +User docs: +- [How to run a node](https://docs.optimism.io/builders/node-operators/rollup-node) -This is the reference implementation of the [rollup-node spec](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.md). -It can be thought of like the consensus layer client of an OP Stack chain where it must run with an OP Stack execution layer client -like [op-geth](https://github.com/ethereum-optimism/op-geth). +Specs: +- [rollup-node spec] -## Compiling +The op-node implements the [rollup-node spec]. +It functions as a Consensus Layer client of an OP Stack chain. +This builds, relays and verifies the canonical chain of blocks. +The blocks are processed by an execution layer client, like [op-geth]. -Compile a binary: +[rollup-node spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.m +[op-geth]: https://github.com/ethereum-optimism/op-geth -```shell +## Quickstart + +```bash make op-node + +# Network selection: +# - Join any of the pre-configured networks with the `--network` flag. +# - Alternatively, join a custom network with the `--rollup.config` flag. 
+#
+# Essential Connections:
+# - L1 ethereum RPC, to fetch blocks, receipts, finality
+# - L1 beacon API, to fetch blobs
+# - L2 engine API, to apply new blocks to
+# - P2P TCP port, to expose publicly, to retrieve and relay the latest L2 blocks
+# - P2P UDP port, to expose publicly, to discover other nodes to peer with
+# - RPC port, to serve RPC of the op-node
+#
+# Other:
+# - Sync mode: how to interact with the execution-engine,
+#   such that it enters the preferred form of syncing:
+#   - consensus-layer (block by block sync)
+#   - execution-layer (e.g. snap-sync)
+#
+# Tip: every CLI flag has an env-var equivalent (run `op-node --help` for more information)
+./bin/op-node \
+  --network=op-sepolia \
+  --l1=ws://localhost:8546 \
+  --l1.beacon=http://localhost:4000 \
+  --l2=ws://localhost:9001 \
+  --p2p.listen.tcp=9222 \
+  --p2p.listen.udp=9222 \
+  --rpc.port=7000 \
+  --syncmode=execution-layer
+
+# If running inside docker, make sure to mount the below persistent data as (host) volume,
+# it may be lost on restart otherwise:
+# - P2P private key: auto-generated when missing, used to maintain a stable peer identity.
+# - Peerstore DB: remember peer records to connect with, used to not wait for peer discovery.
+# - Discovery DB: maintain DHT data, to avoid repeating some discovery work after restarting.
+  --p2p.priv.path=opnode_p2p_priv.txt \
+  --p2p.peerstore.path=opnode_peerstore_db \
+  --p2p.discovery.path=opnode_discovery_db
 ```
-## Testing
+## Usage
-Run op-node unit tests:
+### Build from source
-```shell
-make test
+```bash
+# from op-node dir:
+make op-node
+./bin/op-node --help
 ```
-## Running
+### Run from source
-Configuration options can be reviewed with:
-
-```shell
-./bin/op-node --help
+```bash
+# from op-node dir:
+go run ./cmd --help
 ```
-[eth-json-rpc-spec]: https://ethereum.github.io/execution-apis/api-documentation
+### Build docker image
-To start syncing the rollup:
+See `op-node` docker-bake target.
-Connect to one L1 Execution Client that supports the [Ethereum JSON-RPC spec][eth-json-rpc-spec],
-an L1 Consensus Client that supports the [Beacon Node API](https://ethereum.github.io/beacon-APIs) and
-an OP Stack based Execution Client that supports the [Ethereum JSON-RPC spec][eth-json-rpc-spec]:
+## Implementation overview
-- L1: use any L1 client, RPC, websocket, or IPC (connection config may differ)
-- L2: use any OP Stack Execution Client like [`op-geth`](https://github.com/ethereum-optimism/op-geth)
+### Interactions
-Note that websockets or IPC is preferred for event notifications to improve sync, http RPC works with adaptive polling.
+
+
-```shell
-./bin/op-node \
-  --l1=ws://localhost:8546 \
-  --l1.beacon=http://localhost:4000 \
-  --l2=ws://localhost:9001 \
-  --rollup.config=./path-to-network-config/rollup.json \
-  --rpc.addr=127.0.0.1 \
-  --rpc.port=7000
-```
+## Product
-## L2 Genesis Generation
+The op-node **builds**, **relays** and **verifies** the canonical chain of blocks.
-The `op-node` can generate geth compatible `genesis.json` files. These files
-can be used with `geth init` to initialize the `StateDB` with accounts, storage,
-code and balances. The L2 state must be initialized with predeploy contracts
-that exist in the state and act as system level contracts. The `op-node` can
-generate a genesis file with these predeploys configured correctly given
-an L1 RPC URL, a deploy config, L2 genesis allocs and a L1 deployments artifact.
+The op-node does not store critical data:
+the op-node can recover from any existing L2 chain pre-state
+that is sufficiently synced such that available input data can complete the sync.
-The deploy config contains all of the config required to deploy the
-system. Examples can be found in `packages/contracts-bedrock/deploy-config`. Each
-deploy config file is a JSON file. The L2 allocs can be generated using a forge script
-in the `contracts-bedrock` package and the L1 deployments are a JSON file that is the
-output of doing a L1 contracts deployment.
+The op-node **builds** blocks:
+either from scratch as a sequencer, or from block-inputs (made available through L1) as verifier.
-Example usage:
+The block **relay** is a happy-path: the P2P sync is optional, and does not affect the ability to verify.
+However, the block relay is still important for UX, as it lowers the latency to the latest state.
-```bash
-$ ./bin/op-node genesis l2 \
-    --l1-rpc $ETH_RPC_URL \
-    --deploy-config \
-    --l2-allocs \
-    --l1-deployments \
-    --outfile.l2 \
-    --outfile.rollup
-```
+The blocks are **verified**: only valid L2 blocks that can be reproduced from L1 data are accepted.
-## L1 Devnet Genesis Generation
+### Optimization target
-It is also possible to generate a devnet L1 `genesis.json` file. The L1 allocs can
-be generated with the foundry L1 contracts deployment script if the extra parameter
-`--sig 'runWithStateDump()` is added to the deployment command.
+
+
+**Safely and reliably sync the canonical chain**
+
+The op-node implements the three core product features as follows:
+
+- Block **building**: extend the chain at a throughput rate and latency that is safe to relay and verify.
+- Block **relaying**: while keeping throughput high and latency low, prevent single points of failure.
+- Block **verification**: efficiently sync, but always fully verify and follow the canonical chain.
+
+Trade-offs are made here: verification safety is at odds with ideal throughput, latency and efficiency.
+Or in other words: safety vs. liveness. Chain parameters determine this trade-off.
+The implementation offers this trade-off, siding with safety by default,
+and design-choices should aim to improve the trade-off.
+
+### Vision
+
+The op-node is changing in two ways:
+- [Reliability](#reliability): improve reliability through better processing, testing and syncing.
+- [Interoperability](#interoperability): cross-chain messaging support.
+
+#### Reliability
+
+- Parallel derivation processes: [Issue 10864](https://github.com/ethereum-optimism/optimism/issues/10864)
+- Event tests: [Issue 13163](https://github.com/ethereum-optimism/optimism/issues/13163)
+- Improving P2P sync: [Issue 11779](https://github.com/ethereum-optimism/optimism/issues/11779)
+
+#### Interoperability
+
+The OP Stack is making chains natively interoperable:
+messages between chains form safety dependencies, and are verified asynchronously.
+Asynchronous verification entails that the op-node reorgs away a block
+if and when the block is determined to be invalid.
+
+The [op-supervisor] specializes in this dependency verification work.
+
+The op-node encapsulates all the single-chain concerns:
+it prepares the local safety data-points (DA confirmation and block contents) for the op-supervisor.
+
+The op-supervisor then verifies the cross-chain safety, and promotes the block safety level accordingly,
+which the op-node then follows.
+
+See [Interop specs] and [Interop design-docs] for more information about interoperability.
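+
+To make the interaction above concrete, here is a minimal, purely illustrative Go sketch (hypothetical types and
+names, not the actual op-node or op-supervisor API) of a block being promoted only once its cross-chain
+dependencies verify, and being reorged away when they do not:
+
+```go
+package main
+
+import "fmt"
+
+// SafetyLevel is a simplified ordering of block safety for this sketch.
+type SafetyLevel int
+
+const (
+	Unsafe    SafetyLevel = iota // optimistically processed, not yet cross-verified
+	CrossSafe                    // cross-chain dependencies verified by the supervisor
+	Finalized                    // backed by finalized L1 data
+)
+
+// Block carries the local data-points the op-node prepares for the supervisor.
+type Block struct {
+	Hash  string
+	Level SafetyLevel
+}
+
+// applyVerdict mimics the op-node following the supervisor's decision:
+// promote the block when its dependencies verify, drop it when they are invalid.
+func applyVerdict(b Block, depsValid bool) (Block, bool) {
+	if !depsValid {
+		return Block{}, false // block must be reorged away
+	}
+	b.Level = CrossSafe
+	return b, true
+}
+
+func main() {
+	if promoted, ok := applyVerdict(Block{Hash: "0xabc", Level: Unsafe}, true); ok {
+		fmt.Printf("block %s promoted to safety level %d\n", promoted.Hash, promoted.Level)
+	}
+	if _, ok := applyVerdict(Block{Hash: "0xdef", Level: Unsafe}, false); !ok {
+		fmt.Println("block 0xdef failed dependency verification: reorg it out of the local chain")
+	}
+}
+```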
+
+[op-supervisor]: ../op-supervisor/README.md
+
+### User stories
+
+
+
+As *a user* I want *reliability* so that I *don't miss blocks or fall out of sync*.
+As *a RaaS dev* I want *easy configuration and monitoring* so that I *can run more chains*.
+As *a customizoor* I want *clear extensible APIs* so that I *can avoid forking and be a contributor*.
+As *a protocol dev* I want *integration with tests* so that I *assert protocol conformance*.
+As *a proof dev* I want *reusable state-transition code* so that I *don't reimplement the same thing*.
+
+## Design principles
+
+- Encapsulate the state-transition:
+  - Use interfaces to abstract file-IO / concurrency / etc. away from state-transition logic.
+  - Ensure code-sharing with action-tests and op-program.
+- No critical database:
+  - Persisting data is ok, but it should be recoverable from external data without too much work.
+  - The best chain "sync" is no sync.
+- Keep the tech-stack compatible with ethereum L1:
+  - L1 offers well-adopted and battle tested libraries and standards, e.g. LibP2P, DiscV5, JSON-RPC.
+  - L1 supports a tech-stack in different languages, ensuring client-diversity, important to L2 as well.
+  - Downstream devs of OP-Stack should be able to pull in *one* instance of a library, that serves both OP-Stack and L1.
+
+## Failure modes
+
+This is a brief overview of what might fail, and how the op-node responds.
+
+### L1 downtime
+
+When the L1 data-source is temporarily unavailable, the op-node `safe`/`finalized` progression halts.
+Blocks may continue to sync through the happy-path if P2P connectivity is undisrupted.
+
+### No batch confirmation
+
+As per the [rollup-node spec], the sequencing-window ensures that after a bounded period of L1 blocks
+the verifier will infer blocks, to ensure liveness of blocks with deposited transactions.
+The op-node will continue to process the happy-path in the meantime,
+which may have to be reorged out if it does not match the blocks that are inferred after sequencing window expiry.
+
+### L1 reorg
+
+L1 reorgs are detected passively during traversal: upon traversal to block `N+1`,
+if the next canonical block has a parent-hash that does not match the
+current block `N`, we know the remote L1 chain view has diverged.
+(An illustrative sketch of this parent-hash check appears below, after the P2P failure notes.)
+
+When this happens, the op-node assumes the local view is wrong, and resets itself to follow that of the remote node,
+dropping any non-canonical blocks in the process.
+
+### No L1 finality
+
+When L1 does not finalize for an extended period of time,
+the op-node is also unable to finalize the L2 chain for that same period.
+
+Note that the `safe` block in the execution-layer is bootstrapped from the `finalized` block:
+some verification work may repeat after a restart.
+
+Blocks will continue to be derived from L1 batch-submissions, and optimistic processing will also continue to function.
+
+### P2P failure
+
+On P2P failure, e.g. issues with peering or failed propagation of block-data, the `unsafe` part of the chain may stall.
+The `unsafe` part of the chain will no longer progress optimistically ahead of the `safe` part.
+
+The `safe` blocks will continue to be derived from L1 however, providing higher-latency access to the latest chain.
+
+The op-node may pick back up the latest `unsafe` blocks after recovering its P2P connectivity,
+and will buffer `unsafe` blocks until the `safe` chain progress reaches the first known buffered `unsafe` block.
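+
+A minimal, purely illustrative Go sketch of the parent-hash check described under "L1 reorg" above
+(hypothetical types and names, not actual op-node code):
+
+```go
+package main
+
+import "fmt"
+
+// BlockRef is a simplified stand-in for an L1 block reference.
+type BlockRef struct {
+	Number     uint64
+	Hash       string
+	ParentHash string
+}
+
+// extendsCanonically reports whether `next` builds on `current`.
+// A mismatching parent-hash means the remote L1 view has diverged (a reorg),
+// and the local view must be reset to follow the remote chain.
+func extendsCanonically(current, next BlockRef) bool {
+	return next.ParentHash == current.Hash
+}
+
+func main() {
+	current := BlockRef{Number: 100, Hash: "0xaaa"}
+	next := BlockRef{Number: 101, Hash: "0xbbb", ParentHash: "0xccc"} // does not match current.Hash
+	if !extendsCanonically(current, next) {
+		fmt.Println("L1 reorg detected: reset to the remote chain view and drop non-canonical blocks")
+	}
+}
+```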
+ +### Restarts and resyncing + +After a restart, or detection of missing chain data, +the op-node dynamically determines what L1 data is required to continue, based on the syncing state of execution-engine. +If the sync-state is far behind, the op-node may need archived blob data to sync from the original L1 inputs. + +A faster alternative may be to bootstrap through the execution-layer sync mode, +where the execution-engine may perform an optimized long-range sync, such as snap-sync. + +## Testing + + + +- Unit tests: encapsulated functionality, fuzz tests, etc. in the op-node Go packages. +- `op-e2e` action tests: in-progress Go testing, focused on the onchain aspects, + e.g. state-transition edge-cases. This applies primarily to the derivation pipeline. +- `op-e2e` system tests: in-process Go testing, focused on the offchain aspects of the op-node, + e.g. background work, P2P integration, general service functionality. +- Local devnet tests: full end to end testing, but set up on minimal resources. +- Kurtosis tests: new automated devnet-like testing. Work in progress. +- Long-running devnet: roll-out for experimental features, to ensure sufficient stability for testnet users. +- Long-running testnet: battle-testing in public environment. +- Shadow-forks: design phase, testing experiments against shadow copies of real networks. -```bash -$ ./bin/op-node genesis l1 \ - --deploy-config $CONTRACTS_BEDROCK/deploy-config \ - --l1-deployments \ - --l1-allocs -``` diff --git a/op-node/flags/p2p_flags.go b/op-node/flags/p2p_flags.go index 269b973c52da..6a38fbb817f8 100644 --- a/op-node/flags/p2p_flags.go +++ b/op-node/flags/p2p_flags.go @@ -7,6 +7,7 @@ import ( "github.com/urfave/cli/v2" "github.com/ethereum-optimism/optimism/op-node/p2p" + opsigner "github.com/ethereum-optimism/optimism/op-service/signer" ) func p2pEnv(envprefix, v string) []string { @@ -87,7 +88,7 @@ func deprecatedP2PFlags(envPrefix string) []cli.Flag { // None of these flags are strictly required. // Some are hidden if they are too technical, or not recommended. func P2PFlags(envPrefix string) []cli.Flag { - return []cli.Flag{ + return append([]cli.Flag{ &cli.BoolFlag{ Name: DisableP2PName, Usage: "Completely disable the P2P stack", @@ -410,5 +411,5 @@ func P2PFlags(envPrefix string) []cli.Flag { Required: false, EnvVars: p2pEnv(envPrefix, "PING"), }, - } + }, opsigner.CLIFlags(envPrefix, P2PCategory)...) 
} diff --git a/op-node/justfile b/op-node/justfile new file mode 100644 index 000000000000..46aadcc84b39 --- /dev/null +++ b/op-node/justfile @@ -0,0 +1,49 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X github.com/ethereum-optimism/optimism/op-node/version.Version=" + VERSION + " " + \ + "-X github.com/ethereum-optimism/optimism/op-node/version.Meta=" + VERSION_META + " " + \ + "") + "'" + +BINARY := "./bin/op-node" + +# Build op-node binary +op-node: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) + +# Clean build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") + +# Generate mocks +generate-mocks: (go_generate "./...") + +# Update readme +readme: + doctoc README.md + +[private] +node_fuzz_task FUZZ TIME='10s': (go_fuzz FUZZ TIME "./rollup/derive") + +# Run fuzz tests +fuzz: + printf "%s\n" \ + "FuzzL1InfoBedrockRoundTrip" \ + "FuzzL1InfoEcotoneRoundTrip" \ + "FuzzL1InfoAgainstContract" \ + "FuzzUnmarshallLogEvent" \ + "FuzzParseFrames" \ + "FuzzFrameUnmarshalBinary" \ + "FuzzBatchRoundTrip" \ + "FuzzDeriveDepositsRoundTrip" \ + "FuzzDeriveDepositsBadVersion" \ + "FuzzParseL1InfoDepositTxDataValid" \ + "FuzzParseL1InfoDepositTxDataBadLength" \ + "FuzzRejectCreateBlockBadTimestamp" \ + "FuzzDecodeDepositTxDataToL1Info" \ + | parallel -j {{PARALLEL_JOBS}} {{just_executable()}} node_fuzz_task {} diff --git a/op-node/metrics/metrics.go b/op-node/metrics/metrics.go index 88d8c4d0caa3..6e1b664eca2b 100644 --- a/op-node/metrics/metrics.go +++ b/op-node/metrics/metrics.go @@ -35,6 +35,7 @@ type Metricer interface { RecordRPCClientRequest(method string) func(err error) RecordRPCClientResponse(method string, err error) SetDerivationIdle(status bool) + SetSequencerState(active bool) RecordPipelineReset() RecordSequencingError() RecordPublishingError() @@ -48,7 +49,7 @@ type Metricer interface { RecordL2Ref(name string, ref eth.L2BlockRef) RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID) RecordDerivedBatches(batchType string) - CountSequencedTxs(count int) + CountSequencedTxsInBlock(txns int, deposits int) RecordL1ReorgDepth(d uint64) RecordSequencerInconsistentL1Origin(from eth.BlockID, to eth.BlockID) RecordSequencerReset() @@ -94,6 +95,7 @@ type Metrics struct { DerivationErrors *metrics.Event SequencingErrors *metrics.Event PublishingErrors *metrics.Event + SequencerActive prometheus.Gauge EmittedEvents *prometheus.CounterVec ProcessedEvents *prometheus.CounterVec @@ -133,7 +135,7 @@ type Metrics struct { L1ReorgDepth prometheus.Histogram - TransactionsSequencedTotal prometheus.Counter + TransactionsSequencedTotal *prometheus.CounterVec AltDAMetrics altda.Metricer @@ -209,6 +211,11 @@ func NewMetrics(procName string) *Metrics { DerivationErrors: metrics.NewEvent(factory, ns, "", "derivation_errors", "derivation errors"), SequencingErrors: metrics.NewEvent(factory, ns, "", "sequencing_errors", "sequencing errors"), PublishingErrors: metrics.NewEvent(factory, ns, "", "publishing_errors", "p2p publishing errors"), + SequencerActive: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: ns, + Name: "sequencer_active", + Help: "1 if sequencer active, 0 otherwise", + }), EmittedEvents: factory.NewCounterVec( prometheus.CounterOpts{ @@ -261,12 +268,11 @@ func NewMetrics(procName string) *Metrics { Help: "Histogram of L1 Reorg Depths", }), - TransactionsSequencedTotal: factory.NewGauge(prometheus.GaugeOpts{ + 
TransactionsSequencedTotal: factory.NewCounterVec(prometheus.CounterOpts{ Namespace: ns, Name: "transactions_sequenced_total", Help: "Count of total transactions sequenced", - }), - + }, []string{"type"}), PeerCount: factory.NewGauge(prometheus.GaugeOpts{ Namespace: ns, Subsystem: "p2p", @@ -470,6 +476,14 @@ func (m *Metrics) SetDerivationIdle(status bool) { m.DerivationIdle.Set(val) } +func (m *Metrics) SetSequencerState(active bool) { + var val float64 + if active { + val = 1 + } + m.SequencerActive.Set(val) +} + func (m *Metrics) RecordPipelineReset() { m.PipelineResets.Record() } @@ -516,8 +530,9 @@ func (m *Metrics) RecordDerivedBatches(batchType string) { m.DerivedBatches.Record(batchType) } -func (m *Metrics) CountSequencedTxs(count int) { - m.TransactionsSequencedTotal.Add(float64(count)) +func (m *Metrics) CountSequencedTxsInBlock(txns int, deposits int) { + m.TransactionsSequencedTotal.WithLabelValues("deposits").Add(float64(deposits)) + m.TransactionsSequencedTotal.WithLabelValues("txns").Add(float64(txns - deposits)) } func (m *Metrics) RecordL1ReorgDepth(d uint64) { @@ -686,6 +701,9 @@ func (n *noopMetricer) RecordUp() { func (n *noopMetricer) SetDerivationIdle(status bool) { } +func (m *noopMetricer) SetSequencerState(active bool) { +} + func (n *noopMetricer) RecordPipelineReset() { } @@ -725,7 +743,7 @@ func (n *noopMetricer) RecordUnsafePayloadsBuffer(length uint64, memSize uint64, func (n *noopMetricer) RecordDerivedBatches(batchType string) { } -func (n *noopMetricer) CountSequencedTxs(count int) { +func (n *noopMetricer) CountSequencedTxsInBlock(txns int, deposits int) { } func (n *noopMetricer) RecordL1ReorgDepth(d uint64) { diff --git a/op-node/node/config_persistence.go b/op-node/node/config_persistence.go index 7a30c11b9c9c..3f2b8b47537e 100644 --- a/op-node/node/config_persistence.go +++ b/op-node/node/config_persistence.go @@ -55,6 +55,7 @@ func (p *ActiveConfigPersistence) SequencerStopped() error { func (p *ActiveConfigPersistence) persist(sequencerStarted bool) error { p.lock.Lock() defer p.lock.Unlock() + data, err := json.Marshal(persistedState{SequencerStarted: &sequencerStarted}) if err != nil { return fmt.Errorf("marshall new config: %w", err) diff --git a/op-node/node/node.go b/op-node/node/node.go index 2727552b8190..8bf8040b465c 100644 --- a/op-node/node/node.go +++ b/op-node/node/node.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + gosync "sync" "sync/atomic" "time" @@ -60,6 +61,7 @@ type OpNode struct { l2Source *sources.EngineClient // L2 Execution Engine RPC bindings server *rpcServer // RPC server hosting the rollup-node API p2pNode *p2p.NodeP2P // P2P node functionality + p2pMu gosync.Mutex // protects p2pNode p2pSigner p2p.Signer // p2p gossip application messages will be signed with this signer tracer Tracer // tracer to get events for testing/debugging runCfg *RuntimeConfig // runtime configurables @@ -434,8 +436,9 @@ func (n *OpNode) initRPCServer(cfg *Config) error { if err != nil { return err } - if n.p2pEnabled() { - server.EnableP2P(p2p.NewP2PAPIBackend(n.p2pNode, n.log, n.metrics)) + + if p2pNode := n.getP2PNodeIfEnabled(); p2pNode != nil { + server.EnableP2P(p2p.NewP2PAPIBackend(p2pNode, n.log, n.metrics)) } if cfg.RPC.EnableAdmin { server.EnableAdminAPI(NewAdminAPI(n.l2Driver, n.metrics, n.log)) @@ -487,6 +490,8 @@ func (n *OpNode) p2pEnabled() bool { } func (n *OpNode) initP2P(cfg *Config) (err error) { + n.p2pMu.Lock() + defer n.p2pMu.Unlock() if n.p2pNode != nil { panic("p2p node already initialized") } @@ -580,13 +585,13 @@ func 
(n *OpNode) PublishL2Payload(ctx context.Context, envelope *eth.ExecutionPa n.tracer.OnPublishL2Payload(ctx, envelope) // publish to p2p, if we are running p2p at all - if n.p2pEnabled() { + if p2pNode := n.getP2PNodeIfEnabled(); p2pNode != nil { payload := envelope.ExecutionPayload if n.p2pSigner == nil { return fmt.Errorf("node has no p2p signer, payload %s cannot be published", payload.ID()) } n.log.Info("Publishing signed execution payload on p2p", "id", payload.ID()) - return n.p2pNode.GossipOut().PublishL2Payload(ctx, envelope, n.p2pSigner) + return p2pNode.GossipOut().PublishL2Payload(ctx, envelope, n.p2pSigner) } // if p2p is not enabled then we just don't publish the payload return nil @@ -594,7 +599,7 @@ func (n *OpNode) PublishL2Payload(ctx context.Context, envelope *eth.ExecutionPa func (n *OpNode) OnUnsafeL2Payload(ctx context.Context, from peer.ID, envelope *eth.ExecutionPayloadEnvelope) error { // ignore if it's from ourselves - if n.p2pEnabled() && from == n.p2pNode.Host().ID() { + if p2pNode := n.getP2PNodeIfEnabled(); p2pNode != nil && from == p2pNode.Host().ID() { return nil } @@ -615,7 +620,7 @@ func (n *OpNode) OnUnsafeL2Payload(ctx context.Context, from peer.ID, envelope * } func (n *OpNode) RequestL2Range(ctx context.Context, start, end eth.L2BlockRef) error { - if n.p2pEnabled() && n.p2pNode.AltSyncEnabled() { + if p2pNode := n.getP2PNodeIfEnabled(); p2pNode != nil && p2pNode.AltSyncEnabled() { if unixTimeStale(start.Time, 12*time.Hour) { n.log.Debug( "ignoring request to sync L2 range, timestamp is too old for p2p", @@ -624,7 +629,7 @@ func (n *OpNode) RequestL2Range(ctx context.Context, start, end eth.L2BlockRef) "start_time", start.Time) return nil } - return n.p2pNode.RequestL2Range(ctx, start, end) + return p2pNode.RequestL2Range(ctx, start, end) } n.log.Debug("ignoring request to sync L2 range, no sync method available", "start", start, "end", end) return nil @@ -636,7 +641,7 @@ func unixTimeStale(timestamp uint64, duration time.Duration) bool { } func (n *OpNode) P2P() p2p.Node { - return n.p2pNode + return n.getP2PNodeIfEnabled() } func (n *OpNode) RuntimeConfig() ReadonlyRuntimeConfig { @@ -671,6 +676,8 @@ func (n *OpNode) Stop(ctx context.Context) error { result = multierror.Append(result, fmt.Errorf("error stopping sequencer: %w", err)) } } + + n.p2pMu.Lock() if n.p2pNode != nil { if err := n.p2pNode.Close(); err != nil { result = multierror.Append(result, fmt.Errorf("failed to close p2p node: %w", err)) @@ -678,6 +685,8 @@ func (n *OpNode) Stop(ctx context.Context) error { // Prevent further use of p2p. 
n.p2pNode = nil } + n.p2pMu.Unlock() + if n.p2pSigner != nil { if err := n.p2pSigner.Close(); err != nil { result = multierror.Append(result, fmt.Errorf("failed to close p2p signer: %w", err)) @@ -778,3 +787,13 @@ func (n *OpNode) HTTPEndpoint() string { } return fmt.Sprintf("http://%s", n.server.Addr().String()) } + +func (n *OpNode) getP2PNodeIfEnabled() *p2p.NodeP2P { + if !n.p2pEnabled() { + return nil + } + + n.p2pMu.Lock() + defer n.p2pMu.Unlock() + return n.p2pNode +} diff --git a/op-node/p2p/cli/load_signer.go b/op-node/p2p/cli/load_signer.go index 7416fa76397a..3c0c532edb20 100644 --- a/op-node/p2p/cli/load_signer.go +++ b/op-node/p2p/cli/load_signer.go @@ -5,18 +5,18 @@ import ( "strings" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/urfave/cli/v2" "github.com/ethereum-optimism/optimism/op-node/flags" "github.com/ethereum-optimism/optimism/op-node/p2p" + opsigner "github.com/ethereum-optimism/optimism/op-service/signer" ) -// TODO: implement remote signer setup (config to authenticated endpoint) -// and remote signer itself (e.g. a open http client to make signing requests) - // LoadSignerSetup loads a configuration for a Signer to be set up later -func LoadSignerSetup(ctx *cli.Context) (p2p.SignerSetup, error) { +func LoadSignerSetup(ctx *cli.Context, logger log.Logger) (p2p.SignerSetup, error) { key := ctx.String(flags.SequencerP2PKeyName) + signerCfg := opsigner.ReadCLIConfig(ctx) if key != "" { // Mnemonics are bad because they leak *all* keys when they leak. // Unencrypted keys from file are bad because they are easy to leak (and we are not checking file permissions). @@ -26,9 +26,13 @@ func LoadSignerSetup(ctx *cli.Context) (p2p.SignerSetup, error) { } return &p2p.PreparedSigner{Signer: p2p.NewLocalSigner(priv)}, nil + } else if signerCfg.Enabled() { + remoteSigner, err := p2p.NewRemoteSigner(logger, signerCfg) + if err != nil { + return nil, err + } + return &p2p.PreparedSigner{Signer: remoteSigner}, nil } - // TODO: create remote signer - return nil, nil } diff --git a/op-node/p2p/config.go b/op-node/p2p/config.go index ee21ba20fc39..10a75881b87d 100644 --- a/op-node/p2p/config.go +++ b/op-node/p2p/config.go @@ -29,7 +29,6 @@ var DefaultBootnodes = []*enode.Node{ // OP Labs enode.MustParse("enode://869d07b5932f17e8490990f75a3f94195e9504ddb6b85f7189e5a9c0a8fff8b00aecf6f3ac450ecba6cdabdb5858788a94bde2b613e0f2d82e9b395355f76d1a@34.65.67.101:30305"), enode.MustParse("enode://2d4e7e9d48f4dd4efe9342706dd1b0024681bd4c3300d021f86fc75eab7865d4e0cbec6fbc883f011cfd6a57423e7e2f6e104baad2b744c3cafaec6bc7dc92c1@34.65.43.171:30305"), - enode.MustParse("enode://9d7a3efefe442351217e73b3a593bcb8efffb55b4807699972145324eab5e6b382152f8d24f6301baebbfb5ecd4127bd3faab2842c04cd432bdf50ba092f6645@34.65.109.126:30305"), // Base enode.MustParse("enr:-J24QNz9lbrKbN4iSmmjtnr7SjUMk4zB7f1krHZcTZx-JRKZd0kA2gjufUROD6T3sOWDVDnFJRvqBBo62zuF-hYCohOGAYiOoEyEgmlkgnY0gmlwhAPniryHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQKNVFlCxh_B-716tTs-h1vMzZkSs1FTu_OYTNjgufplG4N0Y3CCJAaDdWRwgiQG"), enode.MustParse("enr:-J24QH-f1wt99sfpHy4c0QJM-NfmsIfmlLAMMcgZCUEgKG_BBYFc6FwYgaMJMQN5dsRBJApIok0jFn-9CS842lGpLmqGAYiOoDRAgmlkgnY0gmlwhLhIgb2Hb3BzdGFja4OFQgCJc2VjcDI1NmsxoQJ9FTIv8B9myn1MWaC_2lJ-sMoeCDkusCsk4BYHjjCq04N0Y3CCJAaDdWRwgiQG"), @@ -37,7 +36,9 @@ var DefaultBootnodes = []*enode.Node{ 
enode.MustParse("enr:-J24QHmGyBwUZXIcsGYMaUqGGSl4CFdx9Tozu-vQCn5bHIQbR7On7dZbU61vYvfrJr30t0iahSqhc64J46MnUO2JvQaGAYiOoCKKgmlkgnY0gmlwhAPnCzSHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQINc4fSijfbNIiGhcgvwjsjxVFJHUstK9L1T8OTKUjgloN0Y3CCJAaDdWRwgiQG"), enode.MustParse("enr:-J24QG3ypT4xSu0gjb5PABCmVxZqBjVw9ca7pvsI8jl4KATYAnxBmfkaIuEqy9sKvDHKuNCsy57WwK9wTt2aQgcaDDyGAYiOoGAXgmlkgnY0gmlwhDbGmZaHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQIeAK_--tcLEiu7HvoUlbV52MspE0uCocsx1f_rYvRenIN0Y3CCJAaDdWRwgiQG"), // Conduit - enode.MustParse("enode://9d7a3efefe442351217e73b3a593bcb8efffb55b4807699972145324eab5e6b382152f8d24f6301baebbfb5ecd4127bd3faab2842c04cd432bdf50ba092f6645@34.65.109.126:30305"), + enode.MustParse("enode://d25ce99435982b04d60c4b41ba256b84b888626db7bee45a9419382300fbe907359ae5ef250346785bff8d3b9d07cd3e017a27e2ee3cfda3bcbb0ba762ac9674@bootnode.conduit.xyz:0?discport=30301"), + enode.MustParse("enode://2d4e7e9d48f4dd4efe9342706dd1b0024681bd4c3300d021f86fc75eab7865d4e0cbec6fbc883f011cfd6a57423e7e2f6e104baad2b744c3cafaec6bc7dc92c1@34.65.43.171:0?discport=30305"), + enode.MustParse("enode://9d7a3efefe442351217e73b3a593bcb8efffb55b4807699972145324eab5e6b382152f8d24f6301baebbfb5ecd4127bd3faab2842c04cd432bdf50ba092f6645@34.65.109.126:0?discport=30305"), } type HostMetrics interface { diff --git a/op-node/p2p/gossip_test.go b/op-node/p2p/gossip_test.go index 0833f270e40b..9f047f4b5ba8 100644 --- a/op-node/p2p/gossip_test.go +++ b/op-node/p2p/gossip_test.go @@ -3,31 +3,33 @@ package p2p import ( "bytes" "context" + "crypto/ecdsa" "fmt" "io" "math/big" "testing" "time" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" - "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/golang/snappy" - // "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" + oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + opsigner "github.com/ethereum-optimism/optimism/op-service/signer" + "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" + pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-service/testlog" ) func TestGuardGossipValidator(t *testing.T) { @@ -62,30 +64,122 @@ func TestVerifyBlockSignature(t *testing.T) { L2ChainID: big.NewInt(100), } peerId := peer.ID("foo") - secrets, err := e2eutils.DefaultMnemonicConfig.Secrets() + secrets, err := crypto.GenerateKey() + require.NoError(t, err) + msg := []byte("any msg") + + t.Run("Valid", func(t *testing.T) { + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.PublicKey)} + signer := &PreparedSigner{Signer: NewLocalSigner(secrets)} + sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) + require.NoError(t, err) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:], msg) + require.Equal(t, pubsub.ValidationAccept, result) + }) + + t.Run("WrongSigner", func(t *testing.T) { + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: common.HexToAddress("0x1234")} + signer := 
&PreparedSigner{Signer: NewLocalSigner(secrets)} + sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) + require.NoError(t, err) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:], msg) + require.Equal(t, pubsub.ValidationReject, result) + }) + + t.Run("InvalidSignature", func(t *testing.T) { + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.PublicKey)} + sig := make([]byte, 65) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig, msg) + require.Equal(t, pubsub.ValidationReject, result) + }) + + t.Run("NoSequencer", func(t *testing.T) { + runCfg := &testutils.MockRuntimeConfig{} + signer := &PreparedSigner{Signer: NewLocalSigner(secrets)} + sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) + require.NoError(t, err) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:], msg) + require.Equal(t, pubsub.ValidationIgnore, result) + }) +} + +type mockRemoteSigner struct { + priv *ecdsa.PrivateKey +} + +func (t *mockRemoteSigner) SignBlockPayload(args opsigner.BlockPayloadArgs) (hexutil.Bytes, error) { + signingHash, err := args.ToSigningHash() + if err != nil { + return nil, err + } + signature, err := crypto.Sign(signingHash[:], t.priv) + if err != nil { + return nil, err + } + return signature, nil +} + +func TestVerifyBlockSignatureWithRemoteSigner(t *testing.T) { + secrets, err := crypto.GenerateKey() require.NoError(t, err) + + remoteSigner := &mockRemoteSigner{secrets} + server := oprpc.NewServer( + "127.0.0.1", + 0, + "test", + oprpc.WithAPIs([]rpc.API{ + { + Namespace: "opsigner", + Service: remoteSigner, + }, + }), + ) + + require.NoError(t, server.Start()) + defer func() { + _ = server.Stop() + }() + + logger := testlog.Logger(t, log.LevelCrit) + cfg := &rollup.Config{ + L2ChainID: big.NewInt(100), + } + + peerId := peer.ID("foo") msg := []byte("any msg") + signerCfg := opsigner.NewCLIConfig() + signerCfg.Endpoint = fmt.Sprintf("http://%s", server.Endpoint()) + signerCfg.TLSConfig.TLSKey = "" + signerCfg.TLSConfig.TLSCert = "" + signerCfg.TLSConfig.TLSCaCert = "" + signerCfg.TLSConfig.Enabled = false + t.Run("Valid", func(t *testing.T) { - runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)} - signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)} + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.PublicKey)} + remoteSigner, err := NewRemoteSigner(logger, signerCfg) + require.NoError(t, err) + signer := &PreparedSigner{Signer: remoteSigner} sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) require.NoError(t, err) - result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:], msg) require.Equal(t, pubsub.ValidationAccept, result) }) t.Run("WrongSigner", func(t *testing.T) { runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: common.HexToAddress("0x1234")} - signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)} + remoteSigner, err := NewRemoteSigner(logger, signerCfg) + require.NoError(t, err) + signer := &PreparedSigner{Signer: remoteSigner} sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) require.NoError(t, err) - result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg) + result := verifyBlockSignature(logger, cfg, runCfg, 
peerId, sig[:], msg) require.Equal(t, pubsub.ValidationReject, result) }) t.Run("InvalidSignature", func(t *testing.T) { - runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)} + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.PublicKey)} sig := make([]byte, 65) result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig, msg) require.Equal(t, pubsub.ValidationReject, result) @@ -93,12 +187,36 @@ func TestVerifyBlockSignature(t *testing.T) { t.Run("NoSequencer", func(t *testing.T) { runCfg := &testutils.MockRuntimeConfig{} - signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)} + remoteSigner, err := NewRemoteSigner(logger, signerCfg) + require.NoError(t, err) + signer := &PreparedSigner{Signer: remoteSigner} sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) require.NoError(t, err) - result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:], msg) require.Equal(t, pubsub.ValidationIgnore, result) }) + + t.Run("RemoteSignerNoTLS", func(t *testing.T) { + signerCfg := opsigner.NewCLIConfig() + signerCfg.Endpoint = fmt.Sprintf("http://%s", server.Endpoint()) + signerCfg.TLSConfig.TLSKey = "invalid" + signerCfg.TLSConfig.TLSCert = "invalid" + signerCfg.TLSConfig.TLSCaCert = "invalid" + signerCfg.TLSConfig.Enabled = true + + _, err := NewRemoteSigner(logger, signerCfg) + require.Error(t, err) + }) + + t.Run("RemoteSignerInvalidEndpoint", func(t *testing.T) { + signerCfg := opsigner.NewCLIConfig() + signerCfg.Endpoint = "Invalid" + signerCfg.TLSConfig.TLSKey = "" + signerCfg.TLSConfig.TLSCert = "" + signerCfg.TLSConfig.TLSCaCert = "" + _, err := NewRemoteSigner(logger, signerCfg) + require.Error(t, err) + }) } type MarshalSSZ interface { @@ -146,10 +264,10 @@ func TestBlockValidator(t *testing.T) { cfg := &rollup.Config{ L2ChainID: big.NewInt(100), } - secrets, err := e2eutils.DefaultMnemonicConfig.Secrets() + secrets, err := crypto.GenerateKey() require.NoError(t, err) - runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)} - signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)} + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.PublicKey)} + signer := &PreparedSigner{Signer: NewLocalSigner(secrets)} // Params Set 2: Call the validation function peerID := peer.ID("foo") diff --git a/op-node/p2p/signer.go b/op-node/p2p/signer.go index cd5e9f94a01b..20a52d0a2625 100644 --- a/op-node/p2p/signer.go +++ b/op-node/p2p/signer.go @@ -9,8 +9,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-node/rollup" + opsigner "github.com/ethereum-optimism/optimism/op-service/signer" ) var SigningDomainBlocksV1 = [32]byte{} @@ -20,40 +22,27 @@ type Signer interface { io.Closer } -func SigningHash(domain [32]byte, chainID *big.Int, payloadBytes []byte) (common.Hash, error) { - var msgInput [32 + 32 + 32]byte - // domain: first 32 bytes - copy(msgInput[:32], domain[:]) - // chain_id: second 32 bytes - if chainID.BitLen() > 256 { - return common.Hash{}, errors.New("chain_id is too large") - } - chainID.FillBytes(msgInput[32:64]) - // payload_hash: third 32 bytes, hash of encoded payload - copy(msgInput[64:], crypto.Keccak256(payloadBytes)) - - return 
crypto.Keccak256Hash(msgInput[:]), nil -} - func BlockSigningHash(cfg *rollup.Config, payloadBytes []byte) (common.Hash, error) { - return SigningHash(SigningDomainBlocksV1, cfg.L2ChainID, payloadBytes) + return opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg.L2ChainID, payloadBytes, nil).ToSigningHash() } // LocalSigner is suitable for testing type LocalSigner struct { - priv *ecdsa.PrivateKey - hasher func(domain [32]byte, chainID *big.Int, payloadBytes []byte) (common.Hash, error) + priv *ecdsa.PrivateKey } func NewLocalSigner(priv *ecdsa.PrivateKey) *LocalSigner { - return &LocalSigner{priv: priv, hasher: SigningHash} + return &LocalSigner{priv: priv} } func (s *LocalSigner) Sign(ctx context.Context, domain [32]byte, chainID *big.Int, encodedMsg []byte) (sig *[65]byte, err error) { if s.priv == nil { return nil, errors.New("signer is closed") } - signingHash, err := s.hasher(domain, chainID, encodedMsg) + + blockPayloadArgs := opsigner.NewBlockPayloadArgs(domain, chainID, encodedMsg, nil) + signingHash, err := blockPayloadArgs.ToSigningHash() + if err != nil { return nil, err } @@ -69,6 +58,39 @@ func (s *LocalSigner) Close() error { return nil } +type RemoteSigner struct { + client *opsigner.SignerClient + sender *common.Address +} + +func NewRemoteSigner(logger log.Logger, config opsigner.CLIConfig) (*RemoteSigner, error) { + signerClient, err := opsigner.NewSignerClientFromConfig(logger, config) + if err != nil { + return nil, err + } + senderAddress := common.HexToAddress(config.Address) + return &RemoteSigner{signerClient, &senderAddress}, nil +} + +func (s *RemoteSigner) Sign(ctx context.Context, domain [32]byte, chainID *big.Int, encodedMsg []byte) (sig *[65]byte, err error) { + if s.client == nil { + return nil, errors.New("signer is closed") + } + + blockPayloadArgs := opsigner.NewBlockPayloadArgs(domain, chainID, encodedMsg, s.sender) + signature, err := s.client.SignBlockPayload(ctx, blockPayloadArgs) + + if err != nil { + return nil, err + } + return &signature, nil +} + +func (s *RemoteSigner) Close() error { + s.client = nil + return nil +} + type PreparedSigner struct { Signer } diff --git a/op-node/p2p/signer_test.go b/op-node/p2p/signer_test.go index abcfe4825b30..53f78504d18f 100644 --- a/op-node/p2p/signer_test.go +++ b/op-node/p2p/signer_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-node/rollup" + opsigner "github.com/ethereum-optimism/optimism/op-service/signer" "github.com/stretchr/testify/require" ) @@ -14,10 +15,10 @@ func TestSigningHash_DifferentDomain(t *testing.T) { } payloadBytes := []byte("arbitraryData") - hash, err := SigningHash(SigningDomainBlocksV1, cfg.L2ChainID, payloadBytes) + hash, err := opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg.L2ChainID, payloadBytes, nil).ToSigningHash() require.NoError(t, err, "creating first signing hash") - hash2, err := SigningHash([32]byte{3}, cfg.L2ChainID, payloadBytes) + hash2, err := opsigner.NewBlockPayloadArgs([32]byte{3}, cfg.L2ChainID, payloadBytes, nil).ToSigningHash() require.NoError(t, err, "creating second signing hash") require.NotEqual(t, hash, hash2, "signing hash should be different when domain is different") @@ -32,10 +33,10 @@ func TestSigningHash_DifferentChainID(t *testing.T) { } payloadBytes := []byte("arbitraryData") - hash, err := SigningHash(SigningDomainBlocksV1, cfg1.L2ChainID, payloadBytes) + hash, err := opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg1.L2ChainID, payloadBytes, nil).ToSigningHash() require.NoError(t, err, 
"creating first signing hash") - hash2, err := SigningHash(SigningDomainBlocksV1, cfg2.L2ChainID, payloadBytes) + hash2, err := opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg2.L2ChainID, payloadBytes, nil).ToSigningHash() require.NoError(t, err, "creating second signing hash") require.NotEqual(t, hash, hash2, "signing hash should be different when chain ID is different") @@ -46,10 +47,10 @@ func TestSigningHash_DifferentMessage(t *testing.T) { L2ChainID: big.NewInt(100), } - hash, err := SigningHash(SigningDomainBlocksV1, cfg.L2ChainID, []byte("msg1")) + hash, err := opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg.L2ChainID, []byte("msg1"), nil).ToSigningHash() require.NoError(t, err, "creating first signing hash") - hash2, err := SigningHash(SigningDomainBlocksV1, cfg.L2ChainID, []byte("msg2")) + hash2, err := opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg.L2ChainID, []byte("msg2"), nil).ToSigningHash() require.NoError(t, err, "creating second signing hash") require.NotEqual(t, hash, hash2, "signing hash should be different when message is different") @@ -62,6 +63,6 @@ func TestSigningHash_LimitChainID(t *testing.T) { cfg := &rollup.Config{ L2ChainID: chainID, } - _, err := SigningHash(SigningDomainBlocksV1, cfg.L2ChainID, []byte("arbitraryData")) + _, err := opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg.L2ChainID, []byte("arbitraryData"), nil).ToSigningHash() require.ErrorContains(t, err, "chain_id is too large") } diff --git a/op-node/p2p/sync.go b/op-node/p2p/sync.go index c8777fe51f92..1b2cc570a2e9 100644 --- a/op-node/p2p/sync.go +++ b/op-node/p2p/sync.go @@ -346,6 +346,9 @@ func (s *SyncClient) AddPeer(id peer.ID) { func (s *SyncClient) RemovePeer(id peer.ID) { s.peersLock.Lock() defer s.peersLock.Unlock() + if s.closingPeers { + return + } cancel, ok := s.peers[id] if !ok { s.log.Warn("cannot remove peer from sync duties, peer was not registered", "peer", id) diff --git a/op-node/rollup/driver/driver.go b/op-node/rollup/driver/driver.go index 1fd751846cf3..01a05fe2b538 100644 --- a/op-node/rollup/driver/driver.go +++ b/op-node/rollup/driver/driver.go @@ -49,6 +49,7 @@ type Metrics interface { RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID) SetDerivationIdle(idle bool) + SetSequencerState(active bool) RecordL1ReorgDepth(d uint64) diff --git a/op-node/rollup/engine/build_seal.go b/op-node/rollup/engine/build_seal.go index b292681e13f1..cd3ca8888271 100644 --- a/op-node/rollup/engine/build_seal.go +++ b/op-node/rollup/engine/build_seal.go @@ -110,16 +110,18 @@ func (eq *EngDeriver) onBuildSeal(ev BuildSealEvent) { eq.metrics.RecordSequencerBuildingDiffTime(buildTime - time.Duration(eq.cfg.BlockTime)*time.Second) txnCount := len(envelope.ExecutionPayload.Transactions) - eq.metrics.CountSequencedTxs(txnCount) + depositCount, _ := lastDeposit(envelope.ExecutionPayload.Transactions) + eq.metrics.CountSequencedTxsInBlock(txnCount, depositCount) - eq.log.Debug("Processed new L2 block", "l2_unsafe", ref, "l1_origin", ref.L1Origin, - "txs", txnCount, "time", ref.Time, "seal_time", sealTime, "build_time", buildTime) + eq.log.Debug("Built new L2 block", "l2_unsafe", ref, "l1_origin", ref.L1Origin, + "txs", txnCount, "deposits", depositCount, "time", ref.Time, "seal_time", sealTime, "build_time", buildTime) eq.emitter.Emit(BuildSealedEvent{ - Concluding: ev.Concluding, - DerivedFrom: ev.DerivedFrom, - Info: ev.Info, - Envelope: envelope, - Ref: ref, + Concluding: ev.Concluding, + DerivedFrom: ev.DerivedFrom, + BuildStarted: 
ev.BuildStarted, + Info: ev.Info, + Envelope: envelope, + Ref: ref, }) } diff --git a/op-node/rollup/engine/build_sealed.go b/op-node/rollup/engine/build_sealed.go index eb2680850a75..5ceff489ecc6 100644 --- a/op-node/rollup/engine/build_sealed.go +++ b/op-node/rollup/engine/build_sealed.go @@ -1,6 +1,8 @@ package engine import ( + "time" + "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -10,7 +12,8 @@ type BuildSealedEvent struct { // if payload should be promoted to (local) safe (must also be pending safe, see DerivedFrom) Concluding bool // payload is promoted to pending-safe if non-zero - DerivedFrom eth.L1BlockRef + DerivedFrom eth.L1BlockRef + BuildStarted time.Time Info eth.PayloadInfo Envelope *eth.ExecutionPayloadEnvelope @@ -25,10 +28,11 @@ func (eq *EngDeriver) onBuildSealed(ev BuildSealedEvent) { // If a (pending) safe block, immediately process the block if ev.DerivedFrom != (eth.L1BlockRef{}) { eq.emitter.Emit(PayloadProcessEvent{ - Concluding: ev.Concluding, - DerivedFrom: ev.DerivedFrom, - Envelope: ev.Envelope, - Ref: ev.Ref, + Concluding: ev.Concluding, + DerivedFrom: ev.DerivedFrom, + Envelope: ev.Envelope, + Ref: ev.Ref, + BuildStarted: ev.BuildStarted, }) } } diff --git a/op-node/rollup/engine/engine_controller.go b/op-node/rollup/engine/engine_controller.go index b088e382f07f..907238e84a9e 100644 --- a/op-node/rollup/engine/engine_controller.go +++ b/op-node/rollup/engine/engine_controller.go @@ -330,6 +330,7 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et } } // Insert the payload & then call FCU + newPayloadStart := time.Now() status, err := e.engine.NewPayload(ctx, envelope.ExecutionPayload, envelope.ParentBeaconBlockRoot) if err != nil { return derive.NewTemporaryError(fmt.Errorf("failed to update insert payload: %w", err)) @@ -342,6 +343,7 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et return derive.NewTemporaryError(fmt.Errorf("cannot process unsafe payload: new - %v; parent: %v; err: %w", payload.ID(), payload.ParentID(), eth.NewPayloadErr(payload, status))) } + newPayloadFinish := time.Now() // Mark the new payload as valid fc := eth.ForkchoiceState{ @@ -361,6 +363,7 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et } logFn := e.logSyncProgressMaybe() defer logFn() + fcu2Start := time.Now() fcRes, err := e.engine.ForkchoiceUpdate(ctx, &fc, nil) if err != nil { var rpcErr rpc.Error @@ -380,6 +383,7 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et return derive.NewTemporaryError(fmt.Errorf("cannot prepare unsafe chain for new payload: new - %v; parent: %v; err: %w", payload.ID(), payload.ParentID(), eth.ForkchoiceUpdateErr(fcRes.PayloadStatus))) } + fcu2Finish := time.Now() e.SetUnsafeHead(ref) e.needFCUCall = false e.emitter.Emit(UnsafeUpdateEvent{Ref: ref}) @@ -397,6 +401,16 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et }) } + totalTime := fcu2Finish.Sub(newPayloadStart) + e.log.Info("Inserted new L2 unsafe block (synchronous)", + "hash", envelope.ExecutionPayload.BlockHash, + "number", uint64(envelope.ExecutionPayload.BlockNumber), + "newpayload_time", common.PrettyDuration(newPayloadFinish.Sub(newPayloadStart)), + "fcu2_time", common.PrettyDuration(fcu2Finish.Sub(fcu2Start)), + "total_time", common.PrettyDuration(totalTime), + "mgas", float64(envelope.ExecutionPayload.GasUsed)/1000000, + "mgasps", float64(envelope.ExecutionPayload.GasUsed)*1000/float64(totalTime)) + 
return nil } diff --git a/op-node/rollup/engine/events.go b/op-node/rollup/engine/events.go index bb4499564875..fbc8b7b1db2f 100644 --- a/op-node/rollup/engine/events.go +++ b/op-node/rollup/engine/events.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-node/rollup" @@ -15,7 +16,7 @@ import ( ) type Metrics interface { - CountSequencedTxs(count int) + CountSequencedTxsInBlock(txns int, deposits int) RecordSequencerBuildingDiffTime(duration time.Duration) RecordSequencerSealingTime(duration time.Duration) @@ -203,12 +204,67 @@ func (ev TryBackupUnsafeReorgEvent) String() string { return "try-backup-unsafe-reorg" } -type TryUpdateEngineEvent struct{} +type TryUpdateEngineEvent struct { + // These fields will be zero-value (BuildStarted,InsertStarted=time.Time{}, Envelope=nil) if + // this event is emitted outside of engineDeriver.onPayloadSuccess + BuildStarted time.Time + InsertStarted time.Time + Envelope *eth.ExecutionPayloadEnvelope +} func (ev TryUpdateEngineEvent) String() string { return "try-update-engine" } +// Checks for the existence of the Envelope field, which is only +// added by the PayloadSuccessEvent +func (ev TryUpdateEngineEvent) triggeredByPayloadSuccess() bool { + return ev.Envelope != nil +} + +// Returns key/value pairs that can be logged and are useful for plotting +// block build/insert time as a way to measure performance. +func (ev TryUpdateEngineEvent) getBlockProcessingMetrics() []interface{} { + fcuFinish := time.Now() + payload := ev.Envelope.ExecutionPayload + + logValues := []interface{}{ + "hash", payload.BlockHash, + "number", uint64(payload.BlockNumber), + "state_root", payload.StateRoot, + "timestamp", uint64(payload.Timestamp), + "parent", payload.ParentHash, + "prev_randao", payload.PrevRandao, + "fee_recipient", payload.FeeRecipient, + "txs", len(payload.Transactions), + } + + var totalTime time.Duration + var mgasps float64 + if !ev.BuildStarted.IsZero() { + totalTime = fcuFinish.Sub(ev.BuildStarted) + logValues = append(logValues, + "build_time", common.PrettyDuration(ev.InsertStarted.Sub(ev.BuildStarted)), + "insert_time", common.PrettyDuration(fcuFinish.Sub(ev.InsertStarted)), + ) + } else if !ev.InsertStarted.IsZero() { + totalTime = fcuFinish.Sub(ev.InsertStarted) + } + + // Avoid divide-by-zero for mgasps + if totalTime > 0 { + mgasps = float64(payload.GasUsed) * 1000 / float64(totalTime) + } + + logValues = append(logValues, + "total_time", common.PrettyDuration(totalTime), + "mgas", float64(payload.GasUsed)/1000000, + "mgasps", mgasps, + ) + + return logValues +} + type ForceEngineResetEvent struct { Unsafe, Safe, Finalized eth.L2BlockRef } @@ -322,6 +378,9 @@ func (d *EngDeriver) OnEvent(ev event.Event) bool { } else { d.emitter.Emit(rollup.CriticalErrorEvent{Err: fmt.Errorf("unexpected TryUpdateEngine error type: %w", err)}) } + } else if x.triggeredByPayloadSuccess() { + logValues := x.getBlockProcessingMetrics() + d.log.Info("Inserted new L2 unsafe block", logValues...) 
} case ProcessUnsafePayloadEvent: ref, err := derive.PayloadToBlockRef(d.cfg, x.Envelope.ExecutionPayload) diff --git a/op-node/rollup/engine/payload_process.go b/op-node/rollup/engine/payload_process.go index 62d7ded47f0d..272fce3febcf 100644 --- a/op-node/rollup/engine/payload_process.go +++ b/op-node/rollup/engine/payload_process.go @@ -3,6 +3,7 @@ package engine import ( "context" "fmt" + "time" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -12,7 +13,8 @@ type PayloadProcessEvent struct { // if payload should be promoted to (local) safe (must also be pending safe, see DerivedFrom) Concluding bool // payload is promoted to pending-safe if non-zero - DerivedFrom eth.L1BlockRef + DerivedFrom eth.L1BlockRef + BuildStarted time.Time Envelope *eth.ExecutionPayloadEnvelope Ref eth.L2BlockRef @@ -26,6 +28,7 @@ func (eq *EngDeriver) onPayloadProcess(ev PayloadProcessEvent) { ctx, cancel := context.WithTimeout(eq.ctx, payloadProcessTimeout) defer cancel() + insertStart := time.Now() status, err := eq.ec.engine.NewPayload(ctx, ev.Envelope.ExecutionPayload, ev.Envelope.ParentBeaconBlockRoot) if err != nil { @@ -49,7 +52,14 @@ func (eq *EngDeriver) onPayloadProcess(ev PayloadProcessEvent) { }) return case eth.ExecutionValid: - eq.emitter.Emit(PayloadSuccessEvent(ev)) + eq.emitter.Emit(PayloadSuccessEvent{ + Concluding: ev.Concluding, + DerivedFrom: ev.DerivedFrom, + BuildStarted: ev.BuildStarted, + InsertStarted: insertStart, + Envelope: ev.Envelope, + Ref: ev.Ref, + }) return default: eq.emitter.Emit(rollup.EngineTemporaryErrorEvent{ diff --git a/op-node/rollup/engine/payload_success.go b/op-node/rollup/engine/payload_success.go index c00d8e81ea77..17d8b0163b5f 100644 --- a/op-node/rollup/engine/payload_success.go +++ b/op-node/rollup/engine/payload_success.go @@ -1,6 +1,8 @@ package engine import ( + "time" + "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -8,7 +10,9 @@ type PayloadSuccessEvent struct { // if payload should be promoted to (local) safe (must also be pending safe, see DerivedFrom) Concluding bool // payload is promoted to pending-safe if non-zero - DerivedFrom eth.L1BlockRef + DerivedFrom eth.L1BlockRef + BuildStarted time.Time + InsertStarted time.Time Envelope *eth.ExecutionPayloadEnvelope Ref eth.L2BlockRef @@ -30,11 +34,9 @@ func (eq *EngDeriver) onPayloadSuccess(ev PayloadSuccessEvent) { }) } - payload := ev.Envelope.ExecutionPayload - eq.log.Info("Inserted block", "hash", payload.BlockHash, "number", uint64(payload.BlockNumber), - "state_root", payload.StateRoot, "timestamp", uint64(payload.Timestamp), "parent", payload.ParentHash, - "prev_randao", payload.PrevRandao, "fee_recipient", payload.FeeRecipient, - "txs", len(payload.Transactions), "concluding", ev.Concluding, "derived_from", ev.DerivedFrom) - - eq.emitter.Emit(TryUpdateEngineEvent{}) + eq.emitter.Emit(TryUpdateEngineEvent{ + BuildStarted: ev.BuildStarted, + InsertStarted: ev.InsertStarted, + Envelope: ev.Envelope, + }) } diff --git a/op-node/rollup/interop/interop.go b/op-node/rollup/interop/interop.go index a4342b6a19f6..94fa77a5b309 100644 --- a/op-node/rollup/interop/interop.go +++ b/op-node/rollup/interop/interop.go @@ -3,6 +3,7 @@ package interop import ( "context" "fmt" + "strings" "sync" "time" @@ -139,7 +140,10 @@ func (d *InteropDeriver) onInteropPendingSafeChangedEvent(x engine.InteropPendin defer cancel() if err := d.backend.UpdateLocalSafe(ctx, d.chainID, x.DerivedFrom, x.Ref.BlockRef()); err != nil { d.log.Debug("Failed to 
signal derived-from update to interop backend", "derivedFrom", x.DerivedFrom, "block", x.Ref) - // still continue to try and do a cross-safe update + if strings.Contains(err.Error(), "too far behind") { + d.log.Error("Supervisor is too far behind, resetting derivation", "err", err) + d.emitter.Emit(rollup.ResetEvent{Err: fmt.Errorf("supervisor is too far behind: %w", err)}) + } } // Now that the op-supervisor is aware of the new local-safe block, we want to check if cross-safe changed. d.emitter.Emit(engine.RequestCrossSafeEvent{}) diff --git a/op-node/rollup/sequencing/sequencer.go b/op-node/rollup/sequencing/sequencer.go index b6605d601fa7..2476b9b639f9 100644 --- a/op-node/rollup/sequencing/sequencer.go +++ b/op-node/rollup/sequencing/sequencer.go @@ -33,6 +33,7 @@ type L1OriginSelectorIface interface { } type Metrics interface { + SetSequencerState(active bool) RecordSequencerInconsistentL1Origin(from eth.BlockID, to eth.BlockID) RecordSequencerReset() RecordSequencingError() @@ -281,10 +282,11 @@ func (d *Sequencer) onBuildSealed(x engine.BuildSealedEvent) { d.asyncGossip.Gossip(x.Envelope) // Now after having gossiped the block, try to put it in our own canonical chain d.emitter.Emit(engine.PayloadProcessEvent{ - Concluding: x.Concluding, - DerivedFrom: x.DerivedFrom, - Envelope: x.Envelope, - Ref: x.Ref, + Concluding: x.Concluding, + DerivedFrom: x.DerivedFrom, + BuildStarted: x.BuildStarted, + Envelope: x.Envelope, + Ref: x.Ref, }) d.latest.Ref = x.Ref d.latestSealed = x.Ref @@ -619,6 +621,7 @@ func (d *Sequencer) Init(ctx context.Context, active bool) error { if active { return d.forceStart() } else { + d.metrics.SetSequencerState(false) if err := d.listener.SequencerStopped(); err != nil { return fmt.Errorf("failed to notify sequencer-state listener of initial stopped state: %w", err) } @@ -652,6 +655,7 @@ func (d *Sequencer) forceStart() error { d.nextActionOK = true d.nextAction = d.timeNow() d.active.Store(true) + d.metrics.SetSequencerState(true) d.log.Info("Sequencer has been started", "next action", d.nextAction) return nil } @@ -697,6 +701,7 @@ func (d *Sequencer) Stop(ctx context.Context) (common.Hash, error) { d.nextActionOK = false d.active.Store(false) + d.metrics.SetSequencerState(false) d.log.Info("Sequencer has been stopped") return d.latestHead.Hash, nil } diff --git a/op-node/rollup/sequencing/sequencer_chaos_test.go b/op-node/rollup/sequencing/sequencer_chaos_test.go index d5000fbed339..93ba254c1b54 100644 --- a/op-node/rollup/sequencing/sequencer_chaos_test.go +++ b/op-node/rollup/sequencing/sequencer_chaos_test.go @@ -216,7 +216,13 @@ func (c *ChaoticEngine) OnEvent(ev event.Event) bool { c.clockRandomIncrement(0, time.Second*3) } c.unsafe = x.Ref - c.emitter.Emit(engine.PayloadSuccessEvent(x)) + c.emitter.Emit(engine.PayloadSuccessEvent{ + Concluding: x.Concluding, + DerivedFrom: x.DerivedFrom, + BuildStarted: x.BuildStarted, + Envelope: x.Envelope, + Ref: x.Ref, + }) // With event delay, the engine would update and signal the new forkchoice. 
c.emitter.Emit(engine.ForkchoiceRequestEvent{}) } diff --git a/op-node/service.go b/op-node/service.go index 4d12c7f5446f..55c1c7173b74 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -49,7 +49,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { driverConfig := NewDriverConfig(ctx) - p2pSignerSetup, err := p2pcli.LoadSignerSetup(ctx) + p2pSignerSetup, err := p2pcli.LoadSignerSetup(ctx, log) if err != nil { return nil, fmt.Errorf("failed to load p2p signer: %w", err) } diff --git a/op-node/version/version.go b/op-node/version/version.go index 327ee7b49727..2456f656d45c 100644 --- a/op-node/version/version.go +++ b/op-node/version/version.go @@ -1,6 +1,6 @@ package version var ( - Version = "v0.10.14" + Version = "v0.0.0" Meta = "dev" ) diff --git a/op-program/README.md b/op-program/README.md index 15e89ffb5cf7..932ec85db22f 100644 --- a/op-program/README.md +++ b/op-program/README.md @@ -45,6 +45,7 @@ After running `make reproducible-prestate`, the following files can be found in [./bin/](./bin/): - [`op-program`](./bin/op-program) - [`op-program-client.elf`](./bin/op-program-client.elf) +- [`op-program-client64.elf`](./bin/op-program-client64.elf) - [`prestate.bin.gz`](./bin/prestate.bin.gz) - [`prestate-proof.json`](./bin/prestate-proof.json) diff --git a/op-program/host/version/version.go b/op-program/host/version/version.go index 327ee7b49727..2456f656d45c 100644 --- a/op-program/host/version/version.go +++ b/op-program/host/version/version.go @@ -1,6 +1,6 @@ package version var ( - Version = "v0.10.14" + Version = "v0.0.0" Meta = "dev" ) diff --git a/op-program/prestates/releases.json b/op-program/prestates/releases.json index 63c54a0ba589..75106c4b3c99 100644 --- a/op-program/prestates/releases.json +++ b/op-program/prestates/releases.json @@ -1,5 +1,8 @@ [ { + "version": "1.4.0-rc.2", + "hash": "0x0364e4e72922e7d649338f558f8a14b50ca31922a1484e73ea03987fb1516095" + }, { "version": "1.4.0-rc.1", "hash": "0x03925193e3e89f87835bbdf3a813f60b2aa818a36bbe71cd5d8fd7e79f5e8afe" }, diff --git a/op-proposer/Makefile b/op-proposer/Makefile index 561d7d32f301..3a036e6d5d59 100644 --- a/op-proposer/Makefile +++ b/op-proposer/Makefile @@ -1,34 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -# Find the github tag that points to this commit. If none are found, set the version string to "untagged" -# Prioritizes release tag, if one exists, over tags suffixed with "-rc" -VERSION ?= $(shell tags=$$(git tag --points-at $(GITCOMMIT) | grep '^op-proposer/' | sed 's/op-proposer\///' | sort -V); \ - preferred_tag=$$(echo "$$tags" | grep -v -- '-rc' | tail -n 1); \ - if [ -z "$$preferred_tag" ]; then \ - if [ -z "$$tags" ]; then \ - echo "untagged"; \ - else \ - echo "$$tags" | tail -n 1; \ - fi \ - else \ - echo $$preferred_tag; \ - fi) +DEPRECATED_TARGETS := op-proposer clean test -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X main.Version=$(VERSION) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -op-proposer: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-proposer ./cmd - -clean: - rm bin/op-proposer - -test: - go test -v ./... 
- -.PHONY: \ - clean \ - op-proposer \ - test +include ../just/deprecated.mk diff --git a/op-proposer/README.md b/op-proposer/README.md new file mode 100644 index 000000000000..56f086061e1d --- /dev/null +++ b/op-proposer/README.md @@ -0,0 +1,153 @@ +# `op-proposer` + +Issues: [monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-proposer) + +Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-proposer) + +User docs: +- [Proposer Configuration docs] + +[Proposer Configuration docs]: https://docs.optimism.io/builders/chain-operators/configuration/proposer + +Specs: +- [`proposals.md`](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/proposals.md) +- [`withdrawals`](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/withdrawals.md) +- [`fault-proof/stage-one/bridge-integration.md`](https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/stage-one/bridge-integration.md) + +The `op-proposer` is a light-weight service to automate output-root proposal transactions at a regular interval. +Each proposal transaction submits a claim of the L2 state to L1. + +Chains with a pre-Fault-Proof deployment make proposal-transactions towards a pre-fault-proofs `OptimismPortal` deployment. + +Chains with permissioned or permissionless Fault Proofs make proposal-transactions to the `DisputeGameFactory`, +which instantiates claims (each claim being a new fault-proof "game"), +which can then be resolved for the proposals to persist. + +Withdrawals are authenticated against resolved proposals, +with an inclusion-proof of a withdrawn message (as registered in the L2 withdrawal-contract storage). + +## Quickstart + +```bash +go run ./op-proposer/cmd \ + --l1-eth-rpc http://l1:8545 \ + --rollup-rpc http://op-node:8545 \ + --game-factory-address=changeme \ + --game-type=changeme +``` + +See [Proposer Configuration docs] for customization of the transaction-management, +and usage of a remote signer to isolate the proposer secret key. + +On test networks, `--allow-non-finalized` may be used to make proposals sooner, to reduce test time. + +## Usage + +### Build from source + +```bash +make op-proposer + +./bin/op-proposer --help +``` + +### Run from source + +```bash +# from op-proposer dir: +go run ./cmd --help +``` + +### Build docker image + +See `op-proposer` docker-bake target. + +## Overview + + + +The op-proposer relays subjective `finalized` blocks (irreversible, as locally verified) +to L1 by constructing and submitting proposals. +The proposed claims can then be resolved, and used for withdrawals on L1. + +```mermaid +sequenceDiagram +autonumber + +participant portal as OptimismPortal (v2)<br/>
contract +participant challenger as op-challenger +participant claim as Fault Dispute Game<br/>
contract +participant dgf as Dispute Game Factory<br/>
contract on L1 +participant proposer as op-proposer +participant opnode as op-node +participant el as Execution Engine<br/>
(e.g. op-geth) + +proposer ->>opnode: query output-root +opnode ->>el: query block and withdrawals-root +el -->> opnode: return block and withdrawals-root +opnode -->> proposer: return output root +proposer ->> dgf: send claim +proposer ->> proposer: repeat with next claim +dgf ->> claim: create game contract +challenger ->> claim: resolve (or counter) claim +portal -->> claim: proveWithdrawalTransaction checks game state +``` + +The `op-proposer` itself is a light-weight loop to maintain this relay: +schedule when to propose, inspect what to propose, transact on L1 to propose, and repeat. + +## Product + +### Optimization target + +The `op-proposer` code optimizes for simplicity. + +Proposals are few and far-between, commonly only at a 1 hour interval. +Proposal execution speed affects tests more than it does production, and is thus not a primary optimization target. + +Most costs are incurred in the proposal contract execution, +not in the operation of the op-proposer, and are thus not the primary optimization concern. + +Proposals are critical to safety, however, and simplicity is thus important to this service. + +### Vision + +The pre-fault-proof proposal functionality is effectively unused code, and may be removed in the near future. +Solutions for alternative proving systems are a work in progress. + +With the proposed withdrawals-root feature (see [Isthmus upgrade feature]), +the op-node will soon no longer have to query the storage separately +from the block-header that it constructs an output-root for. +This lowers the requirements to run a proposer, +since an archive-node is no longer required to determine the withdrawals-root. + +[Isthmus upgrade feature]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/isthmus/exec-engine.md#l2tol1messagepasser-storage-root-in-header + +Testing of this service may be further improved by decoupling the scheduling and processing. +Better-encapsulated processing would lend itself well to [op-e2e](../op-e2e) action-tests. + + +## Design principles + + + +- Reuse the transaction-management: this is the most complicated part of the op-proposer, but is common with other services. +- Keep the proposal flow simple: given that we only expect one transaction per hour, + but the transaction is a critical claim, we have a strong preference for safety over liveness. + +## Failure modes + + + +While disabled by default, the op-proposer is capable of submitting proposals too eagerly. +A proposal for unfinalized L2 state that does not hold true later may result in an invalid claim on L1, +and thus in dispute-game penalties. + +Assuming finality, the op-proposer is only really subject to liveness failures: +- L1 RPC failure (mitigated with redundancy in L1 RPC) +- local temporary failure, e.g. offline execution engine (mitigated with alerts) + or odd tx-inclusion situations (mitigated with fresh state upon restart). + +## Testing + +The `op-proposer` integration is covered in system `op-e2e` tests.
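Editor's note: the relay loop the README describes above (schedule when to propose, inspect what to propose, transact on L1, repeat) can be pictured with a short sketch. This is not the actual op-proposer implementation; `OutputFetcher`, `ProposalSender`, and `proposeLoop` are hypothetical names used only to illustrate the shape of the loop.

```go
// Schematic sketch only — not the op-proposer code. OutputFetcher and
// ProposalSender are hypothetical stand-ins for the op-node RPC client
// and the L1 transaction path.
package sketch

import (
	"context"
	"log"
	"time"
)

type OutputFetcher interface {
	// FinalizedOutputRoot returns the output root the rollup node reports
	// for its finalized head, plus the corresponding L2 block number.
	FinalizedOutputRoot(ctx context.Context) (root [32]byte, l2Block uint64, err error)
}

type ProposalSender interface {
	// Propose submits the claim to L1, e.g. by creating a new dispute game.
	Propose(ctx context.Context, root [32]byte, l2Block uint64) error
}

func proposeLoop(ctx context.Context, interval time.Duration, src OutputFetcher, dst ProposalSender) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C: // schedule when to propose
			root, num, err := src.FinalizedOutputRoot(ctx) // inspect what to propose
			if err != nil {
				log.Printf("skipping proposal, could not fetch output root: %v", err)
				continue
			}
			if err := dst.Propose(ctx, root, num); err != nil { // transact on L1
				log.Printf("proposal for L2 block %d failed: %v", num, err)
			}
		}
	}
}
```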
diff --git a/op-proposer/cmd/main.go b/op-proposer/cmd/main.go index cbb21fb28516..e5096351fdd7 100644 --- a/op-proposer/cmd/main.go +++ b/op-proposer/cmd/main.go @@ -19,7 +19,7 @@ import ( ) var ( - Version = "v0.10.14" + Version = "v0.0.0" GitCommit = "" GitDate = "" ) diff --git a/op-proposer/justfile b/op-proposer/justfile new file mode 100644 index 000000000000..08b1b7b73911 --- /dev/null +++ b/op-proposer/justfile @@ -0,0 +1,20 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X main.Version=" + VERSION + " " + \ + "") + "'" + +BINARY := "./bin/op-proposer" + +# Build op-proposer binary +op-proposer: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) + +# Clean build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") diff --git a/op-service/Makefile b/op-service/Makefile index f57c0b6b53a4..eeb03302985d 100644 --- a/op-service/Makefile +++ b/op-service/Makefile @@ -1,27 +1,3 @@ -# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169 -ifeq ($(shell uname),Darwin) - FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic -endif +DEPRECATED_TARGETS := test generate-mocks fuzz -test: - go test -v ./... - -generate-mocks: - go generate ./... - -fuzz: - printf "%s\n" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzExecutionPayloadUnmarshal ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzExecutionPayloadMarshalUnmarshalV1 ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzExecutionPayloadMarshalUnmarshalV2 ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzExecutionPayloadMarshalUnmarshalV3 ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzOBP01 ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzEncodeDecodeBlob ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDetectNonBijectivity ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzEncodeScalar ./eth" \ - | parallel -j 8 {} - -.PHONY: \ - test \ - generate-mocks \ - fuzz +include ../just/deprecated.mk diff --git a/op-service/README.md b/op-service/README.md new file mode 100644 index 000000000000..9f8bf03c54cc --- /dev/null +++ b/op-service/README.md @@ -0,0 +1,75 @@ +# `op-service` + +Issues: [monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-service) + +Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-service) + +`op-service` is a collection of Go utilities to build OP-Stack services with. + +```text +├── cliapp - Flag and lifecycle handling for a Urfave v2 CLI app. 
+├── client - RPC and HTTP client utils +├── clock - Clock interface, system clock, tickers, mock/test time utils +├── crypto - Cryptography utils, complements geth crypto package +├── ctxinterrupt - Blocking/Interrupt handling +├── dial - Dialing util functions for RPC clients +├── endpoint - Abstracts away type of RPC endpoint +├── enum - Utils to create enums +├── errutil - Utils to work with customized errors +├── eth - Common Ethereum data types and OP-Stack extension types +├── flags - Utils and flag types for CLI usage +├── httputil - Utils to create enhanced HTTP Server +├── ioutil - File utils, including atomic files and compression +├── jsonutil - JSON encoding/decoding utils +├── locks - Lock utils, like read-write wrapped types +├── log - Logging CLI and middleware utils +├── metrics - Metrics types, metering abstractions, server utils +├── oppprof - P-Prof CLI types and server setup +├── predeploys - OP-Stack predeploy definitions +├── queue - Generic queue implementation +├── retry - Function retry utils +├── rpc - RPC server utils +├── safego - Utils to make Go memory more safe +├── serialize - Binary serialization abstractions +├── signer - CLI flags and bindings to work with a remote signer +├── solabi - Utils to encode/decode Solidity ABI formatted data +├── sources - RPC client bindings +├── tasks - Err-group with panic handling +├── testlog - Test logger and log-capture utils for testing +├── testutils - Simplified Ethereum types, mock RPC bindings, utils for testing. +├── tls - CLI flags and utils to work with TLS connections +├── txmgr - Transaction manager: automated nonce, fee and confirmation handling. +└── *.go - Miscellaneous utils (soon to be deprecated / moved) +``` + +## Usage + +From `op-service` dir: +```bash +# Run Go tests +make test +# Run Go fuzz tests +make fuzz +``` + +## Product + +### Optimization target + +Provide solid reusable building blocks for all OP-Stack Go services. + +### Vision + +- Remove unused utilities: `op-service` itself needs to stay maintainable. +- Make all Go services consistent: `op-service` modules can be used to simplify and improve more Go services. + +## Design principles + +- Reduce boilerplate in Go services: provide service building utils ranging from CLI to testing. +- Protect devs from sharp edges in the Go std-lib: think of providing missing composition, + proper resource-closing, well set up network-binding, safe concurrency utils. + +## Testing + +Each op-service package has its own unit-testing. +More advanced utils, such as the transaction manager, are covered in `op-e2e` as well. diff --git a/op-service/eth/sync_status.go b/op-service/eth/sync_status.go index f9db1f672b82..e16275920e2b 100644 --- a/op-service/eth/sync_status.go +++ b/op-service/eth/sync_status.go @@ -5,7 +5,7 @@ package eth type SyncStatus struct { // CurrentL1 is the L1 block that the derivation process is last idled at. // This may not be fully derived into L2 data yet. - // The safe L2 blocks were produced/included fully from the L1 chain up to and including this L1 block. + // The safe L2 blocks were produced/included fully from the L1 chain up to _but excluding_ this L1 block. // If the node is synced, this matches the HeadL1, minus the verifier confirmation distance. CurrentL1 L1BlockRef `json:"current_l1"` // CurrentL1Finalized is a legacy sync-status attribute. This is deprecated. 
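Editor's note: the `signer` entry in the package listing above gains block-payload helpers further down in this diff (`NewBlockPayloadArgs` and `ToSigningHash` in `op-service/signer/blockpayload_args.go`). A minimal usage sketch of those helpers follows, assuming the monorepo module is available; the wrapping `main` package and the example chain ID are illustrative only.

```go
// Compute the p2p block-signing hash with the new op-service/signer helpers.
package main

import (
	"fmt"
	"math/big"

	opsigner "github.com/ethereum-optimism/optimism/op-service/signer"
)

func main() {
	var domain [32]byte      // SigningDomainBlocksV1 is the all-zero domain
	chainID := big.NewInt(10) // example L2 chain ID
	payload := []byte("encoded execution payload envelope")

	// The sender address may be nil when only the signing hash is needed,
	// matching how BlockSigningHash uses these args in the op-node diff above.
	hash, err := opsigner.NewBlockPayloadArgs(domain, chainID, payload, nil).ToSigningHash()
	if err != nil {
		fmt.Println("failed to compute signing hash:", err)
		return
	}
	fmt.Println("signing hash:", hash.Hex())
}
```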
diff --git a/op-service/justfile b/op-service/justfile new file mode 100644 index 000000000000..bec1214f3087 --- /dev/null +++ b/op-service/justfile @@ -0,0 +1,23 @@ +import '../just/go.just' + +# Run tests +test: (go_test "./...") + +# Generate mocks +generate-mocks: (go_generate "./...") + +[private] +service_fuzz_task FUZZ TIME='10s': (go_fuzz FUZZ TIME "./eth") + +# Run fuzzing tests +fuzz: + printf "%s\n" \ + "FuzzExecutionPayloadUnmarshal" \ + "FuzzExecutionPayloadMarshalUnmarshalV1" \ + "FuzzExecutionPayloadMarshalUnmarshalV2" \ + "FuzzExecutionPayloadMarshalUnmarshalV3" \ + "FuzzOBP01" \ + "FuzzEncodeDecodeBlob" \ + "FuzzDetectNonBijectivity" \ + "FuzzEncodeScalar" \ + | parallel -j {{PARALLEL_JOBS}} {{just_executable()}} service_fuzz_task {} diff --git a/op-service/signer/blockpayload_args.go b/op-service/signer/blockpayload_args.go new file mode 100644 index 000000000000..8239bc0967d6 --- /dev/null +++ b/op-service/signer/blockpayload_args.go @@ -0,0 +1,62 @@ +package signer + +import ( + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// BlockPayloadArgs represents the arguments to sign a new block payload from the sequencer. +type BlockPayloadArgs struct { + Domain [32]byte `json:"domain"` + ChainID *big.Int `json:"chainId"` + PayloadHash []byte `json:"payloadHash"` + PayloadBytes []byte + SenderAddress *common.Address `json:"senderAddress"` +} + +// NewBlockPayloadArgs creates a BlockPayloadArgs struct +func NewBlockPayloadArgs(domain [32]byte, chainId *big.Int, payloadBytes []byte, senderAddress *common.Address) *BlockPayloadArgs { + payloadHash := crypto.Keccak256(payloadBytes) + args := &BlockPayloadArgs{ + Domain: domain, + ChainID: chainId, + PayloadHash: payloadHash, + PayloadBytes: payloadBytes, + SenderAddress: senderAddress, + } + return args +} + +func (args *BlockPayloadArgs) Check() error { + if args.ChainID == nil { + return errors.New("chainId not specified") + } + if len(args.PayloadHash) == 0 { + return errors.New("payloadHash not specified") + } + return nil +} + +// ToSigningHash creates a signingHash from the block payload args. 
+// Uses the hashing scheme from https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node-p2p.md#block-signatures +func (args *BlockPayloadArgs) ToSigningHash() (common.Hash, error) { + if err := args.Check(); err != nil { + return common.Hash{}, err + } + var msgInput [32 + 32 + 32]byte + // domain: first 32 bytes + copy(msgInput[:32], args.Domain[:]) + // chain_id: second 32 bytes + if args.ChainID.BitLen() > 256 { + return common.Hash{}, errors.New("chain_id is too large") + } + args.ChainID.FillBytes(msgInput[32:64]) + + // payload_hash: third 32 bytes, hash of encoded payload + copy(msgInput[64:], args.PayloadHash[:]) + + return crypto.Keccak256Hash(msgInput[:]), nil +} diff --git a/op-service/signer/cli.go b/op-service/signer/cli.go index 0c1df648286d..534fd09c97bc 100644 --- a/op-service/signer/cli.go +++ b/op-service/signer/cli.go @@ -17,18 +17,20 @@ const ( HeadersFlagName = "signer.header" ) -func CLIFlags(envPrefix string) []cli.Flag { +func CLIFlags(envPrefix string, category string) []cli.Flag { envPrefix += "_SIGNER" flags := []cli.Flag{ &cli.StringFlag{ - Name: EndpointFlagName, - Usage: "Signer endpoint the client will connect to", - EnvVars: opservice.PrefixEnvVar(envPrefix, "ENDPOINT"), + Name: EndpointFlagName, + Usage: "Signer endpoint the client will connect to", + EnvVars: opservice.PrefixEnvVar(envPrefix, "ENDPOINT"), + Category: category, }, &cli.StringFlag{ - Name: AddressFlagName, - Usage: "Address the signer is signing transactions for", - EnvVars: opservice.PrefixEnvVar(envPrefix, "ADDRESS"), + Name: AddressFlagName, + Usage: "Address the signer is signing requests for", + EnvVars: opservice.PrefixEnvVar(envPrefix, "ADDRESS"), + Category: category, }, &cli.StringSliceFlag{ Name: HeadersFlagName, @@ -36,7 +38,7 @@ func CLIFlags(envPrefix string) []cli.Flag { EnvVars: opservice.PrefixEnvVar(envPrefix, "HEADER"), }, } - flags = append(flags, optls.CLIFlagsWithFlagPrefix(envPrefix, "signer")...) + flags = append(flags, optls.CLIFlagsWithFlagPrefix(envPrefix, "signer", category)...) 
return flags } @@ -65,10 +67,7 @@ func (c CLIConfig) Check() error { } func (c CLIConfig) Enabled() bool { - if c.Endpoint != "" && c.Address != "" { - return true - } - return false + return c.Endpoint != "" && c.Address != "" } func ReadCLIConfig(ctx *cli.Context) CLIConfig { diff --git a/op-service/signer/cli_test.go b/op-service/signer/cli_test.go index 056ed4815601..3453258c6ca1 100644 --- a/op-service/signer/cli_test.go +++ b/op-service/signer/cli_test.go @@ -93,7 +93,7 @@ func TestInvalidConfig(t *testing.T) { func configForArgs(args ...string) CLIConfig { app := cli.NewApp() - app.Flags = CLIFlags("TEST_") + app.Flags = CLIFlags("TEST_", "") app.Name = "test" var config CLIConfig app.Action = func(ctx *cli.Context) error { diff --git a/op-service/signer/client.go b/op-service/signer/client.go index cdb9094dfe6b..acd753ecef7a 100644 --- a/op-service/signer/client.go +++ b/op-service/signer/client.go @@ -113,3 +113,19 @@ func (s *SignerClient) SignTransaction(ctx context.Context, chainId *big.Int, fr return &signed, nil } + +func (s *SignerClient) SignBlockPayload(ctx context.Context, args *BlockPayloadArgs) ([65]byte, error) { + var result hexutil.Bytes + + if err := s.client.CallContext(ctx, &result, "opsigner_signBlockPayload", args); err != nil { + return [65]byte{}, fmt.Errorf("opsigner_signBlockPayload failed: %w", err) + } + + if len(result) != 65 { + return [65]byte{}, fmt.Errorf("invalid signature: %s", result.String()) + } + + signature := [65]byte(result) + + return signature, nil +} diff --git a/op-service/sources/receipts_rpc.go b/op-service/sources/receipts_rpc.go index ecf2c8582210..962c7391a76b 100644 --- a/op-service/sources/receipts_rpc.go +++ b/op-service/sources/receipts_rpc.go @@ -329,8 +329,7 @@ func AvailableReceiptsFetchingMethods(kind RPCProviderKind) ReceiptsFetchingMeth case RPCKindQuickNode: return DebugGetRawReceipts | EthGetBlockReceipts | EthGetTransactionReceiptBatch case RPCKindInfura: - // Infura is big, but sadly does not support more optimized receipts fetching methods (yet?) - return EthGetTransactionReceiptBatch + return EthGetBlockReceipts | EthGetTransactionReceiptBatch case RPCKindParity: return ParityGetBlockReceipts | EthGetTransactionReceiptBatch case RPCKindNethermind: diff --git a/op-service/sources/receipts_test.go b/op-service/sources/receipts_test.go index 088a3d9b22cb..230fcf0e72b5 100644 --- a/op-service/sources/receipts_test.go +++ b/op-service/sources/receipts_test.go @@ -283,7 +283,7 @@ func TestEthClient_FetchReceipts(t *testing.T) { { name: "infura", providerKind: RPCKindInfura, - setup: fallbackCase(4, EthGetTransactionReceiptBatch), + setup: fallbackCase(4, EthGetBlockReceipts, EthGetTransactionReceiptBatch), }, { name: "nethermind", diff --git a/op-service/testlog/testlog.go b/op-service/testlog/testlog.go index fe7ede207cbc..965fbe9d47b2 100644 --- a/op-service/testlog/testlog.go +++ b/op-service/testlog/testlog.go @@ -159,11 +159,22 @@ func (l *logger) flush() { scanner := bufio.NewScanner(l.buf) for scanner.Scan() { - l.t.Logf("%*s%s", padding, "", scanner.Text()) + l.internalFlush("%*s%s", padding, "", scanner.Text()) } l.buf.Reset() } +func (l *logger) internalFlush(format string, args ...any) { + defer func() { + if r := recover(); r != nil { + log.Warn("testlog: panic during flush", "recover", r) + } + }() + + l.t.Helper() + l.t.Logf(format, args...) +} + // The Go testing lib uses the runtime package to get info about the calling site, and then decorates the line. 
// We can't disable this decoration, but we can adjust the contents to align by padding after the info. // To pad the right amount, we estimate how long the info is. diff --git a/op-service/testutils/anvil/anvil.go b/op-service/testutils/anvil/anvil.go index 7419f9da6252..50590a096a7f 100644 --- a/op-service/testutils/anvil/anvil.go +++ b/op-service/testutils/anvil/anvil.go @@ -38,6 +38,8 @@ func New(l1RPCURL string, logger log.Logger) (*Runner, error) { "--fork-url", l1RPCURL, "--port", "0", + "--base-fee", + "1000000000", ) stdout, err := proc.StdoutPipe() if err != nil { diff --git a/op-service/testutils/metrics.go b/op-service/testutils/metrics.go index 421d32f2109c..25edee14a068 100644 --- a/op-service/testutils/metrics.go +++ b/op-service/testutils/metrics.go @@ -17,7 +17,7 @@ type TestDerivationMetrics struct { FnRecordChannelTimedOut func() } -func (t *TestDerivationMetrics) CountSequencedTxs(count int) { +func (t *TestDerivationMetrics) CountSequencedTxsInBlock(txns int, deposits int) { } func (t *TestDerivationMetrics) RecordSequencerBuildingDiffTime(duration time.Duration) { diff --git a/op-service/tls/cli.go b/op-service/tls/cli.go index e2e086e922c3..f85de807a83e 100644 --- a/op-service/tls/cli.go +++ b/op-service/tls/cli.go @@ -21,7 +21,7 @@ const ( // CLIFlags returns flags with env var envPrefix // This should be used for server TLS configs, or when client and server tls configs are the same func CLIFlags(envPrefix string) []cli.Flag { - return CLIFlagsWithFlagPrefix(envPrefix, "") + return CLIFlagsWithFlagPrefix(envPrefix, "", "") } var ( @@ -33,7 +33,7 @@ var ( // CLIFlagsWithFlagPrefix returns flags with env var and cli flag prefixes // Should be used for client TLS configs when different from server on the same process -func CLIFlagsWithFlagPrefix(envPrefix string, flagPrefix string) []cli.Flag { +func CLIFlagsWithFlagPrefix(envPrefix string, flagPrefix string, category string) []cli.Flag { prefixFunc := func(flagName string) string { return strings.Trim(fmt.Sprintf("%s.%s", flagPrefix, flagName), ".") } @@ -48,22 +48,25 @@ func CLIFlagsWithFlagPrefix(envPrefix string, flagPrefix string) []cli.Flag { EnvVars: prefixEnvVars("TLS_ENABLED"), }, &cli.StringFlag{ - Name: prefixFunc(TLSCaCertFlagName), - Usage: "tls ca cert path", - Value: defaultTLSCaCert, - EnvVars: prefixEnvVars("TLS_CA"), + Name: prefixFunc(TLSCaCertFlagName), + Usage: "tls ca cert path", + Value: defaultTLSCaCert, + EnvVars: prefixEnvVars("TLS_CA"), + Category: category, }, &cli.StringFlag{ - Name: prefixFunc(TLSCertFlagName), - Usage: "tls cert path", - Value: defaultTLSCert, - EnvVars: prefixEnvVars("TLS_CERT"), + Name: prefixFunc(TLSCertFlagName), + Usage: "tls cert path", + Value: defaultTLSCert, + EnvVars: prefixEnvVars("TLS_CERT"), + Category: category, }, &cli.StringFlag{ - Name: prefixFunc(TLSKeyFlagName), - Usage: "tls key", - Value: defaultTLSKey, - EnvVars: prefixEnvVars("TLS_KEY"), + Name: prefixFunc(TLSKeyFlagName), + Usage: "tls key", + Value: defaultTLSKey, + EnvVars: prefixEnvVars("TLS_KEY"), + Category: category, }, } } diff --git a/op-service/tls/cli_test.go b/op-service/tls/cli_test.go index bd4ea4bf17c9..ce5e41d96c2d 100644 --- a/op-service/tls/cli_test.go +++ b/op-service/tls/cli_test.go @@ -53,7 +53,7 @@ func TestInvalidConfig(t *testing.T) { func configForArgs(args ...string) CLIConfig { app := cli.NewApp() - app.Flags = CLIFlagsWithFlagPrefix("TEST_", "test") + app.Flags = CLIFlagsWithFlagPrefix("TEST_", "test", "") app.Name = "test" var config CLIConfig app.Action = func(ctx 
*cli.Context) error { diff --git a/op-service/txmgr/cli.go b/op-service/txmgr/cli.go index 2390933d79ca..fc1aa2cceb0c 100644 --- a/op-service/txmgr/cli.go +++ b/op-service/txmgr/cli.go @@ -191,7 +191,7 @@ func CLIFlagsWithDefaults(envPrefix string, defaults DefaultFlagValues) []cli.Fl Value: defaults.ReceiptQueryInterval, EnvVars: prefixEnvVars("TXMGR_RECEIPT_QUERY_INTERVAL"), }, - }, opsigner.CLIFlags(envPrefix)...) + }, opsigner.CLIFlags(envPrefix, "")...) } type CLIConfig struct { diff --git a/op-service/txmgr/queue.go b/op-service/txmgr/queue.go index ee7a03ffa928..c48687d459ec 100644 --- a/op-service/txmgr/queue.go +++ b/op-service/txmgr/queue.go @@ -51,17 +51,36 @@ func (q *Queue[T]) Wait() error { return q.group.Wait() } +// handleResponse will wait for the response on the first passed channel, +// and then forward it on the second passed channel (attaching the id). It returns +// the response error or the context error if the context is canceled. +func handleResponse[T any](ctx context.Context, c chan SendResponse, d chan TxReceipt[T], id T) error { + select { + case response := <-c: + d <- TxReceipt[T]{ID: id, Receipt: response.Receipt, Err: response.Err} + return response.Err + case <-ctx.Done(): + d <- TxReceipt[T]{ID: id, Err: ctx.Err()} + return ctx.Err() + } +} + // Send will wait until the number of pending txs is below the max pending, -// and then send the next tx. +// and then send the next tx asynchronously. The nonce of the transaction is +// determined synchronously, so transactions should be confirmed on chain in +// the order they are sent using this method. // // The actual tx sending is non-blocking, with the receipt returned on the // provided receipt channel. If the channel is unbuffered, the goroutine is // blocked from completing until the channel is read from. func (q *Queue[T]) Send(id T, candidate TxCandidate, receiptCh chan TxReceipt[T]) { group, ctx := q.groupContext() - group.Go(func() error { - return q.sendTx(ctx, id, candidate, receiptCh) - }) + responseChan := make(chan SendResponse, 1) + handleResponse := func() error { + return handleResponse(ctx, responseChan, receiptCh, id) + } + group.Go(handleResponse) // This blocks until the number of handlers is below the limit + q.txMgr.SendAsync(ctx, candidate, responseChan) // Nonce management handled synchronously, i.e. before this returns } // TrySend sends the next tx, but only if the number of pending txs is below the @@ -75,19 +94,17 @@ func (q *Queue[T]) Send(id T, candidate TxCandidate, receiptCh chan TxReceipt[T] // blocked from completing until the channel is read from. func (q *Queue[T]) TrySend(id T, candidate TxCandidate, receiptCh chan TxReceipt[T]) bool { group, ctx := q.groupContext() - return group.TryGo(func() error { - return q.sendTx(ctx, id, candidate, receiptCh) - }) -} - -func (q *Queue[T]) sendTx(ctx context.Context, id T, candidate TxCandidate, receiptCh chan TxReceipt[T]) error { - receipt, err := q.txMgr.Send(ctx, candidate) - receiptCh <- TxReceipt[T]{ - ID: id, - Receipt: receipt, - Err: err, + responseChan := make(chan SendResponse, 1) + handleResponse := func() error { + return handleResponse(ctx, responseChan, receiptCh, id) + } + ok := group.TryGo(handleResponse) + if !ok { + return false + } else { + q.txMgr.SendAsync(ctx, candidate, responseChan) + return true } - return err } // groupContext returns a Group and a Context to use when sending a tx. 
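A quick illustration of the reworked `Queue.Send` contract above: the nonce is assigned synchronously (via `SendAsync`, before `Send` returns), while the receipt is delivered asynchronously on the caller's channel by `handleResponse`. The sketch below is illustrative only and is not part of this patch; it assumes an already-constructed `*txmgr.Queue[int]`, a destination address, and a slice of payloads.

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"

	"github.com/ethereum-optimism/optimism/op-service/txmgr"
)

// sendBatch is an illustrative sketch (not part of this patch) of the intended
// calling pattern for Queue.Send: nonces are reserved in call order before
// Send returns, while receipts are delivered later on receiptCh by the per-tx
// handler goroutines.
func sendBatch(queue *txmgr.Queue[int], to common.Address, payloads [][]byte) error {
	// Buffered so the handlers never block on receipt delivery.
	receiptCh := make(chan txmgr.TxReceipt[int], len(payloads))
	for i, data := range payloads {
		candidate := txmgr.TxCandidate{TxData: data, To: &to}
		// Blocks only while the handler limit is reached; the nonce is
		// already assigned once this returns, so inclusion order follows
		// call order.
		queue.Send(i, candidate, receiptCh)
	}
	// Wait for all handlers, then drain one receipt per payload.
	if err := queue.Wait(); err != nil {
		return err
	}
	for range payloads {
		if r := <-receiptCh; r.Err != nil {
			return r.Err
		}
	}
	return nil
}
```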
diff --git a/op-service/txmgr/queue_test.go b/op-service/txmgr/queue_test.go index 549142c8592a..27dce154bcc8 100644 --- a/op-service/txmgr/queue_test.go +++ b/op-service/txmgr/queue_test.go @@ -58,12 +58,15 @@ func (b *mockBackendWithNonce) NonceAt(ctx context.Context, account common.Addre func TestQueue_Send(t *testing.T) { testCases := []struct { - name string // name of the test - max uint64 // max concurrency of the queue - calls []queueCall // calls to the queue - txs []testTx // txs to generate from the factory (and potentially error in send) - nonces []uint64 // expected sent tx nonces after all calls are made - total time.Duration // approx. total time it should take to complete all queue calls + name string // name of the test + max uint64 // max concurrency of the queue + calls []queueCall // calls to the queue + txs []testTx // txs to generate from the factory (and potentially error in send) + nonces []uint64 // expected sent tx nonces after all calls are made + // With Holocene, it is important that transactions are included on chain in the same order as they are sent. + // The txmgr.Queue.Send() method should ensure nonces are determined _synchronously_ even if transactions + // are otherwise launched asynchronously. + confirmedIds []uint // expected tx Ids after all calls are made }{ { name: "success", @@ -76,8 +79,8 @@ func TestQueue_Send(t *testing.T) { {}, {}, }, - nonces: []uint64{0, 1}, - total: 1 * time.Second, + nonces: []uint64{0, 1}, + confirmedIds: []uint{0, 1}, }, { name: "no limit", @@ -90,8 +93,8 @@ func TestQueue_Send(t *testing.T) { {}, {}, }, - nonces: []uint64{0, 1}, - total: 1 * time.Second, + nonces: []uint64{0, 1}, + confirmedIds: []uint{0, 1}, }, { name: "single threaded", @@ -104,8 +107,8 @@ func TestQueue_Send(t *testing.T) { txs: []testTx{ {}, }, - nonces: []uint64{0}, - total: 1 * time.Second, + nonces: []uint64{0}, + confirmedIds: []uint{0}, }, { name: "single threaded blocking", @@ -121,8 +124,8 @@ func TestQueue_Send(t *testing.T) { {}, {}, }, - nonces: []uint64{0, 1, 2}, - total: 3 * time.Second, + nonces: []uint64{0, 1, 2}, + confirmedIds: []uint{0, 2, 3}, }, { name: "dual threaded blocking", @@ -142,8 +145,8 @@ func TestQueue_Send(t *testing.T) { {}, {}, }, - nonces: []uint64{0, 1, 2, 3, 4}, - total: 3 * time.Second, + nonces: []uint64{0, 1, 2, 3, 4}, + confirmedIds: []uint{0, 1, 3, 4, 5}, }, { name: "subsequent txs fail after tx failure", @@ -158,8 +161,8 @@ func TestQueue_Send(t *testing.T) { {sendErr: true}, {}, }, - nonces: []uint64{0, 1}, - total: 1 * time.Second, + nonces: []uint64{0, 1}, + confirmedIds: []uint{0}, }, } for _, test := range testCases { @@ -183,9 +186,11 @@ func TestQueue_Send(t *testing.T) { // track the nonces, and return any expected errors from tx sending var ( - nonces []uint64 - nonceMu sync.Mutex + nonces []uint64 + nonceForTxId map[uint]uint64 // maps from txid to nonce + nonceMu sync.Mutex ) + nonceForTxId = make(map[uint]uint64) sendTx := func(ctx context.Context, tx *types.Transaction) error { index := int(tx.Data()[0]) nonceMu.Lock() @@ -198,8 +203,12 @@ func TestQueue_Send(t *testing.T) { if testTx != nil && testTx.sendErr { return core.ErrNonceTooLow } + txHash := tx.Hash() + nonceMu.Lock() backend.mine(&txHash, tx.GasFeeCap(), nil) + nonceForTxId[uint(index)] = tx.Nonce() + nonceMu.Unlock() return nil } backend.setTxSender(sendTx) @@ -209,7 +218,6 @@ func TestQueue_Send(t *testing.T) { queue := NewQueue[int](ctx, mgr, test.max) // make all the queue calls given in the test case - start := time.Now() 
receiptChs := make([]chan TxReceipt[int], len(test.calls)) for i, c := range test.calls { msg := fmt.Sprintf("Call %d", i) @@ -217,19 +225,28 @@ func TestQueue_Send(t *testing.T) { TxData: []byte{byte(i)}, To: &common.Address{}, } + if i == 0 { + // Make the first tx much larger to expose + // any race conditions in the queue + candidate.TxData = make([]byte, 100_000) + } receiptChs[i] = make(chan TxReceipt[int], 1) queued := c.call(i, candidate, receiptChs[i], queue) require.Equal(t, c.queued, queued, msg) } // wait for the queue to drain (all txs complete or failed) _ = queue.Wait() - duration := time.Since(start) - // expect the execution time within a certain window - now := time.Now() - require.WithinDuration(t, now.Add(test.total), now.Add(duration), 500*time.Millisecond, "unexpected queue transaction timing") - // check that the nonces match + + // NOTE the backend in this test does not order transactions based on the nonce + // So what we want to check is that the txs match expectations when they are ordered + // in the same way as the nonces. slices.Sort(nonces) require.Equal(t, test.nonces, nonces, "expected nonces do not match") + for i, id := range test.confirmedIds { + require.Equal(t, nonces[i], nonceForTxId[id], + "nonce for tx id %d was %d instead of %d", id, nonceForTxId[id], nonces[i]) + } + // check receipts for i, c := range test.calls { if !c.queued { diff --git a/op-service/txmgr/txmgr.go b/op-service/txmgr/txmgr.go index e633a74c2881..e6593f993759 100644 --- a/op-service/txmgr/txmgr.go +++ b/op-service/txmgr/txmgr.go @@ -363,26 +363,32 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (* } } + // Calculate the intrinsic gas for the transaction + callMsg := ethereum.CallMsg{ + From: m.cfg.From, + To: candidate.To, + GasTipCap: gasTipCap, + GasFeeCap: gasFeeCap, + Data: candidate.TxData, + Value: candidate.Value, + } + if len(blobHashes) > 0 { + callMsg.BlobGasFeeCap = blobBaseFee + callMsg.BlobHashes = blobHashes + } // If the gas limit is set, we can use that as the gas if gasLimit == 0 { - // Calculate the intrinsic gas for the transaction - callMsg := ethereum.CallMsg{ - From: m.cfg.From, - To: candidate.To, - GasTipCap: gasTipCap, - GasFeeCap: gasFeeCap, - Data: candidate.TxData, - Value: candidate.Value, - } - if len(blobHashes) > 0 { - callMsg.BlobGasFeeCap = blobBaseFee - callMsg.BlobHashes = blobHashes - } gas, err := m.backend.EstimateGas(ctx, callMsg) if err != nil { return nil, fmt.Errorf("failed to estimate gas: %w", errutil.TryAddRevertReason(err)) } gasLimit = gas + } else { + callMsg.Gas = gasLimit + _, err := m.backend.CallContract(ctx, callMsg, nil) + if err != nil { + return nil, fmt.Errorf("failed to call: %w", errutil.TryAddRevertReason(err)) + } } var txMessage types.TxData @@ -600,7 +606,7 @@ func (m *SimpleTxManager) sendTx(ctx context.Context, tx *types.Transaction) (*t func (m *SimpleTxManager) publishTx(ctx context.Context, tx *types.Transaction, sendState *SendState) (*types.Transaction, bool) { l := m.txLogger(tx, true) - l.Info("Publishing transaction", "tx", tx.Hash()) + l.Info("Publishing transaction") for { if sendState.bumpFees { @@ -797,14 +803,30 @@ func (m *SimpleTxManager) increaseGasPrice(ctx context.Context, tx *types.Transa } // Re-estimate gaslimit in case things have changed or a previous gaslimit estimate was wrong - gas, err := m.backend.EstimateGas(ctx, ethereum.CallMsg{ + callMsg := ethereum.CallMsg{ From: m.cfg.From, To: tx.To(), GasTipCap: bumpedTip, GasFeeCap: bumpedFee, Data: tx.Data(), 
Value: tx.Value(), - }) + } + var bumpedBlobFee *big.Int + if tx.Type() == types.BlobTxType { + // Blob transactions have an additional blob gas price we must specify, so we must make sure it is + // getting bumped appropriately. + bumpedBlobFee = calcThresholdValue(tx.BlobGasFeeCap(), true) + if bumpedBlobFee.Cmp(blobBaseFee) < 0 { + bumpedBlobFee = blobBaseFee + } + if err := m.checkBlobFeeLimits(blobBaseFee, bumpedBlobFee); err != nil { + return nil, err + } + + callMsg.BlobGasFeeCap = bumpedBlobFee + callMsg.BlobHashes = tx.BlobHashes() + } + gas, err := m.backend.EstimateGas(ctx, callMsg) if err != nil { // If this is a transaction resubmission, we sometimes see this outcome because the // original tx can get included in a block just before the above call. In this case the @@ -830,15 +852,6 @@ func (m *SimpleTxManager) increaseGasPrice(ctx context.Context, tx *types.Transa var newTx *types.Transaction if tx.Type() == types.BlobTxType { - // Blob transactions have an additional blob gas price we must specify, so we must make sure it is - // getting bumped appropriately. - bumpedBlobFee := calcThresholdValue(tx.BlobGasFeeCap(), true) - if bumpedBlobFee.Cmp(blobBaseFee) < 0 { - bumpedBlobFee = blobBaseFee - } - if err := m.checkBlobFeeLimits(blobBaseFee, bumpedBlobFee); err != nil { - return nil, err - } message := &types.BlobTx{ Nonce: tx.Nonce(), To: *tx.To(), diff --git a/op-supervisor/Makefile b/op-supervisor/Makefile index de4f2d9d2612..144f7abd606e 100644 --- a/op-supervisor/Makefile +++ b/op-supervisor/Makefile @@ -1,23 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -VERSION ?= v0.0.0 +DEPRECATED_TARGETS := op-supervisor clean test -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X main.Version=$(VERSION) -LDFLAGSSTRING +=-X main.Meta=$(VERSION_META) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -op-supervisor: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-supervisor ./cmd - -clean: - rm bin/op-supervisor - -test: - go test -v ./... 
- -.PHONY: \ - op-supervisor \ - clean \ - test +include ../just/deprecated.mk diff --git a/op-supervisor/cmd/main.go b/op-supervisor/cmd/main.go index 8e306bf9009d..5aec4e927d03 100644 --- a/op-supervisor/cmd/main.go +++ b/op-supervisor/cmd/main.go @@ -20,7 +20,7 @@ import ( ) var ( - Version = "v0.0.1" + Version = "v0.0.0" GitCommit = "" GitDate = "" ) diff --git a/op-supervisor/justfile b/op-supervisor/justfile new file mode 100644 index 000000000000..7063a3ffb05e --- /dev/null +++ b/op-supervisor/justfile @@ -0,0 +1,21 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X main.Version=" + VERSION + " " + \ + "-X main.Meta=" + VERSION_META + " " + \ + "") + "'" + +BINARY := "./bin/op-supervisor" + +# Build op-supervisor binary +op-supervisor: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) + +# Clean build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") diff --git a/op-supervisor/supervisor/backend/db/fromda/update.go b/op-supervisor/supervisor/backend/db/fromda/update.go index 146e558cf266..957df9e2dfa7 100644 --- a/op-supervisor/supervisor/backend/db/fromda/update.go +++ b/op-supervisor/supervisor/backend/db/fromda/update.go @@ -67,8 +67,10 @@ func (db *DB) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error { derived, derived.ParentHash, lastDerived, types.ErrConflict) } } else if lastDerived.Number+1 < derived.Number { - return fmt.Errorf("derived block %s (parent: %s) is too new, expected to build on top of %s: %w", - derived, derived.ParentHash, lastDerived, types.ErrOutOfOrder) + return fmt.Errorf("cannot add block (%s derived from %s), last block (%s derived from %s) is too far behind: (%w)", + derived, derivedFrom, + lastDerived, lastDerivedFrom, + types.ErrOutOfOrder) } else { return fmt.Errorf("derived block %s is older than current derived block %s: %w", derived, lastDerived, types.ErrOutOfOrder) @@ -89,8 +91,10 @@ func (db *DB) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error { } } else if lastDerivedFrom.Number+1 < derivedFrom.Number { // adding block that is derived from something too far into the future - return fmt.Errorf("cannot add block %s as derived from %s, still deriving from %s: %w", - derived, derivedFrom, lastDerivedFrom, types.ErrOutOfOrder) + return fmt.Errorf("cannot add block (%s derived from %s), last block (%s derived from %s) is too far behind: (%w)", + derived, derivedFrom, + lastDerived, lastDerivedFrom, + types.ErrOutOfOrder) } else { // adding block that is derived from something too old return fmt.Errorf("cannot add block %s as derived from %s, deriving already at %s: %w", diff --git a/op-ufm/README.md b/op-ufm/README.md deleted file mode 100644 index 08d1b4a6eca7..000000000000 --- a/op-ufm/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# ⚠️ Important -This project has been moved to [ethereum-optimism/infra](https://github.com/ethereum-optimism/infra) - -# OP User Facing Monitoring - -This project simulates a synthetic user interacting with a OP Stack chain. - -It is intended to be used as a tool for monitoring -the health of the network by measuring end-to-end transaction latency. - - -## Metrics - -* Round-trip duration time to get transaction receipt (from creation timestamp) - -* First-seen duration time (from creation timestamp) - - -## Usage - -Run `make ufm` to build the binary. No additional dependencies are necessary. 
-
-Copy `example.config.toml` to `config.toml` and edit the file to configure the service.
-
-Start the service with `ufm config.toml`.
-
diff --git a/op-wheel/Makefile b/op-wheel/Makefile
index 06d114012022..78363075e995 100644
--- a/op-wheel/Makefile
+++ b/op-wheel/Makefile
@@ -1,14 +1,3 @@
-GITCOMMIT ?= $(shell git rev-parse HEAD)
-GITDATE ?= $(shell git show -s --format='%ct')
-VERSION ?= v0.0.0
+DEPRECATED_TARGETS := op-wheel
-LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
-LDFLAGSSTRING +=-X main.GitDate=$(GITDATE)
-LDFLAGSSTRING +=-X main.Version=$(VERSION)
-LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
-
-op-wheel:
-	env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/op-wheel ./cmd
-
-.PHONY: \
-	op-wheel
+include ../just/deprecated.mk
diff --git a/op-wheel/README.md b/op-wheel/README.md
new file mode 100644
index 000000000000..ce5dfa298f14
--- /dev/null
+++ b/op-wheel/README.md
@@ -0,0 +1,92 @@
+# `op-wheel`
+
+Issues: [monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-wheel)
+
+Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-wheel)
+
+`op-wheel` is a CLI tool to direct the engine one way or the other with DB cheats and Engine API routines.
+
+It was named the "wheel" for two reasons:
+- Figuratively, it allows you to steer the stack: an interface for a *driver* (like the op-node sub-component) to control the execution *engine* (e.g. op-geth).
+- Idiomatically, like the Unix wheel-bit and its slang origins: empowering a user to execute restricted commands, or more generally someone with great power or influence.
+
+## Quickstart
+
+### Cheat utils
+
+Cheating commands to modify a Geth database without a corresponding in-protocol change.
+
+The `cheat` sub-command has sub-commands for interacting with the DB, making patches, and dumping debug data.
+
+Note that the validity of state-changes, as applied through patches,
+does not get checked until the block is re-processed.
+This can be used to trick the node into things like hypothetical
+test-states or shadow-forks without diverging the block-hashes.
+
+To run:
+```bash
+go run ./op-wheel/cmd cheat --help
+```
+
+### Engine utils
+
+Engine API commands to build/reorg/rewind/finalize/copy blocks.
+
+Each sub-command dials the engine API endpoint (with provided JWT secret) and then runs the action.
+
+To run:
+```bash
+go run ./op-wheel/cmd engine --help
+```
+
+## Usage
+
+### Build from source
+
+```bash
+# from op-wheel dir:
+make op-wheel
+./bin/op-wheel --help
+```
+
+### Run from source
+
+```bash
+# from op-wheel dir:
+go run ./cmd --help
+```
+
+### Build docker image
+
+See `op-wheel` docker-bake target.
+
+## Product
+
+`op-wheel` is a tool for expert users to perform advanced data recoveries, tests and overrides.
+This tool optimizes for reusability of these expert actions, to make them less error-prone.
+
+This is not part of a standard release process, as this tool is not used commonly,
+and the end-user is expected to be familiar with building from source.
+
+Actions that are common enough to be used at least once by the average end-user should
+be part of the op-node or other standard op-stack release.
+
+## Design principles
+
+Design for an expert user: this tool aims to provide full control over critical op-stack data
+such as the engine-API and database itself, without hiding important information.
+
+However, even as an expert user, wrong assumptions can be made.
+Defaults should aim to reduce errors, and leave the stack in a safe state to recover from. + +## Failure modes + +This tool is not used in the happy-path, but can be critical during expert-recovery of advanced failure modes. +E.g. database recovery after Geth database corruption, or manual forkchoice overrides. +Most importantly, each CLI command used for recovery aims to be verbose, +and avoids leaving an inconsistent state after failed or interrupted recovery. + +## Testing + +This is a test-utility more than a production tool, and thus does currently not have test-coverage of its own. +However, when it is used as tool during (dev/test) chain or node issues, usage does inform fixes/improvements. diff --git a/op-wheel/cheat/cheat.go b/op-wheel/cheat/cheat.go index 4b2d428e1029..1b5089c28364 100644 --- a/op-wheel/cheat/cheat.go +++ b/op-wheel/cheat/cheat.go @@ -138,7 +138,7 @@ func (ch *Cheater) RunAndClose(fn HeadFn) error { // Geth stores the TD for each block separately from the block itself. We must update this // manually, otherwise Geth thinks we haven't reached TTD yet and tries to build a block - // using Clique consensus, which causes a panic. + // using pre-merge consensus, which causes a panic. rawdb.WriteTd(batch, blockHash, preID.Number, ch.Blockchain.GetTd(preID.Hash, preID.Number)) // Need to copy over receipts since they are keyed by block hash. diff --git a/op-wheel/justfile b/op-wheel/justfile new file mode 100644 index 000000000000..40696592ed3b --- /dev/null +++ b/op-wheel/justfile @@ -0,0 +1,13 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X main.Version=" + VERSION + " " + \ + "") + "'" + +BINARY := "./bin/op-wheel" + +# Build op-wheel binary +op-wheel: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) \ No newline at end of file diff --git a/ops-bedrock/l1-geth.Dockerfile b/ops-bedrock/l1-geth.Dockerfile index c84a5debf72e..50262ec94ea6 100644 --- a/ops-bedrock/l1-geth.Dockerfile +++ b/ops-bedrock/l1-geth.Dockerfile @@ -1,4 +1,4 @@ -FROM ethereum/client-go:v1.14.11 +FROM ethereum/client-go:v1.14.12 RUN apk add --no-cache jq bash diff --git a/ops-bedrock/l2-op-geth-interop.Dockerfile b/ops-bedrock/l2-op-geth-interop.Dockerfile index 41a667c0fc29..5021fede46db 100644 --- a/ops-bedrock/l2-op-geth-interop.Dockerfile +++ b/ops-bedrock/l2-op-geth-interop.Dockerfile @@ -1,4 +1,4 @@ -FROM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101411.1-rc.3 +FROM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101411.2-rc.1 # Note: depend on dev-release for sequencer interop message checks RUN apk add --no-cache jq diff --git a/ops/docker/Dockerfile.packages b/ops/docker/Dockerfile.packages index fd2939cb45ff..d4c8041321b1 100644 --- a/ops/docker/Dockerfile.packages +++ b/ops/docker/Dockerfile.packages @@ -24,7 +24,7 @@ COPY --from=foundry /usr/local/bin/cast /usr/local/bin/cast WORKDIR /opt/optimism -COPY ./versions.json ./versions.json +COPY ./mise.toml ./mise.toml COPY ./packages ./packages COPY .git/ ./.git COPY .gitmodules ./.gitmodules @@ -33,41 +33,3 @@ RUN git submodule update --init --recursive \ && cd packages/contracts-bedrock \ && just forge-build \ && echo $(git rev-parse HEAD) > .gitcommit - -FROM --platform=linux/amd64 debian:bookworm-20240812-slim as contracts-bedrock - -RUN apt-get update && apt-get install -y \ - curl \ - jq \ - ca-certificates \ - git \ - make \ - bash \ - --no-install-recommends - -COPY 
/ops/docker/oplabs.crt /usr/local/share/ca-certificates/oplabs.crt - -RUN chmod 644 /usr/local/share/ca-certificates/oplabs.crt \ - && update-ca-certificates - -COPY --from=foundry /usr/local/bin/just /usr/local/bin/just -COPY --from=foundry /usr/local/bin/forge /usr/local/bin/forge -COPY --from=foundry /usr/local/bin/cast /usr/local/bin/cast -COPY --from=foundry /usr/local/bin/svm /usr/local/bin/svm - -RUN svm install 0.8.25 && \ - svm install 0.8.15 && \ - svm install 0.8.19 && \ - svm install 0.8.26 - -# Not to be confused with OP, this is a OnePassword CLI tool. -COPY --from=1password/op:2 /usr/local/bin/op /usr/local/bin/op - -RUN mkdir -p /opt/optimism/packages/contracts-bedrock - -COPY --from=base /opt/optimism/packages/contracts-bedrock /opt/optimism/packages/contracts-bedrock -COPY --from=base /opt/optimism/versions.json /opt/optimism/versions.json - -WORKDIR /opt/optimism/packages/contracts-bedrock - -CMD ["echo", "Override this command to use this image."] diff --git a/ops/docker/ci-builder/Dockerfile b/ops/docker/ci-builder/Dockerfile index a1eb71b4795e..cda9a0e71a9a 100644 --- a/ops/docker/ci-builder/Dockerfile +++ b/ops/docker/ci-builder/Dockerfile @@ -1,120 +1,64 @@ -# Copy docker buildx in order to generate the absolute prestate -# in the CI pipeline for reproducible fault proof builds -FROM --platform=linux/amd64 docker as buildx +############################################################################### +# BUILDX # +############################################################################### + +FROM --platform=linux/amd64 docker AS buildx COPY --from=docker/buildx-bin /buildx /usr/libexec/docker/cli-plugins/docker-buildx RUN docker buildx version -FROM --platform=linux/amd64 debian:bullseye-slim as rust-build - -SHELL ["/bin/bash", "-c"] - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && \ - apt-get install -y build-essential git clang lld curl jq - -RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs > rustup.sh && \ - chmod +x ./rustup.sh && \ - sh rustup.sh -y - -# Install nightly toolchain -RUN source $HOME/.profile && rustup update nightly - -RUN source $HOME/.profile && cargo install just -RUN source $HOME/.profile && cargo install svm-rs -# Only diff from upstream docker image is this clone instead -# of COPY. We select a specific commit to use. 
-COPY ./versions.json ./versions.json -COPY ./ops/scripts/install-foundry.sh ./install-foundry.sh +############################################################################### +# CI BUILDER (BASE) # +############################################################################### -RUN curl -L https://foundry.paradigm.xyz | bash -RUN source $HOME/.profile && ./install-foundry.sh +FROM --platform=linux/amd64 debian:bullseye-slim AS base-builder -RUN strip /root/.foundry/bin/forge && \ - strip /root/.foundry/bin/cast && \ - strip /root/.foundry/bin/anvil && \ - strip /root/.cargo/bin/svm && \ - strip /root/.cargo/bin/just - -FROM --platform=linux/amd64 debian:bullseye-slim as go-build - -RUN apt-get update && apt-get install -y curl ca-certificates jq binutils - -ENV GO_VERSION=1.22.7 - -# Fetch go manually, rather than using a Go base image, so we can copy the installation into the final stage -RUN curl -sL https://go.dev/dl/go$GO_VERSION.linux-amd64.tar.gz -o go$GO_VERSION.linux-amd64.tar.gz && \ - tar -C /usr/local/ -xzvf go$GO_VERSION.linux-amd64.tar.gz - -ENV GOPATH=/go -ENV PATH=/usr/local/go/bin:$GOPATH/bin:$PATH - -# Install the specific version of abigen and geth from version control -COPY ./versions.json ./versions.json -RUN go install github.com/ethereum/go-ethereum/cmd/abigen@$(jq -r .abigen < versions.json) -RUN go install github.com/ethereum/go-ethereum/cmd/geth@$(jq -r .geth < versions.json) - -RUN go install gotest.tools/gotestsum@v1.12.0 -RUN go install github.com/vektra/mockery/v2@v2.46.0 -RUN go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.61.0 -RUN go install github.com/mikefarah/yq/v4@v4.44.3 +# Use bash as the shell +SHELL ["/bin/bash", "-c"] +ENV SHELL=/bin/bash +ENV BASH=/bin/bash -# Strip binaries to reduce size -RUN strip /go/bin/gotestsum && \ - strip /go/bin/mockery && \ - strip /go/bin/golangci-lint && \ - strip /go/bin/abigen && \ - strip /go/bin/geth && \ - strip /go/bin/yq +# Copy mise configuration +COPY ./mise.toml ./mise.toml -FROM --platform=linux/amd64 debian:bullseye-slim as base-builder +# Set up mise environment +ENV PATH="/root/.local/share/mise/shims:$PATH" +ENV PATH="/root/.local/bin:${PATH}" -ENV GOPATH=/go -ENV PATH=/usr/local/go/bin:$GOPATH/bin:$PATH -ENV PATH=/root/.cargo/bin:$PATH +# Install dependencies +# We do this in one mega RUN command to avoid blowing up the size of the image ENV DEBIAN_FRONTEND=noninteractive - -# copy the go installation, but not the module cache (cache will get stale, and would add a lot of weight) -COPY --from=go-build /usr/local/go /usr/local/go - -# copy tools -COPY --from=go-build /go/bin/gotestsum /go/bin/gotestsum -COPY --from=go-build /go/bin/mockery /go/bin/mockery -COPY --from=go-build /go/bin/golangci-lint /go/bin/golangci-lint -COPY --from=go-build /go/bin/abigen /usr/local/bin/abigen -COPY --from=go-build /go/bin/geth /usr/local/bin/geth -COPY --from=go-build /go/bin/yq /go/bin/yq - -# copy tools -COPY --from=rust-build /root/.foundry/bin/forge /usr/local/bin/forge -COPY --from=rust-build /root/.foundry/bin/cast /usr/local/bin/cast -COPY --from=rust-build /root/.foundry/bin/anvil /usr/local/bin/anvil -COPY --from=rust-build /root/.cargo/bin/svm /usr/local/bin/svm -COPY --from=rust-build /root/.cargo/bin/just /usr/local/bin/just - -COPY ./versions.json ./versions.json - RUN /bin/sh -c set -eux; \ apt-get update; \ - apt-get install -y --no-install-recommends bash curl openssh-client git build-essential ca-certificates jq gnupg binutils-mips-linux-gnu python3 python3-pip 
python3-setuptools; \ + apt-get install -y --no-install-recommends bash curl openssh-client git build-essential ca-certificates gnupg binutils-mips-linux-gnu clang libffi-dev; \ mkdir -p /etc/apt/keyrings; \ curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg; \ chmod a+r /etc/apt/keyrings/docker.gpg; \ echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null; \ apt-get update; \ apt-get install -y docker-ce-cli; \ - ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt; \ - pip install capstone pyelftools; \ - pip install semgrep==$(jq -r .semgrep < versions.json); \ + curl https://mise.run | sh; \ + mise trust ./mise.toml; \ + mise install; \ curl -fLSs https://raw.githubusercontent.com/CircleCI-Public/circleci-cli/master/install.sh | bash; \ + pip install capstone pyelftools; \ + go env -w GOMODCACHE=/go/pkg/mod; \ + go env -w GOCACHE=/root/.cache/go-build; \ + ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt; \ apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \ + go clean -cache -modcache -testcache; \ rm -rf /var/lib/apt/lists/*; \ - rm -rf /root/.cache/pip; + rm -rf /root/.cache/pip; \ + rm -rf /root/.cache/uv; \ + rm -rf /root/.rustup; -RUN svm install 0.8.25 && \ - svm install 0.8.15 && \ - svm install 0.8.19 +# Install Solidity versions +RUN echo "installing Solidity versions" && \ + svm install 0.8.25 && \ + svm install 0.8.19 && \ + svm install 0.8.15 +# Install Codecov uploader RUN echo "downloading and verifying Codecov uploader" && \ curl https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --no-default-keyring --keyring trustedkeys.gpg --import && \ curl -Os "https://uploader.codecov.io/latest/linux/codecov" && \ @@ -129,24 +73,18 @@ RUN echo "downloading and verifying Codecov uploader" && \ # Copy docker buildx COPY --from=buildx /usr/libexec/docker/cli-plugins/docker-buildx /usr/libexec/docker/cli-plugins/docker-buildx -# within docker use bash -SHELL ["/bin/bash", "-c"] +# Set up entrypoint +ENTRYPOINT ["/bin/bash", "-c"] -# set env to use bash -ENV SHELL=/bin/bash -ENV BASH=/bin/bash -ENTRYPOINT ["/bin/bash", "-c"] +############################################################################### +# CI BUILDER (RUST) # +############################################################################### FROM base-builder as rust-builder # Install clang & lld RUN apt-get update && apt-get install -y clang lld -# Copy the rust installation, alongside the installed toolchains -COPY --from=rust-build /root/.cargo /root/.cargo -COPY --from=rust-build /root/.rustup /root/.rustup - -# copy the rust installation, alongside the installed toolchains -COPY --from=rust-build /root/.cargo/bin /root/.cargo/bin -COPY --from=rust-build /root/.rustup /root/.rustup +# Install nightly toolchain +RUN rustup update nightly diff --git a/ops/docker/ci-builder/Dockerfile.dockerignore b/ops/docker/ci-builder/Dockerfile.dockerignore index 229d6f1165c2..4f44e253194c 100644 --- a/ops/docker/ci-builder/Dockerfile.dockerignore +++ b/ops/docker/ci-builder/Dockerfile.dockerignore @@ -1,4 +1,3 @@ * !/.nvmrc -!/versions.json -!/ops/scripts/install-foundry.sh +!/mise.toml diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index e7e183804ab9..9384aef5e247 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ 
b/ops/docker/op-stack-go/Dockerfile @@ -9,10 +9,26 @@ # It will default to the target platform. ARG TARGET_BASE_IMAGE=alpine:3.20 +# The ubuntu target base image is used for the op-challenger build with kona and asterisc. +ARG UBUNTU_TARGET_BASE_IMAGE=ubuntu:22.04 + +# The version of kona to use. +# The only build that uses this is `op-challenger-target`. +ARG KONA_VERSION=none + # We may be cross-building for another platform. Specify which platform we need as builder. FROM --platform=$BUILDPLATFORM golang:1.22.7-alpine3.20 AS builder -RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash +RUN apk add --no-cache curl tar gzip make gcc musl-dev linux-headers git jq bash + +# install yq +RUN wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/local/bin/yq && \ + chmod +x /usr/local/bin/yq + +# install versioned toolchain +COPY ./mise.toml . +RUN curl -L https://github.com/casey/just/releases/download/$(yq '.tools.just' mise.toml)/just-$(yq '.tools.just' mise.toml)-x86_64-unknown-linux-musl.tar.gz | \ + tar xz -C /usr/local/bin just # We copy the go.mod/sum first, so the `go mod download` does not have to re-run if dependencies do not change. COPY ./go.mod /app/go.mod @@ -127,14 +143,23 @@ FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE AS op-node-target COPY --from=op-node-builder /app/op-node/bin/op-node /usr/local/bin/ CMD ["op-node"] -FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE AS op-challenger-target +# Make the kona docker image published by upstream available as a source to copy kona and asterisc from. +FROM --platform=$BUILDPLATFORM ghcr.io/anton-rs/kona/kona-fpp-asterisc:$KONA_VERSION AS kona + +# Also produce an op-challenger loaded with kona and asterisc using ubuntu +FROM --platform=$TARGETPLATFORM $UBUNTU_TARGET_BASE_IMAGE AS op-challenger-target +RUN apt-get update && apt-get install -y --no-install-recommends musl openssl ca-certificates COPY --from=op-challenger-builder /app/op-challenger/bin/op-challenger /usr/local/bin/ -# Make the bundled op-program the default cannon server +# Copy in op-program and cannon COPY --from=op-program-builder /app/op-program/bin/op-program /usr/local/bin/ ENV OP_CHALLENGER_CANNON_SERVER /usr/local/bin/op-program -# Make the bundled cannon the default cannon executable COPY --from=cannon-builder /app/cannon/bin/cannon /usr/local/bin/ ENV OP_CHALLENGER_CANNON_BIN /usr/local/bin/cannon +# Copy in kona and asterisc +COPY --from=kona /kona-host /usr/local/bin/ +ENV OP_CHALLENGER_ASTERISC_KONA_SERVER=/usr/local/bin/kona-host +COPY --from=kona /asterisc /usr/local/bin/ +ENV OP_CHALLENGER_ASTERISC_BIN=/usr/local/bin/asterisc CMD ["op-challenger"] FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE AS op-dispute-mon-target diff --git a/ops/docker/op-stack-go/Dockerfile.dockerignore b/ops/docker/op-stack-go/Dockerfile.dockerignore index bd700e291e49..847bbbb0eb75 100644 --- a/ops/docker/op-stack-go/Dockerfile.dockerignore +++ b/ops/docker/op-stack-go/Dockerfile.dockerignore @@ -20,3 +20,5 @@ !/op-alt-da !/go.mod !/go.sum +!/just +!/mise.toml diff --git a/ops/scripts/check-foundry.sh b/ops/scripts/check-foundry.sh deleted file mode 100755 index 530046bd85ea..000000000000 --- a/ops/scripts/check-foundry.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash - -VERSIONS_FILE="versions.json" - -if ! command -v jq &> /dev/null -then - # shellcheck disable=SC2006 - echo "Please install jq" >&2 - exit 1 -fi - -if ! 
command -v forge &> /dev/null -then - # shellcheck disable=SC2006 - echo "Is Foundry not installed? Consider installing via just install-foundry" >&2 - exit 1 -fi - -# Check VERSIONS_FILE has expected foundry property -if ! jq -e '.foundry' "$VERSIONS_FILE" &> /dev/null; then - echo "'foundry' is missing from $VERSIONS_FILE" >&2 - exit 1 -fi - -# Extract the expected foundry version from versions.json -EXPECTED_VERSION=$(jq -r '.foundry' "$VERSIONS_FILE" | cut -c 1-7) -if [ -z "$EXPECTED_VERSION" ]; then - echo "Unable to extract Foundry version from $VERSIONS_FILE" >&2 - exit 1 -fi - -# Extract the installed forge version -INSTALLED_VERSION=$(forge --version | grep -o '[a-f0-9]\{7\}' | head -n 1) - -# Compare the installed timestamp with the expected timestamp -if [ "$INSTALLED_VERSION" = "$EXPECTED_VERSION" ]; then - echo "Foundry version matches the expected version." -else - echo "Mismatch between installed Foundry version ($INSTALLED_VERSION) and expected version ($EXPECTED_VERSION)." - echo "Your version of Foundry may either not be up to date, or it could be a later version." - echo "Running 'just update-foundry' from the repository root will install the expected version." -fi diff --git a/ops/scripts/ci-docker-tag-op-stack-release.sh b/ops/scripts/ci-docker-tag-op-stack-release.sh index 45dd92094994..1de86b749d32 100755 --- a/ops/scripts/ci-docker-tag-op-stack-release.sh +++ b/ops/scripts/ci-docker-tag-op-stack-release.sh @@ -6,7 +6,7 @@ DOCKER_REPO=$1 GIT_TAG=$2 GIT_SHA=$3 -IMAGE_NAME=$(echo "$GIT_TAG" | grep -Eow '^(ci-builder(-rust)?|da-server|proofs-tools|cannon|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)' || true) +IMAGE_NAME=$(echo "$GIT_TAG" | grep -Eow '^(ci-builder(-rust)?|da-server|proofs-tools|holocene-deployer|cannon|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)' || true) if [ -z "$IMAGE_NAME" ]; then echo "image name could not be parsed from git tag '$GIT_TAG'" exit 1 diff --git a/ops/scripts/geth-version-checker.sh b/ops/scripts/geth-version-checker.sh deleted file mode 100755 index 98d94e664136..000000000000 --- a/ops/scripts/geth-version-checker.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -SCRIPTS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -MONOREPO_DIR=$(cd "$SCRIPTS_DIR/../../" && pwd) - -# Extract the version from the geth command output -GETH_VERSION="v$(geth version | grep '^Version:' | awk '{print $2}')" - -# Read the version from the versions file -EXPECTED_GETH_VERSION=$(jq -r .geth < "$MONOREPO_DIR"/versions.json) - -# Check if EXPECTED_GETH_VERSION contains a '-'. If not, append '-stable'. -if [[ $EXPECTED_GETH_VERSION != *-* ]]; then - EXPECTED_GETH_VERSION="${EXPECTED_GETH_VERSION}-stable" -fi - -# Compare the versions -if [[ "$GETH_VERSION" == "$EXPECTED_GETH_VERSION" ]]; then - echo "Geth version $GETH_VERSION is correct!" - exit 0 -else - echo "Geth version does not match!" - echo "Local geth version: $GETH_VERSION" - echo "Expected geth version: $EXPECTED_GETH_VERSION" - exit 1 -fi - - diff --git a/ops/scripts/install-foundry.sh b/ops/scripts/install-foundry.sh deleted file mode 100755 index 654ed6b87f76..000000000000 --- a/ops/scripts/install-foundry.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash - -set -e - -SCRIPTS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -MONOREPO_DIR=$(cd "$SCRIPTS_DIR/../../" && pwd) - -# Grab the foundry commit hash. 
-SHA=$(jq -r .foundry < "$MONOREPO_DIR"/versions.json) - -# Check if there is a nightly tag corresponding to the commit hash -TAG="nightly-$SHA" - -# If the foundry repository exists and a branch is checked out, we need to abort -# any changes inside ~/.foundry/foundry-rs/foundry. This is because foundryup will -# attempt to pull the latest changes from the remote repository, which will fail -# if there are any uncommitted changes. -if [ -d ~/.foundry/foundry-rs/foundry ]; then - echo "Foundry repository exists! Aborting any changes..." - cd ~/.foundry/foundry-rs/foundry - git reset --hard - git clean -fd - cd - -fi - -# Create a temporary directory -TMP_DIR=$(mktemp -d) -echo "Created tempdir @ $TMP_DIR" - -# Clone the foundry repo temporarily. We do this to avoid the need for a personal access -# token to interact with the GitHub REST API, and clean it up after we're done. -git clone https://github.com/foundry-rs/foundry.git "$TMP_DIR" && cd "$TMP_DIR" - -# If the nightly tag exists, we can download the pre-built binaries rather than building -# from source. Otherwise, clone the repository, check out the commit SHA, and build `forge`, -# `cast`, `anvil`, and `chisel` from source. -if git rev-parse "$TAG" >/dev/null 2>&1; then - echo "Nightly tag exists! Downloading prebuilt binaries..." - foundryup -v "$TAG" -else - echo "Nightly tag doesn't exist! Building from source..." - git checkout "$SHA" - - # Use native `cargo` build to avoid any rustc environment variables `foundryup` sets. We explicitly - # ignore chisel, as it is not a part of `ci-builder`. - cargo build --bin forge --release - cargo build --bin cast --release - cargo build --bin anvil --release - mkdir -p ~/.foundry/bin - mv target/release/forge ~/.foundry/bin - mv target/release/cast ~/.foundry/bin - mv target/release/anvil ~/.foundry/bin -fi - -# Remove the temporary foundry repo; Used just for checking the nightly tag's existence. -rm -rf "$TMP_DIR" -echo "Removed tempdir @ $TMP_DIR" diff --git a/packages/contracts-bedrock/README.md b/packages/contracts-bedrock/README.md index a9f1dbeeaa5f..24212f38ff34 100644 --- a/packages/contracts-bedrock/README.md +++ b/packages/contracts-bedrock/README.md @@ -66,7 +66,7 @@ See the [Optimism Developer Docs](https://docs.optimism.io/chain/addresses) for ### Contributing Guide Contributions to the OP Stack are always welcome. -Please refer to the [CONTRIBUTING.md](./meta/CONTRIBUTING.md) for more information about how to contribute to the OP Stack smart contracts. +Please refer to the [CONTRIBUTING.md](../../CONTRIBUTING.md) for more information about how to contribute to the OP Stack smart contracts. 
### Style Guide diff --git a/packages/contracts-bedrock/deploy-config/sepolia-devnet-0.json b/packages/contracts-bedrock/deploy-config/sepolia-devnet-0.json index ff87fc4f8cbb..8cc17a8f668d 100644 --- a/packages/contracts-bedrock/deploy-config/sepolia-devnet-0.json +++ b/packages/contracts-bedrock/deploy-config/sepolia-devnet-0.json @@ -15,8 +15,6 @@ "l2OutputOracleStartingBlockNumber": 0, "l2OutputOracleProposer": "0x95014c45078354ff839f14192228108eac82e00a", "l2OutputOracleChallenger": "0x8c20c40180751d93e939dddee3517ae0d1ebead2", - "cliqueSignerAddress": "0x0000000000000000000000000000000000000000", - "l1UseClique": false, "l1BlockTime": 12, "l1GenesisBlockTimestamp": "0x0", "l1GenesisBlockNonce": "0x0", @@ -65,7 +63,7 @@ "eip1559Denominator": 250, "eip1559DenominatorCanyon": 250, "systemConfigStartBlock": 4071248, - "faultGameAbsolutePrestate": "0x0385c3f8ee78491001d92b90b07d0cf387b7b52ab9b83b4d87c994e92cf823ba", + "faultGameAbsolutePrestate": "0x03925193e3e89f87835bbdf3a813f60b2aa818a36bbe71cd5d8fd7e79f5e8afe", "faultGameMaxDepth": 73, "faultGameClockExtension": 3600, "faultGameMaxClockDuration": 14400, diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml index 6bd441ca3cdb..b77e7bca4312 100644 --- a/packages/contracts-bedrock/foundry.toml +++ b/packages/contracts-bedrock/foundry.toml @@ -1,15 +1,23 @@ ################################################################ -# PROFILE: DEFAULT (Local) # +# PROFILE: DEFAULT (local) # ################################################################ [profile.default] -# Compilation settings src = 'src' out = 'forge-artifacts' script = 'scripts' +build_info_path = 'artifacts/build-info' +snapshots = 'notarealpath' # workaround for foundry#9477 + optimizer = true optimizer_runs = 999999 + +extra_output = ['devdoc', 'userdoc', 'metadata', 'storageLayout'] +bytecode_hash = 'none' +ast = true +evm_version = 'cancun' + remappings = [ '@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts', '@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts', @@ -22,24 +30,10 @@ remappings = [ 'ds-test/=lib/forge-std/lib/ds-test/src', 'safe-contracts/=lib/safe-contracts/contracts', 'kontrol-cheatcodes/=lib/kontrol-cheatcodes/src', - 'gelato/=lib/automate/contracts' + 'gelato/=lib/automate/contracts', + 'interfaces/=interfaces' ] -extra_output = ['devdoc', 'userdoc', 'metadata', 'storageLayout'] -bytecode_hash = 'none' -build_info_path = 'artifacts/build-info' -ast = true -evm_version = "cancun" -# 5159 error code is selfdestruct error code -ignored_error_codes = ["transient-storage", "code-size", "init-code-size", 5159] -# We set the gas limit to max int64 to avoid running out of gas during testing, since the default -# gas limit is 1B and some of our tests require more gas than that, such as `test_callWithMinGas_noLeakageLow_succeeds`. -# We use this gas limit since it was the default gas limit prior to https://github.com/foundry-rs/foundry/pull/8274. -# Due to toml-rs limitations, if you increase the gas limit above this value it must be a string. 
-gas_limit = 9223372036854775807 - -# Test / Script Runner Settings -ffi = true fs_permissions = [ { access='read-write', path='./.resource-metering.csv' }, { access='read-write', path='./snapshots/' }, @@ -52,7 +46,17 @@ fs_permissions = [ { access='read', path='./kout-deployment' }, { access='read', path='./test/fixtures' }, ] -libs = ["node_modules", "lib"] + +# 5159 error code is selfdestruct error code +ignored_error_codes = ["transient-storage", "code-size", "init-code-size", 5159] +ffi = true + +# We set the gas limit to max int64 to avoid running out of gas during testing, since the default +# gas limit is 1B and some of our tests require more gas than that, such as +# test_callWithMinGas_noLeakageLow_succeeds. We use this gas limit since it was the default gas +# limit prior to https://github.com/foundry-rs/foundry/pull/8274. Due to toml-rs limitations, if +# you increase the gas limit above this value it must be a string. +gas_limit = 9223372036854775807 [fuzz] runs = 64 @@ -67,21 +71,39 @@ wrap_comments=true # PROFILE: CI # ################################################################ -[profile.ci] -fuzz = { runs = 512 } +[profile.ci.fuzz] +runs = 512 [profile.ci.invariant] runs = 256 depth = 32 -[profile.ciheavy] -# fuzz = { runs = 20000 } -# temporary reduce fuzz runs to unblock CI -fuzz = { runs = 200 } +################################################################ +# PROFILE: CICOVERAGE # +################################################################ + +[profile.cicoverage] +optimizer = false + +[profile.cicoverage.fuzz] +runs = 512 + +[profile.cicoverage.invariant] +runs = 256 +depth = 32 + +################################################################ +# PROFILE: CIHEAVY # +################################################################ + +[profile.ciheavy.fuzz] +runs = 20000 +timeout = 600 [profile.ciheavy.invariant] runs = 128 depth = 512 +timeout = 600 ################################################################ # PROFILE: LITE # @@ -93,7 +115,6 @@ optimizer = false ################################################################ # PROFILE: KONTROL # ################################################################ -# See test/kontrol/README.md for an explanation of how the profiles are configured [profile.kprove] src = 'test/kontrol/proofs' diff --git a/packages/contracts-bedrock/src/L1/interfaces/IDataAvailabilityChallenge.sol b/packages/contracts-bedrock/interfaces/L1/IDataAvailabilityChallenge.sol similarity index 100% rename from packages/contracts-bedrock/src/L1/interfaces/IDataAvailabilityChallenge.sol rename to packages/contracts-bedrock/interfaces/L1/IDataAvailabilityChallenge.sol diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol b/packages/contracts-bedrock/interfaces/L1/IL1CrossDomainMessenger.sol similarity index 68% rename from packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol rename to packages/contracts-bedrock/interfaces/L1/IL1CrossDomainMessenger.sol index 8a6de84e2c9d..ae9aa75ef155 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/interfaces/L1/IL1CrossDomainMessenger.sol @@ -1,10 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { ISystemConfig } 
from "src/L1/interfaces/ISystemConfig.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; interface IL1CrossDomainMessenger is ICrossDomainMessenger { function PORTAL() external view returns (IOptimismPortal); diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessengerV160.sol b/packages/contracts-bedrock/interfaces/L1/IL1CrossDomainMessengerV160.sol similarity index 76% rename from packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessengerV160.sol rename to packages/contracts-bedrock/interfaces/L1/IL1CrossDomainMessengerV160.sol index d81bb5d25565..09d4e604fb8f 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessengerV160.sol +++ b/packages/contracts-bedrock/interfaces/L1/IL1CrossDomainMessengerV160.sol @@ -1,9 +1,9 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; /// @notice This interface corresponds to the op-contracts/v1.6.0 release of the L1CrossDomainMessenger /// contract, which has a semver of 2.3.0 as specified in diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1ERC721Bridge.sol b/packages/contracts-bedrock/interfaces/L1/IL1ERC721Bridge.sol similarity index 82% rename from packages/contracts-bedrock/src/L1/interfaces/IL1ERC721Bridge.sol rename to packages/contracts-bedrock/interfaces/L1/IL1ERC721Bridge.sol index 51356bc8d346..33756dbc49f1 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IL1ERC721Bridge.sol +++ b/packages/contracts-bedrock/interfaces/L1/IL1ERC721Bridge.sol @@ -1,9 +1,9 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IERC721Bridge } from "src/universal/interfaces/IERC721Bridge.sol"; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IERC721Bridge } from "interfaces/universal/IERC721Bridge.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; interface IL1ERC721Bridge is IERC721Bridge { function bridgeERC721( diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridge.sol b/packages/contracts-bedrock/interfaces/L1/IL1StandardBridge.sol similarity index 87% rename from packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridge.sol rename to packages/contracts-bedrock/interfaces/L1/IL1StandardBridge.sol index 816436cf1084..847ea76b44d2 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridge.sol +++ b/packages/contracts-bedrock/interfaces/L1/IL1StandardBridge.sol @@ -1,10 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IStandardBridge } from "src/universal/interfaces/IStandardBridge.sol"; -import { ICrossDomainMessenger } from 
"src/universal/interfaces/ICrossDomainMessenger.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { IStandardBridge } from "interfaces/universal/IStandardBridge.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; interface IL1StandardBridge is IStandardBridge { event ERC20DepositInitiated( diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridgeV160.sol b/packages/contracts-bedrock/interfaces/L1/IL1StandardBridgeV160.sol similarity index 88% rename from packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridgeV160.sol rename to packages/contracts-bedrock/interfaces/L1/IL1StandardBridgeV160.sol index b382c4f1ad6d..8d212de24486 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridgeV160.sol +++ b/packages/contracts-bedrock/interfaces/L1/IL1StandardBridgeV160.sol @@ -1,10 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IStandardBridge } from "src/universal/interfaces/IStandardBridge.sol"; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { IStandardBridge } from "interfaces/universal/IStandardBridge.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; /// @notice This interface corresponds to the op-contracts/v1.6.0 release of the L1StandardBridge /// contract, which has a semver of 2.1.0 as specified in diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL2OutputOracle.sol b/packages/contracts-bedrock/interfaces/L1/IL2OutputOracle.sol similarity index 100% rename from packages/contracts-bedrock/src/L1/interfaces/IL2OutputOracle.sol rename to packages/contracts-bedrock/interfaces/L1/IL2OutputOracle.sol diff --git a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal.sol b/packages/contracts-bedrock/interfaces/L1/IOptimismPortal.sol similarity index 93% rename from packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal.sol rename to packages/contracts-bedrock/interfaces/L1/IOptimismPortal.sol index b9035a6e5143..086a9ebc7bd8 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal.sol +++ b/packages/contracts-bedrock/interfaces/L1/IOptimismPortal.sol @@ -2,9 +2,9 @@ pragma solidity ^0.8.0; import { Types } from "src/libraries/Types.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IL2OutputOracle } from "interfaces/L1/IL2OutputOracle.sol"; interface IOptimismPortal { error BadTarget(); diff --git a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal2.sol b/packages/contracts-bedrock/interfaces/L1/IOptimismPortal2.sol similarity index 93% rename from 
packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal2.sol rename to packages/contracts-bedrock/interfaces/L1/IOptimismPortal2.sol index 91f09d714314..74ba7b4790e9 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal2.sol +++ b/packages/contracts-bedrock/interfaces/L1/IOptimismPortal2.sol @@ -3,10 +3,10 @@ pragma solidity ^0.8.0; import { Types } from "src/libraries/Types.sol"; import { GameType, Timestamp } from "src/dispute/lib/LibUDT.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; interface IOptimismPortal2 { error AlreadyFinalized(); diff --git a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortalInterop.sol b/packages/contracts-bedrock/interfaces/L1/IOptimismPortalInterop.sol similarity index 92% rename from packages/contracts-bedrock/src/L1/interfaces/IOptimismPortalInterop.sol rename to packages/contracts-bedrock/interfaces/L1/IOptimismPortalInterop.sol index 521c7232e125..58fe5eff5dca 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortalInterop.sol +++ b/packages/contracts-bedrock/interfaces/L1/IOptimismPortalInterop.sol @@ -3,11 +3,11 @@ pragma solidity ^0.8.0; import { Types } from "src/libraries/Types.sol"; import { GameType, Timestamp } from "src/dispute/lib/LibUDT.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ConfigType } from "src/L2/L1BlockInterop.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ConfigType } from "interfaces/L2/IL1BlockInterop.sol"; interface IOptimismPortalInterop { error AlreadyFinalized(); diff --git a/packages/contracts-bedrock/src/L1/interfaces/IProtocolVersions.sol b/packages/contracts-bedrock/interfaces/L1/IProtocolVersions.sol similarity index 100% rename from packages/contracts-bedrock/src/L1/interfaces/IProtocolVersions.sol rename to packages/contracts-bedrock/interfaces/L1/IProtocolVersions.sol diff --git a/packages/contracts-bedrock/src/L1/interfaces/IResourceMetering.sol b/packages/contracts-bedrock/interfaces/L1/IResourceMetering.sol similarity index 100% rename from packages/contracts-bedrock/src/L1/interfaces/IResourceMetering.sol rename to packages/contracts-bedrock/interfaces/L1/IResourceMetering.sol diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISuperchainConfig.sol b/packages/contracts-bedrock/interfaces/L1/ISuperchainConfig.sol similarity index 100% rename from packages/contracts-bedrock/src/L1/interfaces/ISuperchainConfig.sol rename to 
packages/contracts-bedrock/interfaces/L1/ISuperchainConfig.sol diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol b/packages/contracts-bedrock/interfaces/L1/ISystemConfig.sol similarity index 98% rename from packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol rename to packages/contracts-bedrock/interfaces/L1/ISystemConfig.sol index 8959c00b744a..904375167f48 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol +++ b/packages/contracts-bedrock/interfaces/L1/ISystemConfig.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; /// @notice This interface corresponds to the Custom Gas Token version of the SystemConfig contract. interface ISystemConfig { diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol b/packages/contracts-bedrock/interfaces/L1/ISystemConfigInterop.sol similarity index 96% rename from packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol rename to packages/contracts-bedrock/interfaces/L1/ISystemConfigInterop.sol index e11d17c9f7bd..4cf4a06f943f 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol +++ b/packages/contracts-bedrock/interfaces/L1/ISystemConfigInterop.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; interface ISystemConfigInterop { event ConfigUpdate(uint256 indexed version, ISystemConfig.UpdateType indexed updateType, bytes data); diff --git a/packages/contracts-bedrock/src/L2/interfaces/IBaseFeeVault.sol b/packages/contracts-bedrock/interfaces/L2/IBaseFeeVault.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/IBaseFeeVault.sol rename to packages/contracts-bedrock/interfaces/L2/IBaseFeeVault.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/ICrossL2Inbox.sol b/packages/contracts-bedrock/interfaces/L2/ICrossL2Inbox.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/ICrossL2Inbox.sol rename to packages/contracts-bedrock/interfaces/L2/ICrossL2Inbox.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/IDependencySet.sol b/packages/contracts-bedrock/interfaces/L2/IDependencySet.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/IDependencySet.sol rename to packages/contracts-bedrock/interfaces/L2/IDependencySet.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/IERC7802.sol b/packages/contracts-bedrock/interfaces/L2/IERC7802.sol similarity index 75% rename from packages/contracts-bedrock/src/L2/interfaces/IERC7802.sol rename to packages/contracts-bedrock/interfaces/L2/IERC7802.sol index 469230e822d1..38c92d01d8d2 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IERC7802.sol +++ b/packages/contracts-bedrock/interfaces/L2/IERC7802.sol @@ -9,12 +9,14 @@ interface IERC7802 is IERC165 { /// @notice Emitted when a crosschain transfer mints tokens. /// @param to Address of the account tokens are being minted for. /// @param amount Amount of tokens minted. 
- event CrosschainMint(address indexed to, uint256 amount); + /// @param sender Address of the account that finalized the crosschain transfer. + event CrosschainMint(address indexed to, uint256 amount, address indexed sender); /// @notice Emitted when a crosschain transfer burns tokens. /// @param from Address of the account tokens are being burned from. /// @param amount Amount of tokens burned. - event CrosschainBurn(address indexed from, uint256 amount); + /// @param sender Address of the account that initiated the crosschain transfer. + event CrosschainBurn(address indexed from, uint256 amount, address indexed sender); /// @notice Mint tokens through a crosschain transfer. /// @param _to Address to mint tokens to. diff --git a/packages/contracts-bedrock/src/L2/interfaces/IETHLiquidity.sol b/packages/contracts-bedrock/interfaces/L2/IETHLiquidity.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/IETHLiquidity.sol rename to packages/contracts-bedrock/interfaces/L2/IETHLiquidity.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/IFeeVault.sol b/packages/contracts-bedrock/interfaces/L2/IFeeVault.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/IFeeVault.sol rename to packages/contracts-bedrock/interfaces/L2/IFeeVault.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/IGasPriceOracle.sol b/packages/contracts-bedrock/interfaces/L2/IGasPriceOracle.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/IGasPriceOracle.sol rename to packages/contracts-bedrock/interfaces/L2/IGasPriceOracle.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol b/packages/contracts-bedrock/interfaces/L2/IL1Block.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol rename to packages/contracts-bedrock/interfaces/L2/IL1Block.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol b/packages/contracts-bedrock/interfaces/L2/IL1BlockInterop.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol rename to packages/contracts-bedrock/interfaces/L2/IL1BlockInterop.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1FeeVault.sol b/packages/contracts-bedrock/interfaces/L2/IL1FeeVault.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/IL1FeeVault.sol rename to packages/contracts-bedrock/interfaces/L2/IL1FeeVault.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL2CrossDomainMessenger.sol b/packages/contracts-bedrock/interfaces/L2/IL2CrossDomainMessenger.sol similarity index 83% rename from packages/contracts-bedrock/src/L2/interfaces/IL2CrossDomainMessenger.sol rename to packages/contracts-bedrock/interfaces/L2/IL2CrossDomainMessenger.sol index 1cb49f674ec0..c35b664b9470 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL2CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/interfaces/L2/IL2CrossDomainMessenger.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; interface IL2CrossDomainMessenger is ICrossDomainMessenger { function MESSAGE_VERSION() external view returns (uint16); diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL2ERC721Bridge.sol 
b/packages/contracts-bedrock/interfaces/L2/IL2ERC721Bridge.sol similarity index 86% rename from packages/contracts-bedrock/src/L2/interfaces/IL2ERC721Bridge.sol rename to packages/contracts-bedrock/interfaces/L2/IL2ERC721Bridge.sol index a760ce1d803c..1ca3d778e7a2 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL2ERC721Bridge.sol +++ b/packages/contracts-bedrock/interfaces/L2/IL2ERC721Bridge.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IERC721Bridge } from "src/universal/interfaces/IERC721Bridge.sol"; +import { IERC721Bridge } from "interfaces/universal/IERC721Bridge.sol"; interface IL2ERC721Bridge is IERC721Bridge { function finalizeBridgeERC721( diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL2StandardBridge.sol b/packages/contracts-bedrock/interfaces/L2/IL2StandardBridge.sol similarity index 93% rename from packages/contracts-bedrock/src/L2/interfaces/IL2StandardBridge.sol rename to packages/contracts-bedrock/interfaces/L2/IL2StandardBridge.sol index 9f9ce1a85621..2ad5f1bb5cc2 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL2StandardBridge.sol +++ b/packages/contracts-bedrock/interfaces/L2/IL2StandardBridge.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IStandardBridge } from "src/universal/interfaces/IStandardBridge.sol"; +import { IStandardBridge } from "interfaces/universal/IStandardBridge.sol"; interface IL2StandardBridge is IStandardBridge { event DepositFinalized( diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL2StandardBridgeInterop.sol b/packages/contracts-bedrock/interfaces/L2/IL2StandardBridgeInterop.sol similarity index 94% rename from packages/contracts-bedrock/src/L2/interfaces/IL2StandardBridgeInterop.sol rename to packages/contracts-bedrock/interfaces/L2/IL2StandardBridgeInterop.sol index 6b60f5e4f9b2..97b24a2b258c 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL2StandardBridgeInterop.sol +++ b/packages/contracts-bedrock/interfaces/L2/IL2StandardBridgeInterop.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IStandardBridge } from "src/universal/interfaces/IStandardBridge.sol"; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { IStandardBridge } from "interfaces/universal/IStandardBridge.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; interface IL2StandardBridgeInterop is IStandardBridge { error InvalidDecimals(); diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL1MessagePasser.sol b/packages/contracts-bedrock/interfaces/L2/IL2ToL1MessagePasser.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/IL2ToL1MessagePasser.sol rename to packages/contracts-bedrock/interfaces/L2/IL2ToL1MessagePasser.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol b/packages/contracts-bedrock/interfaces/L2/IL2ToL2CrossDomainMessenger.sol similarity index 97% rename from packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol rename to packages/contracts-bedrock/interfaces/L2/IL2ToL2CrossDomainMessenger.sol index 00ca4906b5c4..89311cf18f01 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/interfaces/L2/IL2ToL2CrossDomainMessenger.sol @@ -42,6 +42,9 @@ interface IL2ToL2CrossDomainMessenger { /// @notice Thrown when a call to the target contract 
during message relay fails. error TargetCallFailed(); + /// @notice Thrown when attempting to use a chain ID that is not in the dependency set. + error InvalidChainId(); + /// @notice Emitted whenever a message is sent to a destination /// @param destination Chain ID of the destination chain. /// @param target Target contract or wallet address. diff --git a/packages/contracts-bedrock/src/L2/interfaces/IMintableAndBurnableERC20.sol b/packages/contracts-bedrock/interfaces/L2/IMintableAndBurnableERC20.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/IMintableAndBurnableERC20.sol rename to packages/contracts-bedrock/interfaces/L2/IMintableAndBurnableERC20.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/IOptimismERC20Factory.sol b/packages/contracts-bedrock/interfaces/L2/IOptimismERC20Factory.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/IOptimismERC20Factory.sol rename to packages/contracts-bedrock/interfaces/L2/IOptimismERC20Factory.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/IOptimismSuperchainERC20.sol b/packages/contracts-bedrock/interfaces/L2/IOptimismSuperchainERC20.sol similarity index 92% rename from packages/contracts-bedrock/src/L2/interfaces/IOptimismSuperchainERC20.sol rename to packages/contracts-bedrock/interfaces/L2/IOptimismSuperchainERC20.sol index 0284e29841cb..58dea0df33a1 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IOptimismSuperchainERC20.sol +++ b/packages/contracts-bedrock/interfaces/L2/IOptimismSuperchainERC20.sol @@ -2,7 +2,7 @@ pragma solidity ^0.8.0; // Interfaces -import { ISuperchainERC20 } from "src/L2/interfaces/ISuperchainERC20.sol"; +import { ISuperchainERC20 } from "interfaces/L2/ISuperchainERC20.sol"; /// @title IOptimismSuperchainERC20 /// @notice This interface is available on the OptimismSuperchainERC20 contract. 
diff --git a/packages/contracts-bedrock/src/L2/interfaces/IOptimismSuperchainERC20Beacon.sol b/packages/contracts-bedrock/interfaces/L2/IOptimismSuperchainERC20Beacon.sol similarity index 83% rename from packages/contracts-bedrock/src/L2/interfaces/IOptimismSuperchainERC20Beacon.sol rename to packages/contracts-bedrock/interfaces/L2/IOptimismSuperchainERC20Beacon.sol index fccb56b715f8..850cbb6f1e20 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IOptimismSuperchainERC20Beacon.sol +++ b/packages/contracts-bedrock/interfaces/L2/IOptimismSuperchainERC20Beacon.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title IOptimismSuperchainERC20Beacon /// @notice Interface for the OptimismSuperchainERC20Beacon contract diff --git a/packages/contracts-bedrock/src/L2/interfaces/IOptimismSuperchainERC20Factory.sol b/packages/contracts-bedrock/interfaces/L2/IOptimismSuperchainERC20Factory.sol similarity index 80% rename from packages/contracts-bedrock/src/L2/interfaces/IOptimismSuperchainERC20Factory.sol rename to packages/contracts-bedrock/interfaces/L2/IOptimismSuperchainERC20Factory.sol index aa8e4f1f1605..2c23d575326b 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IOptimismSuperchainERC20Factory.sol +++ b/packages/contracts-bedrock/interfaces/L2/IOptimismSuperchainERC20Factory.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IOptimismERC20Factory } from "src/L2/interfaces/IOptimismERC20Factory.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { IOptimismERC20Factory } from "interfaces/L2/IOptimismERC20Factory.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title IOptimismSuperchainERC20Factory /// @notice Interface for the OptimismSuperchainERC20Factory contract diff --git a/packages/contracts-bedrock/src/L2/interfaces/ISequencerFeeVault.sol b/packages/contracts-bedrock/interfaces/L2/ISequencerFeeVault.sol similarity index 100% rename from packages/contracts-bedrock/src/L2/interfaces/ISequencerFeeVault.sol rename to packages/contracts-bedrock/interfaces/L2/ISequencerFeeVault.sol diff --git a/packages/contracts-bedrock/src/L2/interfaces/ISuperchainERC20.sol b/packages/contracts-bedrock/interfaces/L2/ISuperchainERC20.sol similarity index 70% rename from packages/contracts-bedrock/src/L2/interfaces/ISuperchainERC20.sol rename to packages/contracts-bedrock/interfaces/L2/ISuperchainERC20.sol index 029b13d5520a..243f6f7ba49b 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/ISuperchainERC20.sol +++ b/packages/contracts-bedrock/interfaces/L2/ISuperchainERC20.sol @@ -2,9 +2,9 @@ pragma solidity ^0.8.0; // Interfaces -import { IERC7802 } from "src/L2/interfaces/IERC7802.sol"; -import { IERC20Solady as IERC20 } from "src/vendor/interfaces/IERC20Solady.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { IERC7802 } from "interfaces/L2/IERC7802.sol"; +import { IERC20Solady as IERC20 } from "interfaces/vendor/IERC20Solady.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title ISuperchainERC20 /// @notice This interface is available on the SuperchainERC20 contract. 
diff --git a/packages/contracts-bedrock/src/L2/interfaces/ISuperchainTokenBridge.sol b/packages/contracts-bedrock/interfaces/L2/ISuperchainTokenBridge.sol similarity index 93% rename from packages/contracts-bedrock/src/L2/interfaces/ISuperchainTokenBridge.sol rename to packages/contracts-bedrock/interfaces/L2/ISuperchainTokenBridge.sol index af9d7d8d8411..0794fa83c996 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/ISuperchainTokenBridge.sol +++ b/packages/contracts-bedrock/interfaces/L2/ISuperchainTokenBridge.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title ISuperchainTokenBridge /// @notice Interface for the SuperchainTokenBridge contract. diff --git a/packages/contracts-bedrock/interfaces/L2/ISuperchainWETH.sol b/packages/contracts-bedrock/interfaces/L2/ISuperchainWETH.sol new file mode 100644 index 000000000000..a6b6ef3ce7bf --- /dev/null +++ b/packages/contracts-bedrock/interfaces/L2/ISuperchainWETH.sol @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { IWETH98 } from "interfaces/universal/IWETH98.sol"; +import { IERC7802 } from "interfaces/L2/IERC7802.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; + +interface ISuperchainWETH is IWETH98, IERC7802, ISemver { + error Unauthorized(); + error NotCustomGasToken(); + error InvalidCrossDomainSender(); + error ZeroAddress(); + + event SendETH(address indexed from, address indexed to, uint256 amount, uint256 destination); + + event RelayETH(address indexed from, address indexed to, uint256 amount, uint256 source); + + function balanceOf(address src) external view returns (uint256); + function withdraw(uint256 _amount) external; + function supportsInterface(bytes4 _interfaceId) external view returns (bool); + function sendETH(address _to, uint256 _chainId) external payable returns (bytes32 msgHash_); + function relayETH(address _from, address _to, uint256 _amount) external; + + function __constructor__() external; +} diff --git a/packages/contracts-bedrock/src/cannon/interfaces/IMIPS.sol b/packages/contracts-bedrock/interfaces/cannon/IMIPS.sol similarity index 84% rename from packages/contracts-bedrock/src/cannon/interfaces/IMIPS.sol rename to packages/contracts-bedrock/interfaces/cannon/IMIPS.sol index 665cf223d083..8ad7ccdc21a3 100644 --- a/packages/contracts-bedrock/src/cannon/interfaces/IMIPS.sol +++ b/packages/contracts-bedrock/interfaces/cannon/IMIPS.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; /// @title IMIPS /// @notice Interface for the MIPS contract. 
diff --git a/packages/contracts-bedrock/src/cannon/interfaces/IMIPS2.sol b/packages/contracts-bedrock/interfaces/cannon/IMIPS2.sol similarity index 90% rename from packages/contracts-bedrock/src/cannon/interfaces/IMIPS2.sol rename to packages/contracts-bedrock/interfaces/cannon/IMIPS2.sol index be37d5b49ff7..bdaf97d77e5b 100644 --- a/packages/contracts-bedrock/src/cannon/interfaces/IMIPS2.sol +++ b/packages/contracts-bedrock/interfaces/cannon/IMIPS2.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; /// @title IMIPS2 /// @notice Interface for the MIPS2 contract. diff --git a/packages/contracts-bedrock/src/cannon/interfaces/IPreimageOracle.sol b/packages/contracts-bedrock/interfaces/cannon/IPreimageOracle.sol similarity index 100% rename from packages/contracts-bedrock/src/cannon/interfaces/IPreimageOracle.sol rename to packages/contracts-bedrock/interfaces/cannon/IPreimageOracle.sol diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IAnchorStateRegistry.sol b/packages/contracts-bedrock/interfaces/dispute/IAnchorStateRegistry.sol similarity index 81% rename from packages/contracts-bedrock/src/dispute/interfaces/IAnchorStateRegistry.sol rename to packages/contracts-bedrock/interfaces/dispute/IAnchorStateRegistry.sol index 4c79c4c092a9..dfb46ad8378f 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IAnchorStateRegistry.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IAnchorStateRegistry.sol @@ -1,9 +1,9 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { GameType, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; interface IAnchorStateRegistry { diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IBigStepper.sol b/packages/contracts-bedrock/interfaces/dispute/IBigStepper.sol similarity index 97% rename from packages/contracts-bedrock/src/dispute/interfaces/IBigStepper.sol rename to packages/contracts-bedrock/interfaces/dispute/IBigStepper.sol index f38e58f9d9d6..e5a8bd3ec342 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IBigStepper.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IBigStepper.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; /// @title IBigStepper /// @notice Describes a state machine that can perform a single instruction step, provided a prestate and an optional diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol b/packages/contracts-bedrock/interfaces/dispute/IDelayedWETH.sol similarity index 96% rename from packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol rename to 
packages/contracts-bedrock/interfaces/dispute/IDelayedWETH.sol index 98b221285b56..63ffa49919b6 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IDelayedWETH.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; interface IDelayedWETH { struct WithdrawalRequest { diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/IDisputeGame.sol similarity index 92% rename from packages/contracts-bedrock/src/dispute/interfaces/IDisputeGame.sol rename to packages/contracts-bedrock/interfaces/dispute/IDisputeGame.sol index 0f860e68b5a1..2f79cc79d327 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IDisputeGame.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IDisputeGame.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IInitializable } from "src/dispute/interfaces/IInitializable.sol"; +import { IInitializable } from "interfaces/dispute/IInitializable.sol"; import { Timestamp, GameStatus, GameType, Claim, Hash } from "src/dispute/lib/Types.sol"; interface IDisputeGame is IInitializable { diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IDisputeGameFactory.sol b/packages/contracts-bedrock/interfaces/dispute/IDisputeGameFactory.sol similarity index 97% rename from packages/contracts-bedrock/src/dispute/interfaces/IDisputeGameFactory.sol rename to packages/contracts-bedrock/interfaces/dispute/IDisputeGameFactory.sol index 3e7233f440d6..789d47072291 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IDisputeGameFactory.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IDisputeGameFactory.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; import { GameId, Timestamp, Claim, Hash, GameType } from "src/dispute/lib/Types.sol"; interface IDisputeGameFactory { diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/IFaultDisputeGame.sol similarity index 87% rename from packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol rename to packages/contracts-bedrock/interfaces/dispute/IFaultDisputeGame.sol index 8c5bac02e9ba..038bb55998f8 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IFaultDisputeGame.sol @@ -1,10 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IBigStepper } from "interfaces/dispute/IBigStepper.sol"; import { Types } from "src/libraries/Types.sol"; import { GameType, Claim, Position, Clock, Hash, 
Duration } from "src/dispute/lib/Types.sol"; @@ -26,6 +26,19 @@ interface IFaultDisputeGame is IDisputeGame { address counteredBy; } + struct GameConstructorParams { + GameType gameType; + Claim absolutePrestate; + uint256 maxGameDepth; + uint256 splitDepth; + Duration clockExtension; + Duration maxClockDuration; + IBigStepper vm; + IDelayedWETH weth; + IAnchorStateRegistry anchorStateRegistry; + uint256 l2ChainId; + } + error AlreadyInitialized(); error AnchorRootNotFound(); error BlockNumberMatches(); @@ -113,17 +126,5 @@ interface IFaultDisputeGame is IDisputeGame { function vm() external view returns (IBigStepper vm_); function weth() external view returns (IDelayedWETH weth_); - function __constructor__( - GameType _gameType, - Claim _absolutePrestate, - uint256 _maxGameDepth, - uint256 _splitDepth, - Duration _clockExtension, - Duration _maxClockDuration, - IBigStepper _vm, - IDelayedWETH _weth, - IAnchorStateRegistry _anchorStateRegistry, - uint256 _l2ChainId - ) - external; + function __constructor__(GameConstructorParams memory _params) external; } diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IInitializable.sol b/packages/contracts-bedrock/interfaces/dispute/IInitializable.sol similarity index 100% rename from packages/contracts-bedrock/src/dispute/interfaces/IInitializable.sol rename to packages/contracts-bedrock/interfaces/dispute/IInitializable.sol diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/IPermissionedDisputeGame.sol similarity index 87% rename from packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol rename to packages/contracts-bedrock/interfaces/dispute/IPermissionedDisputeGame.sol index c5a5a187ec12..c9d26d70a6ca 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IPermissionedDisputeGame.sol @@ -2,12 +2,13 @@ pragma solidity ^0.8.0; import { Types } from "src/libraries/Types.sol"; -import { GameType, Claim, Position, Clock, Hash, Duration } from "src/dispute/lib/Types.sol"; +import { Claim, Position, Clock, Hash, Duration } from "src/dispute/lib/Types.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IBigStepper } from "interfaces/dispute/IBigStepper.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; interface IPermissionedDisputeGame is IDisputeGame { struct ClaimData { @@ -120,16 +121,7 @@ interface IPermissionedDisputeGame is IDisputeGame { function challenger() external view returns (address challenger_); function __constructor__( - GameType _gameType, - Claim _absolutePrestate, - uint256 _maxGameDepth, - uint256 _splitDepth, - Duration _clockExtension, - Duration _maxClockDuration, - IBigStepper _vm, - IDelayedWETH _weth, - IAnchorStateRegistry _anchorStateRegistry, - uint256 _l2ChainId, + IFaultDisputeGame.GameConstructorParams memory _params, address _proposer, address _challenger ) diff --git 
a/packages/contracts-bedrock/src/governance/interfaces/IGovernanceToken.sol b/packages/contracts-bedrock/interfaces/governance/IGovernanceToken.sol similarity index 100% rename from packages/contracts-bedrock/src/governance/interfaces/IGovernanceToken.sol rename to packages/contracts-bedrock/interfaces/governance/IGovernanceToken.sol diff --git a/packages/contracts-bedrock/src/governance/interfaces/IMintManager.sol b/packages/contracts-bedrock/interfaces/governance/IMintManager.sol similarity index 91% rename from packages/contracts-bedrock/src/governance/interfaces/IMintManager.sol rename to packages/contracts-bedrock/interfaces/governance/IMintManager.sol index 68399f3336c9..7ed7f1afc403 100644 --- a/packages/contracts-bedrock/src/governance/interfaces/IMintManager.sol +++ b/packages/contracts-bedrock/interfaces/governance/IMintManager.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IGovernanceToken } from "src/governance/interfaces/IGovernanceToken.sol"; +import { IGovernanceToken } from "interfaces/governance/IGovernanceToken.sol"; interface IMintManager { event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); diff --git a/packages/contracts-bedrock/src/legacy/interfaces/IAddressManager.sol b/packages/contracts-bedrock/interfaces/legacy/IAddressManager.sol similarity index 87% rename from packages/contracts-bedrock/src/legacy/interfaces/IAddressManager.sol rename to packages/contracts-bedrock/interfaces/legacy/IAddressManager.sol index 0c0004a53675..e925fac5847f 100644 --- a/packages/contracts-bedrock/src/legacy/interfaces/IAddressManager.sol +++ b/packages/contracts-bedrock/interfaces/legacy/IAddressManager.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IOwnable } from "src/universal/interfaces/IOwnable.sol"; +import { IOwnable } from "interfaces/universal/IOwnable.sol"; /// @title IAddressManager /// @notice Interface for the AddressManager contract. diff --git a/packages/contracts-bedrock/src/legacy/interfaces/IDeployerWhitelist.sol b/packages/contracts-bedrock/interfaces/legacy/IDeployerWhitelist.sol similarity index 100% rename from packages/contracts-bedrock/src/legacy/interfaces/IDeployerWhitelist.sol rename to packages/contracts-bedrock/interfaces/legacy/IDeployerWhitelist.sol diff --git a/packages/contracts-bedrock/src/legacy/interfaces/IL1BlockNumber.sol b/packages/contracts-bedrock/interfaces/legacy/IL1BlockNumber.sol similarity index 84% rename from packages/contracts-bedrock/src/legacy/interfaces/IL1BlockNumber.sol rename to packages/contracts-bedrock/interfaces/legacy/IL1BlockNumber.sol index 551514632696..f20770a55d41 100644 --- a/packages/contracts-bedrock/src/legacy/interfaces/IL1BlockNumber.sol +++ b/packages/contracts-bedrock/interfaces/legacy/IL1BlockNumber.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title IL1BlockNumber /// @notice Interface for the L1BlockNumber contract. 
diff --git a/packages/contracts-bedrock/src/legacy/interfaces/IL1ChugSplashProxy.sol b/packages/contracts-bedrock/interfaces/legacy/IL1ChugSplashProxy.sol similarity index 100% rename from packages/contracts-bedrock/src/legacy/interfaces/IL1ChugSplashProxy.sol rename to packages/contracts-bedrock/interfaces/legacy/IL1ChugSplashProxy.sol diff --git a/packages/contracts-bedrock/src/legacy/interfaces/ILegacyMessagePasser.sol b/packages/contracts-bedrock/interfaces/legacy/ILegacyMessagePasser.sol similarity index 85% rename from packages/contracts-bedrock/src/legacy/interfaces/ILegacyMessagePasser.sol rename to packages/contracts-bedrock/interfaces/legacy/ILegacyMessagePasser.sol index 0eebc30d5878..d73d57dfa701 100644 --- a/packages/contracts-bedrock/src/legacy/interfaces/ILegacyMessagePasser.sol +++ b/packages/contracts-bedrock/interfaces/legacy/ILegacyMessagePasser.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title ILegacyMessagePasser /// @notice Interface for the LegacyMessagePasser contract. diff --git a/packages/contracts-bedrock/src/legacy/interfaces/IResolvedDelegateProxy.sol b/packages/contracts-bedrock/interfaces/legacy/IResolvedDelegateProxy.sol similarity index 82% rename from packages/contracts-bedrock/src/legacy/interfaces/IResolvedDelegateProxy.sol rename to packages/contracts-bedrock/interfaces/legacy/IResolvedDelegateProxy.sol index b3201ff0b1c7..a677d5b0a478 100644 --- a/packages/contracts-bedrock/src/legacy/interfaces/IResolvedDelegateProxy.sol +++ b/packages/contracts-bedrock/interfaces/legacy/IResolvedDelegateProxy.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; /// @title IResolvedDelegateProxy /// @notice Interface for the ResolvedDelegateProxy contract. 
diff --git a/packages/contracts-bedrock/src/safe/interfaces/IDeputyGuardianModule.sol b/packages/contracts-bedrock/interfaces/safe/IDeputyGuardianModule.sol similarity index 73% rename from packages/contracts-bedrock/src/safe/interfaces/IDeputyGuardianModule.sol rename to packages/contracts-bedrock/interfaces/safe/IDeputyGuardianModule.sol index da74cf4dd983..a5c0e3313027 100644 --- a/packages/contracts-bedrock/src/safe/interfaces/IDeputyGuardianModule.sol +++ b/packages/contracts-bedrock/interfaces/safe/IDeputyGuardianModule.sol @@ -1,12 +1,12 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; import { GameType, Timestamp } from "src/dispute/lib/Types.sol"; import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; diff --git a/packages/contracts-bedrock/src/safe/interfaces/ILivenessGuard.sol b/packages/contracts-bedrock/interfaces/safe/ILivenessGuard.sol similarity index 93% rename from packages/contracts-bedrock/src/safe/interfaces/ILivenessGuard.sol rename to packages/contracts-bedrock/interfaces/safe/ILivenessGuard.sol index a69921211955..db6761096d12 100644 --- a/packages/contracts-bedrock/src/safe/interfaces/ILivenessGuard.sol +++ b/packages/contracts-bedrock/interfaces/safe/ILivenessGuard.sol @@ -3,7 +3,7 @@ pragma solidity ^0.8.0; import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; import { Enum } from "safe-contracts/common/Enum.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; interface ILivenessGuard is ISemver { event OwnerRecorded(address owner); diff --git a/packages/contracts-bedrock/src/safe/interfaces/ILivenessModule.sol b/packages/contracts-bedrock/interfaces/safe/ILivenessModule.sol similarity index 95% rename from packages/contracts-bedrock/src/safe/interfaces/ILivenessModule.sol rename to packages/contracts-bedrock/interfaces/safe/ILivenessModule.sol index 43711d508597..ad5088fcf2bb 100644 --- a/packages/contracts-bedrock/src/safe/interfaces/ILivenessModule.sol +++ b/packages/contracts-bedrock/interfaces/safe/ILivenessModule.sol @@ -3,7 +3,7 @@ pragma solidity ^0.8.0; import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; import { LivenessGuard } from "src/safe/LivenessGuard.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; interface ILivenessModule is ISemver { error OwnerRemovalFailed(string); diff --git a/packages/contracts-bedrock/src/universal/interfaces/ICrossDomainMessenger.sol b/packages/contracts-bedrock/interfaces/universal/ICrossDomainMessenger.sol similarity index 
100% rename from packages/contracts-bedrock/src/universal/interfaces/ICrossDomainMessenger.sol rename to packages/contracts-bedrock/interfaces/universal/ICrossDomainMessenger.sol diff --git a/packages/contracts-bedrock/src/universal/interfaces/IEIP712.sol b/packages/contracts-bedrock/interfaces/universal/IEIP712.sol similarity index 100% rename from packages/contracts-bedrock/src/universal/interfaces/IEIP712.sol rename to packages/contracts-bedrock/interfaces/universal/IEIP712.sol diff --git a/packages/contracts-bedrock/src/universal/interfaces/IERC721Bridge.sol b/packages/contracts-bedrock/interfaces/universal/IERC721Bridge.sol similarity index 93% rename from packages/contracts-bedrock/src/universal/interfaces/IERC721Bridge.sol rename to packages/contracts-bedrock/interfaces/universal/IERC721Bridge.sol index 3c97958c1033..79497fa6383c 100644 --- a/packages/contracts-bedrock/src/universal/interfaces/IERC721Bridge.sol +++ b/packages/contracts-bedrock/interfaces/universal/IERC721Bridge.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; interface IERC721Bridge { event ERC721BridgeFinalized( diff --git a/packages/contracts-bedrock/src/universal/interfaces/ILegacyMintableERC20.sol b/packages/contracts-bedrock/interfaces/universal/ILegacyMintableERC20.sol similarity index 100% rename from packages/contracts-bedrock/src/universal/interfaces/ILegacyMintableERC20.sol rename to packages/contracts-bedrock/interfaces/universal/ILegacyMintableERC20.sol diff --git a/packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC20.sol b/packages/contracts-bedrock/interfaces/universal/IOptimismMintableERC20.sol similarity index 100% rename from packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC20.sol rename to packages/contracts-bedrock/interfaces/universal/IOptimismMintableERC20.sol diff --git a/packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC20Factory.sol b/packages/contracts-bedrock/interfaces/universal/IOptimismMintableERC20Factory.sol similarity index 100% rename from packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC20Factory.sol rename to packages/contracts-bedrock/interfaces/universal/IOptimismMintableERC20Factory.sol diff --git a/packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721.sol b/packages/contracts-bedrock/interfaces/universal/IOptimismMintableERC721.sol similarity index 100% rename from packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721.sol rename to packages/contracts-bedrock/interfaces/universal/IOptimismMintableERC721.sol diff --git a/packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721Factory.sol b/packages/contracts-bedrock/interfaces/universal/IOptimismMintableERC721Factory.sol similarity index 100% rename from packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721Factory.sol rename to packages/contracts-bedrock/interfaces/universal/IOptimismMintableERC721Factory.sol diff --git a/packages/contracts-bedrock/src/universal/interfaces/IOwnable.sol b/packages/contracts-bedrock/interfaces/universal/IOwnable.sol similarity index 100% rename from packages/contracts-bedrock/src/universal/interfaces/IOwnable.sol rename to packages/contracts-bedrock/interfaces/universal/IOwnable.sol diff --git 
a/packages/contracts-bedrock/src/universal/interfaces/IProxy.sol b/packages/contracts-bedrock/interfaces/universal/IProxy.sol similarity index 100% rename from packages/contracts-bedrock/src/universal/interfaces/IProxy.sol rename to packages/contracts-bedrock/interfaces/universal/IProxy.sol diff --git a/packages/contracts-bedrock/src/universal/interfaces/IProxyAdmin.sol b/packages/contracts-bedrock/interfaces/universal/IProxyAdmin.sol similarity index 95% rename from packages/contracts-bedrock/src/universal/interfaces/IProxyAdmin.sol rename to packages/contracts-bedrock/interfaces/universal/IProxyAdmin.sol index b35947e6cd78..09688257b925 100644 --- a/packages/contracts-bedrock/src/universal/interfaces/IProxyAdmin.sol +++ b/packages/contracts-bedrock/interfaces/universal/IProxyAdmin.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; interface IProxyAdmin { enum ProxyType { diff --git a/packages/contracts-bedrock/src/universal/interfaces/ISemver.sol b/packages/contracts-bedrock/interfaces/universal/ISemver.sol similarity index 100% rename from packages/contracts-bedrock/src/universal/interfaces/ISemver.sol rename to packages/contracts-bedrock/interfaces/universal/ISemver.sol diff --git a/packages/contracts-bedrock/src/universal/interfaces/IStandardBridge.sol b/packages/contracts-bedrock/interfaces/universal/IStandardBridge.sol similarity index 95% rename from packages/contracts-bedrock/src/universal/interfaces/IStandardBridge.sol rename to packages/contracts-bedrock/interfaces/universal/IStandardBridge.sol index 406a172c0737..a9c45017204d 100644 --- a/packages/contracts-bedrock/src/universal/interfaces/IStandardBridge.sol +++ b/packages/contracts-bedrock/interfaces/universal/IStandardBridge.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; interface IStandardBridge { event ERC20BridgeFinalized( diff --git a/packages/contracts-bedrock/src/universal/interfaces/IStaticERC1967Proxy.sol b/packages/contracts-bedrock/interfaces/universal/IStaticERC1967Proxy.sol similarity index 100% rename from packages/contracts-bedrock/src/universal/interfaces/IStaticERC1967Proxy.sol rename to packages/contracts-bedrock/interfaces/universal/IStaticERC1967Proxy.sol diff --git a/packages/contracts-bedrock/src/universal/interfaces/IWETH98.sol b/packages/contracts-bedrock/interfaces/universal/IWETH98.sol similarity index 100% rename from packages/contracts-bedrock/src/universal/interfaces/IWETH98.sol rename to packages/contracts-bedrock/interfaces/universal/IWETH98.sol diff --git a/packages/contracts-bedrock/src/vendor/interfaces/IERC20Solady.sol b/packages/contracts-bedrock/interfaces/vendor/IERC20Solady.sol similarity index 100% rename from packages/contracts-bedrock/src/vendor/interfaces/IERC20Solady.sol rename to packages/contracts-bedrock/interfaces/vendor/IERC20Solady.sol diff --git a/packages/contracts-bedrock/src/vendor/interfaces/IGelatoTreasury.sol b/packages/contracts-bedrock/interfaces/vendor/IGelatoTreasury.sol similarity index 100% rename from packages/contracts-bedrock/src/vendor/interfaces/IGelatoTreasury.sol rename to packages/contracts-bedrock/interfaces/vendor/IGelatoTreasury.sol diff --git 
a/packages/contracts-bedrock/interfaces/vendor/asterisc/IRISCV.sol b/packages/contracts-bedrock/interfaces/vendor/asterisc/IRISCV.sol new file mode 100644 index 000000000000..d69a22dfb0c4 --- /dev/null +++ b/packages/contracts-bedrock/interfaces/vendor/asterisc/IRISCV.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; + +/// @title IRISCV +/// @notice Interface for the RISCV contract. +interface IRISCV is ISemver { + function oracle() external view returns (IPreimageOracle); + function step(bytes memory _stateData, bytes memory _proof, bytes32 _localContext) external returns (bytes32); + + function __constructor__(IPreimageOracle _oracle) external; +} diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index fe6f4c0834c9..6a34cc6998d3 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -15,24 +15,12 @@ dep-status: # BUILD # ######################################################## -# Checks that the correct version of Foundry is installed. -check-foundry: - cd ../../ && ./ops/scripts/check-foundry.sh - -# Checks that semgrep is installed. -check-semgrep: - cd ../../ && just check-semgrep - -# Checks that the correct versions of Foundry and semgrep are installed. -check-dependencies: - just check-foundry && just check-semgrep - # Core forge build command forge-build: forge build # Builds the contracts. -build: check-dependencies lint-fix-no-fail forge-build interfaces-check-no-build +build: lint-fix-no-fail forge-build interfaces-check-no-build # Builds the go-ffi tool for contract tests. build-go-ffi-default: @@ -54,8 +42,8 @@ clean: ######################################################## # Runs standard contract tests. -test: build-go-ffi - forge test +test *ARGS: build-go-ffi + forge test {{ARGS}} # Runs standard contract tests with rerun flag. test-rerun: build-go-ffi @@ -70,11 +58,11 @@ test-kontrol-no-build: # Runs contract coverage. coverage: build-go-ffi - forge coverage || (bash -c "forge coverage 2>&1 | grep -q 'Stack too deep' && echo -e '\\033[1;33mWARNING\\033[0m: Coverage failed with stack too deep, so overriding and exiting successfully' && exit 0 || exit 1") + forge coverage # Runs contract coverage with lcov. coverage-lcov: build-go-ffi - forge coverage --report lcov || (bash -c "forge coverage --report lcov 2>&1 | grep -q 'Stack too deep' && echo -e '\\033[1;33mWARNING\\033[0m: Coverage failed with stack too deep, so overriding and exiting successfully' && exit 0 || exit 1") + forge coverage --report lcov ######################################################## @@ -216,7 +204,6 @@ semgrep: semgrep-test: cd ../../ && semgrep scan --test --config .semgrep/rules/ .semgrep/tests/ -# TODO: Also run lint-forge-tests-check but we need to fix the test names first. # Runs all checks. 
check: @just gas-snapshot-check-no-build \ diff --git a/packages/contracts-bedrock/lib/lib-keccak b/packages/contracts-bedrock/lib/lib-keccak index 0115edbbc60b..3b1e7bbb4cc2 160000 --- a/packages/contracts-bedrock/lib/lib-keccak +++ b/packages/contracts-bedrock/lib/lib-keccak @@ -1 +1 @@ -Subproject commit 0115edbbc60b5f702392caafc3a142061e6142fa +Subproject commit 3b1e7bbb4cc23e9228097cfebe42aedaf3b8f2b9 diff --git a/packages/contracts-bedrock/meta/CONTRIBUTING.md b/packages/contracts-bedrock/meta/CONTRIBUTING.md deleted file mode 100644 index 462aa6660c4f..000000000000 --- a/packages/contracts-bedrock/meta/CONTRIBUTING.md +++ /dev/null @@ -1,106 +0,0 @@ -# Contributing to CONTRIBUTING.md - -First off, thanks for taking the time to contribute! - -We welcome and appreciate all kinds of contributions. We ask that before contributing you please review the procedures for each type of contribution available in the [Table of Contents](#table-of-contents). This will streamline the process for both maintainers and contributors. To find ways to contribute, view the [I Want To Contribute](#i-want-to-contribute) section below. Larger contributions should [open an issue](https://github.com/ethereum-optimism/optimism/issues/new) before implementation to ensure changes don't go to waste. - -We're excited to work with you and your contributions to scaling Ethereum! - -## Table of Contents - -- [I Have a Question](#i-have-a-question) -- [I Want To Contribute](#i-want-to-contribute) -- [Reporting Bugs](#reporting-bugs) -- [Suggesting Enhancements](#suggesting-enhancements) -- [Your First Code Contribution](#your-first-code-contribution) -- [Improving The Documentation](#improving-the-documentation) -- [Deploying on Devnet](#deploying-on-devnet) -- [Tools](#tools) - -## I Have a Question - -> **Note** -> Before making an issue, please read the documentation and search the issues to see if your question has already been answered. - -If you have any questions about the smart contracts, please feel free to ask them in the Optimism discord developer channels or create a new detailed issue. - -## I Want To Contribute - -### Reporting Bugs - -**Any and all bug reports on production smart contract code should be submitted privately to the Optimism team so that we can mitigate the issue before it is exploited. Please see our security policy document [here](https://github.com/ethereum-optimism/.github/blob/master/SECURITY.md).** - -### Suggesting Enhancements - -#### Before Submitting an Enhancement - -- Read the documentation and the smart contracts themselves to see if the feature already exists. -- Perform a search in the issues to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. - -#### How Do I Submit a Good Enhancement Suggestion? - -Enhancement suggestions are tracked as [GitHub issues](https://github.com/ethereum-optimism/optimism/issues). - -- Use a **clear and descriptive title** for the issue to identify the suggestion. -- Provide a **step-by-step** description of the suggested enhancement in as many details as possible. -- Describe the **current** behavior and why the **intended** behavior you expected to see differs. At this point you can also tell which alternatives do not work for you. -- Explain why this enhancement would be useful in Optimism's smart contracts. You may also want to point out the other projects that solved it better and which could serve as inspiration. 
- -### Your First Code Contribution - -The best place to begin contributing is by looking through the issues with the `good first issue` label. These are issues that are relatively easy to implement and are a great way to get familiar with the codebase. - -Optimism's smart contracts are written in Solidity and we use [foundry](https://github.com/foundry-rs/foundry) as our development framework. To get started, you'll need to install several dependencies: - -1. [just](https://github.com/casey/just) - Make sure to `just install` -1. [foundry](https://getfoundry.sh) - Foundry is built with [rust](https://www.rust-lang.org/tools/install), and this project uses a pinned version of foundry. Install the rust toolchain with `rustup`. - Make sure to install the version of foundry used by `ci-builder`, defined in the `versions.json` file in the root of this repo under the `foundry` key. Once you have `foundryup` installed, there is a helper to do this: `just install-foundry` -1. [golang](https://golang.org/doc/install) -1. [python](https://www.python.org/downloads/) - -Our [Style Guide](STYLE_GUIDE.md) contains information about the project structure, syntax preferences, naming conventions, and more. Please take a look at it before submitting a PR, and let us know if you spot inconsistencies! - -Once you've read the style guide and are ready to work on your PR, there are a plethora of useful `just` scripts to know about that will help you with development. -You can run `just -l` to list them all, some of the key ones are: - -1. `just build` Builds the smart contracts. -1. `just test` Runs the full `forge` test suite. -1. `just gas-snapshot` Generates the gas snapshot for the smart contracts. -1. `just semver-lock` Generates the semver lockfile. -1. `just snapshots` Generates the storage and ABI snapshots. -1. `just clean` Removes all build artifacts for `forge` and `go` compilations. -1. `just validate-spacers` Validates the positions of the storage slot spacers. -1. `just validate-deploy-configs` Validates the deployment configurations in `deploy-config` -1. `just lint` Runs the linter on the smart contracts and scripts. -1. `just pre-pr` Runs most checks, generators, and linters prior to a PR. For most PRs, this is sufficient to pass CI if everything is in order. -1. `just pre-pr-full` Runs all checks, generators, and linters prior to a PR. - -### Improving The Documentation - -Documentation improvements are more than welcome! If you see a typo or feel that a code comment describes something poorly or incorrectly, please submit a PR with a fix. - -### Deploying on Devnet - -To deploy the smart contracts on a local devnet, run `make devnet-up` in the monorepo root. For more information on the local devnet, see [dev-node](https://docs.optimism.io/chain/testing/dev-node). - -### Tools - -#### Validate Spacing - -In order to make sure that we don't accidentally overwrite storage slots, contract storage layouts are checked to make sure spacing is correct. - -This uses the `snapshots/storageLayout` directory to check contract spacing. Run `just validate-spacers` to check the spacing of all contracts. - -#### Gas Snapshots - -We use forge's `gas-snapshot` subcommand to produce a gas snapshot for tests in `Benchmark.t.sol`. CI will check that the gas snapshot has been updated properly when it runs, so make sure to run `just gas-snapshot`! - -#### Semver Locking - -Many of our smart contracts are semantically versioned. 
To make sure that changes are not made to a contract without deliberately bumping its version, we commit to the source code and the creation bytecode of its dependencies in a lockfile. Consult the [Style Guide](./STYLE_GUIDE.md#Versioning) for more information about how our contracts are versioned. - -#### Storage Snapshots - -Due to the many proxied contracts in Optimism's protocol, we automate tracking the diff to storage layouts of the contracts in the project. This is to ensure that we don't break a proxy by upgrading its implementation to a contract with a different storage layout. To generate the storage lockfile, run `just snapshots`. diff --git a/packages/contracts-bedrock/meta/STYLE_GUIDE.md b/packages/contracts-bedrock/meta/STYLE_GUIDE.md index 1d8b84818b9b..00af13d5a5b4 100644 --- a/packages/contracts-bedrock/meta/STYLE_GUIDE.md +++ b/packages/contracts-bedrock/meta/STYLE_GUIDE.md @@ -96,8 +96,8 @@ Spacers MUST be `private`. All contracts should be assumed to live behind proxies (except in certain special circumstances). This means that new contracts MUST be built under the assumption of upgradeability. -We use a minimal [`Proxy`](./src/universal/Proxy.sol) contract designed to be owned by a -corresponding [`ProxyAdmin`](./src/universal/ProxyAdmin.sol) which follow the interfaces +We use a minimal [`Proxy`](../src/universal/Proxy.sol) contract designed to be owned by a +corresponding [`ProxyAdmin`](../src/universal/ProxyAdmin.sol) which follow the interfaces of OpenZeppelin's `Proxy` and `ProxyAdmin` contracts, respectively. Unless explicitly discussed otherwise, you MUST include the following basic upgradeability diff --git a/packages/contracts-bedrock/scripts/L2Genesis.s.sol b/packages/contracts-bedrock/scripts/L2Genesis.s.sol index 0f681185a551..299a817ebe0b 100644 --- a/packages/contracts-bedrock/scripts/L2Genesis.s.sol +++ b/packages/contracts-bedrock/scripts/L2Genesis.s.sol @@ -18,19 +18,19 @@ import { Preinstalls } from "src/libraries/Preinstalls.sol"; import { Types } from "src/libraries/Types.sol"; // Interfaces -import { ISequencerFeeVault } from "src/L2/interfaces/ISequencerFeeVault.sol"; -import { IBaseFeeVault } from "src/L2/interfaces/IBaseFeeVault.sol"; -import { IL1FeeVault } from "src/L2/interfaces/IL1FeeVault.sol"; -import { IOptimismMintableERC721Factory } from "src/universal/interfaces/IOptimismMintableERC721Factory.sol"; -import { IGovernanceToken } from "src/governance/interfaces/IGovernanceToken.sol"; -import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; -import { IL2StandardBridge } from "src/L2/interfaces/IL2StandardBridge.sol"; -import { IL2ERC721Bridge } from "src/L2/interfaces/IL2ERC721Bridge.sol"; -import { IStandardBridge } from "src/universal/interfaces/IStandardBridge.sol"; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { IL2CrossDomainMessenger } from "src/L2/interfaces/IL2CrossDomainMessenger.sol"; -import { IGasPriceOracle } from "src/L2/interfaces/IGasPriceOracle.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { ISequencerFeeVault } from "interfaces/L2/ISequencerFeeVault.sol"; +import { IBaseFeeVault } from "interfaces/L2/IBaseFeeVault.sol"; +import { IL1FeeVault } from "interfaces/L2/IL1FeeVault.sol"; +import { IOptimismMintableERC721Factory } from "interfaces/universal/IOptimismMintableERC721Factory.sol"; +import { IGovernanceToken } from "interfaces/governance/IGovernanceToken.sol"; +import { 
IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; +import { IL2StandardBridge } from "interfaces/L2/IL2StandardBridge.sol"; +import { IL2ERC721Bridge } from "interfaces/L2/IL2ERC721Bridge.sol"; +import { IStandardBridge } from "interfaces/universal/IStandardBridge.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { IL2CrossDomainMessenger } from "interfaces/L2/IL2CrossDomainMessenger.sol"; +import { IGasPriceOracle } from "interfaces/L2/IGasPriceOracle.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; struct L1Dependencies { address payable l1CrossDomainMessengerProxy; diff --git a/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh b/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh index 70304f641690..08540879a96d 100755 --- a/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh +++ b/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh @@ -11,6 +11,22 @@ source "$SCRIPT_DIR/utils/semver-utils.sh" # Path to semver-lock.json. SEMVER_LOCK="snapshots/semver-lock.json" +# Define excluded contracts. +EXCLUDED_CONTRACTS=( + "src/vendor/asterisc/RISCV.sol" +) + +# Helper function to check if a contract is excluded. +is_excluded() { + local contract="$1" + for excluded in "${EXCLUDED_CONTRACTS[@]}"; do + if [[ "$contract" == "$excluded" ]]; then + return 0 + fi + done + return 1 +} + # Create a temporary directory. temp_dir=$(mktemp -d) trap 'rm -rf "$temp_dir"' EXIT @@ -49,6 +65,11 @@ has_errors=false # Check each changed contract for a semver version change. for contract in $changed_contracts; do + # Skip excluded contracts. + if is_excluded "$contract"; then + continue + fi + # Check if the contract file exists. if [ ! -f "$contract" ]; then echo "❌ Error: Contract file $contract not found" diff --git a/packages/contracts-bedrock/scripts/checks/common/util.go b/packages/contracts-bedrock/scripts/checks/common/util.go new file mode 100644 index 000000000000..131dff3b8987 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/common/util.go @@ -0,0 +1,124 @@ +package common + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "sync/atomic" + + "github.com/bmatcuk/doublestar/v4" + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + "golang.org/x/sync/errgroup" +) + +type ErrorReporter struct { + hasErr atomic.Bool + outMtx sync.Mutex +} + +func NewErrorReporter() *ErrorReporter { + return &ErrorReporter{} +} + +func (e *ErrorReporter) Fail(msg string, args ...any) { + e.outMtx.Lock() + // Useful for suppressing error reporting in tests + if os.Getenv("SUPPRESS_ERROR_REPORTER") == "" { + _, _ = fmt.Fprintf(os.Stderr, "❌ "+msg+"\n", args...) 
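+ // Holding outMtx for the write keeps concurrent Fail calls from interleaving their stderr output.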
+ } + e.outMtx.Unlock() + e.hasErr.Store(true) +} + +func (e *ErrorReporter) HasError() bool { + return e.hasErr.Load() +} + +type FileProcessor func(path string) []error + +func ProcessFiles(files map[string]string, processor FileProcessor) error { + g := errgroup.Group{} + g.SetLimit(runtime.NumCPU()) + + reporter := NewErrorReporter() + for name, path := range files { + name, path := name, path // Capture loop variables + g.Go(func() error { + if errs := processor(path); len(errs) > 0 { + for _, err := range errs { + reporter.Fail("%s: %v", name, err) + } + } + return nil + }) + } + + err := g.Wait() + if err != nil { + return fmt.Errorf("processing failed: %w", err) + } + if reporter.HasError() { + return fmt.Errorf("processing failed") + } + return nil +} + +func ProcessFilesGlob(includes, excludes []string, processor FileProcessor) error { + files, err := FindFiles(includes, excludes) + if err != nil { + return err + } + return ProcessFiles(files, processor) +} + +func FindFiles(includes, excludes []string) (map[string]string, error) { + included := make(map[string]string) + excluded := make(map[string]struct{}) + + // Get all included files + for _, pattern := range includes { + matches, err := doublestar.Glob(os.DirFS("."), pattern) + if err != nil { + return nil, fmt.Errorf("glob pattern error: %w", err) + } + for _, match := range matches { + name := filepath.Base(match) + included[name] = match + } + } + + // Get all excluded files + for _, pattern := range excludes { + matches, err := doublestar.Glob(os.DirFS("."), pattern) + if err != nil { + return nil, fmt.Errorf("glob pattern error: %w", err) + } + for _, match := range matches { + excluded[filepath.Base(match)] = struct{}{} + } + } + + // Remove excluded files from result + for name := range excluded { + delete(included, name) + } + + return included, nil +} + +func ReadForgeArtifact(path string) (*solc.ForgeArtifact, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read artifact: %w", err) + } + + var artifact solc.ForgeArtifact + if err := json.Unmarshal(data, &artifact); err != nil { + return nil, fmt.Errorf("failed to parse artifact: %w", err) + } + + return &artifact, nil +} diff --git a/packages/contracts-bedrock/scripts/checks/common/util_test.go b/packages/contracts-bedrock/scripts/checks/common/util_test.go new file mode 100644 index 000000000000..4defc1c70454 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/common/util_test.go @@ -0,0 +1,180 @@ +package common + +import ( + "os" + "path/filepath" + "testing" +) + +func TestErrorReporter(t *testing.T) { + os.Setenv("SUPPRESS_ERROR_REPORTER", "1") + defer os.Unsetenv("SUPPRESS_ERROR_REPORTER") + + reporter := NewErrorReporter() + + if reporter.HasError() { + t.Error("new reporter should not have errors") + } + + reporter.Fail("test error") + + if !reporter.HasError() { + t.Error("reporter should have error after Fail") + } +} + +func TestProcessFiles(t *testing.T) { + os.Setenv("SUPPRESS_ERROR_REPORTER", "1") + defer os.Unsetenv("SUPPRESS_ERROR_REPORTER") + + files := map[string]string{ + "file1": "path1", + "file2": "path2", + } + + // Test successful processing + err := ProcessFiles(files, func(path string) []error { + return nil + }) + if err != nil { + t.Errorf("expected no error, got %v", err) + } + + // Test error handling + err = ProcessFiles(files, func(path string) []error { + var errors []error + errors = append(errors, os.ErrNotExist) + return errors + }) + if err == nil { + t.Error("expected 
error, got nil") + } +} + +func TestProcessFilesGlob(t *testing.T) { + os.Setenv("SUPPRESS_ERROR_REPORTER", "1") + defer os.Unsetenv("SUPPRESS_ERROR_REPORTER") + + // Create test directory structure + tmpDir := t.TempDir() + if err := os.Chdir(tmpDir); err != nil { + t.Fatal(err) + } + + // Create test files + files := map[string]string{ + "test1.txt": "content1", + "test2.txt": "content2", + "skip.txt": "content3", + } + + for name, content := range files { + if err := os.WriteFile(name, []byte(content), 0644); err != nil { + t.Fatal(err) + } + } + + // Test processing with includes and excludes + includes := []string{"*.txt"} + excludes := []string{"skip.txt"} + + processedFiles := make(map[string]bool) + err := ProcessFilesGlob(includes, excludes, func(path string) []error { + processedFiles[filepath.Base(path)] = true + return nil + }) + + if err != nil { + t.Errorf("ProcessFiles failed: %v", err) + } + + // Verify results + if len(processedFiles) != 2 { + t.Errorf("expected 2 processed files, got %d", len(processedFiles)) + } + if !processedFiles["test1.txt"] { + t.Error("expected to process test1.txt") + } + if !processedFiles["test2.txt"] { + t.Error("expected to process test2.txt") + } + if processedFiles["skip.txt"] { + t.Error("skip.txt should have been excluded") + } +} + +func TestFindFiles(t *testing.T) { + // Create test directory structure + tmpDir := t.TempDir() + if err := os.Chdir(tmpDir); err != nil { + t.Fatal(err) + } + + // Create test files + files := map[string]string{ + "test1.txt": "content1", + "test2.txt": "content2", + "skip.txt": "content3", + } + + for name, content := range files { + if err := os.WriteFile(name, []byte(content), 0644); err != nil { + t.Fatal(err) + } + } + + // Test finding files + includes := []string{"*.txt"} + excludes := []string{"skip.txt"} + + found, err := FindFiles(includes, excludes) + if err != nil { + t.Fatalf("FindFiles failed: %v", err) + } + + // Verify results + if len(found) != 2 { + t.Errorf("expected 2 files, got %d", len(found)) + } + if _, exists := found["test1.txt"]; !exists { + t.Error("expected to find test1.txt") + } + if _, exists := found["test2.txt"]; !exists { + t.Error("expected to find test2.txt") + } + if _, exists := found["skip.txt"]; exists { + t.Error("skip.txt should have been excluded") + } +} + +func TestReadForgeArtifact(t *testing.T) { + // Create a temporary test artifact + tmpDir := t.TempDir() + artifactContent := `{ + "abi": [], + "bytecode": { + "object": "0x123" + }, + "deployedBytecode": { + "object": "0x456" + } + }` + tmpFile := filepath.Join(tmpDir, "Test.json") + if err := os.WriteFile(tmpFile, []byte(artifactContent), 0644); err != nil { + t.Fatal(err) + } + + // Test processing + artifact, err := ReadForgeArtifact(tmpFile) + if err != nil { + t.Fatalf("ReadForgeArtifact failed: %v", err) + } + + // Verify results + if artifact.Bytecode.Object != "0x123" { + t.Errorf("expected bytecode '0x123', got %q", artifact.Bytecode.Object) + } + if artifact.DeployedBytecode.Object != "0x456" { + t.Errorf("expected deployed bytecode '0x456', got %q", artifact.DeployedBytecode.Object) + } +} diff --git a/packages/contracts-bedrock/scripts/checks/spacers/main.go b/packages/contracts-bedrock/scripts/checks/spacers/main.go index 3360bda74d39..daf617defb93 100644 --- a/packages/contracts-bedrock/scripts/checks/spacers/main.go +++ b/packages/contracts-bedrock/scripts/checks/spacers/main.go @@ -1,175 +1,123 @@ package main import ( - "encoding/json" "fmt" "os" - "path/filepath" "regexp" "strconv" "strings" 
-) - -// directoryPath is the path to the artifacts directory. -// It can be configured as the first argument to the script or -// defaults to the forge-artifacts directory. -var directoryPath string - -func init() { - if len(os.Args) > 1 { - directoryPath = os.Args[1] - } else { - currentDir, _ := os.Getwd() - directoryPath = filepath.Join(currentDir, "forge-artifacts") - } -} - -// skipped returns true if the contract should be skipped when inspecting its storage layout. -func skipped(contractName string) bool { - return strings.Contains(contractName, "CrossDomainMessengerLegacySpacer") -} - -// variableInfo represents the parsed variable information. -type variableInfo struct { - name string - slot int - offset int - length int -} -// parseVariableInfo parses out variable info from the variable structure in standard compiler json output. -func parseVariableInfo(variable map[string]interface{}) (variableInfo, error) { - var info variableInfo - var err error + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/scripts/checks/common" +) - info.name = variable["label"].(string) - info.slot, err = strconv.Atoi(variable["slot"].(string)) - if err != nil { - return info, err +func parseVariableLength(variableType string, types map[string]solc.StorageLayoutType) (int, error) { + if t, exists := types[variableType]; exists { + return int(t.NumberOfBytes), nil } - info.offset = int(variable["offset"].(float64)) - variableType := variable["type"].(string) if strings.HasPrefix(variableType, "t_mapping") { - info.length = 32 + return 32, nil } else if strings.HasPrefix(variableType, "t_uint") { re := regexp.MustCompile(`uint(\d+)`) matches := re.FindStringSubmatch(variableType) if len(matches) > 1 { bitSize, _ := strconv.Atoi(matches[1]) - info.length = bitSize / 8 + return bitSize / 8, nil } } else if strings.HasPrefix(variableType, "t_bytes_") { - info.length = 32 + return 32, nil } else if strings.HasPrefix(variableType, "t_bytes") { re := regexp.MustCompile(`bytes(\d+)`) matches := re.FindStringSubmatch(variableType) if len(matches) > 1 { - info.length, _ = strconv.Atoi(matches[1]) + return strconv.Atoi(matches[1]) } } else if strings.HasPrefix(variableType, "t_address") { - info.length = 20 + return 20, nil } else if strings.HasPrefix(variableType, "t_bool") { - info.length = 1 + return 1, nil } else if strings.HasPrefix(variableType, "t_array") { re := regexp.MustCompile(`^t_array\((\w+)\)(\d+)`) matches := re.FindStringSubmatch(variableType) if len(matches) > 2 { innerType := matches[1] size, _ := strconv.Atoi(matches[2]) - innerInfo, err := parseVariableInfo(map[string]interface{}{ - "label": variable["label"], - "offset": variable["offset"], - "slot": variable["slot"], - "type": innerType, - }) + length, err := parseVariableLength(innerType, types) if err != nil { - return info, err + return 0, err } - info.length = innerInfo.length * size + return length * size, nil } - } else { - return info, fmt.Errorf("%s: unsupported type %s, add it to the script", info.name, variableType) } - return info, nil + return 0, fmt.Errorf("unsupported type %s, add it to the script", variableType) } -func main() { - err := filepath.Walk(directoryPath, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() || strings.Contains(path, "t.sol") { - return nil - } - - raw, err := os.ReadFile(path) - if err != nil { - return err - } - - var artifact map[string]interface{} - err = 
json.Unmarshal(raw, &artifact) - if err != nil { - return err - } - - storageLayout, ok := artifact["storageLayout"].(map[string]interface{}) - if !ok { - return nil - } +func validateSpacer(variable solc.StorageLayoutEntry, types map[string]solc.StorageLayoutType) []error { + var errors []error - storage, ok := storageLayout["storage"].([]interface{}) - if !ok { - return nil - } + parts := strings.Split(variable.Label, "_") + if len(parts) != 4 { + return []error{fmt.Errorf("invalid spacer name format: %s", variable.Label)} + } - for _, v := range storage { - variable := v.(map[string]interface{}) - fqn := variable["contract"].(string) + expectedSlot, _ := strconv.Atoi(parts[1]) + expectedOffset, _ := strconv.Atoi(parts[2]) + expectedLength, _ := strconv.Atoi(parts[3]) - if skipped(fqn) { - continue - } + actualLength, err := parseVariableLength(variable.Type, types) + if err != nil { + return []error{err} + } - label := variable["label"].(string) - if strings.HasPrefix(label, "spacer_") { - parts := strings.Split(label, "_") - if len(parts) != 4 { - return fmt.Errorf("invalid spacer name format: %s", label) - } + if int(variable.Slot) != expectedSlot { + errors = append(errors, fmt.Errorf("%s %s is in slot %d but should be in %d", + variable.Contract, variable.Label, variable.Slot, expectedSlot)) + } - slot, _ := strconv.Atoi(parts[1]) - offset, _ := strconv.Atoi(parts[2]) - length, _ := strconv.Atoi(parts[3]) + if int(variable.Offset) != expectedOffset { + errors = append(errors, fmt.Errorf("%s %s is at offset %d but should be at %d", + variable.Contract, variable.Label, variable.Offset, expectedOffset)) + } - variableInfo, err := parseVariableInfo(variable) - if err != nil { - return err - } + if actualLength != expectedLength { + errors = append(errors, fmt.Errorf("%s %s is %d bytes long but should be %d", + variable.Contract, variable.Label, actualLength, expectedLength)) + } - if slot != variableInfo.slot { - return fmt.Errorf("%s %s is in slot %d but should be in %d", fqn, label, variableInfo.slot, slot) - } + return errors +} - if offset != variableInfo.offset { - return fmt.Errorf("%s %s is at offset %d but should be at %d", fqn, label, variableInfo.offset, offset) - } +func processFile(path string) []error { + artifact, err := common.ReadForgeArtifact(path) + if err != nil { + return []error{err} + } - if length != variableInfo.length { - return fmt.Errorf("%s %s is %d bytes long but should be %d", fqn, label, variableInfo.length, length) - } + if artifact.StorageLayout == nil { + return nil + } - fmt.Printf("%s.%s is valid\n", fqn, label) + var errors []error + for _, variable := range artifact.StorageLayout.Storage { + if strings.HasPrefix(variable.Label, "spacer_") { + if errs := validateSpacer(variable, artifact.StorageLayout.Types); len(errs) > 0 { + errors = append(errors, errs...) 
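+ // Keep collecting violations from the remaining spacers rather than bailing out on the first error.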
+ continue } } + } - return nil - }) + return errors +} - if err != nil { +func main() { + if err := common.ProcessFilesGlob( + []string{"forge-artifacts/**/*.json"}, + []string{"forge-artifacts/**/CrossDomainMessengerLegacySpacer{0,1}.json"}, + processFile, + ); err != nil { fmt.Printf("Error: %v\n", err) os.Exit(1) } diff --git a/packages/contracts-bedrock/scripts/checks/spacers/main_test.go b/packages/contracts-bedrock/scripts/checks/spacers/main_test.go new file mode 100644 index 000000000000..7548fa80d031 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/spacers/main_test.go @@ -0,0 +1,167 @@ +package main + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + "github.com/stretchr/testify/require" +) + +func Test_parseVariableLength(t *testing.T) { + tests := []struct { + name string + variableType string + types map[string]solc.StorageLayoutType + expected int + expectError bool + }{ + { + name: "uses type from map", + variableType: "t_custom", + types: map[string]solc.StorageLayoutType{ + "t_custom": {NumberOfBytes: 16}, + }, + expected: 16, + }, + { + name: "mapping type", + variableType: "t_mapping(address,uint256)", + expected: 32, + }, + { + name: "uint type", + variableType: "t_uint256", + expected: 32, + }, + { + name: "bytes_ type", + variableType: "t_bytes_storage", + expected: 32, + }, + { + name: "bytes type", + variableType: "t_bytes32", + expected: 32, + }, + { + name: "address type", + variableType: "t_address", + expected: 20, + }, + { + name: "bool type", + variableType: "t_bool", + expected: 1, + }, + { + name: "array type", + variableType: "t_array(t_uint256)2", + expected: 64, // 2 * 32 + }, + { + name: "unsupported type", + variableType: "t_unknown", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + length, err := parseVariableLength(tt.variableType, tt.types) + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expected, length) + } + }) + } +} + +func Test_validateSpacer(t *testing.T) { + tests := []struct { + name string + variable solc.StorageLayoutEntry + types map[string]solc.StorageLayoutType + expectedErrs int + errorContains string + }{ + { + name: "valid spacer", + variable: solc.StorageLayoutEntry{ + Contract: "TestContract", + Label: "spacer_1_2_32", + Slot: 1, + Offset: 2, + Type: "t_uint256", + }, + types: map[string]solc.StorageLayoutType{ + "t_uint256": {NumberOfBytes: 32}, + }, + expectedErrs: 0, + }, + { + name: "invalid name format", + variable: solc.StorageLayoutEntry{ + Label: "spacer_invalid", + }, + expectedErrs: 1, + errorContains: "invalid spacer name format", + }, + { + name: "wrong slot", + variable: solc.StorageLayoutEntry{ + Contract: "TestContract", + Label: "spacer_1_2_32", + Slot: 2, + Offset: 2, + Type: "t_uint256", + }, + types: map[string]solc.StorageLayoutType{ + "t_uint256": {NumberOfBytes: 32}, + }, + expectedErrs: 1, + errorContains: "is in slot", + }, + { + name: "wrong offset", + variable: solc.StorageLayoutEntry{ + Contract: "TestContract", + Label: "spacer_1_2_32", + Slot: 1, + Offset: 3, + Type: "t_uint256", + }, + types: map[string]solc.StorageLayoutType{ + "t_uint256": {NumberOfBytes: 32}, + }, + expectedErrs: 1, + errorContains: "is at offset", + }, + { + name: "wrong length", + variable: solc.StorageLayoutEntry{ + Contract: "TestContract", + Label: "spacer_1_2_32", + Slot: 1, + Offset: 2, + Type: "t_uint128", + }, + types: map[string]solc.StorageLayoutType{ + "t_uint128": 
{NumberOfBytes: 16}, + }, + expectedErrs: 1, + errorContains: "bytes long", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateSpacer(tt.variable, tt.types) + require.Len(t, errors, tt.expectedErrs) + if tt.errorContains != "" { + require.Contains(t, errors[0].Error(), tt.errorContains) + } + }) + } +} diff --git a/packages/contracts-bedrock/scripts/checks/test-names/main.go b/packages/contracts-bedrock/scripts/checks/test-names/main.go index 86550f211cba..84ead0d6fa0c 100644 --- a/packages/contracts-bedrock/scripts/checks/test-names/main.go +++ b/packages/contracts-bedrock/scripts/checks/test-names/main.go @@ -1,27 +1,87 @@ package main import ( - "encoding/json" "fmt" "os" - "os/exec" - "path/filepath" "strconv" "strings" "unicode" + + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/scripts/checks/common" ) -type Check func(parts []string) bool +func main() { + if err := common.ProcessFilesGlob( + []string{"forge-artifacts/**/*.json"}, + []string{}, + processFile, + ); err != nil { + fmt.Printf("error: %v\n", err) + os.Exit(1) + } +} + +func processFile(path string) []error { + artifact, err := common.ReadForgeArtifact(path) + if err != nil { + return []error{err} + } + + var errors []error + names := extractTestNames(artifact) + for _, name := range names { + if err = checkTestName(name); err != nil { + errors = append(errors, err) + } + } + + return errors +} + +func extractTestNames(artifact *solc.ForgeArtifact) []string { + isTest := false + for _, entry := range artifact.Abi.Methods { + if entry.Name == "IS_TEST" { + isTest = true + break + } + } + if !isTest { + return nil + } + + names := []string{} + for _, entry := range artifact.Abi.Methods { + if !strings.HasPrefix(entry.Name, "test") { + continue + } + names = append(names, entry.Name) + } + + return names +} + +type CheckFunc func(parts []string) bool type CheckInfo struct { - check Check error string + check CheckFunc } -var excludes = map[string]bool{} - -var checks = []CheckInfo{ - { +var checks = map[string]CheckInfo{ + "doubleUnderscores": { + error: "test names cannot have double underscores", + check: func(parts []string) bool { + for _, part := range parts { + if len(strings.TrimSpace(part)) == 0 { + return false + } + } + return true + }, + }, + "camelCase": { error: "test name parts should be in camelCase", check: func(parts []string) bool { for _, part := range parts { @@ -32,21 +92,24 @@ var checks = []CheckInfo{ return true }, }, - { + "partsCount": { error: "test names should have either 3 or 4 parts, each separated by underscores", check: func(parts []string) bool { return len(parts) == 3 || len(parts) == 4 }, }, - { - error: "test names should begin with \"test\", \"testFuzz\", or \"testDiff\"", + "prefix": { + error: "test names should begin with 'test', 'testFuzz', or 'testDiff'", check: func(parts []string) bool { - return parts[0] == "test" || parts[0] == "testFuzz" || parts[0] == "testDiff" + return len(parts) > 0 && (parts[0] == "test" || parts[0] == "testFuzz" || parts[0] == "testDiff") }, }, - { - error: "test names should end with either \"succeeds\", \"reverts\", \"fails\", \"works\" or \"benchmark[_num]\"", + "suffix": { + error: "test names should end with either 'succeeds', 'reverts', 'fails', 'works', or 'benchmark[_num]'", check: func(parts []string) bool { + if len(parts) == 0 { + return false + } last := parts[len(parts)-1] if last == "succeeds" || last == "reverts" || 
last == "fails" || last == "works" { return true @@ -58,113 +121,24 @@ var checks = []CheckInfo{ return last == "benchmark" }, }, - { + "failureParts": { error: "failure tests should have 4 parts, third part should indicate the reason for failure", check: func(parts []string) bool { + if len(parts) == 0 { + return false + } last := parts[len(parts)-1] return len(parts) == 4 || (last != "reverts" && last != "fails") }, }, } -func main() { - cmd := exec.Command("forge", "config", "--json") - output, err := cmd.Output() - if err != nil { - fmt.Printf("Error executing forge config: %v\n", err) - os.Exit(1) - } - - var config map[string]interface{} - err = json.Unmarshal(output, &config) - if err != nil { - fmt.Printf("Error parsing forge config: %v\n", err) - os.Exit(1) - } - - outDir, ok := config["out"].(string) - if !ok { - outDir = "out" - } - - fmt.Println("Success:") - var errors []string - - err = filepath.Walk(outDir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() { - return nil - } - - if excludes[strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))] { - return nil - } - - data, err := os.ReadFile(path) - if err != nil { - return err - } - - var artifact map[string]interface{} - err = json.Unmarshal(data, &artifact) - if err != nil { - return nil // Skip files that are not valid JSON - } - - abi, ok := artifact["abi"].([]interface{}) - if !ok { - return nil +func checkTestName(name string) error { + parts := strings.Split(name, "_") + for _, check := range checks { + if !check.check(parts) { + return fmt.Errorf("%s: %s", name, check.error) } - - isTest := false - for _, element := range abi { - if elem, ok := element.(map[string]interface{}); ok { - if elem["name"] == "IS_TEST" { - isTest = true - break - } - } - } - - if isTest { - success := true - for _, element := range abi { - if elem, ok := element.(map[string]interface{}); ok { - if elem["type"] == "function" { - name, ok := elem["name"].(string) - if !ok || !strings.HasPrefix(name, "test") { - continue - } - - parts := strings.Split(name, "_") - for _, check := range checks { - if !check.check(parts) { - errors = append(errors, fmt.Sprintf("%s#%s: %s", path, name, check.error)) - success = false - } - } - } - } - } - - if success { - fmt.Printf(" - %s\n", filepath.Base(path[:len(path)-len(filepath.Ext(path))])) - } - } - - return nil - }) - - if err != nil { - fmt.Printf("Error walking the path %q: %v\n", outDir, err) - os.Exit(1) - } - - if len(errors) > 0 { - fmt.Println(strings.Join(errors, "\n")) - os.Exit(1) } + return nil } diff --git a/packages/contracts-bedrock/scripts/checks/test-names/main_test.go b/packages/contracts-bedrock/scripts/checks/test-names/main_test.go new file mode 100644 index 000000000000..5a9a0fd1846e --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/test-names/main_test.go @@ -0,0 +1,283 @@ +package main + +import ( + "reflect" + "slices" + "testing" + + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + "github.com/ethereum/go-ethereum/accounts/abi" +) + +func TestCamelCaseCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid single part", []string{"test"}, true}, + {"valid multiple parts", []string{"test", "something", "succeeds"}, true}, + {"invalid uppercase", []string{"Test"}, false}, + {"invalid middle uppercase", []string{"test", "Something", "succeeds"}, false}, + {"empty parts", []string{}, true}, + } + + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + if got := checks["camelCase"].check(tt.parts); got != tt.expected { + t.Errorf("checkCamelCase error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestPartsCountCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"three parts", []string{"test", "something", "succeeds"}, true}, + {"four parts", []string{"test", "something", "reason", "fails"}, true}, + {"too few parts", []string{"test", "fails"}, false}, + {"too many parts", []string{"test", "a", "b", "c", "fails"}, false}, + {"empty parts", []string{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["partsCount"].check(tt.parts); got != tt.expected { + t.Errorf("checkPartsCount error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestPrefixCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid test", []string{"test", "something", "succeeds"}, true}, + {"valid testFuzz", []string{"testFuzz", "something", "succeeds"}, true}, + {"valid testDiff", []string{"testDiff", "something", "succeeds"}, true}, + {"invalid prefix", []string{"testing", "something", "succeeds"}, false}, + {"empty parts", []string{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["prefix"].check(tt.parts); got != tt.expected { + t.Errorf("checkPrefix error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestSuffixCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid succeeds", []string{"test", "something", "succeeds"}, true}, + {"valid reverts", []string{"test", "something", "reverts"}, true}, + {"valid fails", []string{"test", "something", "fails"}, true}, + {"valid works", []string{"test", "something", "works"}, true}, + {"valid benchmark", []string{"test", "something", "benchmark"}, true}, + {"valid benchmark_num", []string{"test", "something", "benchmark", "123"}, true}, + {"invalid suffix", []string{"test", "something", "invalid"}, false}, + {"invalid benchmark_text", []string{"test", "something", "benchmark", "abc"}, false}, + {"empty parts", []string{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["suffix"].check(tt.parts); got != tt.expected { + t.Errorf("checkSuffix error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestFailurePartsCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid failure with reason", []string{"test", "something", "reason", "fails"}, true}, + {"valid failure with reason", []string{"test", "something", "reason", "reverts"}, true}, + {"invalid failure without reason", []string{"test", "something", "fails"}, false}, + {"invalid failure without reason", []string{"test", "something", "reverts"}, false}, + {"valid non-failure with three parts", []string{"test", "something", "succeeds"}, true}, + {"empty parts", []string{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["failureParts"].check(tt.parts); got != tt.expected { + t.Errorf("checkFailureParts error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestCheckTestName(t *testing.T) { + tests := []struct { + name string + testName string + shouldSucceed bool + }{ + // Valid test names - Basic patterns + {"valid basic test 
succeeds", "test_something_succeeds", true}, + {"valid basic test fails with reason", "test_something_reason_fails", true}, + {"valid basic test reverts with reason", "test_something_reason_reverts", true}, + {"valid basic test works", "test_something_works", true}, + + // Valid test names - Fuzz variants + {"valid fuzz test succeeds", "testFuzz_something_succeeds", true}, + {"valid fuzz test fails with reason", "testFuzz_something_reason_fails", true}, + {"valid fuzz test reverts with reason", "testFuzz_something_reason_reverts", true}, + {"valid fuzz test works", "testFuzz_something_works", true}, + + // Valid test names - Diff variants + {"valid diff test succeeds", "testDiff_something_succeeds", true}, + {"valid diff test fails with reason", "testDiff_something_reason_fails", true}, + {"valid diff test reverts with reason", "testDiff_something_reason_reverts", true}, + {"valid diff test works", "testDiff_something_works", true}, + + // Valid test names - Benchmark variants + {"valid benchmark test", "test_something_benchmark", true}, + {"valid benchmark with number", "test_something_benchmark_123", true}, + {"valid benchmark with large number", "test_something_benchmark_999999", true}, + {"valid benchmark with zero", "test_something_benchmark_0", true}, + + // Valid test names - Complex middle parts + {"valid complex middle part", "test_complexOperation_succeeds", true}, + {"valid multiple word middle", "test_veryComplexOperation_succeeds", true}, + {"valid numbers in middle", "test_operation123_succeeds", true}, + {"valid special case", "test_specialCase_reason_fails", true}, + + // Invalid test names - Prefix issues + {"invalid empty string", "", false}, + {"invalid prefix Test", "Test_something_succeeds", false}, + {"invalid prefix testing", "testing_something_succeeds", false}, + {"invalid prefix testfuzz", "testfuzz_something_succeeds", false}, + {"invalid prefix testdiff", "testdiff_something_succeeds", false}, + {"invalid prefix TEST", "TEST_something_succeeds", false}, + + // Invalid test names - Suffix issues + {"invalid suffix succeed", "test_something_succeed", false}, + {"invalid suffix revert", "test_something_revert", false}, + {"invalid suffix fail", "test_something_fail", false}, + {"invalid suffix work", "test_something_work", false}, + {"invalid suffix benchmarks", "test_something_benchmarks", false}, + {"invalid benchmark suffix text", "test_something_benchmark_abc", false}, + {"invalid benchmark suffix special", "test_something_benchmark_123abc", false}, + + // Invalid test names - Case issues + {"invalid uppercase middle", "test_Something_succeeds", false}, + {"invalid multiple uppercase", "test_SomethingHere_succeeds", false}, + {"invalid all caps middle", "test_SOMETHING_succeeds", false}, + {"invalid mixed case suffix", "test_something_Succeeds", false}, + + // Invalid test names - Structure issues + {"invalid single part", "test", false}, + {"invalid two parts", "test_succeeds", false}, + {"invalid five parts", "test_this_that_those_succeeds", false}, + {"invalid six parts", "test_this_that_those_these_succeeds", false}, + {"invalid failure without reason", "test_something_fails", false}, + {"invalid revert without reason", "test_something_reverts", false}, + + // Invalid test names - Special cases + {"invalid empty parts", "test__succeeds", false}, + {"invalid multiple underscores", "test___succeeds", false}, + {"invalid trailing underscore", "test_something_succeeds_", false}, + {"invalid leading underscore", "_test_something_succeeds", false}, + {"invalid 
benchmark no number", "test_something_benchmark_", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := checkTestName(tt.testName) + if (err != nil) == tt.shouldSucceed { + t.Errorf("checkTestName(%q) error = %v, shouldSucceed %v", tt.testName, err, tt.shouldSucceed) + } + }) + } +} + +func TestExtractTestNames(t *testing.T) { + tests := []struct { + name string + artifact *solc.ForgeArtifact + want []string + }{ + { + name: "valid test contract", + artifact: &solc.ForgeArtifact{ + Abi: abi.ABI{ + Methods: map[string]abi.Method{ + "IS_TEST": {Name: "IS_TEST"}, + "test_something_succeeds": {Name: "test_something_succeeds"}, + "test_other_fails": {Name: "test_other_fails"}, + "not_a_test": {Name: "not_a_test"}, + "testFuzz_something_works": {Name: "testFuzz_something_works"}, + }, + }, + }, + want: []string{ + "test_something_succeeds", + "test_other_fails", + "testFuzz_something_works", + }, + }, + { + name: "non-test contract", + artifact: &solc.ForgeArtifact{ + Abi: abi.ABI{ + Methods: map[string]abi.Method{ + "test_something_succeeds": {Name: "test_something_succeeds"}, + "not_a_test": {Name: "not_a_test"}, + }, + }, + }, + want: nil, + }, + { + name: "empty contract", + artifact: &solc.ForgeArtifact{ + Abi: abi.ABI{ + Methods: map[string]abi.Method{}, + }, + }, + want: nil, + }, + { + name: "test contract with no test methods", + artifact: &solc.ForgeArtifact{ + Abi: abi.ABI{ + Methods: map[string]abi.Method{ + "IS_TEST": {Name: "IS_TEST"}, + "not_a_test": {Name: "not_a_test"}, + "another_method": {Name: "another_method"}, + }, + }, + }, + want: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := extractTestNames(tt.artifact) + slices.Sort(got) + slices.Sort(tt.want) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("extractTestNames() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/packages/contracts-bedrock/scripts/checks/unused-imports/main.go b/packages/contracts-bedrock/scripts/checks/unused-imports/main.go index ae4acb528b4d..df3ab4f4494d 100644 --- a/packages/contracts-bedrock/scripts/checks/unused-imports/main.go +++ b/packages/contracts-bedrock/scripts/checks/unused-imports/main.go @@ -2,101 +2,51 @@ package main import ( "bufio" - "errors" "fmt" "os" - "path/filepath" "regexp" - "runtime" "strings" - "sync" - "sync/atomic" + + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/scripts/checks/common" ) var importPattern = regexp.MustCompile(`import\s*{([^}]+)}`) var asPattern = regexp.MustCompile(`(\S+)\s+as\s+(\S+)`) func main() { - if err := run(); err != nil { - writeStderr("an error occurred: %v", err) + if err := common.ProcessFilesGlob( + []string{"src/**/*.sol", "scripts/**/*.sol", "test/**/*.sol"}, + []string{}, + processFile, + ); err != nil { + fmt.Printf("error: %v\n", err) os.Exit(1) } } -func writeStderr(msg string, args ...any) { - _, _ = fmt.Fprintf(os.Stderr, msg+"\n", args...) -} - -func run() error { - cwd, err := os.Getwd() - if err != nil { - return fmt.Errorf("failed to get current working directory: %w", err) - } - - var hasErr atomic.Bool - var outMtx sync.Mutex - fail := func(msg string, args ...any) { - outMtx.Lock() - writeStderr("❌ "+msg, args...) 
- outMtx.Unlock() - hasErr.Store(true) - } - - dirs := []string{"src", "scripts", "test"} - sem := make(chan struct{}, runtime.NumCPU()) - - for _, dir := range dirs { - dirPath := filepath.Join(cwd, dir) - if _, err := os.Stat(dirPath); errors.Is(err, os.ErrNotExist) { - continue - } - - err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() && strings.HasSuffix(info.Name(), ".sol") { - sem <- struct{}{} - go func() { - defer func() { <-sem }() - processFile(path, fail) - }() - } - return nil - }) - - if err != nil { - return fmt.Errorf("failed to walk directory %s: %w", dir, err) - } - } - - for i := 0; i < cap(sem); i++ { - sem <- struct{}{} - } - - if hasErr.Load() { - return errors.New("unused imports check failed, see logs above") - } - - return nil -} - -func processFile(filePath string, fail func(string, ...any)) { +func processFile(filePath string) []error { content, err := os.ReadFile(filePath) if err != nil { - fail("%s: failed to read file: %v", filePath, err) - return + return []error{fmt.Errorf("%s: failed to read file: %w", filePath, err)} } imports := findImports(string(content)) - unusedImports := checkUnusedImports(imports, string(content)) + var unusedImports []string + for _, imp := range imports { + if !isImportUsed(imp, string(content)) { + unusedImports = append(unusedImports, imp) + } + } if len(unusedImports) > 0 { - fail("File: %s\nUnused imports:", filePath) + var errors []error for _, unused := range unusedImports { - fail(" - %s", unused) + errors = append(errors, fmt.Errorf("%s", unused)) } + return errors } + + return nil } func findImports(content string) []string { @@ -106,31 +56,19 @@ func findImports(content string) []string { if len(match) > 1 { importList := strings.Split(match[1], ",") for _, imp := range importList { - imports = append(imports, strings.TrimSpace(imp)) + imp = strings.TrimSpace(imp) + if asMatch := asPattern.FindStringSubmatch(imp); len(asMatch) > 2 { + // Use the renamed identifier (after 'as') + imports = append(imports, strings.TrimSpace(asMatch[2])) + } else { + imports = append(imports, imp) + } } } } return imports } -func checkUnusedImports(imports []string, content string) []string { - var unusedImports []string - for _, imp := range imports { - searchTerm := imp - displayName := imp - - if match := asPattern.FindStringSubmatch(imp); len(match) > 2 { - searchTerm = match[2] - displayName = fmt.Sprintf("%s as %s", match[1], match[2]) - } - - if !isImportUsed(searchTerm, content) { - unusedImports = append(unusedImports, displayName) - } - } - return unusedImports -} - func isImportUsed(imp, content string) bool { scanner := bufio.NewScanner(strings.NewReader(content)) for scanner.Scan() { diff --git a/packages/contracts-bedrock/scripts/checks/unused-imports/main_test.go b/packages/contracts-bedrock/scripts/checks/unused-imports/main_test.go new file mode 100644 index 000000000000..7d03867a8a40 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/unused-imports/main_test.go @@ -0,0 +1,131 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_findImports(t *testing.T) { + tests := []struct { + name string + content string + expected []string + }{ + { + name: "finds single named import", + content: ` + pragma solidity ^0.8.0; + import { Contract } from "./Contract.sol"; + contract Test {} + `, + expected: []string{"Contract"}, + }, + { + name: "finds multiple named imports", + content: ` + pragma 
solidity ^0.8.0; + import { Contract1, Contract2 } from "./Contracts.sol"; + contract Test {} + `, + expected: []string{"Contract1", "Contract2"}, + }, + { + name: "handles import with as keyword", + content: ` + pragma solidity ^0.8.0; + import { Contract as Renamed } from "./Contract.sol"; + contract Test {} + `, + expected: []string{"Renamed"}, + }, + { + name: "handles multiple imports with as keyword", + content: ` + pragma solidity ^0.8.0; + import { Contract1 as C1, Contract2 as C2 } from "./Contracts.sol"; + contract Test {} + `, + expected: []string{"C1", "C2"}, + }, + { + name: "ignores regular imports", + content: ` + pragma solidity ^0.8.0; + import "./Contract.sol"; + contract Test {} + `, + expected: nil, + }, + { + name: "empty content", + content: "", + expected: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := findImports(tt.content) + require.Equal(t, tt.expected, result) + }) + } +} + +func Test_isImportUsed(t *testing.T) { + tests := []struct { + name string + importedName string + content string + expected bool + }{ + { + name: "import used in contract", + importedName: "UsedContract", + content: ` + contract Test { + UsedContract used; + } + `, + expected: true, + }, + { + name: "import used in inheritance", + importedName: "BaseContract", + content: ` + contract Test is BaseContract { + } + `, + expected: true, + }, + { + name: "import used in function", + importedName: "Utility", + content: ` + contract Test { + function test() { + Utility.doSomething(); + } + } + `, + expected: true, + }, + { + name: "import not used", + importedName: "UnusedContract", + content: ` + contract Test { + OtherContract other; + } + `, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := isImportUsed(tt.importedName, tt.content) + require.Equal(t, tt.expected, result) + }) + } +} diff --git a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol index 25d67be3828c..cecf2b942731 100644 --- a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol +++ b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol @@ -7,7 +7,7 @@ import { console2 as console } from "forge-std/console2.sol"; // Scripts import { DeployConfig } from "scripts/deploy/DeployConfig.s.sol"; -import { ISystemConfigInterop } from "src/L1/interfaces/ISystemConfigInterop.sol"; +import { ISystemConfigInterop } from "interfaces/L1/ISystemConfigInterop.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; @@ -15,21 +15,21 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { Types } from "scripts/libraries/Types.sol"; // Interfaces -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; -import { ProtocolVersion, IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; 
-import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; -import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IL2OutputOracle } from "interfaces/L1/IL2OutputOracle.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; +import { ProtocolVersion, IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; +import { IMIPS } from "interfaces/cannon/IMIPS.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; library ChainAssertions { diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 54cc1a23e172..425e5e5f39e4 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -25,7 +25,6 @@ import { } from "scripts/deploy/DeployImplementations.s.sol"; // Contracts -import { StorageSetter } from "src/universal/StorageSetter.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; // Libraries @@ -36,29 +35,23 @@ import { StorageSlot, ForgeArtifacts } from "scripts/libraries/ForgeArtifacts.so import { GameType, Claim, GameTypes, OutputRoot, Hash } from "src/dispute/lib/Types.sol"; // Interfaces -import { IProxy } from "src/universal/interfaces/IProxy.sol"; -import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IDataAvailabilityChallenge } from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; -import { ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; -import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IDelayedWETH } from 
"src/dispute/interfaces/IDelayedWETH.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; -import { IMIPS2 } from "src/cannon/interfaces/IMIPS2.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { IL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; -import { IResolvedDelegateProxy } from "src/legacy/interfaces/IResolvedDelegateProxy.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IL2OutputOracle } from "interfaces/L1/IL2OutputOracle.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IDataAvailabilityChallenge } from "interfaces/L1/IDataAvailabilityChallenge.sol"; +import { ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol"; +import { IBigStepper } from "interfaces/dispute/IBigStepper.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IMIPS } from "interfaces/cannon/IMIPS.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; /// @title Deploy /// @notice Script used to deploy a bedrock system. The entire system is deployed within the `run` function. @@ -71,20 +64,6 @@ import { IResolvedDelegateProxy } from "src/legacy/interfaces/IResolvedDelegateP contract Deploy is Deployer { using stdJson for string; - /// @notice FaultDisputeGameParams is a struct that contains the parameters necessary to call - /// the function _setFaultGameImplementation. This struct exists because the EVM needs - /// to finally adopt PUSHN and get rid of stack too deep once and for all. - /// Someday we will look back and laugh about stack too deep, today is not that day. - struct FaultDisputeGameParams { - IAnchorStateRegistry anchorStateRegistry; - IDelayedWETH weth; - GameType gameType; - Claim absolutePrestate; - IBigStepper faultVm; - uint256 maxGameDepth; - Duration maxClockDuration; - } - //////////////////////////////////////////////////////////////// // Modifiers // //////////////////////////////////////////////////////////////// @@ -104,18 +83,6 @@ contract Deploy is Deployer { } } - /// @notice Modifier that will only allow a function to be called on a public - /// testnet or devnet. - modifier onlyTestnetOrDevnet() { - uint256 chainid = block.chainid; - if ( - chainid == Chains.Goerli || chainid == Chains.Sepolia || chainid == Chains.LocalDevnet - || chainid == Chains.GethDevnet - ) { - _; - } - } - /// @notice Modifier that wraps a function with statediff recording. /// The returned AccountAccess[] array is then written to /// the `snapshots/state-diff/.json` output file. 
@@ -162,7 +129,7 @@ contract Deploy is Deployer { L1ERC721Bridge: getAddress("L1ERC721BridgeProxy"), ProtocolVersions: getAddress("ProtocolVersionsProxy"), SuperchainConfig: getAddress("SuperchainConfigProxy"), - OPContractsManager: getAddress("OPContractsManagerProxy") + OPContractsManager: getAddress("OPContractsManager") }); } @@ -187,28 +154,6 @@ contract Deploy is Deployer { }); } - //////////////////////////////////////////////////////////////// - // State Changing Helper Functions // - //////////////////////////////////////////////////////////////// - - /// @notice Transfer ownership of the ProxyAdmin contract to the final system owner - function transferProxyAdminOwnership() public broadcast { - // Get the ProxyAdmin contract. - IProxyAdmin proxyAdmin = IProxyAdmin(mustGetAddress("ProxyAdmin")); - - // Transfer ownership to the final system owner if necessary. - address owner = proxyAdmin.owner(); - address finalSystemOwner = cfg.finalSystemOwner(); - if (owner != finalSystemOwner) { - proxyAdmin.transferOwnership(finalSystemOwner); - console.log("ProxyAdmin ownership transferred to final system owner at: %s", finalSystemOwner); - } - - // Make sure the ProxyAdmin owner is set to the final system owner. - owner = proxyAdmin.owner(); - require(owner == finalSystemOwner, "Deploy: ProxyAdmin ownership not transferred to final system owner"); - } - //////////////////////////////////////////////////////////////// // SetUp and Run // //////////////////////////////////////////////////////////////// @@ -216,20 +161,13 @@ contract Deploy is Deployer { /// @notice Deploy all of the L1 contracts necessary for a full Superchain with a single Op Chain. function run() public { console.log("Deploying a fresh OP Stack including SuperchainConfig"); - _run(); + _run({ _needsSuperchain: true }); } /// @notice Deploy a new OP Chain using an existing SuperchainConfig and ProtocolVersions /// @param _superchainConfigProxy Address of the existing SuperchainConfig proxy /// @param _protocolVersionsProxy Address of the existing ProtocolVersions proxy - /// @param _includeDump Whether to include a state dump after deployment - function runWithSuperchain( - address payable _superchainConfigProxy, - address payable _protocolVersionsProxy, - bool _includeDump - ) - public - { + function runWithSuperchain(address payable _superchainConfigProxy, address payable _protocolVersionsProxy) public { require(_superchainConfigProxy != address(0), "Deploy: must specify address for superchain config proxy"); require(_protocolVersionsProxy != address(0), "Deploy: must specify address for protocol versions proxy"); @@ -245,31 +183,24 @@ contract Deploy is Deployer { save("ProtocolVersions", pvProxy.implementation()); save("ProtocolVersionsProxy", _protocolVersionsProxy); - _run(false); - - if (_includeDump) { - vm.dumpState(Config.stateDumpPath("")); - } + _run({ _needsSuperchain: false }); } + /// @notice Used for L1 alloc generation. function runWithStateDump() public { vm.chainId(cfg.l1ChainID()); - _run(); + _run({ _needsSuperchain: true }); vm.dumpState(Config.stateDumpPath("")); } /// @notice Deploy all L1 contracts and write the state diff to a file. + /// Used to generate kontrol tests. function runWithStateDiff() public stateDiff { - _run(); - } - - /// @notice Compatibility function for tests that override _run(). - function _run() internal virtual { - _run(true); + _run({ _needsSuperchain: true }); } /// @notice Internal function containing the deploy logic. 
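+ /// @param _needsSuperchain Whether the Superchain contracts (such as SuperchainConfig) still need to be deployed as part of this run.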
- function _run(bool _needsSuperchain) internal { + function _run(bool _needsSuperchain) internal virtual { console.log("start of L1 Deploy!"); // Set up the Superchain if needed. @@ -311,7 +242,7 @@ contract Deploy is Deployer { bytes32 typeHash = keccak256(bytes(cfg.daCommitmentType())); bytes32 keccakHash = keccak256(bytes("KeccakCommitment")); if (typeHash == keccakHash) { - setupOpAltDA(); + deployOpAltDA(); } } @@ -378,13 +309,13 @@ contract Deploy is Deployer { dii.set(dii.disputeGameFinalityDelaySeconds.selector, cfg.disputeGameFinalityDelaySeconds()); dii.set(dii.mipsVersion.selector, Config.useMultithreadedCannon() ? 2 : 1); string memory release = "dev"; - dii.set(dii.release.selector, release); + dii.set(dii.l1ContractsRelease.selector, release); dii.set( dii.standardVersionsToml.selector, string.concat(vm.projectRoot(), "/test/fixtures/standard-versions.toml") ); dii.set(dii.superchainConfigProxy.selector, mustGetAddress("SuperchainConfigProxy")); dii.set(dii.protocolVersionsProxy.selector, mustGetAddress("ProtocolVersionsProxy")); - dii.set(dii.opcmProxyOwner.selector, cfg.finalSystemOwner()); + dii.set(dii.salt.selector, _implSalt()); if (_isInterop) { di = DeployImplementations(new DeployImplementationsInterop()); @@ -409,8 +340,7 @@ contract Deploy is Deployer { save("DelayedWETH", address(dio.delayedWETHImpl())); save("PreimageOracle", address(dio.preimageOracleSingleton())); save("Mips", address(dio.mipsSingleton())); - save("OPContractsManagerProxy", address(dio.opcmProxy())); - save("OPContractsManager", address(dio.opcmImpl())); + save("OPContractsManager", address(dio.opcm())); Types.ContractSet memory contracts = _impls(); ChainAssertions.checkL1CrossDomainMessenger({ _contracts: contracts, _vm: vm, _isProxy: false }); @@ -446,7 +376,7 @@ contract Deploy is Deployer { // Ensure that the requisite contracts are deployed address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - OPContractsManager opcm = OPContractsManager(mustGetAddress("OPContractsManagerProxy")); + OPContractsManager opcm = OPContractsManager(mustGetAddress("OPContractsManager")); OPContractsManager.DeployInput memory deployInput = getDeployInput(); OPContractsManager.DeployOutput memory deployOutput = opcm.deploy(deployInput); @@ -486,9 +416,9 @@ contract Deploy is Deployer { _data: abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) }); - setAlphabetFaultGameImplementation({ _allowUpgrade: false }); - setFastFaultGameImplementation({ _allowUpgrade: false }); - setCannonFaultGameImplementation({ _allowUpgrade: false }); + setAlphabetFaultGameImplementation(); + setFastFaultGameImplementation(); + setCannonFaultGameImplementation(); transferDisputeGameFactoryOwnership(); transferDelayedWETHOwnership(); @@ -496,109 +426,17 @@ contract Deploy is Deployer { } /// @notice Add AltDA setup to the OP chain - function setupOpAltDA() public { + function deployOpAltDA() public { console.log("Deploying OP AltDA"); deployDataAvailabilityChallengeProxy(); deployDataAvailabilityChallenge(); initializeDataAvailabilityChallenge(); } - //////////////////////////////////////////////////////////////// - // Non-Proxied Deployment Functions // - //////////////////////////////////////////////////////////////// - - /// @notice Deploy the AddressManager - function deployAddressManager() public broadcast returns (address addr_) { - // Use create instead of create2 because we need the owner to be set to msg.sender but - // forge will automatically use the create2 
factory which messes up the sender. - IAddressManager manager = IAddressManager( - DeployUtils.create1AndSave({ - _save: this, - _name: "AddressManager", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IAddressManager.__constructor__, ())) - }) - ); - require(manager.owner() == msg.sender); - addr_ = address(manager); - } - - /// @notice Deploys the ProxyAdmin contract. Should NOT be used for the Superchain. - function deployProxyAdmin() public broadcast returns (address addr_) { - // Deploy the ProxyAdmin contract. - IProxyAdmin admin = IProxyAdmin( - DeployUtils.create2AndSave({ - _save: this, - _salt: _implSalt(), - _name: "ProxyAdmin", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxyAdmin.__constructor__, (msg.sender))) - }) - ); - - // Make sure the owner was set to the deployer. - require(admin.owner() == msg.sender); - - // Set the address manager if it is not already set. - IAddressManager addressManager = IAddressManager(mustGetAddress("AddressManager")); - if (admin.addressManager() != addressManager) { - admin.setAddressManager(addressManager); - } - - // Make sure the address manager is set properly. - require(admin.addressManager() == addressManager); - - // Return the address of the deployed contract. - addr_ = address(admin); - } - - /// @notice Deploy the StorageSetter contract, used for upgrades. - function deployStorageSetter() public broadcast returns (address addr_) { - console.log("Deploying StorageSetter"); - StorageSetter setter = new StorageSetter{ salt: _implSalt() }(); - console.log("StorageSetter deployed at: %s", address(setter)); - string memory version = setter.version(); - console.log("StorageSetter version: %s", version); - addr_ = address(setter); - } - //////////////////////////////////////////////////////////////// // Proxy Deployment Functions // //////////////////////////////////////////////////////////////// - /// @notice Deploy the L1StandardBridgeProxy using a ChugSplashProxy - function deployL1StandardBridgeProxy() public broadcast returns (address addr_) { - address proxyAdmin = mustGetAddress("ProxyAdmin"); - IL1ChugSplashProxy proxy = IL1ChugSplashProxy( - DeployUtils.create2AndSave({ - _save: this, - _salt: _implSalt(), - _name: "L1ChugSplashProxy", - _nick: "L1StandardBridgeProxy", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1ChugSplashProxy.__constructor__, (proxyAdmin))) - }) - ); - require(EIP1967Helper.getAdmin(address(proxy)) == proxyAdmin); - addr_ = address(proxy); - } - - /// @notice Deploy the L1CrossDomainMessengerProxy using a ResolvedDelegateProxy - function deployL1CrossDomainMessengerProxy() public broadcast returns (address addr_) { - IResolvedDelegateProxy proxy = IResolvedDelegateProxy( - DeployUtils.create2AndSave({ - _save: this, - _salt: _implSalt(), - _name: "ResolvedDelegateProxy", - _nick: "L1CrossDomainMessengerProxy", - _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IResolvedDelegateProxy.__constructor__, - (IAddressManager(mustGetAddress("AddressManager")), "OVM_L1CrossDomainMessenger") - ) - ) - }) - ); - addr_ = address(proxy); - } - /// @notice Deploys an ERC1967Proxy contract with the ProxyAdmin as the owner. /// @param _name The name of the proxy contract to be deployed. /// @return addr_ The address of the deployed proxy contract. @@ -697,95 +535,6 @@ contract Deploy is Deployer { addr_ = address(oracle); } - /// @notice Deploy Mips VM. 
Deploys either MIPS or MIPS2 depending on the environment - function deployMips() public broadcast returns (address addr_) { - addr_ = DeployUtils.create2AndSave({ - _save: this, - _salt: _implSalt(), - _name: Config.useMultithreadedCannon() ? "MIPS2" : "MIPS", - _args: DeployUtils.encodeConstructor( - abi.encodeCall(IMIPS2.__constructor__, (IPreimageOracle(mustGetAddress("PreimageOracle")))) - ) - }); - save("Mips", address(addr_)); - } - - /// @notice Deploy the AnchorStateRegistry - function deployAnchorStateRegistry() public broadcast returns (address addr_) { - IAnchorStateRegistry anchorStateRegistry = IAnchorStateRegistry( - DeployUtils.create2AndSave({ - _save: this, - _salt: _implSalt(), - _name: "AnchorStateRegistry", - _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IAnchorStateRegistry.__constructor__, - (IDisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy"))) - ) - ) - }) - ); - - addr_ = address(anchorStateRegistry); - } - - /// @notice Deploy the L1StandardBridge - function deployL1StandardBridge() public broadcast returns (address addr_) { - IL1StandardBridge bridge = IL1StandardBridge( - DeployUtils.create2AndSave({ - _save: this, - _salt: _implSalt(), - _name: "L1StandardBridge", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1StandardBridge.__constructor__, ())) - }) - ); - - // Override the `L1StandardBridge` contract to the deployed implementation. This is necessary - // to check the `L1StandardBridge` implementation alongside dependent contracts, which - // are always proxies. - Types.ContractSet memory contracts = _proxies(); - contracts.L1StandardBridge = address(bridge); - ChainAssertions.checkL1StandardBridge({ _contracts: contracts, _isProxy: false }); - - addr_ = address(bridge); - } - - /// @notice Deploy the L1ERC721Bridge - function deployL1ERC721Bridge() public broadcast returns (address addr_) { - IL1ERC721Bridge bridge = IL1ERC721Bridge( - DeployUtils.create2AndSave({ - _save: this, - _salt: _implSalt(), - _name: "L1ERC721Bridge", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1ERC721Bridge.__constructor__, ())) - }) - ); - - // Override the `L1ERC721Bridge` contract to the deployed implementation. This is necessary - // to check the `L1ERC721Bridge` implementation alongside dependent contracts, which - // are always proxies. 
- Types.ContractSet memory contracts = _proxies(); - contracts.L1ERC721Bridge = address(bridge); - - ChainAssertions.checkL1ERC721Bridge({ _contracts: contracts, _isProxy: false }); - - addr_ = address(bridge); - } - - /// @notice Transfer ownership of the address manager to the ProxyAdmin - function transferAddressManagerOwnership() public broadcast { - console.log("Transferring AddressManager ownership to IProxyAdmin"); - IAddressManager addressManager = IAddressManager(mustGetAddress("AddressManager")); - address owner = addressManager.owner(); - address proxyAdmin = mustGetAddress("ProxyAdmin"); - if (owner != proxyAdmin) { - addressManager.transferOwnership(proxyAdmin); - console.log("AddressManager ownership transferred to %s", proxyAdmin); - } - - require(addressManager.owner() == proxyAdmin); - } - /// @notice Deploy the DataAvailabilityChallenge function deployDataAvailabilityChallenge() public broadcast returns (address addr_) { IDataAvailabilityChallenge dac = IDataAvailabilityChallenge( @@ -918,6 +667,61 @@ contract Deploy is Deployer { ChainAssertions.checkOptimismPortal({ _contracts: _proxies(), _cfg: cfg, _isProxy: true }); } + /// @notice Initialize the DataAvailabilityChallenge + function initializeDataAvailabilityChallenge() public broadcast { + console.log("Upgrading and initializing DataAvailabilityChallenge proxy"); + address dataAvailabilityChallengeProxy = mustGetAddress("DataAvailabilityChallengeProxy"); + address dataAvailabilityChallenge = mustGetAddress("DataAvailabilityChallenge"); + + address finalSystemOwner = cfg.finalSystemOwner(); + uint256 daChallengeWindow = cfg.daChallengeWindow(); + uint256 daResolveWindow = cfg.daResolveWindow(); + uint256 daBondSize = cfg.daBondSize(); + uint256 daResolverRefundPercentage = cfg.daResolverRefundPercentage(); + + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ + _proxy: payable(dataAvailabilityChallengeProxy), + _implementation: dataAvailabilityChallenge, + _data: abi.encodeCall( + IDataAvailabilityChallenge.initialize, + (finalSystemOwner, daChallengeWindow, daResolveWindow, daBondSize, daResolverRefundPercentage) + ) + }); + + IDataAvailabilityChallenge dac = IDataAvailabilityChallenge(payable(dataAvailabilityChallengeProxy)); + string memory version = dac.version(); + console.log("DataAvailabilityChallenge version: %s", version); + + require(dac.owner() == finalSystemOwner); + require(dac.challengeWindow() == daChallengeWindow); + require(dac.resolveWindow() == daResolveWindow); + require(dac.bondSize() == daBondSize); + require(dac.resolverRefundPercentage() == daResolverRefundPercentage); + } + + //////////////////////////////////////////////////////////////// + // Ownership Transfer Helper Functions // + //////////////////////////////////////////////////////////////// + + /// @notice Transfer ownership of the ProxyAdmin contract to the final system owner + function transferProxyAdminOwnership() public broadcast { + // Get the ProxyAdmin contract. + IProxyAdmin proxyAdmin = IProxyAdmin(mustGetAddress("ProxyAdmin")); + + // Transfer ownership to the final system owner if necessary. + address owner = proxyAdmin.owner(); + address finalSystemOwner = cfg.finalSystemOwner(); + if (owner != finalSystemOwner) { + proxyAdmin.transferOwnership(finalSystemOwner); + console.log("ProxyAdmin ownership transferred to final system owner at: %s", finalSystemOwner); + } + + // Make sure the ProxyAdmin owner is set to the final system owner. 
+ owner = proxyAdmin.owner(); + require(owner == finalSystemOwner, "Deploy: ProxyAdmin ownership not transferred to final system owner"); + } + /// @notice Transfer ownership of the DisputeGameFactory contract to the final system owner function transferDisputeGameFactoryOwnership() public broadcast { console.log("Transferring DisputeGameFactory ownership to Safe"); @@ -974,6 +778,10 @@ contract Deploy is Deployer { }); } + /////////////////////////////////////////////////////////// + // Proofs setup helper functions // + /////////////////////////////////////////////////////////// + /// @notice Load the appropriate mips absolute prestate for devnets depending on config environment. function loadMipsAbsolutePrestate() internal returns (Claim mipsAbsolutePrestate_) { if (block.chainid == Chains.LocalDevnet || block.chainid == Chains.GethDevnet) { @@ -1021,13 +829,13 @@ contract Deploy is Deployer { mipsAbsolutePrestate_ = Claim.wrap(abi.decode(bytes(Process.bash(string.concat("cat ", filePath, " | jq -r .pre"))), (bytes32))); console.log( - "[MT-Cannon Dispute Game] Using devnet MIPS2 Absolute prestate: %s", + "[MT-Cannon Dispute Game] Using devnet MIPS64 Absolute prestate: %s", vm.toString(Claim.unwrap(mipsAbsolutePrestate_)) ); } /// @notice Sets the implementation for the `CANNON` game type in the `DisputeGameFactory` - function setCannonFaultGameImplementation(bool _allowUpgrade) public broadcast { + function setCannonFaultGameImplementation() public broadcast { console.log("Setting Cannon FaultDisputeGame implementation"); IDisputeGameFactory factory = IDisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy")); IDelayedWETH weth = IDelayedWETH(mustGetAddress("DelayedWETHProxy")); @@ -1035,43 +843,23 @@ contract Deploy is Deployer { // Set the Cannon FaultDisputeGame implementation in the factory. _setFaultGameImplementation({ _factory: factory, - _allowUpgrade: _allowUpgrade, - _params: FaultDisputeGameParams({ - anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), - weth: weth, + _params: IFaultDisputeGame.GameConstructorParams({ gameType: GameTypes.CANNON, absolutePrestate: loadMipsAbsolutePrestate(), - faultVm: IBigStepper(mustGetAddress("Mips")), maxGameDepth: cfg.faultGameMaxDepth(), - maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())) - }) - }); - } - - /// @notice Sets the implementation for the `PERMISSIONED_CANNON` game type in the `DisputeGameFactory` - function setPermissionedCannonFaultGameImplementation(bool _allowUpgrade) public broadcast { - console.log("Setting Cannon PermissionedDisputeGame implementation"); - IDisputeGameFactory factory = IDisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy")); - IDelayedWETH weth = IDelayedWETH(mustGetAddress("PermissionedDelayedWETHProxy")); - - // Deploys and sets the Permissioned FaultDisputeGame implementation in the factory.
- _setFaultGameImplementation({ - _factory: factory, - _allowUpgrade: _allowUpgrade, - _params: FaultDisputeGameParams({ - anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), + splitDepth: cfg.faultGameSplitDepth(), + clockExtension: Duration.wrap(uint64(cfg.faultGameClockExtension())), + maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())), + vm: IBigStepper(mustGetAddress("Mips")), weth: weth, - gameType: GameTypes.PERMISSIONED_CANNON, - absolutePrestate: loadMipsAbsolutePrestate(), - faultVm: IBigStepper(mustGetAddress("Mips")), - maxGameDepth: cfg.faultGameMaxDepth(), - maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())) + anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), + l2ChainId: cfg.l2ChainID() }) }); } /// @notice Sets the implementation for the `ALPHABET` game type in the `DisputeGameFactory` - function setAlphabetFaultGameImplementation(bool _allowUpgrade) public onlyDevnet broadcast { + function setAlphabetFaultGameImplementation() public onlyDevnet broadcast { console.log("Setting Alphabet FaultDisputeGame implementation"); IDisputeGameFactory factory = IDisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy")); IDelayedWETH weth = IDelayedWETH(mustGetAddress("DelayedWETHProxy")); @@ -1079,22 +867,24 @@ contract Deploy is Deployer { Claim outputAbsolutePrestate = Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())); _setFaultGameImplementation({ _factory: factory, - _allowUpgrade: _allowUpgrade, - _params: FaultDisputeGameParams({ - anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), - weth: weth, + _params: IFaultDisputeGame.GameConstructorParams({ gameType: GameTypes.ALPHABET, absolutePrestate: outputAbsolutePrestate, - faultVm: IBigStepper(new AlphabetVM(outputAbsolutePrestate, IPreimageOracle(mustGetAddress("PreimageOracle")))), // The max depth for the alphabet trace is always 3. Add 1 because split depth is fully inclusive. 
maxGameDepth: cfg.faultGameSplitDepth() + 3 + 1, - maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())) + splitDepth: cfg.faultGameSplitDepth(), + clockExtension: Duration.wrap(uint64(cfg.faultGameClockExtension())), + maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())), + vm: IBigStepper(new AlphabetVM(outputAbsolutePrestate, IPreimageOracle(mustGetAddress("PreimageOracle")))), + weth: weth, + anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), + l2ChainId: cfg.l2ChainID() }) }); } /// @notice Sets the implementation for the `FAST` game type in the `DisputeGameFactory` - function setFastFaultGameImplementation(bool _allowUpgrade) public onlyDevnet broadcast { + function setFastFaultGameImplementation() public onlyDevnet broadcast { console.log("Setting Fast FaultDisputeGame implementation"); IDisputeGameFactory factory = IDisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy")); IDelayedWETH weth = IDelayedWETH(mustGetAddress("DelayedWETHProxy")); @@ -1113,29 +903,30 @@ contract Deploy is Deployer { ); _setFaultGameImplementation({ _factory: factory, - _allowUpgrade: _allowUpgrade, - _params: FaultDisputeGameParams({ - anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), - weth: weth, + _params: IFaultDisputeGame.GameConstructorParams({ gameType: GameTypes.FAST, absolutePrestate: outputAbsolutePrestate, - faultVm: IBigStepper(new AlphabetVM(outputAbsolutePrestate, fastOracle)), // The max depth for the alphabet trace is always 3. Add 1 because split depth is fully inclusive. maxGameDepth: cfg.faultGameSplitDepth() + 3 + 1, - maxClockDuration: Duration.wrap(0) // Resolvable immediately - }) + splitDepth: cfg.faultGameSplitDepth(), + clockExtension: Duration.wrap(uint64(cfg.faultGameClockExtension())), + maxClockDuration: Duration.wrap(0), // Resolvable immediately + vm: IBigStepper(new AlphabetVM(outputAbsolutePrestate, fastOracle)), + weth: weth, + anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), + l2ChainId: cfg.l2ChainID() + }) }); } /// @notice Sets the implementation for the given fault game type in the `DisputeGameFactory`.
function _setFaultGameImplementation( IDisputeGameFactory _factory, - bool _allowUpgrade, - FaultDisputeGameParams memory _params + IFaultDisputeGame.GameConstructorParams memory _params ) internal { - if (address(_factory.gameImpls(_params.gameType)) != address(0) && !_allowUpgrade) { + if (address(_factory.gameImpls(_params.gameType)) != address(0)) { console.log( "[WARN] DisputeGameFactoryProxy: `FaultDisputeGame` implementation already set for game type: %s", vm.toString(GameType.unwrap(_params.gameType)) @@ -1144,37 +935,19 @@ contract Deploy is Deployer { } uint32 rawGameType = GameType.unwrap(_params.gameType); - - // Redefine _param variable to avoid stack too deep error during compilation - FaultDisputeGameParams memory _params_ = _params; require( rawGameType != GameTypes.PERMISSIONED_CANNON.raw(), "Deploy: Permissioned Game should be deployed by OPCM" ); + _factory.setImplementation( - _params_.gameType, + _params.gameType, IDisputeGame( DeployUtils.create2AndSave({ _save: this, _salt: _implSalt(), _name: "FaultDisputeGame", _nick: string.concat("FaultDisputeGame_", vm.toString(rawGameType)), - _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IFaultDisputeGame.__constructor__, - ( - _params_.gameType, - _params_.absolutePrestate, - _params_.maxGameDepth, - cfg.faultGameSplitDepth(), - Duration.wrap(uint64(cfg.faultGameClockExtension())), - _params_.maxClockDuration, - _params_.faultVm, - _params_.weth, - IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), - cfg.l2ChainID() - ) - ) - ) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IFaultDisputeGame.__constructor__, (_params))) }) ) ); @@ -1195,39 +968,6 @@ contract Deploy is Deployer { ); } - /// @notice Initialize the DataAvailabilityChallenge - function initializeDataAvailabilityChallenge() public broadcast { - console.log("Upgrading and initializing DataAvailabilityChallenge proxy"); - address dataAvailabilityChallengeProxy = mustGetAddress("DataAvailabilityChallengeProxy"); - address dataAvailabilityChallenge = mustGetAddress("DataAvailabilityChallenge"); - - address finalSystemOwner = cfg.finalSystemOwner(); - uint256 daChallengeWindow = cfg.daChallengeWindow(); - uint256 daResolveWindow = cfg.daResolveWindow(); - uint256 daBondSize = cfg.daBondSize(); - uint256 daResolverRefundPercentage = cfg.daResolverRefundPercentage(); - - IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); - proxyAdmin.upgradeAndCall({ - _proxy: payable(dataAvailabilityChallengeProxy), - _implementation: dataAvailabilityChallenge, - _data: abi.encodeCall( - IDataAvailabilityChallenge.initialize, - (finalSystemOwner, daChallengeWindow, daResolveWindow, daBondSize, daResolverRefundPercentage) - ) - }); - - IDataAvailabilityChallenge dac = IDataAvailabilityChallenge(payable(dataAvailabilityChallengeProxy)); - string memory version = dac.version(); - console.log("DataAvailabilityChallenge version: %s", version); - - require(dac.owner() == finalSystemOwner); - require(dac.challengeWindow() == daChallengeWindow); - require(dac.resolveWindow() == daResolveWindow); - require(dac.bondSize() == daBondSize); - require(dac.resolverRefundPercentage() == daResolverRefundPercentage); - } - /// @notice Get the DeployInput struct to use for testing function getDeployInput() public view returns (OPContractsManager.DeployInput memory) { OutputRoot memory testOutputRoot = OutputRoot({ @@ -1273,6 +1013,7 @@ contract Deploy is Deployer { }); } + /// @notice Reset the initialized value on a proxy contract so that it can 
be initialized again function resetInitializedProxy(string memory _contractName) internal { console.log("resetting initialized value on %s Proxy", _contractName); address proxy = mustGetAddress(string.concat(_contractName, "Proxy")); diff --git a/packages/contracts-bedrock/scripts/deploy/DeployAltDA.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployAltDA.s.sol index a5071474926b..1943b40a35bb 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployAltDA.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployAltDA.s.sol @@ -2,11 +2,11 @@ pragma solidity 0.8.15; import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; -import { IDataAvailabilityChallenge } from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IDataAvailabilityChallenge } from "interfaces/L1/IDataAvailabilityChallenge.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; import { Script } from "forge-std/Script.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; -import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; import { Solarray } from "scripts/libraries/Solarray.sol"; contract DeployAltDAInput is BaseDeployIO { diff --git a/packages/contracts-bedrock/scripts/deploy/DeployAsterisc.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployAsterisc.s.sol new file mode 100644 index 000000000000..66f6ba33f362 --- /dev/null +++ b/packages/contracts-bedrock/scripts/deploy/DeployAsterisc.s.sol @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +// Forge +import { Script } from "forge-std/Script.sol"; + +// Scripts +import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +// Interfaces +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; +import { IRISCV } from "interfaces/vendor/asterisc/IRISCV.sol"; + +/// @title DeployAsteriscInput +contract DeployAsteriscInput is BaseDeployIO { + // Specify the PreimageOracle to use + address internal _preimageOracle; + + function set(bytes4 _sel, address _value) public { + if (_sel == this.preimageOracle.selector) { + require(_value != address(0), "DeployAsterisc: preimageOracle cannot be empty"); + _preimageOracle = _value; + } else { + revert("DeployAsterisc: unknown selector"); + } + } + + function preimageOracle() public view returns (address) { + require(_preimageOracle != address(0), "DeployAsterisc: preimageOracle not set"); + return _preimageOracle; + } +} + +/// @title DeployAsteriscOutput +contract DeployAsteriscOutput is BaseDeployIO { + IRISCV internal _asteriscSingleton; + + function set(bytes4 _sel, address _value) public { + if (_sel == this.asteriscSingleton.selector) { + require(_value != address(0), "DeployAsterisc: asteriscSingleton cannot be zero address"); + _asteriscSingleton = IRISCV(_value); + } else { + revert("DeployAsterisc: unknown selector"); + } + } + + function checkOutput(DeployAsteriscInput _mi) public view { + DeployUtils.assertValidContractAddress(address(_asteriscSingleton)); + assertValidDeploy(_mi); + } + + function asteriscSingleton() public view returns (IRISCV) { + DeployUtils.assertValidContractAddress(address(_asteriscSingleton)); + return _asteriscSingleton; + } + + function assertValidDeploy(DeployAsteriscInput _mi) public view { + assertValidAsteriscSingleton(_mi); + } + + function 
assertValidAsteriscSingleton(DeployAsteriscInput _mi) internal view { + IRISCV asterisc = asteriscSingleton(); + + require(address(asterisc.oracle()) == address(_mi.preimageOracle()), "ASTERISC-10"); + } +} + +/// @title DeployAsterisc +contract DeployAsterisc is Script { + function run(DeployAsteriscInput _mi, DeployAsteriscOutput _mo) public { + DeployAsteriscSingleton(_mi, _mo); + _mo.checkOutput(_mi); + } + + function DeployAsteriscSingleton(DeployAsteriscInput _mi, DeployAsteriscOutput _mo) internal { + IPreimageOracle preimageOracle = IPreimageOracle(_mi.preimageOracle()); + vm.broadcast(msg.sender); + IRISCV singleton = IRISCV( + DeployUtils.create1({ + _name: "RISCV", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IRISCV.__constructor__, (preimageOracle))) + }) + ); + + vm.label(address(singleton), "AsteriscSingleton"); + _mo.set(_mo.asteriscSingleton.selector, address(singleton)); + } +} diff --git a/packages/contracts-bedrock/scripts/deploy/DeployDelayedWETH.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployDelayedWETH.s.sol index 979869cb3ad0..c450b1f155ea 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployDelayedWETH.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployDelayedWETH.s.sol @@ -12,9 +12,9 @@ import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { LibString } from "@solady/utils/LibString.sol"; // Interfaces -import { IProxy } from "src/universal/interfaces/IProxy.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; /// @title DeployDelayedWETH contract DeployDelayedWETHInput is BaseDeployIO { diff --git a/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol index 51b60c6c2995..6399115fad74 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol @@ -13,13 +13,11 @@ import { GameType, Claim, Duration } from "src/dispute/lib/Types.sol"; import { LibString } from "@solady/utils/LibString.sol"; // Interfaces -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; -import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IBigStepper } from "interfaces/dispute/IBigStepper.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; /// @title DeployDisputeGameInput contract DeployDisputeGameInput is BaseDeployIO { @@ -27,13 +25,6 @@ contract DeployDisputeGameInput is BaseDeployIO { string internal _release; string internal _standardVersionsToml; - // 
Specify which MIPS version to use. - uint256 internal _mipsVersion; - - // All inputs required to deploy PreimageOracle. - uint256 internal _minProposalSizeBytes; - uint256 internal _challengePeriodSeconds; - // Specify which game kind is being deployed here. string internal _gameKind; @@ -46,6 +37,7 @@ contract DeployDisputeGameInput is BaseDeployIO { uint256 internal _maxClockDuration; IDelayedWETH internal _delayedWethProxy; IAnchorStateRegistry internal _anchorStateRegistryProxy; + IBigStepper internal _vm; uint256 internal _l2ChainId; // Additional inputs required to deploy PermissionedDisputeGame. @@ -53,16 +45,7 @@ contract DeployDisputeGameInput is BaseDeployIO { address internal _challenger; function set(bytes4 _sel, uint256 _value) public { - if (_sel == this.mipsVersion.selector) { - require(_value == 1 || _value == 2, "DeployDisputeGame: unknown mips version"); - _mipsVersion = _value; - } else if (_sel == this.minProposalSizeBytes.selector) { - require(_value != 0, "DeployDisputeGame: minProposalSizeBytes cannot be zero"); - _minProposalSizeBytes = _value; - } else if (_sel == this.challengePeriodSeconds.selector) { - require(_value != 0, "DeployDisputeGame: challengePeriodSeconds cannot be zero"); - _challengePeriodSeconds = _value; - } else if (_sel == this.gameType.selector) { + if (_sel == this.gameType.selector) { require(_value <= type(uint32).max, "DeployDisputeGame: gameType must fit inside uint32"); _gameType = _value; } else if (_sel == this.maxGameDepth.selector) { @@ -88,7 +71,9 @@ contract DeployDisputeGameInput is BaseDeployIO { } function set(bytes4 _sel, address _value) public { - if (_sel == this.delayedWethProxy.selector) { + if (_sel == this.vmAddress.selector) { + _vm = IBigStepper(_value); + } else if (_sel == this.delayedWethProxy.selector) { require(_value != address(0), "DeployDisputeGame: delayedWethProxy cannot be zero address"); _delayedWethProxy = IDelayedWETH(payable(_value)); } else if (_sel == this.anchorStateRegistryProxy.selector) { @@ -133,20 +118,8 @@ contract DeployDisputeGameInput is BaseDeployIO { return _standardVersionsToml; } - function mipsVersion() public view returns (uint256) { - require(_mipsVersion != 0, "DeployDisputeGame: mipsVersion not set"); - require(_mipsVersion == 1 || _mipsVersion == 2, "DeployDisputeGame: unknown mips version"); - return _mipsVersion; - } - - function minProposalSizeBytes() public view returns (uint256) { - require(_minProposalSizeBytes != 0, "DeployDisputeGame: minProposalSizeBytes not set"); - return _minProposalSizeBytes; - } - - function challengePeriodSeconds() public view returns (uint256) { - require(_challengePeriodSeconds != 0, "DeployDisputeGame: challengePeriodSeconds not set"); - return _challengePeriodSeconds; + function vmAddress() public view returns (IBigStepper) { + return _vm; } function gameKind() public view returns (string memory) { @@ -228,65 +201,30 @@ contract DeployDisputeGameOutput is BaseDeployIO { // PermissionedDisputeGame is used as the type here because it has all of the same functions as // FaultDisputeGame but with the added proposer and challenger fields. 
IPermissionedDisputeGame internal _disputeGameImpl; - IMIPS internal _mipsSingleton; - IPreimageOracle internal _preimageOracleSingleton; function set(bytes4 _sel, address _value) public { if (_sel == this.disputeGameImpl.selector) { require(_value != address(0), "DeployDisputeGame: disputeGameImpl cannot be zero address"); _disputeGameImpl = IPermissionedDisputeGame(_value); - } else if (_sel == this.mipsSingleton.selector) { - require(_value != address(0), "DeployDisputeGame: mipsSingleton cannot be zero address"); - _mipsSingleton = IMIPS(_value); - } else if (_sel == this.preimageOracleSingleton.selector) { - require(_value != address(0), "DeployDisputeGame: preimageOracleSingleton cannot be zero address"); - _preimageOracleSingleton = IPreimageOracle(_value); } else { revert("DeployDisputeGame: unknown selector"); } } function checkOutput(DeployDisputeGameInput _dgi) public view { - DeployUtils.assertValidContractAddress(address(_preimageOracleSingleton)); - DeployUtils.assertValidContractAddress(address(_mipsSingleton)); DeployUtils.assertValidContractAddress(address(_disputeGameImpl)); assertValidDeploy(_dgi); } - function preimageOracleSingleton() public view returns (IPreimageOracle) { - DeployUtils.assertValidContractAddress(address(_preimageOracleSingleton)); - return _preimageOracleSingleton; - } - - function mipsSingleton() public view returns (IMIPS) { - DeployUtils.assertValidContractAddress(address(_mipsSingleton)); - return _mipsSingleton; - } - function disputeGameImpl() public view returns (IPermissionedDisputeGame) { DeployUtils.assertValidContractAddress(address(_disputeGameImpl)); return _disputeGameImpl; } function assertValidDeploy(DeployDisputeGameInput _dgi) public view { - assertValidPreimageOracleSingleton(_dgi); - assertValidMipsSingleton(_dgi); assertValidDisputeGameImpl(_dgi); } - function assertValidPreimageOracleSingleton(DeployDisputeGameInput _dgi) internal view { - IPreimageOracle oracle = preimageOracleSingleton(); - - require(oracle.minProposalSize() == _dgi.minProposalSizeBytes(), "PO-10"); - require(oracle.challengePeriod() == _dgi.challengePeriodSeconds(), "PO-20"); - } - - function assertValidMipsSingleton(DeployDisputeGameInput) internal view { - IMIPS mips = mipsSingleton(); - - require(address(mips.oracle()) == address(preimageOracleSingleton()), "MIPS-10"); - } - function assertValidDisputeGameImpl(DeployDisputeGameInput _dgi) internal view { IPermissionedDisputeGame game = disputeGameImpl(); @@ -295,7 +233,7 @@ contract DeployDisputeGameOutput is BaseDeployIO { require(game.splitDepth() == _dgi.splitDepth(), "DG-30"); require(game.clockExtension().raw() == uint64(_dgi.clockExtension()), "DG-40"); require(game.maxClockDuration().raw() == uint64(_dgi.maxClockDuration()), "DG-50"); - require(game.vm() == IBigStepper(address(mipsSingleton())), "DG-60"); + require(game.vm() == _dgi.vmAddress(), "DG-60"); require(game.weth() == _dgi.delayedWethProxy(), "DG-70"); require(game.anchorStateRegistry() == _dgi.anchorStateRegistryProxy(), "DG-80"); require(game.l2ChainId() == _dgi.l2ChainId(), "DG-90"); @@ -326,83 +264,23 @@ contract DeployDisputeGame is Script { } function run(DeployDisputeGameInput _dgi, DeployDisputeGameOutput _dgo) public { - deployPreimageOracleSingleton(_dgi, _dgo); - deployMipsSingleton(_dgi, _dgo); deployDisputeGameImpl(_dgi, _dgo); _dgo.checkOutput(_dgi); } - function deployPreimageOracleSingleton(DeployDisputeGameInput _dgi, DeployDisputeGameOutput _dgo) internal { - string memory release = _dgi.release(); - string memory 
stdVerToml = _dgi.standardVersionsToml(); - string memory contractName = "preimage_oracle"; - IPreimageOracle singleton; - - address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); - if (existingImplementation != address(0)) { - singleton = IPreimageOracle(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { - uint256 minProposalSizeBytes = _dgi.minProposalSizeBytes(); - uint256 challengePeriodSeconds = _dgi.challengePeriodSeconds(); - vm.broadcast(msg.sender); - singleton = IPreimageOracle( - DeployUtils.create1({ - _name: "PreimageOracle", - _args: DeployUtils.encodeConstructor( - abi.encodeCall(IPreimageOracle.__constructor__, (minProposalSizeBytes, challengePeriodSeconds)) - ) - }) - ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); - } - - vm.label(address(singleton), "PreimageOracleSingleton"); - _dgo.set(_dgo.preimageOracleSingleton.selector, address(singleton)); - } - - function deployMipsSingleton(DeployDisputeGameInput _dgi, DeployDisputeGameOutput _dgo) internal { - string memory release = _dgi.release(); - string memory stdVerToml = _dgi.standardVersionsToml(); - string memory contractName = "mips"; - IMIPS singleton; - - address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); - if (existingImplementation != address(0)) { - singleton = IMIPS(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { - uint256 mipsVersion = _dgi.mipsVersion(); - IPreimageOracle preimageOracle = IPreimageOracle(address(_dgo.preimageOracleSingleton())); - vm.broadcast(msg.sender); - singleton = IMIPS( - DeployUtils.create1({ - _name: mipsVersion == 1 ? "MIPS" : "MIPS2", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS.__constructor__, (preimageOracle))) - }) - ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); - } - - vm.label(address(singleton), "MIPSSingleton"); - _dgo.set(_dgo.mipsSingleton.selector, address(singleton)); - } - function deployDisputeGameImpl(DeployDisputeGameInput _dgi, DeployDisputeGameOutput _dgo) internal { // Shove the arguments into a struct to avoid stack-too-deep errors. 
- DisputeGameConstructorArgs memory args = DisputeGameConstructorArgs({ + IFaultDisputeGame.GameConstructorParams memory args = IFaultDisputeGame.GameConstructorParams({ gameType: GameType.wrap(uint32(_dgi.gameType())), absolutePrestate: Claim.wrap(_dgi.absolutePrestate()), maxGameDepth: _dgi.maxGameDepth(), splitDepth: _dgi.splitDepth(), clockExtension: Duration.wrap(uint64(_dgi.clockExtension())), maxClockDuration: Duration.wrap(uint64(_dgi.maxClockDuration())), - gameVm: IBigStepper(address(_dgo.mipsSingleton())), - delayedWethProxy: _dgi.delayedWethProxy(), - anchorStateRegistryProxy: _dgi.anchorStateRegistryProxy(), - l2ChainId: _dgi.l2ChainId(), - proposer: _dgi.proposer(), - challenger: _dgi.challenger() + vm: IBigStepper(address(_dgi.vmAddress())), + weth: _dgi.delayedWethProxy(), + anchorStateRegistry: _dgi.anchorStateRegistryProxy(), + l2ChainId: _dgi.l2ChainId() }); // PermissionedDisputeGame is used as the type here because it is a superset of @@ -414,23 +292,7 @@ contract DeployDisputeGame is Script { impl = IPermissionedDisputeGame( DeployUtils.create1({ _name: "FaultDisputeGame", - _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IFaultDisputeGame.__constructor__, - ( - args.gameType, - args.absolutePrestate, - args.maxGameDepth, - args.splitDepth, - args.clockExtension, - args.maxClockDuration, - args.gameVm, - args.delayedWethProxy, - args.anchorStateRegistryProxy, - args.l2ChainId - ) - ) - ) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IFaultDisputeGame.__constructor__, (args))) }) ); } else { @@ -438,23 +300,7 @@ contract DeployDisputeGame is Script { DeployUtils.create1({ _name: "PermissionedDisputeGame", _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IPermissionedDisputeGame.__constructor__, - ( - args.gameType, - args.absolutePrestate, - args.maxGameDepth, - args.splitDepth, - args.clockExtension, - args.maxClockDuration, - args.gameVm, - args.delayedWethProxy, - args.anchorStateRegistryProxy, - args.l2ChainId, - args.proposer, - args.challenger - ) - ) + abi.encodeCall(IPermissionedDisputeGame.__constructor__, (args, _dgi.proposer(), _dgi.challenger())) ) }) ); diff --git a/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol index c9048a07dfa3..716a95b071cf 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol @@ -5,35 +5,30 @@ import { Script } from "forge-std/Script.sol"; import { LibString } from "@solady/utils/LibString.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; -import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; -import { IL1CrossDomainMessengerV160 } from "src/L1/interfaces/IL1CrossDomainMessengerV160.sol"; -import { IL1StandardBridgeV160 } from "src/L1/interfaces/IL1StandardBridgeV160.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { Bytes } from "src/libraries/Bytes.sol"; -import { IProxy } from 
"src/universal/interfaces/IProxy.sol"; - -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; -import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; +import { IMIPS } from "interfaces/cannon/IMIPS.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; -import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; +import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; import { OPContractsManagerInterop } from "src/L1/OPContractsManagerInterop.sol"; -import { IOptimismPortalInterop } from "src/L1/interfaces/IOptimismPortalInterop.sol"; -import { ISystemConfigInterop } from "src/L1/interfaces/ISystemConfigInterop.sol"; +import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; +import { ISystemConfigInterop } from "interfaces/L1/ISystemConfigInterop.sol"; import { Blueprint } from "src/libraries/Blueprint.sol"; @@ -51,8 +46,9 @@ contract DeployImplementationsInput is BaseDeployIO { uint256 internal _disputeGameFinalityDelaySeconds; uint256 internal _mipsVersion; - // The release version to set OPCM implementations for, of the format `op-contracts/vX.Y.Z`. - string internal _release; + // This is used in opcm to signal which version of the L1 smart contracts is deployed. + // It takes the format of `op-contracts/v*.*.*`. + string internal _l1ContractsRelease; // Outputs from DeploySuperchain.s.sol. 
ISuperchainConfig internal _superchainConfigProxy; @@ -60,8 +56,6 @@ contract DeployImplementationsInput is BaseDeployIO { string internal _standardVersionsToml; - address internal _opcmProxyOwner; - function set(bytes4 _sel, uint256 _value) public { require(_value != 0, "DeployImplementationsInput: cannot set zero value"); @@ -85,7 +79,7 @@ contract DeployImplementationsInput is BaseDeployIO { function set(bytes4 _sel, string memory _value) public { require(!LibString.eq(_value, ""), "DeployImplementationsInput: cannot set empty string"); - if (_sel == this.release.selector) _release = _value; + if (_sel == this.l1ContractsRelease.selector) _l1ContractsRelease = _value; else if (_sel == this.standardVersionsToml.selector) _standardVersionsToml = _value; else revert("DeployImplementationsInput: unknown selector"); } @@ -94,7 +88,6 @@ contract DeployImplementationsInput is BaseDeployIO { require(_addr != address(0), "DeployImplementationsInput: cannot set zero address"); if (_sel == this.superchainConfigProxy.selector) _superchainConfigProxy = ISuperchainConfig(_addr); else if (_sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = IProtocolVersions(_addr); - else if (_sel == this.opcmProxyOwner.selector) _opcmProxyOwner = _addr; else revert("DeployImplementationsInput: unknown selector"); } @@ -141,9 +134,9 @@ contract DeployImplementationsInput is BaseDeployIO { return _mipsVersion; } - function release() public view returns (string memory) { - require(!LibString.eq(_release, ""), "DeployImplementationsInput: not set"); - return _release; + function l1ContractsRelease() public view returns (string memory) { + require(!LibString.eq(_l1ContractsRelease, ""), "DeployImplementationsInput: not set"); + return _l1ContractsRelease; } function standardVersionsToml() public view returns (string memory) { @@ -160,16 +153,10 @@ contract DeployImplementationsInput is BaseDeployIO { require(address(_protocolVersionsProxy) != address(0), "DeployImplementationsInput: not set"); return _protocolVersionsProxy; } - - function opcmProxyOwner() public view returns (address) { - require(address(_opcmProxyOwner) != address(0), "DeployImplementationsInput: not set"); - return _opcmProxyOwner; - } } contract DeployImplementationsOutput is BaseDeployIO { - OPContractsManager internal _opcmProxy; - OPContractsManager internal _opcmImpl; + OPContractsManager internal _opcm; IDelayedWETH internal _delayedWETHImpl; IOptimismPortal2 internal _optimismPortalImpl; IPreimageOracle internal _preimageOracleSingleton; @@ -185,8 +172,7 @@ contract DeployImplementationsOutput is BaseDeployIO { require(_addr != address(0), "DeployImplementationsOutput: cannot set zero address"); // forgefmt: disable-start - if (_sel == this.opcmProxy.selector) _opcmProxy = OPContractsManager(payable(_addr)); - else if (_sel == this.opcmImpl.selector) _opcmImpl = OPContractsManager(payable(_addr)); + if (_sel == this.opcm.selector) _opcm = OPContractsManager(_addr); else if (_sel == this.optimismPortalImpl.selector) _optimismPortalImpl = IOptimismPortal2(payable(_addr)); else if (_sel == this.delayedWETHImpl.selector) _delayedWETHImpl = IDelayedWETH(payable(_addr)); else if (_sel == this.preimageOracleSingleton.selector) _preimageOracleSingleton = IPreimageOracle(_addr); @@ -201,12 +187,11 @@ contract DeployImplementationsOutput is BaseDeployIO { // forgefmt: disable-end } - function checkOutput(DeployImplementationsInput _dii) public { + function checkOutput(DeployImplementationsInput _dii) public view { // With 12 addresses, 
we'd get a stack too deep error if we tried to do this inline as a // single call to `Solarray.addresses`. So we split it into two calls. address[] memory addrs1 = Solarray.addresses( - address(this.opcmProxy()), - address(this.opcmImpl()), + address(this.opcm()), address(this.optimismPortalImpl()), address(this.delayedWETHImpl()), address(this.preimageOracleSingleton()), @@ -227,15 +212,9 @@ contract DeployImplementationsOutput is BaseDeployIO { assertValidDeploy(_dii); } - function opcmProxy() public returns (OPContractsManager) { - DeployUtils.assertValidContractAddress(address(_opcmProxy)); - DeployUtils.assertERC1967ImplementationSet(address(_opcmProxy)); - return _opcmProxy; - } - - function opcmImpl() public view returns (OPContractsManager) { - DeployUtils.assertValidContractAddress(address(_opcmImpl)); - return _opcmImpl; + function opcm() public view returns (OPContractsManager) { + DeployUtils.assertValidContractAddress(address(_opcm)); + return _opcm; } function optimismPortalImpl() public view returns (IOptimismPortal2) { @@ -289,40 +268,22 @@ contract DeployImplementationsOutput is BaseDeployIO { } // -------- Deployment Assertions -------- - function assertValidDeploy(DeployImplementationsInput _dii) public { + function assertValidDeploy(DeployImplementationsInput _dii) public view { assertValidDelayedWETHImpl(_dii); assertValidDisputeGameFactoryImpl(_dii); assertValidL1CrossDomainMessengerImpl(_dii); assertValidL1ERC721BridgeImpl(_dii); assertValidL1StandardBridgeImpl(_dii); assertValidMipsSingleton(_dii); - assertValidOpcmProxy(_dii); - assertValidOpcmImpl(_dii); + assertValidOpcm(_dii); assertValidOptimismMintableERC20FactoryImpl(_dii); assertValidOptimismPortalImpl(_dii); assertValidPreimageOracleSingleton(_dii); assertValidSystemConfigImpl(_dii); } - function assertValidOpcmProxy(DeployImplementationsInput _dii) internal { - // First we check the proxy as itself. - IProxy proxy = IProxy(payable(address(opcmProxy()))); - vm.prank(address(0)); - address admin = proxy.admin(); - require(admin == address(_dii.opcmProxyOwner()), "OPCMP-10"); - - // Then we check the proxy as OPCM. - DeployUtils.assertInitialized({ _contractAddress: address(opcmProxy()), _slot: 0, _offset: 0 }); - require(address(opcmProxy().superchainConfig()) == address(_dii.superchainConfigProxy()), "OPCMP-20"); - require(address(opcmProxy().protocolVersions()) == address(_dii.protocolVersionsProxy()), "OPCMP-30"); - require(LibString.eq(opcmProxy().latestRelease(), _dii.release()), "OPCMP-50"); // Initial release is latest. 
- } - - function assertValidOpcmImpl(DeployImplementationsInput _dii) internal { - IProxy proxy = IProxy(payable(address(opcmProxy()))); - vm.prank(address(0)); - OPContractsManager impl = OPContractsManager(proxy.implementation()); - DeployUtils.assertInitialized({ _contractAddress: address(impl), _slot: 0, _offset: 0 }); + function assertValidOpcm(DeployImplementationsInput _dii) internal view { + OPContractsManager impl = OPContractsManager(address(opcm())); require(address(impl.superchainConfig()) == address(_dii.superchainConfigProxy()), "OPCMI-10"); require(address(impl.protocolVersions()) == address(_dii.protocolVersionsProxy()), "OPCMI-20"); } @@ -361,7 +322,6 @@ contract DeployImplementationsOutput is BaseDeployIO { function assertValidMipsSingleton(DeployImplementationsInput) internal view { IMIPS mips = mipsSingleton(); - require(address(mips.oracle()) == address(preimageOracleSingleton()), "MIPS-10"); } @@ -480,102 +440,38 @@ contract DeployImplementations is Script { // --- OP Contracts Manager --- - function opcmSystemConfigSetter( - DeployImplementationsInput _dii, - DeployImplementationsOutput _dio - ) - internal - view - virtual - returns (OPContractsManager.ImplementationSetter memory) - { - // When configuring OPCM during Solidity tests, we are using the latest SystemConfig.sol - // version in this repo, which contains Custom Gas Token (CGT) features. This CGT version - // has a different `initialize` signature than the SystemConfig version that was released - // as part of `op-contracts/v1.6.0`, which is no longer in the repo. When running this - // script's bytecode for a production deploy of OPCM at `op-contracts/v1.6.0`, we need to - // use the ISystemConfigV160 interface instead of ISystemConfig. Therefore the selector used - // is a function of the `release` passed in by the caller. - bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") - ? ISystemConfigV160.initialize.selector - : ISystemConfig.initialize.selector; - return OPContractsManager.ImplementationSetter({ - name: "SystemConfig", - info: OPContractsManager.Implementation(address(_dio.systemConfigImpl()), selector) - }); - } - - function l1CrossDomainMessengerConfigSetter( - DeployImplementationsInput _dii, - DeployImplementationsOutput _dio - ) - internal - view - virtual - returns (OPContractsManager.ImplementationSetter memory) - { - bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") - ? IL1CrossDomainMessengerV160.initialize.selector - : IL1CrossDomainMessenger.initialize.selector; - return OPContractsManager.ImplementationSetter({ - name: "L1CrossDomainMessenger", - info: OPContractsManager.Implementation(address(_dio.l1CrossDomainMessengerImpl()), selector) - }); - } - - function l1StandardBridgeConfigSetter( - DeployImplementationsInput _dii, - DeployImplementationsOutput _dio - ) - internal - view - virtual - returns (OPContractsManager.ImplementationSetter memory) - { - bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") - ? IL1StandardBridgeV160.initialize.selector - : IL1StandardBridge.initialize.selector; - return OPContractsManager.ImplementationSetter({ - name: "L1StandardBridge", - info: OPContractsManager.Implementation(address(_dio.l1StandardBridgeImpl()), selector) - }); - } - - // Deploy and initialize a proxied OPContractsManager. 
function createOPCMContract( DeployImplementationsInput _dii, DeployImplementationsOutput _dio, OPContractsManager.Blueprints memory _blueprints, - string memory _release, - OPContractsManager.ImplementationSetter[] memory _setters + string memory _l1ContractsRelease ) internal virtual - returns (OPContractsManager opcmProxy_) + returns (OPContractsManager opcm_) { - address opcmProxyOwner = _dii.opcmProxyOwner(); + ISuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); + IProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); + + OPContractsManager.Implementations memory implementations = OPContractsManager.Implementations({ + l1ERC721BridgeImpl: address(_dio.l1ERC721BridgeImpl()), + optimismPortalImpl: address(_dio.optimismPortalImpl()), + systemConfigImpl: address(_dio.systemConfigImpl()), + optimismMintableERC20FactoryImpl: address(_dio.optimismMintableERC20FactoryImpl()), + l1CrossDomainMessengerImpl: address(_dio.l1CrossDomainMessengerImpl()), + l1StandardBridgeImpl: address(_dio.l1StandardBridgeImpl()), + disputeGameFactoryImpl: address(_dio.disputeGameFactoryImpl()), + delayedWETHImpl: address(_dio.delayedWETHImpl()), + mipsImpl: address(_dio.mipsSingleton()) + }); vm.broadcast(msg.sender); - IProxy proxy = IProxy( - DeployUtils.create1({ - _name: "Proxy", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (msg.sender))) - }) + opcm_ = new OPContractsManager( + superchainConfigProxy, protocolVersionsProxy, _l1ContractsRelease, _blueprints, implementations ); - deployOPContractsManagerImpl(_dii, _dio); - OPContractsManager opcmImpl = _dio.opcmImpl(); - - OPContractsManager.InitializerInputs memory initializerInputs = - OPContractsManager.InitializerInputs(_blueprints, _setters, _release, true); - - vm.startBroadcast(msg.sender); - proxy.upgradeToAndCall(address(opcmImpl), abi.encodeCall(opcmImpl.initialize, (initializerInputs))); - - proxy.changeAdmin(address(opcmProxyOwner)); // transfer ownership of Proxy contract to the ProxyAdmin contract - vm.stopBroadcast(); - - opcmProxy_ = OPContractsManager(address(proxy)); + vm.label(address(opcm_), "OPContractsManager"); + _dio.set(_dio.opcm.selector, address(opcm_)); } function deployOPContractsManager( @@ -585,72 +481,42 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); - - // First we deploy the blueprints for the singletons deployed by OPCM. 
- // forgefmt: disable-start - bytes32 salt = _dii.salt(); - OPContractsManager.Blueprints memory blueprints; - - vm.startBroadcast(msg.sender); - blueprints.addressManager = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("AddressManager")), salt); - blueprints.proxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("Proxy")), salt); - blueprints.proxyAdmin = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("ProxyAdmin")), salt); - blueprints.l1ChugSplashProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("L1ChugSplashProxy")), salt); - blueprints.resolvedDelegateProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("ResolvedDelegateProxy")), salt); - blueprints.anchorStateRegistry = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("AnchorStateRegistry")), salt); - (blueprints.permissionedDisputeGame1, blueprints.permissionedDisputeGame2) = deployBigBytecode(vm.getCode("PermissionedDisputeGame"), salt); - vm.stopBroadcast(); - // forgefmt: disable-end - - OPContractsManager.ImplementationSetter[] memory setters = new OPContractsManager.ImplementationSetter[](9); - setters[0] = OPContractsManager.ImplementationSetter({ - name: "L1ERC721Bridge", - info: OPContractsManager.Implementation(address(_dio.l1ERC721BridgeImpl()), IL1ERC721Bridge.initialize.selector) - }); - setters[1] = OPContractsManager.ImplementationSetter({ - name: "OptimismPortal", - info: OPContractsManager.Implementation( - address(_dio.optimismPortalImpl()), IOptimismPortal2.initialize.selector - ) - }); - setters[2] = opcmSystemConfigSetter(_dii, _dio); - setters[3] = OPContractsManager.ImplementationSetter({ - name: "OptimismMintableERC20Factory", - info: OPContractsManager.Implementation( - address(_dio.optimismMintableERC20FactoryImpl()), IOptimismMintableERC20Factory.initialize.selector - ) - }); - setters[4] = l1CrossDomainMessengerConfigSetter(_dii, _dio); - setters[5] = l1StandardBridgeConfigSetter(_dii, _dio); - setters[6] = OPContractsManager.ImplementationSetter({ - name: "DisputeGameFactory", - info: OPContractsManager.Implementation( - address(_dio.disputeGameFactoryImpl()), IDisputeGameFactory.initialize.selector - ) - }); - setters[7] = OPContractsManager.ImplementationSetter({ - name: "DelayedWETH", - info: OPContractsManager.Implementation(address(_dio.delayedWETHImpl()), IDelayedWETH.initialize.selector) - }); - setters[8] = OPContractsManager.ImplementationSetter({ - name: "MIPS", - // MIPS is a singleton for all chains, so it doesn't need to be initialized, so the - // selector is just `bytes4(0)`. - info: OPContractsManager.Implementation(address(_dio.mipsSingleton()), bytes4(0)) - }); + string memory l1ContractsRelease = _dii.l1ContractsRelease(); + string memory stdVerToml = _dii.standardVersionsToml(); + string memory contractName = "op_contracts_manager"; + OPContractsManager opcm; - // This call contains a broadcast to deploy OPCM which is proxied. - OPContractsManager opcmProxy = createOPCMContract(_dii, _dio, blueprints, release, setters); + address existingImplementation = getReleaseAddress(l1ContractsRelease, contractName, stdVerToml); + if (existingImplementation != address(0)) { + opcm = OPContractsManager(existingImplementation); + } else { + // First we deploy the blueprints for the singletons deployed by OPCM. 
+ // forgefmt: disable-start + bytes32 salt = _dii.salt(); + OPContractsManager.Blueprints memory blueprints; + + vm.startBroadcast(msg.sender); + blueprints.addressManager = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("AddressManager")), salt); + blueprints.proxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("Proxy")), salt); + blueprints.proxyAdmin = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("ProxyAdmin")), salt); + blueprints.l1ChugSplashProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("L1ChugSplashProxy")), salt); + blueprints.resolvedDelegateProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("ResolvedDelegateProxy")), salt); + blueprints.anchorStateRegistry = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("AnchorStateRegistry")), salt); + (blueprints.permissionedDisputeGame1, blueprints.permissionedDisputeGame2) = deployBigBytecode(vm.getCode("PermissionedDisputeGame"), salt); + vm.stopBroadcast(); + // forgefmt: disable-end + + opcm = createOPCMContract(_dii, _dio, blueprints, l1ContractsRelease); + } - vm.label(address(opcmProxy), "OPContractsManager"); - _dio.set(_dio.opcmProxy.selector, address(opcmProxy)); + vm.label(address(opcm), "OPContractsManager"); + _dio.set(_dio.opcm.selector, address(opcm)); } // --- Core Contracts --- function deploySystemConfigImpl(DeployImplementationsInput _dii, DeployImplementationsOutput _dio) public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); // Using snake case for contract name to match the TOML file in superchain-registry. string memory contractName = "system_config"; @@ -659,7 +525,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = ISystemConfig(existingImplementation); - } else if (isDevelopRelease(release)) { + } else { // Deploy a new implementation for development builds. 
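// This branch structure repeats for every implementation in this file: getReleaseAddress
// looks the contract up in the standard-versions TOML for the configured l1ContractsRelease
// and reuses the listed address when one exists; otherwise a fresh implementation is
// deployed unconditionally, which is why the old isDevelopRelease() gate and its
// "failed to deploy release" revert are removed further down. Condensed (illustration only;
// `deployFresh` is a hypothetical stand-in for the vm.broadcast + DeployUtils.create1 block):
//
//   address existing = getReleaseAddress(release, contractName, stdVerToml);
//   impl = existing != address(0) ? ISystemConfig(existing) : deployFresh();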
vm.broadcast(msg.sender); impl = ISystemConfig( @@ -668,8 +534,6 @@ contract DeployImplementations is Script { _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfig.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "SystemConfigImpl"); @@ -683,7 +547,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "l1_cross_domain_messenger"; IL1CrossDomainMessenger impl; @@ -691,7 +555,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IL1CrossDomainMessenger(existingImplementation); - } else if (isDevelopRelease(release)) { + } else { vm.broadcast(msg.sender); impl = IL1CrossDomainMessenger( DeployUtils.create1({ @@ -699,8 +563,6 @@ contract DeployImplementations is Script { _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1CrossDomainMessenger.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "L1CrossDomainMessengerImpl"); @@ -714,7 +576,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "l1_erc721_bridge"; IL1ERC721Bridge impl; @@ -722,7 +584,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IL1ERC721Bridge(existingImplementation); - } else if (isDevelopRelease(release)) { + } else { vm.broadcast(msg.sender); impl = IL1ERC721Bridge( DeployUtils.create1({ @@ -730,8 +592,6 @@ contract DeployImplementations is Script { _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1ERC721Bridge.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "L1ERC721BridgeImpl"); @@ -745,7 +605,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "l1_standard_bridge"; IL1StandardBridge impl; @@ -753,7 +613,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IL1StandardBridge(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { vm.broadcast(msg.sender); impl = IL1StandardBridge( DeployUtils.create1({ @@ -761,8 +621,6 @@ contract DeployImplementations is Script { _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1StandardBridge.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "L1StandardBridgeImpl"); @@ -776,7 +634,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = 
_dii.standardVersionsToml(); string memory contractName = "optimism_mintable_erc20_factory"; IOptimismMintableERC20Factory impl; @@ -784,7 +642,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IOptimismMintableERC20Factory(existingImplementation); - } else if (isDevelopRelease(release)) { + } else { vm.broadcast(msg.sender); impl = IOptimismMintableERC20Factory( DeployUtils.create1({ @@ -792,32 +650,12 @@ contract DeployImplementations is Script { _args: DeployUtils.encodeConstructor(abi.encodeCall(IOptimismMintableERC20Factory.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "OptimismMintableERC20FactoryImpl"); _dio.set(_dio.optimismMintableERC20FactoryImpl.selector, address(impl)); } - function deployOPContractsManagerImpl( - DeployImplementationsInput _dii, - DeployImplementationsOutput _dio - ) - public - virtual - { - ISuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); - IProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); - - vm.broadcast(msg.sender); - // TODO: Eventually we will want to select the correct implementation based on the release. - OPContractsManager impl = new OPContractsManager(superchainConfigProxy, protocolVersionsProxy); - - vm.label(address(impl), "OPContractsManagerImpl"); - _dio.set(_dio.opcmImpl.selector, address(impl)); - } - // --- Fault Proofs Contracts --- // The fault proofs contracts are configured as follows: @@ -862,7 +700,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "optimism_portal"; IOptimismPortal2 impl; @@ -870,7 +708,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IOptimismPortal2(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { uint256 proofMaturityDelaySeconds = _dii.proofMaturityDelaySeconds(); uint256 disputeGameFinalityDelaySeconds = _dii.disputeGameFinalityDelaySeconds(); vm.broadcast(msg.sender); @@ -884,8 +722,6 @@ contract DeployImplementations is Script { ) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "OptimismPortalImpl"); @@ -893,7 +729,7 @@ contract DeployImplementations is Script { } function deployDelayedWETHImpl(DeployImplementationsInput _dii, DeployImplementationsOutput _dio) public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "delayed_weth"; IDelayedWETH impl; @@ -901,7 +737,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IDelayedWETH(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { uint256 withdrawalDelaySeconds = _dii.withdrawalDelaySeconds(); vm.broadcast(msg.sender); impl = IDelayedWETH( @@ -912,8 +748,6 @@ contract DeployImplementations is Script { ) }) ); - } else { - 
revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "DelayedWETHImpl"); @@ -927,7 +761,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "preimage_oracle"; IPreimageOracle singleton; @@ -935,7 +769,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { singleton = IPreimageOracle(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { uint256 minProposalSizeBytes = _dii.minProposalSizeBytes(); uint256 challengePeriodSeconds = _dii.challengePeriodSeconds(); vm.broadcast(msg.sender); @@ -947,8 +781,6 @@ contract DeployImplementations is Script { ) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(singleton), "PreimageOracleSingleton"); @@ -956,7 +788,7 @@ contract DeployImplementations is Script { } function deployMipsSingleton(DeployImplementationsInput _dii, DeployImplementationsOutput _dio) public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "mips"; IMIPS singleton; @@ -964,18 +796,16 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { singleton = IMIPS(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { uint256 mipsVersion = _dii.mipsVersion(); IPreimageOracle preimageOracle = IPreimageOracle(address(_dio.preimageOracleSingleton())); vm.broadcast(msg.sender); singleton = IMIPS( DeployUtils.create1({ - _name: mipsVersion == 1 ? "MIPS" : "MIPS2", + _name: mipsVersion == 1 ? "MIPS" : "MIPS64", _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS.__constructor__, (preimageOracle))) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(singleton), "MIPSSingleton"); @@ -989,7 +819,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "dispute_game_factory"; IDisputeGameFactory impl; @@ -997,7 +827,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IDisputeGameFactory(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { vm.broadcast(msg.sender); impl = IDisputeGameFactory( DeployUtils.create1({ @@ -1005,8 +835,6 @@ contract DeployImplementations is Script { _args: DeployUtils.encodeConstructor(abi.encodeCall(IDisputeGameFactory.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "DisputeGameFactoryImpl"); @@ -1076,11 +904,6 @@ contract DeployImplementations is Script { } } } - - // A release is considered a 'develop' release if it does not start with 'op-contracts'. 
- function isDevelopRelease(string memory _release) internal pure returns (bool) { - return !LibString.startsWith(_release, "op-contracts"); - } } // Similar to how DeploySuperchain.s.sol contains a lot of comments to thoroughly document the script @@ -1120,36 +943,35 @@ contract DeployImplementationsInterop is DeployImplementations { DeployImplementationsInput _dii, DeployImplementationsOutput _dio, OPContractsManager.Blueprints memory _blueprints, - string memory _release, - OPContractsManager.ImplementationSetter[] memory _setters + string memory _l1ContractsRelease ) internal + virtual override - returns (OPContractsManager opcmProxy_) + returns (OPContractsManager opcm_) { - address opcmProxyOwner = _dii.opcmProxyOwner(); + ISuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); + IProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); + + OPContractsManager.Implementations memory implementations = OPContractsManager.Implementations({ + l1ERC721BridgeImpl: address(_dio.l1ERC721BridgeImpl()), + optimismPortalImpl: address(_dio.optimismPortalImpl()), + systemConfigImpl: address(_dio.systemConfigImpl()), + optimismMintableERC20FactoryImpl: address(_dio.optimismMintableERC20FactoryImpl()), + l1CrossDomainMessengerImpl: address(_dio.l1CrossDomainMessengerImpl()), + l1StandardBridgeImpl: address(_dio.l1StandardBridgeImpl()), + disputeGameFactoryImpl: address(_dio.disputeGameFactoryImpl()), + delayedWETHImpl: address(_dio.delayedWETHImpl()), + mipsImpl: address(_dio.mipsSingleton()) + }); vm.broadcast(msg.sender); - IProxy proxy = IProxy( - DeployUtils.create1({ - _name: "Proxy", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (msg.sender))) - }) + opcm_ = new OPContractsManagerInterop( + superchainConfigProxy, protocolVersionsProxy, _l1ContractsRelease, _blueprints, implementations ); - deployOPContractsManagerImpl(_dii, _dio); // overriding function - OPContractsManager opcmImpl = _dio.opcmImpl(); - - OPContractsManager.InitializerInputs memory initializerInputs = - OPContractsManager.InitializerInputs(_blueprints, _setters, _release, true); - - vm.startBroadcast(msg.sender); - proxy.upgradeToAndCall(address(opcmImpl), abi.encodeCall(opcmImpl.initialize, (initializerInputs))); - - proxy.changeAdmin(opcmProxyOwner); // transfer ownership of Proxy contract to the ProxyAdmin contract - vm.stopBroadcast(); - - opcmProxy_ = OPContractsManagerInterop(address(proxy)); + vm.label(address(opcm_), "OPContractsManager"); + _dio.set(_dio.opcm.selector, address(opcm_)); } function deployOptimismPortalImpl( @@ -1159,7 +981,7 @@ contract DeployImplementationsInterop is DeployImplementations { public override { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "optimism_portal"; IOptimismPortalInterop impl; @@ -1167,7 +989,7 @@ contract DeployImplementationsInterop is DeployImplementations { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IOptimismPortalInterop(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { uint256 proofMaturityDelaySeconds = _dii.proofMaturityDelaySeconds(); uint256 disputeGameFinalityDelaySeconds = _dii.disputeGameFinalityDelaySeconds(); vm.broadcast(msg.sender); @@ -1182,8 +1004,6 @@ contract DeployImplementationsInterop is DeployImplementations { ) }) ); - } else { - 
revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "OptimismPortalImpl"); @@ -1197,7 +1017,7 @@ contract DeployImplementationsInterop is DeployImplementations { public override { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "system_config"; @@ -1206,7 +1026,7 @@ contract DeployImplementationsInterop is DeployImplementations { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = ISystemConfigInterop(existingImplementation); - } else if (isDevelopRelease(release)) { + } else { vm.broadcast(msg.sender); impl = ISystemConfigInterop( DeployUtils.create1({ @@ -1214,46 +1034,9 @@ contract DeployImplementationsInterop is DeployImplementations { _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfigInterop.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "SystemConfigImpl"); _dio.set(_dio.systemConfigImpl.selector, address(impl)); } - - function deployOPContractsManagerImpl( - DeployImplementationsInput _dii, - DeployImplementationsOutput _dio - ) - public - override - { - ISuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); - IProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); - - vm.broadcast(msg.sender); - // TODO: Eventually we will want to select the correct implementation based on the release. - OPContractsManager impl = new OPContractsManagerInterop(superchainConfigProxy, protocolVersionsProxy); - - vm.label(address(impl), "OPContractsManagerImpl"); - _dio.set(_dio.opcmImpl.selector, address(impl)); - } - - function opcmSystemConfigSetter( - DeployImplementationsInput, - DeployImplementationsOutput _dio - ) - internal - view - override - returns (OPContractsManager.ImplementationSetter memory) - { - return OPContractsManager.ImplementationSetter({ - name: "SystemConfig", - info: OPContractsManager.Implementation( - address(_dio.systemConfigImpl()), ISystemConfigInterop.initialize.selector - ) - }); - } } diff --git a/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol index 5890630c9c22..4c32be812869 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol @@ -9,8 +9,8 @@ import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; // Interfaces -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; -import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; +import { IMIPS } from "interfaces/cannon/IMIPS.sol"; /// @title DeployMIPSInput contract DeployMIPSInput is BaseDeployIO { @@ -98,7 +98,7 @@ contract DeployMIPS is Script { vm.broadcast(msg.sender); singleton = IMIPS( DeployUtils.create1({ - _name: mipsVersion == 1 ? "MIPS" : "MIPS2", + _name: mipsVersion == 1 ? 
"MIPS" : "MIPS64", _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS.__constructor__, (preimageOracle))) }) ); diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOPCM.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOPCM.s.sol new file mode 100644 index 000000000000..015cb78c7594 --- /dev/null +++ b/packages/contracts-bedrock/scripts/deploy/DeployOPCM.s.sol @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +import { Script } from "forge-std/Script.sol"; + +import { LibString } from "@solady/utils/LibString.sol"; + +import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; + +contract DeployOPCMInput is BaseDeployIO { + ISuperchainConfig internal _superchainConfig; + IProtocolVersions internal _protocolVersions; + string internal _l1ContractsRelease; + + address internal _addressManagerBlueprint; + address internal _proxyBlueprint; + address internal _proxyAdminBlueprint; + address internal _l1ChugSplashProxyBlueprint; + address internal _resolvedDelegateProxyBlueprint; + address internal _anchorStateRegistryBlueprint; + address internal _permissionedDisputeGame1Blueprint; + address internal _permissionedDisputeGame2Blueprint; + + address internal _l1ERC721BridgeImpl; + address internal _optimismPortalImpl; + address internal _systemConfigImpl; + address internal _optimismMintableERC20FactoryImpl; + address internal _l1CrossDomainMessengerImpl; + address internal _l1StandardBridgeImpl; + address internal _disputeGameFactoryImpl; + address internal _delayedWETHImpl; + address internal _mipsImpl; + + // Setter for address type + function set(bytes4 _sel, address _addr) public { + require(_addr != address(0), "DeployOPCMInput: cannot set zero address"); + + if (_sel == this.superchainConfig.selector) _superchainConfig = ISuperchainConfig(_addr); + else if (_sel == this.protocolVersions.selector) _protocolVersions = IProtocolVersions(_addr); + else if (_sel == this.addressManagerBlueprint.selector) _addressManagerBlueprint = _addr; + else if (_sel == this.proxyBlueprint.selector) _proxyBlueprint = _addr; + else if (_sel == this.proxyAdminBlueprint.selector) _proxyAdminBlueprint = _addr; + else if (_sel == this.l1ChugSplashProxyBlueprint.selector) _l1ChugSplashProxyBlueprint = _addr; + else if (_sel == this.resolvedDelegateProxyBlueprint.selector) _resolvedDelegateProxyBlueprint = _addr; + else if (_sel == this.anchorStateRegistryBlueprint.selector) _anchorStateRegistryBlueprint = _addr; + else if (_sel == this.permissionedDisputeGame1Blueprint.selector) _permissionedDisputeGame1Blueprint = _addr; + else if (_sel == this.permissionedDisputeGame2Blueprint.selector) _permissionedDisputeGame2Blueprint = _addr; + else if (_sel == this.l1ERC721BridgeImpl.selector) _l1ERC721BridgeImpl = _addr; + else if (_sel == this.optimismPortalImpl.selector) _optimismPortalImpl = _addr; + else if (_sel == this.systemConfigImpl.selector) _systemConfigImpl = _addr; + else if (_sel == this.optimismMintableERC20FactoryImpl.selector) _optimismMintableERC20FactoryImpl = _addr; + else if (_sel == this.l1CrossDomainMessengerImpl.selector) _l1CrossDomainMessengerImpl = _addr; + else if (_sel == this.l1StandardBridgeImpl.selector) _l1StandardBridgeImpl = _addr; + else if (_sel == 
this.disputeGameFactoryImpl.selector) _disputeGameFactoryImpl = _addr; + else if (_sel == this.delayedWETHImpl.selector) _delayedWETHImpl = _addr; + else if (_sel == this.mipsImpl.selector) _mipsImpl = _addr; + else revert("DeployOPCMInput: unknown selector"); + } + + // Setter for string type + function set(bytes4 _sel, string memory _value) public { + require(!LibString.eq(_value, ""), "DeployOPCMInput: cannot set empty string"); + if (_sel == this.l1ContractsRelease.selector) _l1ContractsRelease = _value; + else revert("DeployOPCMInput: unknown selector"); + } + + // Getters + function superchainConfig() public view returns (ISuperchainConfig) { + require(address(_superchainConfig) != address(0), "DeployOPCMInput: not set"); + return _superchainConfig; + } + + function protocolVersions() public view returns (IProtocolVersions) { + require(address(_protocolVersions) != address(0), "DeployOPCMInput: not set"); + return _protocolVersions; + } + + function l1ContractsRelease() public view returns (string memory) { + require(!LibString.eq(_l1ContractsRelease, ""), "DeployOPCMInput: not set"); + return _l1ContractsRelease; + } + + function addressManagerBlueprint() public view returns (address) { + require(_addressManagerBlueprint != address(0), "DeployOPCMInput: not set"); + return _addressManagerBlueprint; + } + + function proxyBlueprint() public view returns (address) { + require(_proxyBlueprint != address(0), "DeployOPCMInput: not set"); + return _proxyBlueprint; + } + + function proxyAdminBlueprint() public view returns (address) { + require(_proxyAdminBlueprint != address(0), "DeployOPCMInput: not set"); + return _proxyAdminBlueprint; + } + + function l1ChugSplashProxyBlueprint() public view returns (address) { + require(_l1ChugSplashProxyBlueprint != address(0), "DeployOPCMInput: not set"); + return _l1ChugSplashProxyBlueprint; + } + + function resolvedDelegateProxyBlueprint() public view returns (address) { + require(_resolvedDelegateProxyBlueprint != address(0), "DeployOPCMInput: not set"); + return _resolvedDelegateProxyBlueprint; + } + + function anchorStateRegistryBlueprint() public view returns (address) { + require(_anchorStateRegistryBlueprint != address(0), "DeployOPCMInput: not set"); + return _anchorStateRegistryBlueprint; + } + + function permissionedDisputeGame1Blueprint() public view returns (address) { + require(_permissionedDisputeGame1Blueprint != address(0), "DeployOPCMInput: not set"); + return _permissionedDisputeGame1Blueprint; + } + + function permissionedDisputeGame2Blueprint() public view returns (address) { + require(_permissionedDisputeGame2Blueprint != address(0), "DeployOPCMInput: not set"); + return _permissionedDisputeGame2Blueprint; + } + + function l1ERC721BridgeImpl() public view returns (address) { + require(_l1ERC721BridgeImpl != address(0), "DeployOPCMInput: not set"); + return _l1ERC721BridgeImpl; + } + + function optimismPortalImpl() public view returns (address) { + require(_optimismPortalImpl != address(0), "DeployOPCMInput: not set"); + return _optimismPortalImpl; + } + + function systemConfigImpl() public view returns (address) { + require(_systemConfigImpl != address(0), "DeployOPCMInput: not set"); + return _systemConfigImpl; + } + + function optimismMintableERC20FactoryImpl() public view returns (address) { + require(_optimismMintableERC20FactoryImpl != address(0), "DeployOPCMInput: not set"); + return _optimismMintableERC20FactoryImpl; + } + + function l1CrossDomainMessengerImpl() public view returns (address) { + 
require(_l1CrossDomainMessengerImpl != address(0), "DeployOPCMInput: not set"); + return _l1CrossDomainMessengerImpl; + } + + function l1StandardBridgeImpl() public view returns (address) { + require(_l1StandardBridgeImpl != address(0), "DeployOPCMInput: not set"); + return _l1StandardBridgeImpl; + } + + function disputeGameFactoryImpl() public view returns (address) { + require(_disputeGameFactoryImpl != address(0), "DeployOPCMInput: not set"); + return _disputeGameFactoryImpl; + } + + function delayedWETHImpl() public view returns (address) { + require(_delayedWETHImpl != address(0), "DeployOPCMInput: not set"); + return _delayedWETHImpl; + } + + function mipsImpl() public view returns (address) { + require(_mipsImpl != address(0), "DeployOPCMInput: not set"); + return _mipsImpl; + } +} + +contract DeployOPCMOutput is BaseDeployIO { + OPContractsManager internal _opcm; + + // Setter for address type + function set(bytes4 _sel, address _addr) public { + require(_addr != address(0), "DeployOPCMOutput: cannot set zero address"); + if (_sel == this.opcm.selector) _opcm = OPContractsManager(_addr); + else revert("DeployOPCMOutput: unknown selector"); + } + + // Getter + function opcm() public view returns (OPContractsManager) { + require(address(_opcm) != address(0), "DeployOPCMOutput: not set"); + return _opcm; + } +} + +contract DeployOPCM is Script { + function run(DeployOPCMInput _doi, DeployOPCMOutput _doo) public { + OPContractsManager.Blueprints memory blueprints = OPContractsManager.Blueprints({ + addressManager: _doi.addressManagerBlueprint(), + proxy: _doi.proxyBlueprint(), + proxyAdmin: _doi.proxyAdminBlueprint(), + l1ChugSplashProxy: _doi.l1ChugSplashProxyBlueprint(), + resolvedDelegateProxy: _doi.resolvedDelegateProxyBlueprint(), + anchorStateRegistry: _doi.anchorStateRegistryBlueprint(), + permissionedDisputeGame1: _doi.permissionedDisputeGame1Blueprint(), + permissionedDisputeGame2: _doi.permissionedDisputeGame2Blueprint() + }); + OPContractsManager.Implementations memory implementations = OPContractsManager.Implementations({ + l1ERC721BridgeImpl: address(_doi.l1ERC721BridgeImpl()), + optimismPortalImpl: address(_doi.optimismPortalImpl()), + systemConfigImpl: address(_doi.systemConfigImpl()), + optimismMintableERC20FactoryImpl: address(_doi.optimismMintableERC20FactoryImpl()), + l1CrossDomainMessengerImpl: address(_doi.l1CrossDomainMessengerImpl()), + l1StandardBridgeImpl: address(_doi.l1StandardBridgeImpl()), + disputeGameFactoryImpl: address(_doi.disputeGameFactoryImpl()), + delayedWETHImpl: address(_doi.delayedWETHImpl()), + mipsImpl: address(_doi.mipsImpl()) + }); + + OPContractsManager opcm_ = deployOPCM( + _doi.superchainConfig(), _doi.protocolVersions(), blueprints, implementations, _doi.l1ContractsRelease() + ); + _doo.set(_doo.opcm.selector, address(opcm_)); + + assertValidOpcm(_doi, _doo); + } + + function deployOPCM( + ISuperchainConfig _superchainConfig, + IProtocolVersions _protocolVersions, + OPContractsManager.Blueprints memory _blueprints, + OPContractsManager.Implementations memory _implementations, + string memory _l1ContractsRelease + ) + public + returns (OPContractsManager opcm_) + { + vm.broadcast(msg.sender); + opcm_ = new OPContractsManager( + _superchainConfig, _protocolVersions, _l1ContractsRelease, _blueprints, _implementations + ); + vm.label(address(opcm_), "OPContractsManager"); + } + + function assertValidOpcm(DeployOPCMInput _doi, DeployOPCMOutput _doo) public view { + OPContractsManager impl = OPContractsManager(address(_doo.opcm())); + 
require(address(impl.superchainConfig()) == address(_doi.superchainConfig()), "OPCMI-10"); + require(address(impl.protocolVersions()) == address(_doi.protocolVersions()), "OPCMI-20"); + require(LibString.eq(impl.l1ContractsRelease(), _doi.l1ContractsRelease())); + + OPContractsManager.Blueprints memory blueprints = impl.blueprints(); + require(blueprints.addressManager == _doi.addressManagerBlueprint(), "OPCMI-40"); + require(blueprints.proxy == _doi.proxyBlueprint(), "OPCMI-50"); + require(blueprints.proxyAdmin == _doi.proxyAdminBlueprint(), "OPCMI-60"); + require(blueprints.l1ChugSplashProxy == _doi.l1ChugSplashProxyBlueprint(), "OPCMI-70"); + require(blueprints.resolvedDelegateProxy == _doi.resolvedDelegateProxyBlueprint(), "OPCMI-80"); + require(blueprints.anchorStateRegistry == _doi.anchorStateRegistryBlueprint(), "OPCMI-90"); + require(blueprints.permissionedDisputeGame1 == _doi.permissionedDisputeGame1Blueprint(), "OPCMI-100"); + require(blueprints.permissionedDisputeGame2 == _doi.permissionedDisputeGame2Blueprint(), "OPCMI-110"); + + OPContractsManager.Implementations memory implementations = impl.implementations(); + require(implementations.l1ERC721BridgeImpl == _doi.l1ERC721BridgeImpl(), "OPCMI-120"); + require(implementations.optimismPortalImpl == _doi.optimismPortalImpl(), "OPCMI-130"); + require(implementations.systemConfigImpl == _doi.systemConfigImpl(), "OPCMI-140"); + require( + implementations.optimismMintableERC20FactoryImpl == _doi.optimismMintableERC20FactoryImpl(), "OPCMI-150" + ); + require(implementations.l1CrossDomainMessengerImpl == _doi.l1CrossDomainMessengerImpl(), "OPCMI-160"); + require(implementations.l1StandardBridgeImpl == _doi.l1StandardBridgeImpl(), "OPCMI-170"); + require(implementations.disputeGameFactoryImpl == _doi.disputeGameFactoryImpl(), "OPCMI-180"); + require(implementations.delayedWETHImpl == _doi.delayedWETHImpl(), "OPCMI-190"); + require(implementations.mipsImpl == _doi.mipsImpl(), "OPCMI-200"); + } + + function etchIOContracts() public returns (DeployOPCMInput doi_, DeployOPCMOutput doo_) { + (doi_, doo_) = getIOContracts(); + vm.etch(address(doi_), type(DeployOPCMInput).runtimeCode); + vm.etch(address(doo_), type(DeployOPCMOutput).runtimeCode); + } + + function getIOContracts() public view returns (DeployOPCMInput doi_, DeployOPCMOutput doo_) { + doi_ = DeployOPCMInput(DeployUtils.toIOAddress(msg.sender, "optimism.DeployOPCMInput")); + doo_ = DeployOPCMOutput(DeployUtils.toIOAddress(msg.sender, "optimism.DeployOPCMOutput")); + } +} diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol index eb3b346452e9..cd0aa6d29f68 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol @@ -9,31 +9,31 @@ import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { Solarray } from "scripts/libraries/Solarray.sol"; import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IBigStepper } from "interfaces/dispute/IBigStepper.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { 
Constants } from "src/libraries/Constants.sol"; import { Constants as ScriptConstants } from "scripts/libraries/Constants.sol"; -import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; import { Claim, Duration, GameType, GameTypes, Hash } from "src/dispute/lib/Types.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; -import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; +import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; contract DeployOPChainInput is BaseDeployIO { address internal _opChainProxyAdminOwner; @@ -47,7 +47,7 @@ contract DeployOPChainInput is BaseDeployIO { uint32 internal _basefeeScalar; uint32 internal _blobBaseFeeScalar; uint256 internal _l2ChainId; - OPContractsManager internal _opcmProxy; + OPContractsManager internal _opcm; string internal _saltMixer; uint64 internal _gasLimit; @@ -68,7 +68,7 @@ contract DeployOPChainInput is BaseDeployIO { else if (_sel == this.unsafeBlockSigner.selector) _unsafeBlockSigner = _addr; else if (_sel == this.proposer.selector) _proposer = _addr; else if (_sel == this.challenger.selector) _challenger = _addr; - else if (_sel == this.opcmProxy.selector) _opcmProxy = OPContractsManager(_addr); + else if (_sel == this.opcm.selector) _opcm = OPContractsManager(_addr); else revert("DeployOPChainInput: unknown selector"); } @@ -174,11 +174,10 @@ contract DeployOPChainInput is BaseDeployIO { return abi.encode(ScriptConstants.DEFAULT_STARTING_ANCHOR_ROOTS()); } - function opcmProxy() public returns 
(OPContractsManager) { - require(address(_opcmProxy) != address(0), "DeployOPChainInput: not set"); - DeployUtils.assertValidContractAddress(address(_opcmProxy)); - DeployUtils.assertERC1967ImplementationSet(address(_opcmProxy)); - return _opcmProxy; + function opcm() public view returns (OPContractsManager) { + require(address(_opcm) != address(0), "DeployOPChainInput: not set"); + DeployUtils.assertValidContractAddress(address(_opcm)); + return _opcm; } function saltMixer() public view returns (string memory) { @@ -347,7 +346,7 @@ contract DeployOPChain is Script { // -------- Core Deployment Methods -------- function run(DeployOPChainInput _doi, DeployOPChainOutput _doo) public { - OPContractsManager opcmProxy = _doi.opcmProxy(); + OPContractsManager opcm = _doi.opcm(); OPContractsManager.Roles memory roles = OPContractsManager.Roles({ opChainProxyAdminOwner: _doi.opChainProxyAdminOwner(), @@ -374,7 +373,7 @@ contract DeployOPChain is Script { }); vm.broadcast(msg.sender); - OPContractsManager.DeployOutput memory deployOutput = opcmProxy.deploy(deployInput); + OPContractsManager.DeployOutput memory deployOutput = opcm.deploy(deployInput); vm.label(address(deployOutput.opChainProxyAdmin), "opChainProxyAdmin"); vm.label(address(deployOutput.addressManager), "addressManager"); @@ -480,9 +479,9 @@ contract DeployOPChain is Script { "DPG-20" ); - OPContractsManager opcm = _doi.opcmProxy(); - (address mips,) = opcm.implementations(opcm.latestRelease(), "MIPS"); - require(game.vm() == IBigStepper(mips), "DPG-30"); + OPContractsManager opcm = _doi.opcm(); + address mipsImpl = opcm.implementations().mipsImpl; + require(game.vm() == IBigStepper(mipsImpl), "DPG-30"); require(address(game.weth()) == address(_doo.delayedWETHPermissionedGameProxy()), "DPG-40"); require(address(game.anchorStateRegistry()) == address(_doo.anchorStateRegistryProxy()), "DPG-50"); @@ -552,9 +551,7 @@ contract DeployOPChain is Script { require(outputConfig.maximumBaseFee == rConfig.maximumBaseFee, "SYSCON-130"); require(systemConfig.startBlock() == block.number, "SYSCON-140"); - require( - systemConfig.batchInbox() == _doi.opcmProxy().chainIdToBatchInboxAddress(_doi.l2ChainId()), "SYSCON-150" - ); + require(systemConfig.batchInbox() == _doi.opcm().chainIdToBatchInboxAddress(_doi.l2ChainId()), "SYSCON-150"); require(systemConfig.l1CrossDomainMessenger() == address(_doo.l1CrossDomainMessengerProxy()), "SYSCON-160"); require(systemConfig.l1ERC721Bridge() == address(_doo.l1ERC721BridgeProxy()), "SYSCON-170"); @@ -579,7 +576,7 @@ contract DeployOPChain is Script { require(address(messenger.PORTAL()) == address(_doo.optimismPortalProxy()), "L1xDM-30"); require(address(messenger.portal()) == address(_doo.optimismPortalProxy()), "L1xDM-40"); - require(address(messenger.superchainConfig()) == address(_doi.opcmProxy().superchainConfig()), "L1xDM-50"); + require(address(messenger.superchainConfig()) == address(_doi.opcm().superchainConfig()), "L1xDM-50"); bytes32 xdmSenderSlot = vm.load(address(messenger), bytes32(uint256(204))); require(address(uint160(uint256(xdmSenderSlot))) == Constants.DEFAULT_L2_SENDER, "L1xDM-60"); @@ -595,7 +592,7 @@ contract DeployOPChain is Script { require(address(bridge.messenger()) == address(messenger), "L1SB-20"); require(address(bridge.OTHER_BRIDGE()) == Predeploys.L2_STANDARD_BRIDGE, "L1SB-30"); require(address(bridge.otherBridge()) == Predeploys.L2_STANDARD_BRIDGE, "L1SB-40"); - require(address(bridge.superchainConfig()) == address(_doi.opcmProxy().superchainConfig()), "L1SB-50"); + 
require(address(bridge.superchainConfig()) == address(_doi.opcm().superchainConfig()), "L1SB-50"); } function assertValidOptimismMintableERC20Factory(DeployOPChainInput, DeployOPChainOutput _doo) internal { @@ -617,12 +614,12 @@ contract DeployOPChain is Script { require(address(bridge.MESSENGER()) == address(_doo.l1CrossDomainMessengerProxy()), "L721B-30"); require(address(bridge.messenger()) == address(_doo.l1CrossDomainMessengerProxy()), "L721B-40"); - require(address(bridge.superchainConfig()) == address(_doi.opcmProxy().superchainConfig()), "L721B-50"); + require(address(bridge.superchainConfig()) == address(_doi.opcm().superchainConfig()), "L721B-50"); } function assertValidOptimismPortal(DeployOPChainInput _doi, DeployOPChainOutput _doo) internal { IOptimismPortal2 portal = _doo.optimismPortalProxy(); - ISuperchainConfig superchainConfig = ISuperchainConfig(address(_doi.opcmProxy().superchainConfig())); + ISuperchainConfig superchainConfig = ISuperchainConfig(address(_doi.opcm().superchainConfig())); require(address(portal.disputeGameFactory()) == address(_doo.disputeGameFactoryProxy()), "PORTAL-10"); require(address(portal.systemConfig()) == address(_doo.systemConfigProxy()), "PORTAL-20"); diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol index e126c39f18c9..29709f2b125f 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol @@ -16,7 +16,7 @@ import { Deployer } from "scripts/deploy/Deployer.sol"; import { LivenessGuard } from "src/safe/LivenessGuard.sol"; import { LivenessModule } from "src/safe/LivenessModule.sol"; import { DeputyGuardianModule } from "src/safe/DeputyGuardianModule.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { Deploy } from "./Deploy.s.sol"; @@ -59,7 +59,7 @@ struct GuardianConfig { /// be used as an example to guide the setup and configuration of the Safe contracts. contract DeployOwnership is Deploy { /// @notice Internal function containing the deploy logic. 
- function _run() internal override { + function _run(bool) internal override { console.log("start of Ownership Deployment"); // The SuperchainConfig is needed as a constructor argument to the Deputy Guardian Module deploySuperchainConfig(); diff --git a/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol b/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol index 74492556e1b1..f039fa47ef99 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol @@ -4,10 +4,10 @@ pragma solidity 0.8.15; import { Script } from "forge-std/Script.sol"; import { stdToml } from "forge-std/StdToml.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IProtocolVersions, ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; -import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions, ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { Solarray } from "scripts/libraries/Solarray.sol"; @@ -60,9 +60,9 @@ import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; // we use variable names that are shorthand for the full contract names, for example: // - `dsi` for DeploySuperchainInput // - `dso` for DeploySuperchainOutput -// - `dio` for DeployImplementationsInput +// - `dii` for DeployImplementationsInput // - `dio` for DeployImplementationsOutput -// - `doo` for DeployOPChainInput +// - `doi` for DeployOPChainInput // - `doo` for DeployOPChainOutput // - etc. 
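The shorthand fixed in the comment above (`dsi`/`dso`, `dii`/`dio`, `doi`/`doo`) reflects how each of these scripts is driven through a paired Input/Output contract. A rough usage sketch for the new DeployOPCM script, using only functions that appear in this diff — the addresses and release string are placeholders, and the remaining blueprint and implementation inputs are elided, so every one of them would need to be set before `run` passes its checks:

// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;

import { Test } from "forge-std/Test.sol";
import { DeployOPCM, DeployOPCMInput, DeployOPCMOutput } from "scripts/deploy/DeployOPCM.s.sol";
import { OPContractsManager } from "src/L1/OPContractsManager.sol";

contract DeployOPCM_UsageSketch is Test {
    function test_deployOPCM_sketch() public {
        DeployOPCM script = new DeployOPCM();

        // Etch the IO contracts at deterministic addresses derived from msg.sender.
        (DeployOPCMInput doi, DeployOPCMOutput doo) = script.etchIOContracts();

        // Populate inputs through the selector-keyed setters. Addresses are placeholders.
        doi.set(doi.superchainConfig.selector, address(0x1111));
        doi.set(doi.protocolVersions.selector, address(0x2222));
        doi.set(doi.l1ContractsRelease.selector, "op-contracts/vX.Y.Z");
        // ...every blueprint and implementation address must be set the same way...

        // Deploy OPContractsManager and read the result back off the output contract.
        script.run(doi, doo);
        OPContractsManager opcm = doo.opcm();
        assertTrue(address(opcm) != address(0));
    }
}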
diff --git a/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol b/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol index 3992cf1cb657..8917479543e5 100644 --- a/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol @@ -2,39 +2,28 @@ pragma solidity 0.8.15; import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; import { Script } from "forge-std/Script.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { DeployOPChainOutput } from "scripts/deploy/DeployOPChain.s.sol"; -import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; +import { IMIPS } from "interfaces/cannon/IMIPS.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { IStaticL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IStaticL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; contract ReadImplementationAddressesInput is DeployOPChainOutput { - OPContractsManager internal _opcmProxy; - string internal _release; + OPContractsManager internal _opcm; function set(bytes4 _sel, address _addr) public override { require(_addr != address(0), "ReadImplementationAddressesInput: cannot set zero address"); - if (_sel == this.opcmProxy.selector) _opcmProxy = OPContractsManager(_addr); + if (_sel == this.opcm.selector) _opcm = OPContractsManager(_addr); else if (_sel == this.addressManager.selector) _addressManager = IAddressManager(_addr); else super.set(_sel, _addr); } - function set(bytes4 _sel, string memory _val) public { - if (_sel == this.release.selector) _release = _val; - else revert("ReadImplementationAddressesInput: unknown selector"); - } - - function opcmProxy() public view returns (OPContractsManager) { - DeployUtils.assertValidContractAddress(address(_opcmProxy)); - return _opcmProxy; - } - - function release() public view returns (string memory) { - require(bytes(_release).length != 0, "ReadImplementationAddressesInput: release not set"); - return _release; + function opcm() public view returns (OPContractsManager) { + DeployUtils.assertValidContractAddress(address(_opcm)); + return _opcm; } } @@ -154,9 +143,12 @@ contract ReadImplementationAddresses is Script { vm.prank(address(0)); _rio.set(_rio.l1StandardBridge.selector, l1SBImpl); - (address mipsLogic,) = _rii.opcmProxy().implementations(_rii.release(), "MIPS"); + address mipsLogic = _rii.opcm().implementations().mipsImpl; _rio.set(_rio.mipsSingleton.selector, mipsLogic); + address delayedWETH = _rii.opcm().implementations().delayedWETHImpl; + _rio.set(_rio.delayedWETH.selector, delayedWETH); + IAddressManager am = _rii.addressManager(); _rio.set(_rio.l1CrossDomainMessenger.selector, am.getAddress("OVM_L1CrossDomainMessenger")); diff --git a/packages/contracts-bedrock/scripts/getting-started/wallets.sh b/packages/contracts-bedrock/scripts/getting-started/wallets.sh index 1d3ebfc6bbd5..36a707c431cc 100755 --- a/packages/contracts-bedrock/scripts/getting-started/wallets.sh +++ b/packages/contracts-bedrock/scripts/getting-started/wallets.sh @@ -10,18 +10,21 @@ wallet1=$(cast wallet new) wallet2=$(cast wallet new) wallet3=$(cast wallet new) 
wallet4=$(cast wallet new) +wallet5=$(cast wallet new) # Grab wallet addresses address1=$(echo "$wallet1" | awk '/Address/ { print $2 }') address2=$(echo "$wallet2" | awk '/Address/ { print $2 }') address3=$(echo "$wallet3" | awk '/Address/ { print $2 }') address4=$(echo "$wallet4" | awk '/Address/ { print $2 }') +address5=$(echo "$wallet5" | awk '/Address/ { print $2 }') # Grab wallet private keys key1=$(echo "$wallet1" | awk '/Private key/ { print $3 }') key2=$(echo "$wallet2" | awk '/Private key/ { print $3 }') key3=$(echo "$wallet3" | awk '/Private key/ { print $3 }') key4=$(echo "$wallet4" | awk '/Private key/ { print $3 }') +key5=$(echo "$wallet5" | awk '/Private key/ { print $3 }') # Print out the environment variables to copy echo "# Copy the following into your .envrc file:" @@ -41,3 +44,7 @@ echo echo "# Sequencer account" echo "export GS_SEQUENCER_ADDRESS=$address4" echo "export GS_SEQUENCER_PRIVATE_KEY=$key4" +echo +echo "# Challenger account" +echo "export GS_CHALLENGER_ADDRESS=$address5" +echo "export GS_CHALLENGER_PRIVATE_KEY=$key5" diff --git a/packages/contracts-bedrock/scripts/libraries/Constants.sol b/packages/contracts-bedrock/scripts/libraries/Constants.sol index 603084ac6eef..d388fb37778e 100644 --- a/packages/contracts-bedrock/scripts/libraries/Constants.sol +++ b/packages/contracts-bedrock/scripts/libraries/Constants.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; import { GameTypes, OutputRoot, Hash } from "src/dispute/lib/Types.sol"; /// @title Constants diff --git a/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol b/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol index da9aea12a0ac..44d71addb255 100644 --- a/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol +++ b/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol @@ -12,10 +12,10 @@ import { Bytes } from "src/libraries/Bytes.sol"; import { Constants } from "src/libraries/Constants.sol"; // Interfaces -import { IProxy } from "src/universal/interfaces/IProxy.sol"; -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { IL1ChugSplashProxy, IStaticL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; -import { IResolvedDelegateProxy } from "src/legacy/interfaces/IResolvedDelegateProxy.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IL1ChugSplashProxy, IStaticL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; +import { IResolvedDelegateProxy } from "interfaces/legacy/IResolvedDelegateProxy.sol"; library DeployUtils { Vm internal constant vm = Vm(address(uint160(uint256(keccak256("hevm cheat code"))))); diff --git a/packages/contracts-bedrock/snapshots/abi/AttestationStation.json b/packages/contracts-bedrock/snapshots/abi/AttestationStation.json deleted file mode 100644 index ba7d5f9759e7..000000000000 --- a/packages/contracts-bedrock/snapshots/abi/AttestationStation.json +++ /dev/null @@ -1,128 +0,0 @@ -[ - { - "inputs": [ - { - "components": [ - { - "internalType": "address", - "name": "about", - "type": "address" - }, - { - "internalType": "bytes32", - "name": "key", - "type": "bytes32" - }, - { - "internalType": "bytes", - "name": "val", - "type": "bytes" - } - ], - "internalType": "struct 
AttestationStation.AttestationData[]", - "name": "_attestations", - "type": "tuple[]" - } - ], - "name": "attest", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_about", - "type": "address" - }, - { - "internalType": "bytes32", - "name": "_key", - "type": "bytes32" - }, - { - "internalType": "bytes", - "name": "_val", - "type": "bytes" - } - ], - "name": "attest", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - }, - { - "internalType": "address", - "name": "", - "type": "address" - }, - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "name": "attestations", - "outputs": [ - { - "internalType": "bytes", - "name": "", - "type": "bytes" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "version", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "creator", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "about", - "type": "address" - }, - { - "indexed": true, - "internalType": "bytes32", - "name": "key", - "type": "bytes32" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "val", - "type": "bytes" - } - ], - "name": "AttestationCreated", - "type": "event" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/DelayedVetoable.json b/packages/contracts-bedrock/snapshots/abi/DelayedVetoable.json deleted file mode 100644 index d76d1c8b108b..000000000000 --- a/packages/contracts-bedrock/snapshots/abi/DelayedVetoable.json +++ /dev/null @@ -1,207 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "address", - "name": "_vetoer", - "type": "address" - }, - { - "internalType": "address", - "name": "_initiator", - "type": "address" - }, - { - "internalType": "address", - "name": "_target", - "type": "address" - }, - { - "internalType": "uint256", - "name": "_operatingDelay", - "type": "uint256" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "stateMutability": "nonpayable", - "type": "fallback" - }, - { - "inputs": [], - "name": "delay", - "outputs": [ - { - "internalType": "uint256", - "name": "delay_", - "type": "uint256" - } - ], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "initiator", - "outputs": [ - { - "internalType": "address", - "name": "initiator_", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes32", - "name": "_callHash", - "type": "bytes32" - } - ], - "name": "queuedAt", - "outputs": [ - { - "internalType": "uint256", - "name": "queuedAt_", - "type": "uint256" - } - ], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "target", - "outputs": [ - { - "internalType": "address", - "name": "target_", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "version", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "vetoer", - "outputs": [ - { - 
"internalType": "address", - "name": "vetoer_", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint256", - "name": "delay", - "type": "uint256" - } - ], - "name": "DelayActivated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "callHash", - "type": "bytes32" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "data", - "type": "bytes" - } - ], - "name": "Forwarded", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "callHash", - "type": "bytes32" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "data", - "type": "bytes" - } - ], - "name": "Initiated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "callHash", - "type": "bytes32" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "data", - "type": "bytes" - } - ], - "name": "Vetoed", - "type": "event" - }, - { - "inputs": [], - "name": "ForwardingEarly", - "type": "error" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "expected", - "type": "address" - }, - { - "internalType": "address", - "name": "actual", - "type": "address" - } - ], - "name": "Unauthorized", - "type": "error" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json b/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json index e1e59c38701c..a2f02cce13bd 100644 --- a/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json +++ b/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json @@ -2,54 +2,61 @@ { "inputs": [ { - "internalType": "GameType", - "name": "_gameType", - "type": "uint32" - }, - { - "internalType": "Claim", - "name": "_absolutePrestate", - "type": "bytes32" - }, - { - "internalType": "uint256", - "name": "_maxGameDepth", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "_splitDepth", - "type": "uint256" - }, - { - "internalType": "Duration", - "name": "_clockExtension", - "type": "uint64" - }, - { - "internalType": "Duration", - "name": "_maxClockDuration", - "type": "uint64" - }, - { - "internalType": "contract IBigStepper", - "name": "_vm", - "type": "address" - }, - { - "internalType": "contract IDelayedWETH", - "name": "_weth", - "type": "address" - }, - { - "internalType": "contract IAnchorStateRegistry", - "name": "_anchorStateRegistry", - "type": "address" - }, - { - "internalType": "uint256", - "name": "_l2ChainId", - "type": "uint256" + "components": [ + { + "internalType": "GameType", + "name": "gameType", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "absolutePrestate", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "maxGameDepth", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "splitDepth", + "type": "uint256" + }, + { + "internalType": "Duration", + "name": "clockExtension", + "type": "uint64" + }, + { + "internalType": "Duration", + "name": "maxClockDuration", + "type": "uint64" + }, + { + "internalType": "contract IBigStepper", + "name": "vm", + "type": "address" + }, + { + "internalType": "contract IDelayedWETH", + "name": "weth", + "type": "address" + }, + { + "internalType": "contract IAnchorStateRegistry", + "name": "anchorStateRegistry", + "type": "address" + }, + { + 
"internalType": "uint256", + "name": "l2ChainId", + "type": "uint256" + } + ], + "internalType": "struct FaultDisputeGame.GameConstructorParams", + "name": "_params", + "type": "tuple" } ], "stateMutability": "nonpayable", diff --git a/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json b/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json index 6434233930db..9f51cc1c3a27 100644 --- a/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json +++ b/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json @@ -247,6 +247,11 @@ "name": "IdOriginNotL2ToL2CrossDomainMessenger", "type": "error" }, + { + "inputs": [], + "name": "InvalidChainId", + "type": "error" + }, { "inputs": [], "name": "MessageAlreadyRelayed", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index 7c478feb235d..b5758eca610f 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -10,6 +10,110 @@ "internalType": "contract IProtocolVersions", "name": "_protocolVersions", "type": "address" + }, + { + "internalType": "string", + "name": "_l1ContractsRelease", + "type": "string" + }, + { + "components": [ + { + "internalType": "address", + "name": "addressManager", + "type": "address" + }, + { + "internalType": "address", + "name": "proxy", + "type": "address" + }, + { + "internalType": "address", + "name": "proxyAdmin", + "type": "address" + }, + { + "internalType": "address", + "name": "l1ChugSplashProxy", + "type": "address" + }, + { + "internalType": "address", + "name": "resolvedDelegateProxy", + "type": "address" + }, + { + "internalType": "address", + "name": "anchorStateRegistry", + "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame1", + "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame2", + "type": "address" + } + ], + "internalType": "struct OPContractsManager.Blueprints", + "name": "_blueprints", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "address", + "name": "l1ERC721BridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismPortalImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "systemConfigImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismMintableERC20FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1CrossDomainMessengerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1StandardBridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "disputeGameFactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "delayedWETHImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "mipsImpl", + "type": "address" + } + ], + "internalType": "struct OPContractsManager.Implementations", + "name": "_implementations", + "type": "tuple" } ], "stateMutability": "nonpayable", @@ -298,138 +402,68 @@ "type": "function" }, { - "inputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - }, - { - "internalType": "string", - "name": "", - "type": "string" - } - ], + "inputs": [], "name": "implementations", "outputs": [ - { - "internalType": "address", - "name": "logic", - "type": "address" - }, - { - "internalType": "bytes4", - "name": 
"initializer", - "type": "bytes4" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ { "components": [ { - "components": [ - { - "internalType": "address", - "name": "addressManager", - "type": "address" - }, - { - "internalType": "address", - "name": "proxy", - "type": "address" - }, - { - "internalType": "address", - "name": "proxyAdmin", - "type": "address" - }, - { - "internalType": "address", - "name": "l1ChugSplashProxy", - "type": "address" - }, - { - "internalType": "address", - "name": "resolvedDelegateProxy", - "type": "address" - }, - { - "internalType": "address", - "name": "anchorStateRegistry", - "type": "address" - }, - { - "internalType": "address", - "name": "permissionedDisputeGame1", - "type": "address" - }, - { - "internalType": "address", - "name": "permissionedDisputeGame2", - "type": "address" - } - ], - "internalType": "struct OPContractsManager.Blueprints", - "name": "blueprints", - "type": "tuple" + "internalType": "address", + "name": "l1ERC721BridgeImpl", + "type": "address" }, { - "components": [ - { - "internalType": "string", - "name": "name", - "type": "string" - }, - { - "components": [ - { - "internalType": "address", - "name": "logic", - "type": "address" - }, - { - "internalType": "bytes4", - "name": "initializer", - "type": "bytes4" - } - ], - "internalType": "struct OPContractsManager.Implementation", - "name": "info", - "type": "tuple" - } - ], - "internalType": "struct OPContractsManager.ImplementationSetter[]", - "name": "setters", - "type": "tuple[]" + "internalType": "address", + "name": "optimismPortalImpl", + "type": "address" }, { - "internalType": "string", - "name": "release", - "type": "string" + "internalType": "address", + "name": "systemConfigImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismMintableERC20FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1CrossDomainMessengerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1StandardBridgeImpl", + "type": "address" }, { - "internalType": "bool", - "name": "isLatest", - "type": "bool" + "internalType": "address", + "name": "disputeGameFactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "delayedWETHImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "mipsImpl", + "type": "address" } ], - "internalType": "struct OPContractsManager.InitializerInputs", - "name": "_initializerInputs", + "internalType": "struct OPContractsManager.Implementations", + "name": "", "type": "tuple" } ], - "name": "initialize", - "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "view", "type": "function" }, { "inputs": [], - "name": "latestRelease", + "name": "l1ContractsRelease", "outputs": [ { "internalType": "string", @@ -529,19 +563,6 @@ "name": "Deployed", "type": "event" }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint8", - "name": "version", - "type": "uint8" - } - ], - "name": "Initialized", - "type": "event" - }, { "inputs": [ { diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json index 7c478feb235d..b5758eca610f 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json @@ -10,6 +10,110 @@ "internalType": "contract IProtocolVersions", "name": "_protocolVersions", 
"type": "address" + }, + { + "internalType": "string", + "name": "_l1ContractsRelease", + "type": "string" + }, + { + "components": [ + { + "internalType": "address", + "name": "addressManager", + "type": "address" + }, + { + "internalType": "address", + "name": "proxy", + "type": "address" + }, + { + "internalType": "address", + "name": "proxyAdmin", + "type": "address" + }, + { + "internalType": "address", + "name": "l1ChugSplashProxy", + "type": "address" + }, + { + "internalType": "address", + "name": "resolvedDelegateProxy", + "type": "address" + }, + { + "internalType": "address", + "name": "anchorStateRegistry", + "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame1", + "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame2", + "type": "address" + } + ], + "internalType": "struct OPContractsManager.Blueprints", + "name": "_blueprints", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "address", + "name": "l1ERC721BridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismPortalImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "systemConfigImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismMintableERC20FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1CrossDomainMessengerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1StandardBridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "disputeGameFactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "delayedWETHImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "mipsImpl", + "type": "address" + } + ], + "internalType": "struct OPContractsManager.Implementations", + "name": "_implementations", + "type": "tuple" } ], "stateMutability": "nonpayable", @@ -298,138 +402,68 @@ "type": "function" }, { - "inputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - }, - { - "internalType": "string", - "name": "", - "type": "string" - } - ], + "inputs": [], "name": "implementations", "outputs": [ - { - "internalType": "address", - "name": "logic", - "type": "address" - }, - { - "internalType": "bytes4", - "name": "initializer", - "type": "bytes4" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ { "components": [ { - "components": [ - { - "internalType": "address", - "name": "addressManager", - "type": "address" - }, - { - "internalType": "address", - "name": "proxy", - "type": "address" - }, - { - "internalType": "address", - "name": "proxyAdmin", - "type": "address" - }, - { - "internalType": "address", - "name": "l1ChugSplashProxy", - "type": "address" - }, - { - "internalType": "address", - "name": "resolvedDelegateProxy", - "type": "address" - }, - { - "internalType": "address", - "name": "anchorStateRegistry", - "type": "address" - }, - { - "internalType": "address", - "name": "permissionedDisputeGame1", - "type": "address" - }, - { - "internalType": "address", - "name": "permissionedDisputeGame2", - "type": "address" - } - ], - "internalType": "struct OPContractsManager.Blueprints", - "name": "blueprints", - "type": "tuple" + "internalType": "address", + "name": "l1ERC721BridgeImpl", + "type": "address" }, { - "components": [ - { - "internalType": "string", - "name": "name", - "type": "string" - }, - { - "components": [ - { - "internalType": "address", - "name": "logic", - 
"type": "address" - }, - { - "internalType": "bytes4", - "name": "initializer", - "type": "bytes4" - } - ], - "internalType": "struct OPContractsManager.Implementation", - "name": "info", - "type": "tuple" - } - ], - "internalType": "struct OPContractsManager.ImplementationSetter[]", - "name": "setters", - "type": "tuple[]" + "internalType": "address", + "name": "optimismPortalImpl", + "type": "address" }, { - "internalType": "string", - "name": "release", - "type": "string" + "internalType": "address", + "name": "systemConfigImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismMintableERC20FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1CrossDomainMessengerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1StandardBridgeImpl", + "type": "address" }, { - "internalType": "bool", - "name": "isLatest", - "type": "bool" + "internalType": "address", + "name": "disputeGameFactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "delayedWETHImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "mipsImpl", + "type": "address" } ], - "internalType": "struct OPContractsManager.InitializerInputs", - "name": "_initializerInputs", + "internalType": "struct OPContractsManager.Implementations", + "name": "", "type": "tuple" } ], - "name": "initialize", - "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "view", "type": "function" }, { "inputs": [], - "name": "latestRelease", + "name": "l1ContractsRelease", "outputs": [ { "internalType": "string", @@ -529,19 +563,6 @@ "name": "Deployed", "type": "event" }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint8", - "name": "version", - "type": "uint8" - } - ], - "name": "Initialized", - "type": "event" - }, { "inputs": [ { diff --git a/packages/contracts-bedrock/snapshots/abi/OptimismSuperchainERC20.json b/packages/contracts-bedrock/snapshots/abi/OptimismSuperchainERC20.json index 24ac8a88fab4..fbe697b864d8 100644 --- a/packages/contracts-bedrock/snapshots/abi/OptimismSuperchainERC20.json +++ b/packages/contracts-bedrock/snapshots/abi/OptimismSuperchainERC20.json @@ -454,6 +454,12 @@ "internalType": "uint256", "name": "amount", "type": "uint256" + }, + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" } ], "name": "CrosschainBurn", @@ -473,6 +479,12 @@ "internalType": "uint256", "name": "amount", "type": "uint256" + }, + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" } ], "name": "CrosschainMint", diff --git a/packages/contracts-bedrock/snapshots/abi/Optimist.json b/packages/contracts-bedrock/snapshots/abi/Optimist.json deleted file mode 100644 index 96bbc0591a30..000000000000 --- a/packages/contracts-bedrock/snapshots/abi/Optimist.json +++ /dev/null @@ -1,536 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "string", - "name": "_name", - "type": "string" - }, - { - "internalType": "string", - "name": "_symbol", - "type": "string" - }, - { - "internalType": "address", - "name": "_baseURIAttestor", - "type": "address" - }, - { - "internalType": "contract AttestationStation", - "name": "_attestationStation", - "type": "address" - }, - { - "internalType": "contract OptimistAllowlist", - "name": "_optimistAllowlist", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "ATTESTATION_STATION", - "outputs": [ - { - 
"internalType": "contract AttestationStation", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "BASE_URI_ATTESTATION_KEY", - "outputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "BASE_URI_ATTESTOR", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "OPTIMIST_ALLOWLIST", - "outputs": [ - { - "internalType": "contract OptimistAllowlist", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - }, - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "name": "approve", - "outputs": [], - "stateMutability": "pure", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "owner", - "type": "address" - } - ], - "name": "balanceOf", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "baseURI", - "outputs": [ - { - "internalType": "string", - "name": "uri_", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "burn", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "getApproved", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "string", - "name": "_name", - "type": "string" - }, - { - "internalType": "string", - "name": "_symbol", - "type": "string" - } - ], - "name": "initialize", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "internalType": "address", - "name": "operator", - "type": "address" - } - ], - "name": "isApprovedForAll", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_recipient", - "type": "address" - } - ], - "name": "isOnAllowList", - "outputs": [ - { - "internalType": "bool", - "name": "allowed_", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_recipient", - "type": "address" - } - ], - "name": "mint", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "name", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "ownerOf", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - 
"name": "from", - "type": "address" - }, - { - "internalType": "address", - "name": "to", - "type": "address" - }, - { - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "safeTransferFrom", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "from", - "type": "address" - }, - { - "internalType": "address", - "name": "to", - "type": "address" - }, - { - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - }, - { - "internalType": "bytes", - "name": "data", - "type": "bytes" - } - ], - "name": "safeTransferFrom", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - }, - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "name": "setApprovalForAll", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes4", - "name": "interfaceId", - "type": "bytes4" - } - ], - "name": "supportsInterface", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "symbol", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_owner", - "type": "address" - } - ], - "name": "tokenIdOfAddress", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "pure", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "_tokenId", - "type": "uint256" - } - ], - "name": "tokenURI", - "outputs": [ - { - "internalType": "string", - "name": "uri_", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "from", - "type": "address" - }, - { - "internalType": "address", - "name": "to", - "type": "address" - }, - { - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "transferFrom", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "version", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "approved", - "type": "address" - }, - { - "indexed": true, - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "Approval", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "operator", - "type": "address" - }, - { - "indexed": false, - "internalType": "bool", - "name": "approved", - "type": "bool" - } - ], - "name": "ApprovalForAll", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint8", - "name": "version", - "type": "uint8" - } - ], - "name": "Initialized", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - 
"indexed": true, - "internalType": "address", - "name": "from", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "to", - "type": "address" - }, - { - "indexed": true, - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "Transfer", - "type": "event" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/OptimistAllowlist.json b/packages/contracts-bedrock/snapshots/abi/OptimistAllowlist.json deleted file mode 100644 index 87ac8f8a014f..000000000000 --- a/packages/contracts-bedrock/snapshots/abi/OptimistAllowlist.json +++ /dev/null @@ -1,138 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "contract AttestationStation", - "name": "_attestationStation", - "type": "address" - }, - { - "internalType": "address", - "name": "_allowlistAttestor", - "type": "address" - }, - { - "internalType": "address", - "name": "_coinbaseQuestAttestor", - "type": "address" - }, - { - "internalType": "address", - "name": "_optimistInviter", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "ALLOWLIST_ATTESTOR", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "ATTESTATION_STATION", - "outputs": [ - { - "internalType": "contract AttestationStation", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "COINBASE_QUEST_ATTESTOR", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY", - "outputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "OPTIMIST_CAN_MINT_ATTESTATION_KEY", - "outputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "OPTIMIST_INVITER", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_claimer", - "type": "address" - } - ], - "name": "isAllowedToMint", - "outputs": [ - { - "internalType": "bool", - "name": "allowed_", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "version", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/OptimistInviter.json b/packages/contracts-bedrock/snapshots/abi/OptimistInviter.json deleted file mode 100644 index a5300b20a3c1..000000000000 --- a/packages/contracts-bedrock/snapshots/abi/OptimistInviter.json +++ /dev/null @@ -1,282 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "address", - "name": "_inviteGranter", - "type": "address" - }, - { - "internalType": "contract AttestationStation", - "name": "_attestationStation", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "ATTESTATION_STATION", - "outputs": [ - { - "internalType": 
"contract AttestationStation", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "CAN_INVITE_ATTESTATION_KEY", - "outputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "CLAIMABLE_INVITE_TYPEHASH", - "outputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "EIP712_VERSION", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "INVITE_GRANTER", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "MIN_COMMITMENT_PERIOD", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_claimer", - "type": "address" - }, - { - "components": [ - { - "internalType": "address", - "name": "issuer", - "type": "address" - }, - { - "internalType": "bytes32", - "name": "nonce", - "type": "bytes32" - } - ], - "internalType": "struct OptimistInviter.ClaimableInvite", - "name": "_claimableInvite", - "type": "tuple" - }, - { - "internalType": "bytes", - "name": "_signature", - "type": "bytes" - } - ], - "name": "claimInvite", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes32", - "name": "_commitment", - "type": "bytes32" - } - ], - "name": "commitInvite", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "name": "commitmentTimestamps", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "string", - "name": "_name", - "type": "string" - } - ], - "name": "initialize", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "name": "inviteCounts", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address[]", - "name": "_accounts", - "type": "address[]" - }, - { - "internalType": "uint256", - "name": "_inviteCount", - "type": "uint256" - } - ], - "name": "setInviteCounts", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - }, - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "name": "usedNonces", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "version", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": 
"uint8", - "name": "version", - "type": "uint8" - } - ], - "name": "Initialized", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "issuer", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "claimer", - "type": "address" - } - ], - "name": "InviteClaimed", - "type": "event" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json b/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json index fd9737cc5842..eebc4adf16ea 100644 --- a/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json +++ b/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json @@ -2,54 +2,61 @@ { "inputs": [ { - "internalType": "GameType", - "name": "_gameType", - "type": "uint32" - }, - { - "internalType": "Claim", - "name": "_absolutePrestate", - "type": "bytes32" - }, - { - "internalType": "uint256", - "name": "_maxGameDepth", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "_splitDepth", - "type": "uint256" - }, - { - "internalType": "Duration", - "name": "_clockExtension", - "type": "uint64" - }, - { - "internalType": "Duration", - "name": "_maxClockDuration", - "type": "uint64" - }, - { - "internalType": "contract IBigStepper", - "name": "_vm", - "type": "address" - }, - { - "internalType": "contract IDelayedWETH", - "name": "_weth", - "type": "address" - }, - { - "internalType": "contract IAnchorStateRegistry", - "name": "_anchorStateRegistry", - "type": "address" - }, - { - "internalType": "uint256", - "name": "_l2ChainId", - "type": "uint256" + "components": [ + { + "internalType": "GameType", + "name": "gameType", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "absolutePrestate", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "maxGameDepth", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "splitDepth", + "type": "uint256" + }, + { + "internalType": "Duration", + "name": "clockExtension", + "type": "uint64" + }, + { + "internalType": "Duration", + "name": "maxClockDuration", + "type": "uint64" + }, + { + "internalType": "contract IBigStepper", + "name": "vm", + "type": "address" + }, + { + "internalType": "contract IDelayedWETH", + "name": "weth", + "type": "address" + }, + { + "internalType": "contract IAnchorStateRegistry", + "name": "anchorStateRegistry", + "type": "address" + }, + { + "internalType": "uint256", + "name": "l2ChainId", + "type": "uint256" + } + ], + "internalType": "struct FaultDisputeGame.GameConstructorParams", + "name": "_params", + "type": "tuple" }, { "internalType": "address", diff --git a/packages/contracts-bedrock/snapshots/abi/RISCV.json b/packages/contracts-bedrock/snapshots/abi/RISCV.json new file mode 100644 index 000000000000..1650fd3980ec --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/RISCV.json @@ -0,0 +1,68 @@ +[ + { + "inputs": [ + { + "internalType": "contract IPreimageOracle", + "name": "_oracle", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "oracle", + "outputs": [ + { + "internalType": "contract IPreimageOracle", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes", + "name": "_stateData", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "_proof", + "type": "bytes" + }, + { + 
"internalType": "bytes32", + "name": "_localContext", + "type": "bytes32" + } + ], + "name": "step", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/SuperchainWETH.json b/packages/contracts-bedrock/snapshots/abi/SuperchainWETH.json index 32d84df7d889..e8df09a80640 100644 --- a/packages/contracts-bedrock/snapshots/abi/SuperchainWETH.json +++ b/packages/contracts-bedrock/snapshots/abi/SuperchainWETH.json @@ -143,6 +143,53 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "address", + "name": "_from", + "type": "address" + }, + { + "internalType": "address", + "name": "_to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_amount", + "type": "uint256" + } + ], + "name": "relayETH", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_chainId", + "type": "uint256" + } + ], + "name": "sendETH", + "outputs": [ + { + "internalType": "bytes32", + "name": "msgHash_", + "type": "bytes32" + } + ], + "stateMutability": "payable", + "type": "function" + }, { "inputs": [ { @@ -306,6 +353,12 @@ "internalType": "uint256", "name": "amount", "type": "uint256" + }, + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" } ], "name": "CrosschainBurn", @@ -325,6 +378,12 @@ "internalType": "uint256", "name": "amount", "type": "uint256" + }, + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" } ], "name": "CrosschainMint", @@ -349,6 +408,68 @@ "name": "Deposit", "type": "event" }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "source", + "type": "uint256" + } + ], + "name": "RelayETH", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "destination", + "type": "uint256" + } + ], + "name": "SendETH", + "type": "event" + }, { "anonymous": false, "inputs": [ @@ -393,6 +514,11 @@ "name": "Withdrawal", "type": "event" }, + { + "inputs": [], + "name": "InvalidCrossDomainSender", + "type": "error" + }, { "inputs": [], "name": "NotCustomGasToken", @@ -402,5 +528,10 @@ "inputs": [], "name": "Unauthorized", "type": "error" + }, + { + "inputs": [], + "name": "ZeroAddress", + "type": "error" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index 
fb7b6d824392..b31b3f287e7c 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -1,242 +1,226 @@ { "src/L1/DataAvailabilityChallenge.sol": { - "initCodeHash": "0xbd00d6568abab3e7fc211c40d682862242f25493010a4a097bd1f3b45c8c87c3", - "sourceCodeHash": "0x58b587034a67b4bb718abbaded8ac23b082c0971105874bcc42c23f051c67f6e" - }, - "src/L1/DelayedVetoable.sol": { - "initCodeHash": "0x9fe8ade6f6332262ff1f3539ac0bf57660edbad3cf4c4cb230c2ddac18aa0a3f", - "sourceCodeHash": "0x30e83a535ef27b2e900c831c4e1a4ec2750195350011c4fdacda1da9db2d167b" + "initCodeHash": "0x2d2efa7ac3e2f96a5712344c8df0ef4657c6fe430c4aa33e68dd8327d708b6b6", + "sourceCodeHash": "0x262e3e3f144fc93fd431296251961a70e939703bf62d1c79f23239644ac8e978" }, "src/L1/L1CrossDomainMessenger.sol": { - "initCodeHash": "0x2e9cb3ceb5e55341b311f0666ef7655df4fafae75afdfbcd701cd9c9b2b017d5", - "sourceCodeHash": "0x848ec3774be17bcc8ba65a23d08e35e979b3f39f9d2ac8a810188f945c69c9ea" + "initCodeHash": "0x9e8763b2fca99b577e8374a1f8d0421eeeb3e922535728a22d24a1b658c85665", + "sourceCodeHash": "0x370dd42d9888fd53a85811190208bc0f68e4071a2e3e4b2d15f2a4b92f4707ff" }, "src/L1/L1ERC721Bridge.sol": { - "initCodeHash": "0xb3bf093ea83a24574a6093bebf5b2aea707355ed8d6702b2b5eb292e75b6ae42", - "sourceCodeHash": "0x289de9f40898b6305deecc6b60cdf566aa6c6a1444f713c3a0af23ea7878207e" + "initCodeHash": "0x7d8dbcc9b146d21ce4aabfc41a724d4c8d945956318a120127234afd53a4315c", + "sourceCodeHash": "0x48f4e1db42f82490e91fd33b05117b05c072444ed53bbd4e06e742d330a422bb" }, "src/L1/L1StandardBridge.sol": { - "initCodeHash": "0x802f72745bb9a82dc049377bb9cf6b58f35aec388aeb957b28a5e14f28d91bc1", - "sourceCodeHash": "0x24b784645b065a5393a2115a078d67f91eb09afd5e70baf81daf975381f16155" + "initCodeHash": "0x9037e1930606d8cd092738bf28dda74e8f993aade26ae5a79a9e1fe2cb1d8513", + "sourceCodeHash": "0x5b3dc434f58afd7667f82b8fdfdbea488441be0bae8bdff15cfc5a42ce670cc1" }, "src/L1/L2OutputOracle.sol": { - "initCodeHash": "0x1182bfb87c4ab399b912ca7fe18cdbf4b24c414e078fb0a55bd3c44d442d3ed1", - "sourceCodeHash": "0x4132ff37d267cb12224b75ea806c0aa7d25407b0d66ce526d7fcda8f7d223882" + "initCodeHash": "0x0e0573841e43e4c5f0df5839db2bdf387f29fed7c830f05960542e7d7109ed4a", + "sourceCodeHash": "0x8aafeffb41332fddf2fb1ef4fc033bd1f323cdc5b199c6951da73e3cb86276e6" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0xd58cb3978affc5c1457cdd498ff8420c90aef804d4c3b62cf42ab2691986d6d2", - "sourceCodeHash": "0x7bfa6eff76176649fe600303cd60009a0f6e282cbaec55836b5ea1f8875cbeb5" + "initCodeHash": "0x320a6d4417eb0d2597b4c6f4caa37bbf9e35f38d3ad27ddb57f149c680e6afff", + "sourceCodeHash": "0x3a6ac40939df1d9f4c88caa4e6139454de5e0ad4a241b27d1bab65a3ae44610d" }, "src/L1/OptimismPortal.sol": { - "initCodeHash": "0x152167cfa18635ae4918a6eb3371a599cfa084418c0a652799cdb48bfc0ee0cc", - "sourceCodeHash": "0xbe34b82900d02f71bb0949818eabe49531f7e0d8d8bae01f6dac4a296530d1aa" + "initCodeHash": "0xa8b2f8a6d1092c5e64529736462ebb35daa9ea9e67585f7de8e3e5394682ee64", + "sourceCodeHash": "0xb71e8bc24ea9ebb5692762005f2936ba2a00bf169e1e32f504a0f6e23a349a22" }, "src/L1/OptimismPortal2.sol": { - "initCodeHash": "0x218358b48f640b3fcb2d239f00dc1cd3b11517ad46c8e1efa44953d38da63540", - "sourceCodeHash": "0x66ac1212760db53a2bb1839e4cd17dc071d9273b8e6fb80646b79e91b3371c1a" + "initCodeHash": "0xa943efcc061bc59d129649de04ef8ba6318e2ff6eb10383b09ea71e3cbac5e5e", + "sourceCodeHash": "0x73df6c482332264954659ef4bcc18b7fb02a64a727018b4ae1aed8d2ec11c959" }, 
"src/L1/OptimismPortalInterop.sol": { - "initCodeHash": "0x39f66ac74341ec235fbdd0d79546283210bd8ac35a2ab2c4bd36c9722ce18411", - "sourceCodeHash": "0xbb98144285b9530e336f957d10b20363b350876597e30fd34821940896a2bae8" + "initCodeHash": "0x7f8118c0abdcae94ebd08b15709b27bf7abe8fec96c74be109f2126d99f943a1", + "sourceCodeHash": "0x813fcf02c02798ebba8ed93f95eca82bdf9080c0edc5f2492c72c19b6c5f36b4" }, "src/L1/ProtocolVersions.sol": { - "initCodeHash": "0xefd4806e8737716d5d2022ca2e9e9fba0a0cb5714b026166b58e472222c7d15f", - "sourceCodeHash": "0x15205131bf420aa6d03c558bb75dd49cd7439caed7ccdcbfd89c4170a48c94f5" + "initCodeHash": "0xb0ff1661226417342001fe9f0b64c340b7c074ff71579abf05399f4e742aaca1", + "sourceCodeHash": "0xc82754087747c067d4c3ae7deed08a574acbeaec0fbacc1f80ce63313ae817ed" }, "src/L1/SuperchainConfig.sol": { - "initCodeHash": "0xfca12d9016c746e5c275b186e0ca40cfd65cf45a5665aab7589a669fea3abb47", - "sourceCodeHash": "0x39489a85bc3a5c8560f82d41b31bf7fe22f5b648f4ed538f61695a73092ea9eb" + "initCodeHash": "0xddfc0ea9a7d5b0a3a3c1080d022295af57cd9bcd6171ad0fe09287c493c9e95d", + "sourceCodeHash": "0xb9f372ce43c42179efd7d7ee7f7df29f89c484efb7cd6e83430b615d1c2592d8" }, "src/L1/SystemConfig.sol": { - "initCodeHash": "0x429058f75d97fa7a7d0166b59830909bc722324feefc40f2b41419d6335d3f37", - "sourceCodeHash": "0x5ca776041a4ddc0d28ec55db7012d669481cd4601b0e71dbd3493a67b8a7e5a5" + "initCodeHash": "0x3ba55b46516de34186ff0cc92af9ca3ff916989ecb7d2fa9e82000f648607985", + "sourceCodeHash": "0x4085b02ea01cd16172a1809ddd9be69c567f7b204cefc93f7c4d9071da812daa" }, "src/L1/SystemConfigInterop.sol": { - "initCodeHash": "0x277a61dcabed81a15739a8e9ed50615252bcc687cebea852e00191d0a1fbe11f", - "sourceCodeHash": "0x38361a4f70a19e1b7819e933932a0c9fd2bcebaaebcbc7942f5c00dfaa2c28df" + "initCodeHash": "0xed198351099bd243a7a69e64944f43a3f203b5778ac55dbec428cc4df337cd8e", + "sourceCodeHash": "0x733fd71047569d974ac39477c6b6d55ec4100f32ac40b0597a0f7bdbde2867c3" }, "src/L2/BaseFeeVault.sol": { - "initCodeHash": "0xbf49824cf37e201181484a8a423fcad8f504dc925921a2b28e83398197858dec", - "sourceCodeHash": "0x983e8e248c61e362ba6a01dd2e217a535c9bb828dc0b4421f5f27e0577f2e14c" + "initCodeHash": "0x6745b7be3895a5e8d373df0066d931bae29c47672ac46c2f5829bd0052cc6d9e", + "sourceCodeHash": "0x45ea3acdbc8d1e583d4395239c9e9956e8cddda501f2e8eea50113333390f708" }, "src/L2/CrossL2Inbox.sol": { - "initCodeHash": "0x31ecaebf368ab3333e80c6dc004b3c9f9a31f813c3138ab388bb3eead9f1b4ee", - "sourceCodeHash": "0xa1779d84a14332dcdd167293171d0fe2629d759a23d7cc34ffe2bde7e1605dbc" + "initCodeHash": "0x7a189f6dff6c19ec6f1e94d84a0d9d98a320a68812f957e50bf8b63224bb0dce", + "sourceCodeHash": "0x9bbfabb19b7f572dadae797786c2f87d892693650151bd8de6eadee3e03fc559" }, "src/L2/ETHLiquidity.sol": { - "initCodeHash": "0x713c18f95a6a746d0703f475f3ae10c106c9b9ecb64d881a2e61b8969b581371", - "sourceCodeHash": "0x0b6afdc52d1ae88d9e4bbb5dc00920e7a6bd1e9d6595bfdbae64874190f39df0" + "initCodeHash": "0xbb16de6a3f678db7301694a000f315154f25f9660c8dcec4b0bef20bc7cfdebd", + "sourceCodeHash": "0x0576b189811bd343c2cdafcb512ece2c2ea20077bef8754a3dc3e3c80210b225" }, "src/L2/GasPriceOracle.sol": { - "initCodeHash": "0x7e8c2b42e10187ad649c0bf70c5688c2a4af3c412bacaec87d63c3f93ae4cfef", - "sourceCodeHash": "0xa12ce15ded3cca681b2fc9facaebbb45d740dd6f9c9496333c1c46689c9a2d99" + "initCodeHash": "0x83d50e3b34cd1b4de32f1cced28796b07aefc526cc17ceb1903ad55f4abc90b7", + "sourceCodeHash": "0x305c72d7be9149fce7095bd4641a1a19acada3126fbc43599f674cadbf6e7d6c" }, "src/L2/L1Block.sol": { - "initCodeHash": 
"0xa919d2aa76a7ecdfd076e2b1dbece499cc85706075f16eb6fa7b1a0fa7b38c1b", - "sourceCodeHash": "0x692cfcbc06dba6328f6e5c6b500741df04e4bdf730b2069aeb5d168355ea7b6f" + "initCodeHash": "0x22f9b9277e33dc27df8366c2dd6e8340d294947b57116db35c6d14c41225633f", + "sourceCodeHash": "0xffb6cf768097b2d6cb6ecb2d6463c176af9acd70415aa0d2e4f017758f737eee" }, "src/L2/L1BlockInterop.sol": { - "initCodeHash": "0x62e9cc59daaf72066ac20597a666db33e9a7b3f7be71a3d47ea4841a9aca9d07", - "sourceCodeHash": "0xe57627347366d74029a0d24f0b45d7b9cf82b81c94681d0f633d5e5c37c8de4a" + "initCodeHash": "0x67e99306d9a09cac587f65cfa2c0de55da9eca184fd1fc3f4b885d2c47114483", + "sourceCodeHash": "0x9493f90136917fc95d2ac942f061c1b9cffeff6d327afb46fe4e69784e7f2100" }, "src/L2/L1FeeVault.sol": { - "initCodeHash": "0xbf49824cf37e201181484a8a423fcad8f504dc925921a2b28e83398197858dec", - "sourceCodeHash": "0xc7cda130f2bb3648e04d5a480082aa1789e16456c1280954d822b05d30100b2d" + "initCodeHash": "0x6745b7be3895a5e8d373df0066d931bae29c47672ac46c2f5829bd0052cc6d9e", + "sourceCodeHash": "0xd0471c328c1d17c5863261322bf8d5aff2e7e9e3a1135631a993aa75667621df" }, "src/L2/L2CrossDomainMessenger.sol": { - "initCodeHash": "0xc496495496b96ea0eaf417c5e56b295836c12db3e6aafe2e607563e7a50b5b65", - "sourceCodeHash": "0x56edf0f36366326a92722ae3c7502bce3d80b2ee5e354181dc09ba801437a488" + "initCodeHash": "0xcef22d29fed2e160e4c4350dee5c7671d2fd280895f9d2e4655b7060c56d5ba7", + "sourceCodeHash": "0x754c71bffa5525159bc1d60c0085a5edc23fba0384a44e8c2c1215eaa7fd17b7" }, "src/L2/L2ERC721Bridge.sol": { - "initCodeHash": "0xaed0528e8b81817a0c3b41513c02e7fd678f58e34b98f02ea33d5a770a064c2f", - "sourceCodeHash": "0xf8569c75b801f38f8a5a41e94e90f159ddc5f5412804b26e3e564755a50631b8" + "initCodeHash": "0xf30040071bb7def116eab4c26c921d6c7b24e4f02d001601f52a1ee5bd6629fc", + "sourceCodeHash": "0xc7258cb394333527bfc58d0a8ba6bd9d90b46b60ace20a500d94192fea60aef4" }, "src/L2/L2StandardBridge.sol": { - "initCodeHash": "0xcb4aa19f0cd43a35cb5c65f26c3cfd7c41f1d1e5bcc15aef6096d385df7272c9", - "sourceCodeHash": "0x89771b53b7f6e64d943afb2a4bf15395efcf20d5302b76a18e52fa7cce8cdc56" + "initCodeHash": "0x7d2089948a373c62cdae80f329d6882eea450f2048f5ad45dbcceaaf37685d80", + "sourceCodeHash": "0xd64cf85ff5ecc1bf368d16532a9e29ba2dfc0f5456bbd3f9f05212c9120a2ed1" }, "src/L2/L2StandardBridgeInterop.sol": { - "initCodeHash": "0xc4eaece28d2cfca3c51247c3cce320a167a83c7fd13aea5736549d2b25e0b139", - "sourceCodeHash": "0x9e80044adf5f83c30b520ee153b75be5a152081c9e1271e7e618ecfccd1fb4ac" + "initCodeHash": "0x024711e1689bedcdb1e9de8b1c4a440a911634a0ce0a384246b4f9cd683a23f3", + "sourceCodeHash": "0xb92682bb93a87c22e6a2506b64ff609b76376a7d09f514125173c6792c354fc4" }, "src/L2/L2ToL1MessagePasser.sol": { - "initCodeHash": "0x13fe3729beb9ed966c97bef09acb9fe5043fe651d453145073d05f2567fa988d", - "sourceCodeHash": "0xd08a2e6514dbd44e16aa312a1b27b2841a9eab5622cbd05a39c30f543fad673c" + "initCodeHash": "0xf9d82084dcef31a3737a76d8ee4e5842ea190d0f77ed4678adb3bbb95217050f", + "sourceCodeHash": "0xaef8ea36c5b78cd12e0e62811d51db627ccf0dfd2cc5479fb707a10ef0d42048" }, "src/L2/L2ToL2CrossDomainMessenger.sol": { - "initCodeHash": "0x2a1a1ee4f47175ce661ee8e4e50cfa879b082dcb5278b1d66ddda00ed77bb744", - "sourceCodeHash": "0xa76133db7f449ae742f9ba988ad86ccb5672475f61298b9fefe411b63b63e9f6" + "initCodeHash": "0x45564b97c63419cc12eadc60425c6d001857a3eea688ecaf1439ae7ede6aa9aa", + "sourceCodeHash": "0xed64736338b43a42f6bc6a88cca734403e1bb9ceafa55e4738605dfdedd1a99f" }, "src/L2/OptimismSuperchainERC20.sol": { - "initCodeHash": 
"0x5bc5824030ecdb531e1f615d207cb73cdaa702e198769445d0ddbe717271eba9", - "sourceCodeHash": "0x0819c9411a155dca592d19b60c4176954202e4fe5d632a4ffbf88d465461252c" + "initCodeHash": "0xdac32a1057a6bc8a8d2ffdce1db8f34950cd0ffd1454d2133865736d21869192", + "sourceCodeHash": "0x4a7924f2195074145ac8e6221d77b24cd22d97423db2053937897e9d788990e2" }, "src/L2/OptimismSuperchainERC20Beacon.sol": { - "initCodeHash": "0x23dba3ceb9e58646695c306996c9e15251ac79acc6339c1a93d10a4c79da6dab", - "sourceCodeHash": "0xf4379e49665823c877f5732f35068435ce06e2394fce6910a5e113d16cdc9f95" + "initCodeHash": "0x8a4d7cac6dd8ce583c996837893b93560297be1269f97f785a502748b25ba310", + "sourceCodeHash": "0xb57024e16b528bade5fee7c236e03ffbb3f22e6376e6852e2109298af850b43c" }, "src/L2/OptimismSuperchainERC20Factory.sol": { - "initCodeHash": "0x18a362c57f08b611db98dfde96121385e938f995c84e3547c1c03fd49f9db2fd", - "sourceCodeHash": "0x450cd89d0aae7bbc85ff57a14a6d3468c24c6743f25943f6d895d34b1456c456" + "initCodeHash": "0x44659ea207ed173db4f1b519944c09c671d49f118e9d9ab85a010b8ebaf899e7", + "sourceCodeHash": "0xa1c0346cfe6932dde05dc6c1d9505cac38434d8a8f9e1e437253b1f4115f2506" }, "src/L2/SequencerFeeVault.sol": { - "initCodeHash": "0xcaadbf08057b5d47f7704257e9385a29e42a7a08c818646d109c5952d3d35218", - "sourceCodeHash": "0x05bbc6039e5a9ff38987e7b9b89c69e2ee8aa4b7ca20dd002ea1bbd3d70f27f3" + "initCodeHash": "0x02ca6cb6eebd2d6b91cf1eab483ee00b3233a7e8ad31f0e9cafc1f645ab3c24a", + "sourceCodeHash": "0x85c740c0888368ee95607635818ee698c27582e8917f40bc590d240447376da9" }, "src/L2/SuperchainERC20.sol": { "initCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", - "sourceCodeHash": "0xcf39c16893cace1e7d61350bfff05a27f3ce8da8eb0ac02cb5ac7bf603f163fa" + "sourceCodeHash": "0x981dca5b09da9038a9dff071b40a880e1b52b20268c6780ef54be3bc98a4f629" }, "src/L2/SuperchainTokenBridge.sol": { - "initCodeHash": "0x1cd2afdae6dd1b6ebc17f1d529e7d74c9b8b21b02db8589b8e389e2d5523d775", - "sourceCodeHash": "0x617aa994f659c5d8ebd54128d994f86f5b175ceca095b024b8524a7898e8ae62" + "initCodeHash": "0x6b568ed564aede82a3a4cbcdb51282cad0e588a3fe6d91cf76616d3113df3901", + "sourceCodeHash": "0xcd2b49cb7cf6d18616ee8bec9183fe5b5b460941875bc0b4158c4d5390ec3b0c" }, "src/L2/SuperchainWETH.sol": { - "initCodeHash": "0x5aef986a7c9c102b1e9b3068e2a2b66adce0a71dd5f39e03694622bf494f8d97", - "sourceCodeHash": "0xa62101a23b860e97f393027c898082a1c73d50679eceb6c6793844af29702359" + "initCodeHash": "0x6ded8aeea6edf7e0ead7b0d2a12ef236f1fb7d21980a1dd564cbe86affca7927", + "sourceCodeHash": "0x11d711704a5afcae6076d017ee001b25bc705728973b1ad2e6a32274a8475f50" }, "src/L2/WETH.sol": { - "initCodeHash": "0x17ea1b1c5d5a622d51c2961fde886a5498de63584e654ed1d69ee80dddbe0b17", - "sourceCodeHash": "0x0fa0633a769e73f5937514c0003ba7947a1c275bbe5b85d78879c42f0ed8895b" + "initCodeHash": "0x480d4f8dbec1b0d3211bccbbdfb69796f3e90c784f724b1bbfd4703b0aafdeba", + "sourceCodeHash": "0xe9964aa66db1dfc86772958b4c9276697e67f7055529a43e6a49a055009bc995" }, "src/cannon/MIPS.sol": { - "initCodeHash": "0xa3cbf121bad13c00227ea4fef128853d9a86b7ec9158de894f99b58d38d7630a", - "sourceCodeHash": "0xd8467700c80b3e62fa37193dc6513bac35282094b686b50e162e157f704dde00" + "initCodeHash": "0xc10654f0e6498f424f7a5095bac36005dc7062d3813cc8f805a15005fc37406b", + "sourceCodeHash": "0x6c45dd23cb0d6f9bf4f84855ad0caf70e53dee3fe6c41454f7bf8df52ec3a9af" }, "src/cannon/MIPS2.sol": { - "initCodeHash": "0xc38c76ab3aad78c81ca01b3235b402614972d6604b22fda1e870f1bf47be1194", - "sourceCodeHash": 
"0x3d38b1924669d1bde756f1306601c764a6d31f428ac72667a3dd194b3388210d" + "initCodeHash": "0x7476695bb101cb45213793291124e3ec41e13a02d291837b76d8a35bfc8ec2c1", + "sourceCodeHash": "0xeaceb5d28bd58fca6a234d9291ca01424bf83576d191ee3046272bc4987d0b29" }, "src/cannon/MIPS64.sol": { - "initCodeHash": "0x93aa8d7f9fd3c22276c0d303a3fefdf8f73cc55807b35e483bba64c92d02aaef", - "sourceCodeHash": "0x171d66c651fdad2ac9c287da92689815a5b09589945ada092179508ad2326306" + "initCodeHash": "0x6516160f35a85abb65d8102fa71f03cb57518787f9af85bc951f27ee60e6bb8f", + "sourceCodeHash": "0xd0802842e5656639f33324ed6498c60013e6d9cd63c9f097090da9a0a61700a4" }, "src/cannon/PreimageOracle.sol": { - "initCodeHash": "0x5d7e8ae64f802bd9d760e3d52c0a620bd02405dc2c8795818db9183792ffe81c", - "sourceCodeHash": "0x979d8595d925c70a123e72c062fa58c9ef94777c2e93b6bc3231d6679e2e9055" + "initCodeHash": "0xf08736a5af9277a4f3498dfee84a40c9b05f1a2ba3177459bebe2b0b54f99343", + "sourceCodeHash": "0x14b952b2a00bc4ec5e149bb5fb2a973bb255f0fd3f4a42b6bd05bc3bbe51f2b1" }, "src/dispute/AnchorStateRegistry.sol": { - "initCodeHash": "0x7bdbf9dc5125c953ea1833ccf0ad0e07d25b6f6c47e23da5374413324a38c5f9", - "sourceCodeHash": "0x1d918a536d9f6c900efdf069e96c2a27bb49340d6d1ebaa92dd6b481835a9a82" + "initCodeHash": "0x4e34a0d8de12cad21f37dc66755289ee38d1568a781f8ecc7ac38a0023d167ae", + "sourceCodeHash": "0xc9a8758bdd790ea3eff1a2d7416934d34a48b4a4e5340a66bd41044b27d36952" }, "src/dispute/DelayedWETH.sol": { - "initCodeHash": "0xb31e0ff80fd69bc3f3b7d53f3fa42da4cdae393e41b8816719ce5ebe3d248688", - "sourceCodeHash": "0x1dfc68560c0805faa78360e3d4ef2d768e2f3d6c0c7183d2077a2c4277c778db" + "initCodeHash": "0xee29b653713e6c33d263cc0c24b922b7defc08a4cab8e0ee77ca25a8139ed8dd", + "sourceCodeHash": "0xe6951dbc1d15c0e7dc0e88e25fe3a3d4511ac5b96a80091d5ec08e12004477af" }, "src/dispute/DisputeGameFactory.sol": { - "initCodeHash": "0xd72eced7cb5400d93188038a707fe6c1b04077f059cd8e2f5253e871de2cee3b", - "sourceCodeHash": "0x9cb0851b6e471461f2bb369bd72eef4cffe8a0d1345546608a2aa6795540211d" + "initCodeHash": "0x35d72c68d7e408ad3f8cd7fb94e695292ab64cc7b2563609c31b315bffb713f2", + "sourceCodeHash": "0x0b480d83f7eb65e9a9de6feff1474382504e4a32076c769f535081ed99f52acf" }, "src/dispute/FaultDisputeGame.sol": { - "initCodeHash": "0xa352179f5055232764aac6b66a3ff5a6b3bfae2101d20c077f714b0ed7e40eef", - "sourceCodeHash": "0x730eff9147294c115a0a53e7e75771bcc4a517beb48457140ab929a8d1510893" + "initCodeHash": "0x423e8488731c0b0f87b435174f412c09fbf0b17eb0b8c9a03efa37d779ec0cae", + "sourceCodeHash": "0xe53b970922b309ada1c59f94d5935ffca669e909c797f17ba8a3d309c487e7e8" }, "src/legacy/DeployerWhitelist.sol": { - "initCodeHash": "0x0b8177ed75b69eddbb9ce6537683f69a9935efed86a1d6faa8feaafbd151c1bd", - "sourceCodeHash": "0xc8fe9571fcf8fcb51a4dcb00ffa97f43a9ce811c323c4926e710b28c90a9005f" + "initCodeHash": "0x53099379ed48b87f027d55712dbdd1da7d7099925426eb0531da9c0012e02c29", + "sourceCodeHash": "0xf22c94ed20c32a8ed2705a22d12c6969c3c3bad409c4efe2f95b0db74f210e10" }, "src/legacy/L1BlockNumber.sol": { - "initCodeHash": "0x542955f1a84b304eaf291f76633b03e4c87c2654f7eff46c3bea94d27346ea1f", - "sourceCodeHash": "0x898c239e6367a0971a075df18030a033cdada26983fa8a5cd6e7b88ec90d4958" + "initCodeHash": "0x60dded11d35e42fe15ef5dd94d28aae6b8ff3e67c6fbbc667a6729fcb3ca7a9a", + "sourceCodeHash": "0x53ef11021a52e9c87024a870566ec5dba1d1a12752396e654904384efdd8203e" }, "src/legacy/LegacyMessagePasser.sol": { - "initCodeHash": "0xefc6ed9e325c2d614ea0d28c3eabfff1b345f7c6054e90253c6a091c29508267", - "sourceCodeHash": 
"0xaa08a61448f485b277af57251d2089cc6a80ce0a763bf7184d48ffed5034ef69" - }, - "src/periphery/op-nft/AttestationStation.sol": { - "initCodeHash": "0x2e665d9ee554430980f64bcb6d2611a1cb03dbacfd58bb0d6f5d32951a267bde", - "sourceCodeHash": "0xe0bc805b22c7d04b5a9444cddd4c0e1bcb3006c69c03610494277ab2cc83f553" - }, - "src/periphery/op-nft/Optimist.sol": { - "initCodeHash": "0x8fccdef5fb6e6d51215b39acc449faad8ba15416699c9b3af77866f4297805a3", - "sourceCodeHash": "0xfa9354827b642803e10415ed30ca789be1bd23d88fac14f7adaa65c6eb1c1643" - }, - "src/periphery/op-nft/OptimistAllowlist.sol": { - "initCodeHash": "0x166dd3fc18cb238895f2faa7fdd635af48ce2c54e21ed2d6dae857c3731c4d6c", - "sourceCodeHash": "0x3a5f61046f729c9a70274b8b2a739382987ec5eb77705b259e8a3210a5f43462" - }, - "src/periphery/op-nft/OptimistInviter.sol": { - "initCodeHash": "0x28dfa6676702a7abd19609cc773158d1f958210bc0a38c008d67a002dc1df862", - "sourceCodeHash": "0x3a0a294932d6deba043f6a2b46b4e8477ee96e7fb054d7e7229a43ce4352c68d" + "initCodeHash": "0x3ca911b0578be7f8c91e7d01442a5609f04e5866768f99c8e31627c9ba79c9f0", + "sourceCodeHash": "0x62c9a6182d82692fb9c173ddb0d7978bcff2d1d4dc8cd2f10625e1e65bda6888" }, "src/safe/DeputyGuardianModule.sol": { - "initCodeHash": "0xd95e562f395d4eb6e332f4474dffab660ada9e9da7c79f58fb6052278e0904df", - "sourceCodeHash": "0x45daabe094de0287e244e6fea4f1887b9adc09b07c47dc77361b1678645a1470" + "initCodeHash": "0x5eaf823d81995ce1f703f26e31049c54c1d4902dd9873a0b4645d470f2f459a2", + "sourceCodeHash": "0x17236a91c4171ae9525eae0e59fa65bb2dc320d62677cfc7d7eb942f182619fb" }, "src/safe/LivenessGuard.sol": { - "initCodeHash": "0x9ac0b039b1591f7c00cf11cb758d118c9b42e6e08250b619d6b6fd605a43d5ee", - "sourceCodeHash": "0xc1a968b0c6fbc4d82c2821c917b273feaaa224d258886b394416e84ee250d026" + "initCodeHash": "0xc8e29e8b12f423c8cd229a38bc731240dd815d96f1b0ab96c71494dde63f6a81", + "sourceCodeHash": "0x72b8d8d855e7af8beee29330f6cb9b9069acb32e23ce940002ec9a41aa012a16" }, "src/safe/LivenessModule.sol": { - "initCodeHash": "0xcfccdd9e423c95a0ddc6e09ccb6333d5fc8429ed2b8fc872f1290d392ae13aad", - "sourceCodeHash": "0xd1479c60087f352385b6d5379ef3cc07839f671d617626b4c94ece91da781ef2" + "initCodeHash": "0xde3b3273aa37604048b5fa228b90f3b05997db613dfcda45061545a669b2476a", + "sourceCodeHash": "0x918965e52bbd358ac827ebe35998f5d8fa5ca77d8eb9ab8986b44181b9aaa48a" }, "src/universal/OptimismMintableERC20.sol": { - "initCodeHash": "0x9cd677275b175812f1d5f90a127dbf7b3592714fd842a7a0de3988d716ca3eac", - "sourceCodeHash": "0x5611d8082f68af566554d7f09640b4b1f0e3efee4da1372b68fc7fc538a35ac7" + "initCodeHash": "0x51346d105fb0ebeb8733add0d53048b5d3d6f2d762c6bb446616c7f5da5eb026", + "sourceCodeHash": "0x876c716f9159909890eef3ffd3348a7f329af07e2899ae5ddc4a7809a3b753ce" }, "src/universal/OptimismMintableERC20Factory.sol": { - "initCodeHash": "0x03ad07bd7f89a29f1850fa8b5d377daf0e1d5aef6cb458a127df520549e8e8e6", - "sourceCodeHash": "0xdb6ec93782a4a217475195507740794a4f5553b9032e7ba31dc48b81f579a940" + "initCodeHash": "0x080b30bbf556e8782dd2b355d5c9889324cf2ed1f9e6aca22a2e4dc42fab8ce7", + "sourceCodeHash": "0x47158ca0a9278bf367c4eebdb184aa31332f3b5a504f5147cf1c14777d9106ec" }, "src/universal/OptimismMintableERC721.sol": { - "initCodeHash": "0x8aa309f2676d5267b6c9e411f88dc6e4badce414b8d66b330df3f60e9836380e", - "sourceCodeHash": "0x03bf7ad4d2b751bdead9930fc8f89b8e55d40dd4b2f5670fd339e87ae81f8b49" + "initCodeHash": "0xa43e7ffce142c0f2ae6ebe22decdf146dd39246830bec5cbd7903b32c2599048", + "sourceCodeHash": "0x5222efbb8e5b650e0778687ea3b3ca8df16d1683c7180862c77fe146dd21ea79" 
}, "src/universal/OptimismMintableERC721Factory.sol": { - "initCodeHash": "0x5ea977ba35558c3b75bebe28900548c763d205e40d6cf7660292b8e96bf3aea8", - "sourceCodeHash": "0x063ca3a0a2e3c592173af6157e383b5aaeff752000f98648a5c71260bb26590a" + "initCodeHash": "0x8ff88eb88c3b756b51e2f011bdf4387992a4f48abb2ab0a38fe6bb50ffea3301", + "sourceCodeHash": "0x483e20d002a402034a0a5b2ff60c9e69afa4f0f9917d287dfe820b7bae08a7bb" }, "src/universal/StorageSetter.sol": { - "initCodeHash": "0x21b3059e9b13b330f76d02b61f61dcfa3abf3517a0b56afa0895c4b8291740bf", - "sourceCodeHash": "0xc1ea12a87e3a7ef9c950f0a41a4e35b60d4d9c4c816ff671dbfca663861c16f4" + "initCodeHash": "0x8831c079f7b7a52679e8a15e0ea14e30ea7bb4f93feed0fcd369942fe8c1f1ec", + "sourceCodeHash": "0x42151e2547ec5270353977fd66e78fa1fde18f362d7021cf7ddce16d5201b3ec" + }, + "src/vendor/asterisc/RISCV.sol": { + "initCodeHash": "0x7329cca924e189eeaa2d883234f6cb5fd787c8bf3339d8298e721778c2947ce5", + "sourceCodeHash": "0x02025b303a8f37b4e541f8c7936a8651402a60ea0147a53176e06b51b15a1f84" }, "src/vendor/eas/EAS.sol": { - "initCodeHash": "0xf96d1ebc530ed95e2dffebcfa2b4a1f18103235e6352d97838b77b7a2c14567b", - "sourceCodeHash": "0xbeca762929db37f1c7a2067e136c616f563ca18e85871ad7ae2d3ff55a16e6cb" + "initCodeHash": "0xce1700cfc0a8e346b0a8e8c64b6570ba731d874b434b4798fe3176f3903c404b", + "sourceCodeHash": "0xde4c41139672fc0581ba77425ab1d822a8123ceaa3ad0655be869fcc722b8add" }, "src/vendor/eas/SchemaRegistry.sol": { - "initCodeHash": "0x06ae2c0b39c215b7fa450d382916ce6f5c6f9f2d630e572db6b72d688255b3fd", - "sourceCodeHash": "0xa014d9c992f439dee8221e065828c3326ca2c4f5db0e83431c64c20f7e51ec14" + "initCodeHash": "0x2bfce526f82622288333d53ca3f43a0a94306ba1bab99241daa845f8f4b18bd4", + "sourceCodeHash": "0xf49d7b0187912a6bb67926a3222ae51121e9239495213c975b3b4b217ee57a1b" } } \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/AttestationStation.json b/packages/contracts-bedrock/snapshots/storageLayout/AttestationStation.json deleted file mode 100644 index c3c732cec14d..000000000000 --- a/packages/contracts-bedrock/snapshots/storageLayout/AttestationStation.json +++ /dev/null @@ -1,9 +0,0 @@ -[ - { - "bytes": "32", - "label": "attestations", - "offset": 0, - "slot": "0", - "type": "mapping(address => mapping(address => mapping(bytes32 => bytes)))" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/DelayedVetoable.json b/packages/contracts-bedrock/snapshots/storageLayout/DelayedVetoable.json deleted file mode 100644 index 7da3cbbe5bd6..000000000000 --- a/packages/contracts-bedrock/snapshots/storageLayout/DelayedVetoable.json +++ /dev/null @@ -1,16 +0,0 @@ -[ - { - "bytes": "32", - "label": "_delay", - "offset": 0, - "slot": "0", - "type": "uint256" - }, - { - "bytes": "32", - "label": "_queuedAt", - "offset": 0, - "slot": "1", - "type": "mapping(bytes32 => uint256)" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json index aeef539c5c20..aa8148b34cba 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json @@ -1,51 +1,30 @@ [ - { - "bytes": "1", - "label": "_initialized", - "offset": 0, - "slot": "0", - "type": "uint8" - }, - { - "bytes": "1", - "label": "_initializing", - "offset": 1, - "slot": "0", - "type": "bool" - }, { "bytes": "32", - "label": 
"latestRelease", + "label": "l1ContractsRelease", "offset": 0, - "slot": "1", + "slot": "0", "type": "string" }, - { - "bytes": "32", - "label": "implementations", - "offset": 0, - "slot": "2", - "type": "mapping(string => mapping(string => struct OPContractsManager.Implementation))" - }, { "bytes": "32", "label": "systemConfigs", "offset": 0, - "slot": "3", + "slot": "1", "type": "mapping(uint256 => contract ISystemConfig)" }, { "bytes": "256", "label": "blueprint", "offset": 0, - "slot": "4", + "slot": "2", "type": "struct OPContractsManager.Blueprints" }, { - "bytes": "1600", - "label": "__gap", + "bytes": "288", + "label": "implementation", "offset": 0, - "slot": "12", - "type": "uint256[50]" + "slot": "10", + "type": "struct OPContractsManager.Implementations" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json index aeef539c5c20..aa8148b34cba 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json @@ -1,51 +1,30 @@ [ - { - "bytes": "1", - "label": "_initialized", - "offset": 0, - "slot": "0", - "type": "uint8" - }, - { - "bytes": "1", - "label": "_initializing", - "offset": 1, - "slot": "0", - "type": "bool" - }, { "bytes": "32", - "label": "latestRelease", + "label": "l1ContractsRelease", "offset": 0, - "slot": "1", + "slot": "0", "type": "string" }, - { - "bytes": "32", - "label": "implementations", - "offset": 0, - "slot": "2", - "type": "mapping(string => mapping(string => struct OPContractsManager.Implementation))" - }, { "bytes": "32", "label": "systemConfigs", "offset": 0, - "slot": "3", + "slot": "1", "type": "mapping(uint256 => contract ISystemConfig)" }, { "bytes": "256", "label": "blueprint", "offset": 0, - "slot": "4", + "slot": "2", "type": "struct OPContractsManager.Blueprints" }, { - "bytes": "1600", - "label": "__gap", + "bytes": "288", + "label": "implementation", "offset": 0, - "slot": "12", - "type": "uint256[50]" + "slot": "10", + "type": "struct OPContractsManager.Implementations" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/Optimist.json b/packages/contracts-bedrock/snapshots/storageLayout/Optimist.json deleted file mode 100644 index 6049beb54245..000000000000 --- a/packages/contracts-bedrock/snapshots/storageLayout/Optimist.json +++ /dev/null @@ -1,86 +0,0 @@ -[ - { - "bytes": "1", - "label": "_initialized", - "offset": 0, - "slot": "0", - "type": "uint8" - }, - { - "bytes": "1", - "label": "_initializing", - "offset": 1, - "slot": "0", - "type": "bool" - }, - { - "bytes": "1600", - "label": "__gap", - "offset": 0, - "slot": "1", - "type": "uint256[50]" - }, - { - "bytes": "1600", - "label": "__gap", - "offset": 0, - "slot": "51", - "type": "uint256[50]" - }, - { - "bytes": "32", - "label": "_name", - "offset": 0, - "slot": "101", - "type": "string" - }, - { - "bytes": "32", - "label": "_symbol", - "offset": 0, - "slot": "102", - "type": "string" - }, - { - "bytes": "32", - "label": "_owners", - "offset": 0, - "slot": "103", - "type": "mapping(uint256 => address)" - }, - { - "bytes": "32", - "label": "_balances", - "offset": 0, - "slot": "104", - "type": "mapping(address => uint256)" - }, - { - "bytes": "32", - "label": "_tokenApprovals", - "offset": 0, - "slot": "105", - "type": "mapping(uint256 => address)" - }, - { - "bytes": 
"32", - "label": "_operatorApprovals", - "offset": 0, - "slot": "106", - "type": "mapping(address => mapping(address => bool))" - }, - { - "bytes": "1408", - "label": "__gap", - "offset": 0, - "slot": "107", - "type": "uint256[44]" - }, - { - "bytes": "1600", - "label": "__gap", - "offset": 0, - "slot": "151", - "type": "uint256[50]" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OptimistAllowlist.json b/packages/contracts-bedrock/snapshots/storageLayout/OptimistAllowlist.json deleted file mode 100644 index 0637a088a01e..000000000000 --- a/packages/contracts-bedrock/snapshots/storageLayout/OptimistAllowlist.json +++ /dev/null @@ -1 +0,0 @@ -[] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OptimistInviter.json b/packages/contracts-bedrock/snapshots/storageLayout/OptimistInviter.json deleted file mode 100644 index 5d1a6bbc43c9..000000000000 --- a/packages/contracts-bedrock/snapshots/storageLayout/OptimistInviter.json +++ /dev/null @@ -1,58 +0,0 @@ -[ - { - "bytes": "1", - "label": "_initialized", - "offset": 0, - "slot": "0", - "type": "uint8" - }, - { - "bytes": "1", - "label": "_initializing", - "offset": 1, - "slot": "0", - "type": "bool" - }, - { - "bytes": "32", - "label": "_HASHED_NAME", - "offset": 0, - "slot": "1", - "type": "bytes32" - }, - { - "bytes": "32", - "label": "_HASHED_VERSION", - "offset": 0, - "slot": "2", - "type": "bytes32" - }, - { - "bytes": "1600", - "label": "__gap", - "offset": 0, - "slot": "3", - "type": "uint256[50]" - }, - { - "bytes": "32", - "label": "commitmentTimestamps", - "offset": 0, - "slot": "53", - "type": "mapping(bytes32 => uint256)" - }, - { - "bytes": "32", - "label": "usedNonces", - "offset": 0, - "slot": "54", - "type": "mapping(address => mapping(bytes32 => bool))" - }, - { - "bytes": "32", - "label": "inviteCounts", - "offset": 0, - "slot": "55", - "type": "mapping(address => uint256)" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/RISCV.json b/packages/contracts-bedrock/snapshots/storageLayout/RISCV.json new file mode 100644 index 000000000000..a79dc13a1d36 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/storageLayout/RISCV.json @@ -0,0 +1,9 @@ +[ + { + "bytes": "20", + "label": "oracle", + "offset": 0, + "slot": "0", + "type": "contract IPreimageOracle" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol b/packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol index 2a725fc4f200..634d6da91dd7 100644 --- a/packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol +++ b/packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol @@ -8,7 +8,7 @@ import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/O import { SafeCall } from "src/libraries/SafeCall.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @dev An enum representing the status of a DA challenge. enum ChallengeStatus { @@ -24,9 +24,10 @@ enum CommitmentType { } /// @dev A struct representing a single DA challenge. -/// @custom:field status The status of the challenge. /// @custom:field challenger The address that initiated the challenge. +/// @custom:field lockedBond The amount of ETH bond that was locked by the challenger. /// @custom:field startBlock The block number at which the challenge was initiated. 
+/// @custom:field resolvedBlock The block number at which the challenge was resolved. struct Challenge { address challenger; uint256 lockedBond; @@ -94,8 +95,8 @@ contract DataAvailabilityChallenge is OwnableUpgradeable, ISemver { event BalanceChanged(address account, uint256 balance); /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.2 - string public constant version = "1.0.1-beta.2"; + /// @custom:semver 1.0.1-beta.4 + string public constant version = "1.0.1-beta.4"; /// @notice The fixed cost of resolving a challenge. /// @dev The value is estimated by measuring the cost of resolving with `bytes(0)` diff --git a/packages/contracts-bedrock/src/L1/DelayedVetoable.sol b/packages/contracts-bedrock/src/L1/DelayedVetoable.sol deleted file mode 100644 index d968af214975..000000000000 --- a/packages/contracts-bedrock/src/L1/DelayedVetoable.sol +++ /dev/null @@ -1,193 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -// Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; - -/// @title DelayedVetoable -/// @notice This contract enables a delay before a call is forwarded to a target contract, and during the delay period -/// the call can be vetoed by the authorized vetoer. -/// This contract does not support value transfers, only data is forwarded. -/// Additionally, this contract cannot be used to forward calls with data beginning with the function selector -/// of the queuedAt(bytes32) function. This is because of input validation checks which solidity performs at -/// runtime on functions which take an argument. -contract DelayedVetoable is ISemver { - /// @notice Error for when attempting to forward too early. - error ForwardingEarly(); - - /// @notice Error for unauthorized calls. - error Unauthorized(address expected, address actual); - - /// @notice An event that is emitted when the delay is activated. - /// @param delay The delay that was activated. - event DelayActivated(uint256 delay); - - /// @notice An event that is emitted when a call is initiated. - /// @param callHash The hash of the call data. - /// @param data The data of the initiated call. - event Initiated(bytes32 indexed callHash, bytes data); - - /// @notice An event that is emitted each time a call is forwarded. - /// @param callHash The hash of the call data. - /// @param data The data forwarded to the target. - event Forwarded(bytes32 indexed callHash, bytes data); - - /// @notice An event that is emitted each time a call is vetoed. - /// @param callHash The hash of the call data. - /// @param data The data forwarded to the target. - event Vetoed(bytes32 indexed callHash, bytes data); - - /// @notice The address that all calls are forwarded to after the delay. - address internal immutable TARGET; - - /// @notice The address that can veto a call. - address internal immutable VETOER; - - /// @notice The address that can initiate a call. - address internal immutable INITIATOR; - - /// @notice The delay which will be set after the initial system deployment is completed. - uint256 internal immutable OPERATING_DELAY; - - /// @notice The current amount of time to wait before forwarding a call. - uint256 internal _delay; - - /// @notice The time that a call was initiated. - mapping(bytes32 => uint256) internal _queuedAt; - - /// @notice A modifier that reverts if not called by the vetoer or by address(0) to allow - /// eth_call to interact with this proxy without needing to use low-level storage - /// inspection. 
We assume that nobody is able to trigger calls from address(0) during - /// normal EVM execution. - modifier readOrHandle() { - if (msg.sender == address(0)) { - _; - } else { - // This WILL halt the call frame on completion. - _handleCall(); - } - } - - /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.2 - string public constant version = "1.0.1-beta.2"; - - /// @notice Sets the target admin during contract deployment. - /// @param _vetoer Address of the vetoer. - /// @param _initiator Address of the initiator. - /// @param _target Address of the target. - /// @param _operatingDelay Time to delay when the system is operational. - constructor(address _vetoer, address _initiator, address _target, uint256 _operatingDelay) { - // Note that the _delay value is not set here. Having an initial delay of 0 is helpful - // during the deployment of a new system. - VETOER = _vetoer; - INITIATOR = _initiator; - TARGET = _target; - OPERATING_DELAY = _operatingDelay; - } - - /// @notice Gets the initiator - /// @return initiator_ Initiator address. - function initiator() external virtual readOrHandle returns (address initiator_) { - initiator_ = INITIATOR; - } - - //// @notice Queries the vetoer address. - /// @return vetoer_ Vetoer address. - function vetoer() external virtual readOrHandle returns (address vetoer_) { - vetoer_ = VETOER; - } - - //// @notice Queries the target address. - /// @return target_ Target address. - function target() external readOrHandle returns (address target_) { - target_ = TARGET; - } - - /// @notice Gets the delay - /// @return delay_ Delay address. - function delay() external readOrHandle returns (uint256 delay_) { - delay_ = _delay; - } - - /// @notice Gets entries in the _queuedAt mapping. - /// @param _callHash The hash of the call data. - /// @return queuedAt_ The time the callHash was recorded. - function queuedAt(bytes32 _callHash) external readOrHandle returns (uint256 queuedAt_) { - queuedAt_ = _queuedAt[_callHash]; - } - - /// @notice Used for all calls that pass data to the contract. - fallback() external { - _handleCall(); - } - - /// @notice Receives all calls other than those made by the vetoer. - /// This enables transparent initiation and forwarding of calls to the target and avoids - /// the need for additional layers of abi encoding. - function _handleCall() internal { - // The initiator and vetoer activate the delay by passing in null data. - if (msg.data.length == 0 && _delay == 0) { - if (msg.sender != INITIATOR && msg.sender != VETOER) { - revert Unauthorized(INITIATOR, msg.sender); - } - _delay = OPERATING_DELAY; - emit DelayActivated(_delay); - return; - } - - bytes32 callHash = keccak256(msg.data); - - // Case 1: The initiator is calling the contract to initiate a call. - if (msg.sender == INITIATOR && _queuedAt[callHash] == 0) { - if (_delay == 0) { - // This forward function will halt the call frame on completion. - _forwardAndHalt(callHash); - } - _queuedAt[callHash] = block.timestamp; - emit Initiated(callHash, msg.data); - return; - } - - // Case 2: The vetoer is calling the contract to veto a call. - // Note: The vetoer retains the ability to veto even after the delay has passed. This makes censoring the vetoer - // more costly, as there is no time limit after which their transaction can be included. - if (msg.sender == VETOER && _queuedAt[callHash] != 0) { - delete _queuedAt[callHash]; - emit Vetoed(callHash, msg.data); - return; - } - - // Case 3: The call is from an unpermissioned actor. 
We'll forward the call if the delay has - // passed. - if (_queuedAt[callHash] == 0) { - // The call has not been initiated, so we'll treat this is an unauthorized initiation attempt. - revert Unauthorized(INITIATOR, msg.sender); - } - - if (_queuedAt[callHash] + _delay > block.timestamp) { - // Not enough time has passed, so we'll revert. - revert ForwardingEarly(); - } - - // Delete the call to prevent replays - delete _queuedAt[callHash]; - _forwardAndHalt(callHash); - } - - /// @notice Forwards the call to the target and halts the call frame. - function _forwardAndHalt(bytes32 _callHash) internal { - // Forward the call - emit Forwarded(_callHash, msg.data); - (bool success, bytes memory returndata) = TARGET.call(msg.data); - if (success == true) { - assembly { - return(add(returndata, 0x20), mload(returndata)) - } - } else { - assembly { - revert(add(returndata, 0x20), mload(returndata)) - } - } - } -} diff --git a/packages/contracts-bedrock/src/L1/L1CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L1/L1CrossDomainMessenger.sol index 27be4a7332fa..071d35aea6a4 100644 --- a/packages/contracts-bedrock/src/L1/L1CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L1/L1CrossDomainMessenger.sol @@ -8,10 +8,10 @@ import { CrossDomainMessenger } from "src/universal/CrossDomainMessenger.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; /// @custom:proxied true /// @title L1CrossDomainMessenger @@ -30,8 +30,8 @@ contract L1CrossDomainMessenger is CrossDomainMessenger, ISemver { ISystemConfig public systemConfig; /// @notice Semantic version. - /// @custom:semver 2.4.1-beta.2 - string public constant version = "2.4.1-beta.2"; + /// @custom:semver 2.4.1-beta.3 + string public constant version = "2.4.1-beta.3"; /// @notice Constructs the L1CrossDomainMessenger contract. 
constructor() CrossDomainMessenger() { diff --git a/packages/contracts-bedrock/src/L1/L1ERC721Bridge.sol b/packages/contracts-bedrock/src/L1/L1ERC721Bridge.sol index bd9b31c1e589..b1bdc1e61cbe 100644 --- a/packages/contracts-bedrock/src/L1/L1ERC721Bridge.sol +++ b/packages/contracts-bedrock/src/L1/L1ERC721Bridge.sol @@ -9,10 +9,10 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces import { IERC721 } from "@openzeppelin/contracts/token/ERC721/IERC721.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IL2ERC721Bridge } from "src/L2/interfaces/IL2ERC721Bridge.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IL2ERC721Bridge } from "interfaces/L2/IL2ERC721Bridge.sol"; /// @custom:proxied true /// @title L1ERC721Bridge @@ -28,8 +28,8 @@ contract L1ERC721Bridge is ERC721Bridge, ISemver { ISuperchainConfig public superchainConfig; /// @notice Semantic version. - /// @custom:semver 2.2.0-beta.1 - string public constant version = "2.2.0-beta.1"; + /// @custom:semver 2.2.0-beta.2 + string public constant version = "2.2.0-beta.2"; /// @notice Constructs the L1ERC721Bridge contract. constructor() ERC721Bridge() { diff --git a/packages/contracts-bedrock/src/L1/L1StandardBridge.sol b/packages/contracts-bedrock/src/L1/L1StandardBridge.sol index 54cc833fe673..a2747096717e 100644 --- a/packages/contracts-bedrock/src/L1/L1StandardBridge.sol +++ b/packages/contracts-bedrock/src/L1/L1StandardBridge.sol @@ -8,10 +8,10 @@ import { StandardBridge } from "src/universal/StandardBridge.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; /// @custom:proxied true /// @title L1StandardBridge @@ -75,8 +75,8 @@ contract L1StandardBridge is StandardBridge, ISemver { ); /// @notice Semantic version. - /// @custom:semver 2.2.1-beta.2 - string public constant version = "2.2.1-beta.2"; + /// @custom:semver 2.2.1-beta.3 + string public constant version = "2.2.1-beta.3"; /// @notice Address of the SuperchainConfig contract. 
ISuperchainConfig public superchainConfig; diff --git a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol index 99d5645dceca..1af5bd8b7aa9 100644 --- a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol +++ b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol @@ -8,7 +8,7 @@ import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable import { Types } from "src/libraries/Types.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @custom:proxied true /// @title L2OutputOracle @@ -60,8 +60,8 @@ contract L2OutputOracle is Initializable, ISemver { event OutputsDeleted(uint256 indexed prevNextOutputIndex, uint256 indexed newNextOutputIndex); /// @notice Semantic version. - /// @custom:semver 1.8.1-beta.2 - string public constant version = "1.8.1-beta.2"; + /// @custom:semver 1.8.1-beta.3 + string public constant version = "1.8.1-beta.3"; /// @notice Constructs the L2OutputOracle contract. Initializes variables to the same values as /// in the getting-started config. diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 4bf52ff228a1..aad48374704b 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -1,41 +1,35 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Libraries import { Blueprint } from "src/libraries/Blueprint.sol"; import { Constants } from "src/libraries/Constants.sol"; - -import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol"; - -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; - -import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; - -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; import { Claim, Duration, GameType, GameTypes } from "src/dispute/lib/Types.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IL1StandardBridge } from 
"src/L1/interfaces/IL1StandardBridge.sol"; -import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; - -/// @custom:proxied true -contract OPContractsManager is ISemver, Initializable { +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { IBigStepper } from "interfaces/dispute/IBigStepper.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; +import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; + +contract OPContractsManager is ISemver { // -------- Structs -------- /// @notice Represents the roles that can be set when deploying a standard OP Stack chain. @@ -89,19 +83,6 @@ contract OPContractsManager is ISemver, Initializable { IDelayedWETH delayedWETHPermissionlessGameProxy; } - /// @notice The logic address and initializer selector for an implementation contract. - struct Implementation { - address logic; // Address containing the deployed logic contract. - bytes4 initializer; // Function selector for the initializer. - } - - /// @notice Used to set the implementation for a contract by mapping a contract - /// name to the implementation data. - struct ImplementationSetter { - string name; // Contract name. - Implementation info; // Implementation to set. - } - /// @notice Addresses of ERC-5202 Blueprint contracts. There are used for deploying full size /// contracts, to reduce the code size of this factory contract. If it deployed full contracts /// using the `new Proxy()` syntax, the code size would get large fast, since this contract would @@ -118,19 +99,23 @@ contract OPContractsManager is ISemver, Initializable { address permissionedDisputeGame2; } - /// @notice Inputs required when initializing the OPContractsManager. To avoid 'StackTooDeep' errors, - /// all necessary inputs (excluding immutables) for initialization are bundled together in this struct. - struct InitializerInputs { - Blueprints blueprints; - ImplementationSetter[] setters; - string release; - bool isLatest; + /// @notice The latest implementation contracts for the OP Stack. 
+ struct Implementations { + address l1ERC721BridgeImpl; + address optimismPortalImpl; + address systemConfigImpl; + address optimismMintableERC20FactoryImpl; + address l1CrossDomainMessengerImpl; + address l1StandardBridgeImpl; + address disputeGameFactoryImpl; + address delayedWETHImpl; + address mipsImpl; } // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.20 - string public constant version = "1.0.0-beta.20"; + /// @custom:semver 1.0.0-beta.23 + string public constant version = "1.0.0-beta.23"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. @@ -142,24 +127,20 @@ contract OPContractsManager is ISemver, Initializable { /// @notice Address of the ProtocolVersions contract shared by all chains. IProtocolVersions public immutable protocolVersions; - /// @notice The latest release of the OP Contracts Manager, as a string of the format `op-contracts/vX.Y.Z`. - string public latestRelease; - - /// @notice Maps a release version to a contract name to it's implementation data. - mapping(string => mapping(string => Implementation)) public implementations; + /// @notice L1 smart contracts release deployed by this version of OPCM. This is used in OPCM to signal which version + /// of the L1 smart contracts is deployed. It takes the format of `op-contracts/vX.Y.Z`. + string public l1ContractsRelease; /// @notice Maps an L2 Chain ID to the SystemConfig for that chain. mapping(uint256 => ISystemConfig) public systemConfigs; /// @notice Addresses of the Blueprint contracts. /// This is internal because if public the autogenerated getter method would return a tuple of - /// addresses, but we want it to return a struct. This is also set via `initialize` because - /// we can't make this an immutable variable as it is a non-value type. + /// addresses, but we want it to return a struct. Blueprints internal blueprint; - /// @notice Storage gap for future modifications, so we can expand the number of blueprints - /// without affecting other storage variables. - uint256[50] private __gap; + /// @notice Addresses of the latest implementation contracts. + Implementations internal implementation; // -------- Events -------- @@ -197,37 +178,26 @@ contract OPContractsManager is ISemver, Initializable { // -------- Methods -------- - /// @notice OPCM is proxied. Therefore the `initialize` function replaces most constructor logic for this contract.
- - constructor(ISuperchainConfig _superchainConfig, IProtocolVersions _protocolVersions) { + constructor( + ISuperchainConfig _superchainConfig, + IProtocolVersions _protocolVersions, + string memory _l1ContractsRelease, + Blueprints memory _blueprints, + Implementations memory _implementations + ) { assertValidContractAddress(address(_superchainConfig)); assertValidContractAddress(address(_protocolVersions)); superchainConfig = _superchainConfig; protocolVersions = _protocolVersions; - _disableInitializers(); - } - - function initialize(InitializerInputs memory _initializerInputs) public initializer { - if (_initializerInputs.isLatest) latestRelease = _initializerInputs.release; - if (keccak256(bytes(latestRelease)) == keccak256("")) revert LatestReleaseNotSet(); + l1ContractsRelease = _l1ContractsRelease; - for (uint256 i = 0; i < _initializerInputs.setters.length; i++) { - ImplementationSetter memory setter = _initializerInputs.setters[i]; - Implementation storage impl = implementations[_initializerInputs.release][setter.name]; - if (impl.logic != address(0)) revert AlreadyReleased(); - - impl.initializer = setter.info.initializer; - impl.logic = setter.info.logic; - } - - blueprint = _initializerInputs.blueprints; + blueprint = _blueprints; + implementation = _implementations; } function deploy(DeployInput calldata _input) external returns (DeployOutput memory) { assertValidInputs(_input); - uint256 l2ChainId = _input.l2ChainId; - // The salt for a non-proxy contract is a function of the chain ID and the salt mixer. string memory saltMixer = _input.saltMixer; bytes32 salt = keccak256(abi.encode(l2ChainId, saltMixer)); @@ -266,7 +236,6 @@ contract OPContractsManager is ISemver, Initializable { payable(Blueprint.deployFrom(blueprint.l1ChugSplashProxy, salt, abi.encode(output.opChainProxyAdmin))) ); output.opChainProxyAdmin.setProxyType(address(output.l1StandardBridgeProxy), IProxyAdmin.ProxyType.CHUGSPLASH); - string memory contractName = "OVM_L1CrossDomainMessenger"; output.l1CrossDomainMessengerProxy = IL1CrossDomainMessenger( Blueprint.deployFrom(blueprint.resolvedDelegateProxy, salt, abi.encode(output.addressManager, contractName)) @@ -275,10 +244,8 @@ contract OPContractsManager is ISemver, Initializable { address(output.l1CrossDomainMessengerProxy), IProxyAdmin.ProxyType.RESOLVED ); output.opChainProxyAdmin.setImplementationName(address(output.l1CrossDomainMessengerProxy), contractName); - // Now that all proxies are deployed, we can transfer ownership of the AddressManager to the ProxyAdmin. output.addressManager.transferOwnership(address(output.opChainProxyAdmin)); - // The AnchorStateRegistry Implementation is not MCP Ready, and therefore requires an implementation per chain. // It must be deployed after the DisputeGameFactoryProxy so that it can be provided as a constructor argument. 
output.anchorStateRegistryImpl = IAnchorStateRegistry( @@ -301,54 +268,76 @@ contract OPContractsManager is ISemver, Initializable { ); // -------- Set and Initialize Proxy Implementations -------- - Implementation memory impl; bytes memory data; - impl = getLatestImplementation("L1ERC721Bridge"); - data = encodeL1ERC721BridgeInitializer(impl.initializer, output); - upgradeAndCall(output.opChainProxyAdmin, address(output.l1ERC721BridgeProxy), impl.logic, data); + data = encodeL1ERC721BridgeInitializer(IL1ERC721Bridge.initialize.selector, output); + upgradeAndCall( + output.opChainProxyAdmin, address(output.l1ERC721BridgeProxy), implementation.l1ERC721BridgeImpl, data + ); - impl = getLatestImplementation("OptimismPortal"); - data = encodeOptimismPortalInitializer(impl.initializer, output); - upgradeAndCall(output.opChainProxyAdmin, address(output.optimismPortalProxy), impl.logic, data); + data = encodeOptimismPortalInitializer(IOptimismPortal2.initialize.selector, output); + upgradeAndCall( + output.opChainProxyAdmin, address(output.optimismPortalProxy), implementation.optimismPortalImpl, data + ); // First we upgrade the implementation so it's version can be retrieved, then we initialize // it afterwards. See the comments in encodeSystemConfigInitializer to learn more. - impl = getLatestImplementation("SystemConfig"); - output.opChainProxyAdmin.upgrade(payable(address(output.systemConfigProxy)), impl.logic); - data = encodeSystemConfigInitializer(impl.initializer, _input, output); - upgradeAndCall(output.opChainProxyAdmin, address(output.systemConfigProxy), impl.logic, data); + output.opChainProxyAdmin.upgrade(payable(address(output.systemConfigProxy)), implementation.systemConfigImpl); + data = encodeSystemConfigInitializer(_input, output); + upgradeAndCall( + output.opChainProxyAdmin, address(output.systemConfigProxy), implementation.systemConfigImpl, data + ); - impl = getLatestImplementation("OptimismMintableERC20Factory"); - data = encodeOptimismMintableERC20FactoryInitializer(impl.initializer, output); - upgradeAndCall(output.opChainProxyAdmin, address(output.optimismMintableERC20FactoryProxy), impl.logic, data); + data = encodeOptimismMintableERC20FactoryInitializer(IOptimismMintableERC20Factory.initialize.selector, output); + upgradeAndCall( + output.opChainProxyAdmin, + address(output.optimismMintableERC20FactoryProxy), + implementation.optimismMintableERC20FactoryImpl, + data + ); - impl = getLatestImplementation("L1CrossDomainMessenger"); - data = encodeL1CrossDomainMessengerInitializer(impl.initializer, output); - upgradeAndCall(output.opChainProxyAdmin, address(output.l1CrossDomainMessengerProxy), impl.logic, data); + data = encodeL1CrossDomainMessengerInitializer(IL1CrossDomainMessenger.initialize.selector, output); + upgradeAndCall( + output.opChainProxyAdmin, + address(output.l1CrossDomainMessengerProxy), + implementation.l1CrossDomainMessengerImpl, + data + ); - impl = getLatestImplementation("L1StandardBridge"); - data = encodeL1StandardBridgeInitializer(impl.initializer, output); - upgradeAndCall(output.opChainProxyAdmin, address(output.l1StandardBridgeProxy), impl.logic, data); + data = encodeL1StandardBridgeInitializer(IL1StandardBridge.initialize.selector, output); + upgradeAndCall( + output.opChainProxyAdmin, address(output.l1StandardBridgeProxy), implementation.l1StandardBridgeImpl, data + ); - impl = getLatestImplementation("DelayedWETH"); - data = encodeDelayedWETHInitializer(impl.initializer, _input); + data = 
encodeDelayedWETHInitializer(IDelayedWETH.initialize.selector, _input); // Eventually we will switch from DelayedWETHPermissionedGameProxy to DelayedWETHPermissionlessGameProxy. - upgradeAndCall(output.opChainProxyAdmin, address(output.delayedWETHPermissionedGameProxy), impl.logic, data); + upgradeAndCall( + output.opChainProxyAdmin, + address(output.delayedWETHPermissionedGameProxy), + implementation.delayedWETHImpl, + data + ); // We set the initial owner to this contract, set game implementations, then transfer ownership. - impl = getLatestImplementation("DisputeGameFactory"); - data = encodeDisputeGameFactoryInitializer(impl.initializer, _input); - upgradeAndCall(output.opChainProxyAdmin, address(output.disputeGameFactoryProxy), impl.logic, data); + data = encodeDisputeGameFactoryInitializer(IDisputeGameFactory.initialize.selector, _input); + upgradeAndCall( + output.opChainProxyAdmin, + address(output.disputeGameFactoryProxy), + implementation.disputeGameFactoryImpl, + data + ); output.disputeGameFactoryProxy.setImplementation( GameTypes.PERMISSIONED_CANNON, IDisputeGame(address(output.permissionedDisputeGame)) ); output.disputeGameFactoryProxy.transferOwnership(address(_input.roles.opChainProxyAdminOwner)); - impl.logic = address(output.anchorStateRegistryImpl); - impl.initializer = IAnchorStateRegistry.initialize.selector; - data = encodeAnchorStateRegistryInitializer(impl.initializer, _input); - upgradeAndCall(output.opChainProxyAdmin, address(output.anchorStateRegistryProxy), impl.logic, data); + data = encodeAnchorStateRegistryInitializer(IAnchorStateRegistry.initialize.selector, _input); + upgradeAndCall( + output.opChainProxyAdmin, + address(output.anchorStateRegistryProxy), + address(output.anchorStateRegistryImpl), + data + ); // -------- Finalize Deployment -------- // Transfer ownership of the ProxyAdmin from this contract to the specified owner. @@ -402,13 +391,6 @@ contract OPContractsManager is ISemver, Initializable { return Blueprint.deployFrom(blueprint.proxy, salt, abi.encode(_proxyAdmin)); } - /// @notice Returns the implementation data for a contract name. Makes a copy of the internal - // Implementation struct in storage to prevent accidental mutation of the internal data. - function getLatestImplementation(string memory _name) internal view returns (Implementation memory) { - Implementation storage impl = implementations[latestRelease][_name]; - return Implementation({ logic: impl.logic, initializer: impl.initializer }); - } - // -------- Initializer Encoding -------- /// @notice Helper method for encoding the L1ERC721Bridge initializer data. @@ -445,7 +427,6 @@ contract OPContractsManager is ISemver, Initializable { /// @notice Helper method for encoding the SystemConfig initializer data. function encodeSystemConfigInitializer( - bytes4 _selector, DeployInput memory _input, DeployOutput memory _output ) @@ -454,50 +435,22 @@ contract OPContractsManager is ISemver, Initializable { virtual returns (bytes memory) { - // We inspect the SystemConfig contract and determine it's signature here. This is required - // because this OPCM contract is being developed in a repository that no longer contains the - // SystemConfig contract that was released as part of `op-contracts/v1.6.0`, but in production - // it needs to support that version, in addition to the version currently on develop. 
- string memory semver = _output.systemConfigProxy.version(); - if (keccak256(abi.encode(semver)) == keccak256(abi.encode(string("2.2.0")))) { - // We are using the op-contracts/v1.6.0 SystemConfig contract. - ( - IResourceMetering.ResourceConfig memory referenceResourceConfig, - ISystemConfigV160.Addresses memory opChainAddrs - ) = defaultSystemConfigV160Params(_selector, _input, _output); - - return abi.encodeWithSelector( - _selector, - _input.roles.systemConfigOwner, - _input.basefeeScalar, - _input.blobBasefeeScalar, - bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash - _input.gasLimit, - _input.roles.unsafeBlockSigner, - referenceResourceConfig, - chainIdToBatchInboxAddress(_input.l2ChainId), - opChainAddrs - ); - } else { - // We are using the latest SystemConfig contract from the repo. - ( - IResourceMetering.ResourceConfig memory referenceResourceConfig, - ISystemConfig.Addresses memory opChainAddrs - ) = defaultSystemConfigParams(_selector, _input, _output); - - return abi.encodeWithSelector( - _selector, - _input.roles.systemConfigOwner, - _input.basefeeScalar, - _input.blobBasefeeScalar, - bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash - _input.gasLimit, - _input.roles.unsafeBlockSigner, - referenceResourceConfig, - chainIdToBatchInboxAddress(_input.l2ChainId), - opChainAddrs - ); - } + bytes4 selector = ISystemConfig.initialize.selector; + (IResourceMetering.ResourceConfig memory referenceResourceConfig, ISystemConfig.Addresses memory opChainAddrs) = + defaultSystemConfigParams(selector, _input, _output); + + return abi.encodeWithSelector( + selector, + _input.roles.systemConfigOwner, + _input.basefeeScalar, + _input.blobBasefeeScalar, + bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash + _input.gasLimit, + _input.roles.unsafeBlockSigner, + referenceResourceConfig, + chainIdToBatchInboxAddress(_input.l2ChainId), + opChainAddrs + ); } /// @notice Helper method for encoding the OptimismMintableERC20Factory initializer data. @@ -599,7 +552,7 @@ contract OPContractsManager is ISemver, Initializable { _input.disputeSplitDepth, _input.disputeClockExtension, _input.disputeMaxClockDuration, - IBigStepper(getLatestImplementation("MIPS").logic), + IBigStepper(implementation.mipsImpl), IDelayedWETH(payable(address(_output.delayedWETHPermissionedGameProxy))), IAnchorStateRegistry(address(_output.anchorStateRegistryProxy)), _input.l2ChainId, @@ -645,45 +598,6 @@ contract OPContractsManager is ISemver, Initializable { assertValidContractAddress(opChainAddrs_.optimismMintableERC20Factory); } - /// @notice Returns default, standard config arguments for the SystemConfig initializer. - /// This is used by subclasses to reduce code duplication. - function defaultSystemConfigV160Params( - bytes4, /* selector */ - DeployInput memory, /* _input */ - DeployOutput memory _output - ) - internal - view - virtual - returns ( - IResourceMetering.ResourceConfig memory resourceConfig_, - ISystemConfigV160.Addresses memory opChainAddrs_ - ) - { - // We use assembly to easily convert from IResourceMetering.ResourceConfig to ResourceMetering.ResourceConfig. - // This is required because we have not yet fully migrated the codebase to be interface-based. 
- IResourceMetering.ResourceConfig memory resourceConfig = Constants.DEFAULT_RESOURCE_CONFIG(); - assembly ("memory-safe") { - resourceConfig_ := resourceConfig - } - - opChainAddrs_ = ISystemConfigV160.Addresses({ - l1CrossDomainMessenger: address(_output.l1CrossDomainMessengerProxy), - l1ERC721Bridge: address(_output.l1ERC721BridgeProxy), - l1StandardBridge: address(_output.l1StandardBridgeProxy), - disputeGameFactory: address(_output.disputeGameFactoryProxy), - optimismPortal: address(_output.optimismPortalProxy), - optimismMintableERC20Factory: address(_output.optimismMintableERC20FactoryProxy) - }); - - assertValidContractAddress(opChainAddrs_.l1CrossDomainMessenger); - assertValidContractAddress(opChainAddrs_.l1ERC721Bridge); - assertValidContractAddress(opChainAddrs_.l1StandardBridge); - assertValidContractAddress(opChainAddrs_.disputeGameFactory); - assertValidContractAddress(opChainAddrs_.optimismPortal); - assertValidContractAddress(opChainAddrs_.optimismMintableERC20Factory); - } - /// @notice Makes an external call to the target to initialize the proxy with the specified data. /// First performs safety checks to ensure the target, implementation, and proxy admin are valid. function upgradeAndCall( @@ -710,4 +624,9 @@ contract OPContractsManager is ISemver, Initializable { function blueprints() public view returns (Blueprints memory) { return blueprint; } + + /// @notice Returns the implementation contract addresses. + function implementations() public view returns (Implementations memory) { + return implementation; + } } diff --git a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol index 19de5537b41c..b40ea48b2019 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol @@ -1,25 +1,30 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -/// @custom:proxied true +// Interfaces +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { ISystemConfigInterop } from "interfaces/L1/ISystemConfigInterop.sol"; + contract OPContractsManagerInterop is OPContractsManager { constructor( ISuperchainConfig _superchainConfig, - IProtocolVersions _protocolVersions + IProtocolVersions _protocolVersions, + string memory _l1ContractsRelease, + Blueprints memory _blueprints, + Implementations memory _implementations ) - OPContractsManager(_superchainConfig, _protocolVersions) + OPContractsManager(_superchainConfig, _protocolVersions, _l1ContractsRelease, _blueprints, _implementations) { } // The `SystemConfigInterop` contract has an extra `address _dependencyManager` argument // that we must account for. 
function encodeSystemConfigInitializer( - bytes4 _selector, DeployInput memory _input, DeployOutput memory _output ) @@ -29,8 +34,9 @@ contract OPContractsManagerInterop is OPContractsManager { override returns (bytes memory) { + bytes4 selector = ISystemConfigInterop.initialize.selector; (IResourceMetering.ResourceConfig memory referenceResourceConfig, ISystemConfig.Addresses memory opChainAddrs) = - defaultSystemConfigParams(_selector, _input, _output); + defaultSystemConfigParams(selector, _input, _output); // TODO For now we assume that the dependency manager is the same as system config owner. // This is currently undefined since it's not part of the standard config, so we may need @@ -40,7 +46,7 @@ contract OPContractsManagerInterop is OPContractsManager { address dependencyManager = address(_input.roles.systemConfigOwner); return abi.encodeWithSelector( - _selector, + selector, _input.roles.systemConfigOwner, _input.basefeeScalar, _input.blobBasefeeScalar, diff --git a/packages/contracts-bedrock/src/L1/OptimismPortal.sol b/packages/contracts-bedrock/src/L1/OptimismPortal.sol index 2f9f53787581..6137558991ea 100644 --- a/packages/contracts-bedrock/src/L1/OptimismPortal.sol +++ b/packages/contracts-bedrock/src/L1/OptimismPortal.sol @@ -30,12 +30,12 @@ import { // Interfaces import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IL2OutputOracle } from "interfaces/L1/IL2OutputOracle.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; /// @custom:proxied true /// @title OptimismPortal @@ -146,9 +146,9 @@ contract OptimismPortal is Initializable, ResourceMetering, ISemver { } /// @notice Semantic version. - /// @custom:semver 2.8.1-beta.4 + /// @custom:semver 2.8.1-beta.5 function version() public pure virtual returns (string memory) { - return "2.8.1-beta.4"; + return "2.8.1-beta.5"; } /// @notice Constructs the OptimismPortal contract. 
diff --git a/packages/contracts-bedrock/src/L1/OptimismPortal2.sol b/packages/contracts-bedrock/src/L1/OptimismPortal2.sol index 985711ed18e5..494ad7a67c54 100644 --- a/packages/contracts-bedrock/src/L1/OptimismPortal2.sol +++ b/packages/contracts-bedrock/src/L1/OptimismPortal2.sol @@ -38,13 +38,13 @@ import { GameStatus, GameType, Claim, Timestamp, Hash } from "src/dispute/lib/Ty // Interfaces import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; /// @custom:proxied true /// @title OptimismPortal2 @@ -183,9 +183,9 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ISemver { } /// @notice Semantic version. - /// @custom:semver 3.11.0-beta.6 + /// @custom:semver 3.11.0-beta.7 function version() public pure virtual returns (string memory) { - return "3.11.0-beta.6"; + return "3.11.0-beta.7"; } /// @notice Constructs the OptimismPortal contract. diff --git a/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol b/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol index 4c238c415d34..7c21244bf5a1 100644 --- a/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol +++ b/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol @@ -3,13 +3,15 @@ pragma solidity 0.8.15; // Contracts import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; -import { L1BlockInterop, ConfigType } from "src/L2/L1BlockInterop.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; import { Constants } from "src/libraries/Constants.sol"; import { Unauthorized } from "src/libraries/PortalErrors.sol"; +// Interfaces +import { IL1BlockInterop, ConfigType } from "interfaces/L2/IL1BlockInterop.sol"; + /// @custom:proxied true /// @title OptimismPortalInterop /// @notice The OptimismPortal is a low-level contract responsible for passing messages between L1 @@ -23,9 +25,9 @@ contract OptimismPortalInterop is OptimismPortal2 { OptimismPortal2(_proofMaturityDelaySeconds, _disputeGameFinalityDelaySeconds) { } - /// @custom:semver +interop-beta.2 + /// @custom:semver +interop-beta.4 function version() public pure override returns (string memory) { - return string.concat(super.version(), "+interop-beta.2"); + return string.concat(super.version(), "+interop-beta.4"); } /// @notice Sets static configuration options for the L2 system. 
@@ -48,7 +50,7 @@ contract OptimismPortalInterop is OptimismPortal2 { uint256(0), // value uint64(SYSTEM_DEPOSIT_GAS_LIMIT), // gasLimit false, // isCreation, - abi.encodeCall(L1BlockInterop.setConfig, (_type, _value)) + abi.encodeCall(IL1BlockInterop.setConfig, (_type, _value)) ) ); } diff --git a/packages/contracts-bedrock/src/L1/ProtocolVersions.sol b/packages/contracts-bedrock/src/L1/ProtocolVersions.sol index 8d4982ecea0c..2253e38e35fe 100644 --- a/packages/contracts-bedrock/src/L1/ProtocolVersions.sol +++ b/packages/contracts-bedrock/src/L1/ProtocolVersions.sol @@ -1,10 +1,15 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; + +// Libraries import { Storage } from "src/libraries/Storage.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; + /// @notice ProtocolVersion is a numeric identifier of the protocol version. type ProtocolVersion is uint256; @@ -36,8 +41,8 @@ contract ProtocolVersions is OwnableUpgradeable, ISemver { event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data); /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.3 - string public constant version = "1.0.1-beta.3"; + /// @custom:semver 1.0.1-beta.5 + string public constant version = "1.0.1-beta.5"; /// @notice Constructs the ProtocolVersion contract. Cannot set /// the owner to `address(0)` due to the Ownable contract's diff --git a/packages/contracts-bedrock/src/L1/SuperchainConfig.sol b/packages/contracts-bedrock/src/L1/SuperchainConfig.sol index 51b13936c81b..cceedbfd9320 100644 --- a/packages/contracts-bedrock/src/L1/SuperchainConfig.sol +++ b/packages/contracts-bedrock/src/L1/SuperchainConfig.sol @@ -1,10 +1,15 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; + +// Libraries import { Storage } from "src/libraries/Storage.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; + /// @custom:proxied true /// @custom:audit none This contracts is not yet audited. /// @title SuperchainConfig @@ -36,8 +41,8 @@ contract SuperchainConfig is Initializable, ISemver { event ConfigUpdate(UpdateType indexed updateType, bytes data); /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.1 - string public constant version = "1.1.1-beta.1"; + /// @custom:semver 1.1.1-beta.3 + string public constant version = "1.1.1-beta.3"; /// @notice Constructs the SuperchainConfig contract. 
constructor() { diff --git a/packages/contracts-bedrock/src/L1/SystemConfig.sol b/packages/contracts-bedrock/src/L1/SystemConfig.sol index afb9525403c7..e268c4be4f51 100644 --- a/packages/contracts-bedrock/src/L1/SystemConfig.sol +++ b/packages/contracts-bedrock/src/L1/SystemConfig.sol @@ -11,9 +11,9 @@ import { Constants } from "src/libraries/Constants.sol"; import { GasPayingToken, IGasToken } from "src/libraries/GasPayingToken.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; /// @custom:proxied true /// @title SystemConfig @@ -137,9 +137,9 @@ contract SystemConfig is OwnableUpgradeable, ISemver, IGasToken { event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data); /// @notice Semantic version. - /// @custom:semver 2.3.0-beta.5 + /// @custom:semver 2.3.0-beta.7 function version() public pure virtual returns (string memory) { - return "2.3.0-beta.5"; + return "2.3.0-beta.7"; } /// @notice Constructs the SystemConfig contract. Cannot set @@ -224,7 +224,6 @@ contract SystemConfig is OwnableUpgradeable, ISemver, IGasToken { _setGasPayingToken(_addresses.gasPayingToken); _setResourceConfig(_config); - require(_gasLimit >= minimumGasLimit(), "SystemConfig: gas limit too low"); } /// @notice Returns the minimum L2 gas limit that can be safely set for the system to diff --git a/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol b/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol index 032109286596..ba8aaa518222 100644 --- a/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol +++ b/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol @@ -3,9 +3,7 @@ pragma solidity 0.8.15; // Contracts import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; -import { IOptimismPortalInterop as IOptimismPortal } from "src/L1/interfaces/IOptimismPortalInterop.sol"; import { SystemConfig } from "src/L1/SystemConfig.sol"; -import { ConfigType } from "src/L2/L1BlockInterop.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; @@ -14,7 +12,9 @@ import { StaticConfig } from "src/libraries/StaticConfig.sol"; import { Storage } from "src/libraries/Storage.sol"; // Interfaces -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { IOptimismPortalInterop as IOptimismPortal } from "interfaces/L1/IOptimismPortalInterop.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { ConfigType } from "interfaces/L2/IL1BlockInterop.sol"; /// @custom:proxied true /// @title SystemConfigInterop @@ -68,9 +68,9 @@ contract SystemConfigInterop is SystemConfig { Storage.setAddress(DEPENDENCY_MANAGER_SLOT, _dependencyManager); } - /// @custom:semver +interop-beta.3 + /// @custom:semver +interop-beta.6 function version() public pure override returns (string memory) { - return string.concat(super.version(), "+interop-beta.3"); + return string.concat(super.version(), "+interop-beta.6"); } /// @notice Internal setter for the gas paying token address, includes validation. 
diff --git a/packages/contracts-bedrock/src/L1/interfaces/IDelayedVetoable.sol b/packages/contracts-bedrock/src/L1/interfaces/IDelayedVetoable.sol deleted file mode 100644 index 53fd16812763..000000000000 --- a/packages/contracts-bedrock/src/L1/interfaces/IDelayedVetoable.sol +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -interface IDelayedVetoable { - error ForwardingEarly(); - error Unauthorized(address expected, address actual); - - event DelayActivated(uint256 delay); - event Forwarded(bytes32 indexed callHash, bytes data); - event Initiated(bytes32 indexed callHash, bytes data); - event Vetoed(bytes32 indexed callHash, bytes data); - - fallback() external; - - function delay() external returns (uint256 delay_); - function initiator() external returns (address initiator_); - function queuedAt(bytes32 _callHash) external returns (uint256 queuedAt_); - function target() external returns (address target_); - function version() external view returns (string memory); - function vetoer() external returns (address vetoer_); - - function __constructor__(address _vetoer, address _initiator, address _target, uint256 _operatingDelay) external; -} diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol deleted file mode 100644 index 210b0ddf8e5e..000000000000 --- a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol +++ /dev/null @@ -1,85 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; - -/// @notice This interface corresponds to the op-contracts/v1.6.0 release of the SystemConfig -/// contract, which has a semver of 2.2.0 as specified in -/// https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 -interface ISystemConfigV160 { - enum UpdateType { - BATCHER, - FEE_SCALARS, - GAS_LIMIT, - UNSAFE_BLOCK_SIGNER - } - - struct Addresses { - address l1CrossDomainMessenger; - address l1ERC721Bridge; - address l1StandardBridge; - address disputeGameFactory; - address optimismPortal; - address optimismMintableERC20Factory; - } - - event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data); - event Initialized(uint8 version); - event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); - - function BATCH_INBOX_SLOT() external view returns (bytes32); - function DISPUTE_GAME_FACTORY_SLOT() external view returns (bytes32); - function L1_CROSS_DOMAIN_MESSENGER_SLOT() external view returns (bytes32); - function L1_ERC_721_BRIDGE_SLOT() external view returns (bytes32); - function L1_STANDARD_BRIDGE_SLOT() external view returns (bytes32); - function OPTIMISM_MINTABLE_ERC20_FACTORY_SLOT() external view returns (bytes32); - function OPTIMISM_PORTAL_SLOT() external view returns (bytes32); - function START_BLOCK_SLOT() external view returns (bytes32); - function UNSAFE_BLOCK_SIGNER_SLOT() external view returns (bytes32); - function VERSION() external view returns (uint256); - function basefeeScalar() external view returns (uint32); - function batchInbox() external view returns (address addr_); - function batcherHash() external view returns (bytes32); - function blobbasefeeScalar() external view returns (uint32); - function disputeGameFactory() external view returns (address addr_); - function gasLimit() external view returns (uint64); - function gasPayingToken() external view returns (address 
addr_, uint8 decimals_); - function gasPayingTokenName() external view returns (string memory name_); - function gasPayingTokenSymbol() external view returns (string memory symbol_); - function initialize( - address _owner, - uint256 _basefeeScalar, - uint256 _blobbasefeeScalar, - bytes32 _batcherHash, - uint64 _gasLimit, - address _unsafeBlockSigner, - IResourceMetering.ResourceConfig memory _config, - address _batchInbox, - Addresses memory _addresses - ) - external; - function isCustomGasToken() external view returns (bool); - function l1CrossDomainMessenger() external view returns (address addr_); - function l1ERC721Bridge() external view returns (address addr_); - function l1StandardBridge() external view returns (address addr_); - function maximumGasLimit() external pure returns (uint64); - function minimumGasLimit() external view returns (uint64); - function optimismMintableERC20Factory() external view returns (address addr_); - function optimismPortal() external view returns (address addr_); - function overhead() external view returns (uint256); - function owner() external view returns (address); - function renounceOwnership() external; - function resourceConfig() external view returns (IResourceMetering.ResourceConfig memory); - function scalar() external view returns (uint256); - function setBatcherHash(bytes32 _batcherHash) external; - function setGasConfig(uint256 _overhead, uint256 _scalar) external; - function setGasConfigEcotone(uint32 _basefeeScalar, uint32 _blobbasefeeScalar) external; - function setGasLimit(uint64 _gasLimit) external; - function setUnsafeBlockSigner(address _unsafeBlockSigner) external; - function startBlock() external view returns (uint256 startBlock_); - function transferOwnership(address newOwner) external; // nosemgrep - function unsafeBlockSigner() external view returns (address addr_); - function version() external pure returns (string memory); - - function __constructor__() external; -} diff --git a/packages/contracts-bedrock/src/L2/BaseFeeVault.sol b/packages/contracts-bedrock/src/L2/BaseFeeVault.sol index 2fd33b9290bf..6f4e67b9d538 100644 --- a/packages/contracts-bedrock/src/L2/BaseFeeVault.sol +++ b/packages/contracts-bedrock/src/L2/BaseFeeVault.sol @@ -1,19 +1,23 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +// Contracts import { FeeVault } from "src/L2/FeeVault.sol"; +// Libraries import { Types } from "src/libraries/Types.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; + /// @custom:proxied true /// @custom:predeploy 0x4200000000000000000000000000000000000019 /// @title BaseFeeVault /// @notice The BaseFeeVault accumulates the base fee that is paid by transactions. contract BaseFeeVault is FeeVault, ISemver { /// @notice Semantic version. - /// @custom:semver 1.5.0-beta.3 - string public constant version = "1.5.0-beta.3"; + /// @custom:semver 1.5.0-beta.5 + string public constant version = "1.5.0-beta.5"; /// @notice Constructs the BaseFeeVault contract. /// @param _recipient Wallet that will receive the fees. 
diff --git a/packages/contracts-bedrock/src/L2/CrossDomainOwnable.sol b/packages/contracts-bedrock/src/L2/CrossDomainOwnable.sol index 3b532f589001..658cfc50f8b5 100644 --- a/packages/contracts-bedrock/src/L2/CrossDomainOwnable.sol +++ b/packages/contracts-bedrock/src/L2/CrossDomainOwnable.sol @@ -1,7 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +// Contracts import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; + +// Libraries import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; /// @title CrossDomainOwnable diff --git a/packages/contracts-bedrock/src/L2/CrossDomainOwnable2.sol b/packages/contracts-bedrock/src/L2/CrossDomainOwnable2.sol index 0711daffce3e..491921066b65 100644 --- a/packages/contracts-bedrock/src/L2/CrossDomainOwnable2.sol +++ b/packages/contracts-bedrock/src/L2/CrossDomainOwnable2.sol @@ -8,7 +8,7 @@ import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { IL2CrossDomainMessenger } from "src/L2/interfaces/IL2CrossDomainMessenger.sol"; +import { IL2CrossDomainMessenger } from "interfaces/L2/IL2CrossDomainMessenger.sol"; /// @title CrossDomainOwnable2 /// @notice This contract extends the OpenZeppelin `Ownable` contract for L2 contracts to be owned diff --git a/packages/contracts-bedrock/src/L2/CrossDomainOwnable3.sol b/packages/contracts-bedrock/src/L2/CrossDomainOwnable3.sol index e940a026df05..e5661a94ed88 100644 --- a/packages/contracts-bedrock/src/L2/CrossDomainOwnable3.sol +++ b/packages/contracts-bedrock/src/L2/CrossDomainOwnable3.sol @@ -8,7 +8,7 @@ import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { IL2CrossDomainMessenger } from "src/L2/interfaces/IL2CrossDomainMessenger.sol"; +import { IL2CrossDomainMessenger } from "interfaces/L2/IL2CrossDomainMessenger.sol"; /// @title CrossDomainOwnable3 /// @notice This contract extends the OpenZeppelin `Ownable` contract for L2 contracts to be owned diff --git a/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol b/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol index b330dfb0ceb4..4762940bd1c7 100644 --- a/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol +++ b/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol @@ -1,12 +1,15 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; +// Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; import { TransientContext, TransientReentrancyAware } from "src/libraries/TransientContext.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; import { SafeCall } from "src/libraries/SafeCall.sol"; -import { IDependencySet } from "src/L2/interfaces/IDependencySet.sol"; -import { IL1BlockInterop } from "src/L2/interfaces/IL1BlockInterop.sol"; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IDependencySet } from "interfaces/L2/IDependencySet.sol"; +import { IL1BlockInterop } from "interfaces/L2/IL1BlockInterop.sol"; /// @notice Thrown when the caller is not DEPOSITOR_ACCOUNT when calling `setInteropStart()` error NotDepositor(); @@ -73,8 +76,8 @@ contract CrossL2Inbox is ISemver, TransientReentrancyAware { address internal constant DEPOSITOR_ACCOUNT = 0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001; /// @notice Semantic version. 
- /// @custom:semver 1.0.0-beta.9 - string public constant version = "1.0.0-beta.9"; + /// @custom:semver 1.0.0-beta.11 + string public constant version = "1.0.0-beta.11"; /// @notice Emitted when a cross chain message is being executed. /// @param msgHash Hash of message payload being executed. diff --git a/packages/contracts-bedrock/src/L2/ETHLiquidity.sol b/packages/contracts-bedrock/src/L2/ETHLiquidity.sol index 6118288df77d..8962f791c26c 100644 --- a/packages/contracts-bedrock/src/L2/ETHLiquidity.sol +++ b/packages/contracts-bedrock/src/L2/ETHLiquidity.sol @@ -9,8 +9,8 @@ import { Unauthorized, NotCustomGasToken } from "src/libraries/errors/CommonErro import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; /// @title ETHLiquidity /// @notice The ETHLiquidity contract allows other contracts to access ETH liquidity without @@ -23,8 +23,8 @@ contract ETHLiquidity is ISemver { event LiquidityMinted(address indexed caller, uint256 value); /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.3 - string public constant version = "1.0.0-beta.3"; + /// @custom:semver 1.0.0-beta.4 + string public constant version = "1.0.0-beta.4"; /// @notice Allows an address to lock ETH liquidity into this contract. function burn() external payable { diff --git a/packages/contracts-bedrock/src/L2/FeeVault.sol b/packages/contracts-bedrock/src/L2/FeeVault.sol index 856985d7827b..86c94fc3f6dc 100644 --- a/packages/contracts-bedrock/src/L2/FeeVault.sol +++ b/packages/contracts-bedrock/src/L2/FeeVault.sol @@ -6,7 +6,7 @@ import { SafeCall } from "src/libraries/SafeCall.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { IL2ToL1MessagePasser } from "src/L2/interfaces/IL2ToL1MessagePasser.sol"; +import { IL2ToL1MessagePasser } from "interfaces/L2/IL2ToL1MessagePasser.sol"; // Libraries import { Types } from "src/libraries/Types.sol"; diff --git a/packages/contracts-bedrock/src/L2/GasPriceOracle.sol b/packages/contracts-bedrock/src/L2/GasPriceOracle.sol index 45f14fe173d0..11b6c897db85 100644 --- a/packages/contracts-bedrock/src/L2/GasPriceOracle.sol +++ b/packages/contracts-bedrock/src/L2/GasPriceOracle.sol @@ -7,8 +7,8 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { Constants } from "src/libraries/Constants.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; /// @custom:proxied true /// @custom:predeploy 0x420000000000000000000000000000000000000F @@ -29,8 +29,8 @@ contract GasPriceOracle is ISemver { uint256 public constant DECIMALS = 6; /// @notice Semantic version. - /// @custom:semver 1.3.1-beta.3 - string public constant version = "1.3.1-beta.3"; + /// @custom:semver 1.3.1-beta.4 + string public constant version = "1.3.1-beta.4"; /// @notice This is the intercept value for the linear regression used to estimate the final size of the /// compressed transaction. 
diff --git a/packages/contracts-bedrock/src/L2/L1Block.sol b/packages/contracts-bedrock/src/L2/L1Block.sol index 0b2ffd2c5782..3767b80988da 100644 --- a/packages/contracts-bedrock/src/L2/L1Block.sol +++ b/packages/contracts-bedrock/src/L2/L1Block.sol @@ -1,11 +1,14 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +// Libraries import { Constants } from "src/libraries/Constants.sol"; import { GasPayingToken, IGasToken } from "src/libraries/GasPayingToken.sol"; import { NotDepositor } from "src/libraries/L1BlockErrors.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; + /// @custom:proxied true /// @custom:predeploy 0x4200000000000000000000000000000000000015 /// @title L1Block @@ -57,9 +60,9 @@ contract L1Block is ISemver, IGasToken { /// @notice The latest L1 blob base fee. uint256 public blobBaseFee; - /// @custom:semver 1.5.1-beta.3 + /// @custom:semver 1.5.1-beta.5 function version() public pure virtual returns (string memory) { - return "1.5.1-beta.3"; + return "1.5.1-beta.5"; } /// @notice Returns the gas paying token, its decimals, name and symbol. diff --git a/packages/contracts-bedrock/src/L2/L1BlockInterop.sol b/packages/contracts-bedrock/src/L2/L1BlockInterop.sol index 2cf6bd96c7d4..7b92b202a052 100644 --- a/packages/contracts-bedrock/src/L2/L1BlockInterop.sol +++ b/packages/contracts-bedrock/src/L2/L1BlockInterop.sol @@ -49,9 +49,9 @@ contract L1BlockInterop is L1Block { /// keccak256(abi.encode(uint256(keccak256("l1Block.identifier.isDeposit")) - 1)) & ~bytes32(uint256(0xff)) uint256 internal constant IS_DEPOSIT_SLOT = 0x921bd3a089295c6e5540e8fba8195448d253efd6f2e3e495b499b627dc36a300; - /// @custom:semver +interop-beta.1 + /// @custom:semver +interop-beta.3 function version() public pure override returns (string memory) { - return string.concat(super.version(), "+interop-beta.1"); + return string.concat(super.version(), "+interop-beta.3"); } /// @notice Returns whether the call was triggered from a a deposit or not. diff --git a/packages/contracts-bedrock/src/L2/L1FeeVault.sol b/packages/contracts-bedrock/src/L2/L1FeeVault.sol index c80c40b98493..1d8a2520c184 100644 --- a/packages/contracts-bedrock/src/L2/L1FeeVault.sol +++ b/packages/contracts-bedrock/src/L2/L1FeeVault.sol @@ -1,19 +1,23 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +// Contracts import { FeeVault } from "src/L2/FeeVault.sol"; +// Libraries import { Types } from "src/libraries/Types.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; + /// @custom:proxied true /// @custom:predeploy 0x420000000000000000000000000000000000001A /// @title L1FeeVault /// @notice The L1FeeVault accumulates the L1 portion of the transaction fees. contract L1FeeVault is FeeVault, ISemver { /// @notice Semantic version. - /// @custom:semver 1.5.0-beta.3 - string public constant version = "1.5.0-beta.3"; + /// @custom:semver 1.5.0-beta.5 + string public constant version = "1.5.0-beta.5"; /// @notice Constructs the L1FeeVault contract. /// @param _recipient Wallet that will receive the fees. 
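The L1BlockInterop hunk above (like SystemConfigInterop and L2StandardBridgeInterop elsewhere in this diff) bumps only the `+interop-beta.N` build suffix; the base semver comes from `super.version()`. A self-contained, hedged sketch of that versioning pattern with hypothetical contract names:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;

// Hypothetical base contract pinning a semver.
contract ExampleBase {
    /// @custom:semver 1.0.0
    function version() public pure virtual returns (string memory) {
        return "1.0.0";
    }
}

// Hypothetical interop variant: appends a build suffix to the inherited version.
contract ExampleInterop is ExampleBase {
    /// @custom:semver +interop-beta.1
    function version() public pure override returns (string memory) {
        return string.concat(super.version(), "+interop-beta.1");
    }
}
```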
diff --git a/packages/contracts-bedrock/src/L2/L2CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L2/L2CrossDomainMessenger.sol index 2461e46d2cf5..668b1756f5a2 100644 --- a/packages/contracts-bedrock/src/L2/L2CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L2/L2CrossDomainMessenger.sol @@ -9,9 +9,9 @@ import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IL2ToL1MessagePasser } from "src/L2/interfaces/IL2ToL1MessagePasser.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IL2ToL1MessagePasser } from "interfaces/L2/IL2ToL1MessagePasser.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; /// @custom:proxied true /// @custom:predeploy 0x4200000000000000000000000000000000000007 @@ -20,8 +20,8 @@ import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; /// L2 on the L2 side. Users are generally encouraged to use this contract instead of lower /// level message passing contracts. contract L2CrossDomainMessenger is CrossDomainMessenger, ISemver { - /// @custom:semver 2.1.1-beta.4 - string public constant version = "2.1.1-beta.4"; + /// @custom:semver 2.1.1-beta.5 + string public constant version = "2.1.1-beta.5"; /// @notice Constructs the L2CrossDomainMessenger contract. constructor() CrossDomainMessenger() { diff --git a/packages/contracts-bedrock/src/L2/L2ERC721Bridge.sol b/packages/contracts-bedrock/src/L2/L2ERC721Bridge.sol index 85c6856e629d..c774c3ad4389 100644 --- a/packages/contracts-bedrock/src/L2/L2ERC721Bridge.sol +++ b/packages/contracts-bedrock/src/L2/L2ERC721Bridge.sol @@ -9,10 +9,10 @@ import { ERC165Checker } from "@openzeppelin/contracts/utils/introspection/ERC16 import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IOptimismMintableERC721 } from "src/universal/interfaces/IOptimismMintableERC721.sol"; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IOptimismMintableERC721 } from "interfaces/universal/IOptimismMintableERC721.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @custom:proxied true /// @custom:predeploy 0x4200000000000000000000000000000000000014 @@ -26,8 +26,8 @@ import { ISemver } from "src/universal/interfaces/ISemver.sol"; /// wait for the one-week challenge period to elapse before their Optimism-native NFT /// can be refunded on L2. contract L2ERC721Bridge is ERC721Bridge, ISemver { - /// @custom:semver 1.8.0-beta.2 - string public constant version = "1.8.0-beta.2"; + /// @custom:semver 1.8.0-beta.3 + string public constant version = "1.8.0-beta.3"; /// @notice Constructs the L2ERC721Bridge contract. 
constructor() ERC721Bridge() { diff --git a/packages/contracts-bedrock/src/L2/L2StandardBridge.sol b/packages/contracts-bedrock/src/L2/L2StandardBridge.sol index 63bda3209fbb..02fac4ee1dda 100644 --- a/packages/contracts-bedrock/src/L2/L2StandardBridge.sol +++ b/packages/contracts-bedrock/src/L2/L2StandardBridge.sol @@ -3,15 +3,15 @@ pragma solidity 0.8.15; // Contracts import { StandardBridge } from "src/universal/StandardBridge.sol"; -import { OptimismMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { OptimismMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; /// @custom:proxied true /// @custom:predeploy 0x4200000000000000000000000000000000000010 @@ -58,9 +58,9 @@ contract L2StandardBridge is StandardBridge, ISemver { ); /// @notice Semantic version. - /// @custom:semver 1.11.1-beta.3 + /// @custom:semver 1.11.1-beta.5 function version() public pure virtual returns (string memory) { - return "1.11.1-beta.3"; + return "1.11.1-beta.5"; } /// @notice Constructs the L2StandardBridge contract. diff --git a/packages/contracts-bedrock/src/L2/L2StandardBridgeInterop.sol b/packages/contracts-bedrock/src/L2/L2StandardBridgeInterop.sol index e17ef29dd964..be7323e0953d 100644 --- a/packages/contracts-bedrock/src/L2/L2StandardBridgeInterop.sol +++ b/packages/contracts-bedrock/src/L2/L2StandardBridgeInterop.sol @@ -10,8 +10,8 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; import { IERC20Metadata } from "@openzeppelin/contracts/token/ERC20/extensions/IERC20Metadata.sol"; -import { IOptimismERC20Factory } from "src/L2/interfaces/IOptimismERC20Factory.sol"; -import { IMintableAndBurnableERC20 } from "src/L2/interfaces/IMintableAndBurnableERC20.sol"; +import { IOptimismERC20Factory } from "interfaces/L2/IOptimismERC20Factory.sol"; +import { IMintableAndBurnableERC20 } from "interfaces/L2/IMintableAndBurnableERC20.sol"; /// @notice Thrown when the decimals of the tokens are not the same. error InvalidDecimals(); @@ -40,9 +40,9 @@ contract L2StandardBridgeInterop is L2StandardBridge { event Converted(address indexed from, address indexed to, address indexed caller, uint256 amount); /// @notice Semantic version. - /// @custom:semver +interop-beta.2 + /// @custom:semver +interop-beta.4 function version() public pure override returns (string memory) { - return string.concat(super.version(), "+interop-beta.2"); + return string.concat(super.version(), "+interop-beta.4"); } /// @notice Converts `amount` of `from` token to `to` token. 
diff --git a/packages/contracts-bedrock/src/L2/L2ToL1MessagePasser.sol b/packages/contracts-bedrock/src/L2/L2ToL1MessagePasser.sol index 94b8213983e1..b17bda73c583 100644 --- a/packages/contracts-bedrock/src/L2/L2ToL1MessagePasser.sol +++ b/packages/contracts-bedrock/src/L2/L2ToL1MessagePasser.sol @@ -1,11 +1,14 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Libraries import { Types } from "src/libraries/Types.sol"; import { Hashing } from "src/libraries/Hashing.sol"; import { Encoding } from "src/libraries/Encoding.sol"; import { Burn } from "src/libraries/Burn.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @custom:proxied true /// @custom:predeploy 0x4200000000000000000000000000000000000016 @@ -48,8 +51,8 @@ contract L2ToL1MessagePasser is ISemver { /// @param amount Amount of ETh that was burned. event WithdrawerBalanceBurnt(uint256 indexed amount); - /// @custom:semver 1.1.1-beta.1 - string public constant version = "1.1.1-beta.1"; + /// @custom:semver 1.1.1-beta.3 + string public constant version = "1.1.1-beta.3"; /// @notice Allows users to withdraw ETH by sending directly to this contract. receive() external payable { diff --git a/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol index 6b1d7327dbc0..2162638ddc43 100644 --- a/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol @@ -1,14 +1,18 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; +// Libraries import { Encoding } from "src/libraries/Encoding.sol"; import { Hashing } from "src/libraries/Hashing.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; -import { CrossL2Inbox, Identifier } from "src/L2/CrossL2Inbox.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; import { SafeCall } from "src/libraries/SafeCall.sol"; import { TransientReentrancyAware } from "src/libraries/TransientContext.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IDependencySet } from "interfaces/L2/IDependencySet.sol"; +import { ICrossL2Inbox, Identifier } from "interfaces/L2/ICrossL2Inbox.sol"; + /// @notice Thrown when a non-written slot in transient storage is attempted to be read from. error NotEntered(); @@ -39,6 +43,9 @@ error ReentrantCall(); /// @notice Thrown when a call to the target contract during message relay fails. error TargetCallFailed(); +/// @notice Thrown when attempting to use a chain ID that is not in the dependency set. +error InvalidChainId(); + /// @custom:proxied true /// @custom:predeploy 0x4200000000000000000000000000000000000023 /// @title L2ToL2CrossDomainMessenger @@ -65,8 +72,8 @@ contract L2ToL2CrossDomainMessenger is ISemver, TransientReentrancyAware { uint16 public constant messageVersion = uint16(0); /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.10 - string public constant version = "1.0.0-beta.10"; + /// @custom:semver 1.0.0-beta.13 + string public constant version = "1.0.0-beta.13"; /// @notice Mapping of message hashes to boolean receipt values. Note that a message will only be present in this /// mapping if it has successfully been relayed on this chain, and can therefore not be relayed again. 
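The hunk above adds an `InvalidChainId` error to the L2ToL2CrossDomainMessenger, and the hunk that follows enforces it: `sendMessage` now reverts when the destination chain is not in the dependency set. A hedged sketch of the pre-check a sender could run before calling the messenger; `MyCrossChainApp` is hypothetical and the imports assume the package's remappings:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.25;

import { Predeploys } from "src/libraries/Predeploys.sol";
import { IDependencySet } from "interfaces/L2/IDependencySet.sol";
import { IL2ToL2CrossDomainMessenger } from "interfaces/L2/IL2ToL2CrossDomainMessenger.sol";

contract MyCrossChainApp {
    /// @notice Mirrors the messenger's new revert condition: only send to chains that the
    ///         L1Block predeploy reports as part of the dependency set.
    function send(uint256 _destination, address _target, bytes calldata _message) external returns (bytes32) {
        require(
            IDependencySet(Predeploys.L1_BLOCK_ATTRIBUTES).isInDependencySet(_destination),
            "destination not in dependency set"
        );
        return IL2ToL2CrossDomainMessenger(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER).sendMessage(
            _destination, _target, _message
        );
    }
}
```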
@@ -130,6 +137,7 @@ contract L2ToL2CrossDomainMessenger is ISemver, TransientReentrancyAware { if (_destination == block.chainid) revert MessageDestinationSameChain(); if (_target == Predeploys.CROSS_L2_INBOX) revert MessageTargetCrossL2Inbox(); if (_target == Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) revert MessageTargetL2ToL2CrossDomainMessenger(); + if (!IDependencySet(Predeploys.L1_BLOCK_ATTRIBUTES).isInDependencySet(_destination)) revert InvalidChainId(); uint256 nonce = messageNonce(); emit SentMessage(_destination, _target, nonce, msg.sender, _message); @@ -159,7 +167,7 @@ contract L2ToL2CrossDomainMessenger is ISemver, TransientReentrancyAware { } // Signal that this is a cross chain call that needs to have the identifier validated - CrossL2Inbox(Predeploys.CROSS_L2_INBOX).validateMessage(_id, keccak256(_sentMessage)); + ICrossL2Inbox(Predeploys.CROSS_L2_INBOX).validateMessage(_id, keccak256(_sentMessage)); // Decode the payload (uint256 destination, address target, uint256 nonce, address sender, bytes memory message) = diff --git a/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20.sol b/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20.sol index c323d8b7577b..53ba32dc051b 100644 --- a/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20.sol +++ b/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20.sol @@ -1,12 +1,17 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; -import { IOptimismSuperchainERC20 } from "src/L2/interfaces/IOptimismSuperchainERC20.sol"; -import { Predeploys } from "src/libraries/Predeploys.sol"; -import { SuperchainERC20 } from "src/L2/SuperchainERC20.sol"; +// Contracts import { Initializable } from "@openzeppelin/contracts-v5/proxy/utils/Initializable.sol"; +import { SuperchainERC20 } from "src/L2/SuperchainERC20.sol"; + +// Libraries +import { Predeploys } from "src/libraries/Predeploys.sol"; import { ZeroAddress, Unauthorized } from "src/libraries/errors/CommonErrors.sol"; +// Interfaces +import { IOptimismSuperchainERC20 } from "interfaces/L2/IOptimismSuperchainERC20.sol"; + /// @custom:proxied true /// @title OptimismSuperchainERC20 /// @notice OptimismSuperchainERC20 is a standard extension of the base ERC20 token contract that unifies ERC20 token @@ -58,8 +63,8 @@ contract OptimismSuperchainERC20 is SuperchainERC20, Initializable { } /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.9 - string public constant override version = "1.0.0-beta.9"; + /// @custom:semver 1.0.0-beta.12 + string public constant override version = "1.0.0-beta.12"; /// @notice Constructs the OptimismSuperchainERC20 contract. 
constructor() { diff --git a/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20Beacon.sol b/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20Beacon.sol index e2b3dc437b0f..58cc15c7b9e0 100644 --- a/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20Beacon.sol +++ b/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20Beacon.sol @@ -1,18 +1,21 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { IBeacon } from "@openzeppelin/contracts/proxy/beacon/IBeacon.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +// Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; +// Interfaces +import { IBeacon } from "@openzeppelin/contracts/proxy/beacon/IBeacon.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; + /// @custom:proxied true /// @custom:predeployed 0x4200000000000000000000000000000000000027 /// @title OptimismSuperchainERC20Beacon /// @notice OptimismSuperchainERC20Beacon is the beacon proxy for the OptimismSuperchainERC20 implementation. contract OptimismSuperchainERC20Beacon is IBeacon, ISemver { /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.2 - string public constant version = "1.0.0-beta.2"; + /// @custom:semver 1.0.0-beta.4 + string public constant version = "1.0.0-beta.4"; /// @inheritdoc IBeacon function implementation() external pure override returns (address) { diff --git a/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20Factory.sol b/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20Factory.sol index 454e3b455d62..8430cdcd2b04 100644 --- a/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20Factory.sol +++ b/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20Factory.sol @@ -1,11 +1,16 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { OptimismSuperchainERC20 } from "src/L2/OptimismSuperchainERC20.sol"; -import { Predeploys } from "src/libraries/Predeploys.sol"; +// Contracts import { BeaconProxy } from "@openzeppelin/contracts-v5/proxy/beacon/BeaconProxy.sol"; +import { OptimismSuperchainERC20 } from "src/L2/OptimismSuperchainERC20.sol"; + +// Libraries import { CREATE3 } from "@rari-capital/solmate/src/utils/CREATE3.sol"; +import { Predeploys } from "src/libraries/Predeploys.sol"; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @custom:proxied /// @custom:predeployed 0x4200000000000000000000000000000000000026 @@ -22,8 +27,8 @@ contract OptimismSuperchainERC20Factory is ISemver { ); /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.4 - string public constant version = "1.0.0-beta.4"; + /// @custom:semver 1.0.0-beta.6 + string public constant version = "1.0.0-beta.6"; /// @notice Mapping of the deployed OptimismSuperchainERC20 to the remote token address. /// This is used to keep track of the token deployments. 
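In the L2ToL2CrossDomainMessenger hunk above, the inbound message check now goes through the `ICrossL2Inbox` interface rather than the concrete `CrossL2Inbox` contract. A hedged sketch of the same validation pattern from a consumer's side; `MessageConsumer` is hypothetical:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.25;

import { Predeploys } from "src/libraries/Predeploys.sol";
import { ICrossL2Inbox, Identifier } from "interfaces/L2/ICrossL2Inbox.sol";

contract MessageConsumer {
    event Consumed(bytes32 indexed payloadHash);

    /// @notice Accepts a payload only if CrossL2Inbox confirms the (identifier, hash) pair
    ///         is a valid cross-chain message; otherwise the inbox call reverts.
    function consume(Identifier calldata _id, bytes calldata _payload) external {
        bytes32 payloadHash = keccak256(_payload);
        ICrossL2Inbox(Predeploys.CROSS_L2_INBOX).validateMessage(_id, payloadHash);
        emit Consumed(payloadHash);
    }
}
```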
diff --git a/packages/contracts-bedrock/src/L2/SequencerFeeVault.sol b/packages/contracts-bedrock/src/L2/SequencerFeeVault.sol index 69a78219e5bd..e5089ff33ac8 100644 --- a/packages/contracts-bedrock/src/L2/SequencerFeeVault.sol +++ b/packages/contracts-bedrock/src/L2/SequencerFeeVault.sol @@ -1,19 +1,23 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +// Contracts import { FeeVault } from "src/L2/FeeVault.sol"; +// Libraries import { Types } from "src/libraries/Types.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; + /// @custom:proxied true /// @custom:predeploy 0x4200000000000000000000000000000000000011 /// @title SequencerFeeVault /// @notice The SequencerFeeVault is the contract that holds any fees paid to the Sequencer during /// transaction processing and block production. contract SequencerFeeVault is FeeVault, ISemver { - /// @custom:semver 1.5.0-beta.3 - string public constant version = "1.5.0-beta.3"; + /// @custom:semver 1.5.0-beta.5 + string public constant version = "1.5.0-beta.5"; /// @notice Constructs the SequencerFeeVault contract. /// @param _recipient Wallet that will receive the fees. diff --git a/packages/contracts-bedrock/src/L2/SuperchainERC20.sol b/packages/contracts-bedrock/src/L2/SuperchainERC20.sol index b9e6bbfbf780..061c2d867b68 100644 --- a/packages/contracts-bedrock/src/L2/SuperchainERC20.sol +++ b/packages/contracts-bedrock/src/L2/SuperchainERC20.sol @@ -1,21 +1,26 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; -import { IERC7802, IERC165 } from "src/L2/interfaces/IERC7802.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { Predeploys } from "src/libraries/Predeploys.sol"; +// Contracts import { ERC20 } from "@solady-v0.0.245/tokens/ERC20.sol"; -import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; + +// Libraries +import { Predeploys } from "src/libraries/Predeploys.sol"; import { Unauthorized } from "src/libraries/errors/CommonErrors.sol"; +// Interfaces +import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IERC7802, IERC165 } from "interfaces/L2/IERC7802.sol"; + /// @title SuperchainERC20 /// @notice A standard ERC20 extension implementing IERC7802 for unified cross-chain fungibility across /// the Superchain. Allows the SuperchainTokenBridge to mint and burn tokens as needed. abstract contract SuperchainERC20 is ERC20, IERC7802, ISemver { /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.5 + /// @custom:semver 1.0.0-beta.8 function version() external view virtual returns (string memory) { - return "1.0.0-beta.5"; + return "1.0.0-beta.8"; } /// @notice Allows the SuperchainTokenBridge to mint tokens. @@ -26,7 +31,7 @@ abstract contract SuperchainERC20 is ERC20, IERC7802, ISemver { _mint(_to, _amount); - emit CrosschainMint(_to, _amount); + emit CrosschainMint(_to, _amount, msg.sender); } /// @notice Allows the SuperchainTokenBridge to burn tokens. 
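The SuperchainERC20 hunks above and below regroup its imports, bump the semver, and add the caller as a third argument to the `CrosschainMint`/`CrosschainBurn` events. For context, a hedged sketch of the minimal surface a concrete token supplies on top of the abstract contract; `MyToken` is hypothetical, and `crosschainMint`/`crosschainBurn` (with the updated events) are inherited as-is:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.25;

import { SuperchainERC20 } from "src/L2/SuperchainERC20.sol";

/// @dev Hypothetical token: only the ERC20 metadata is supplied here; minting and burning
///      remain restricted to the SuperchainTokenBridge by the inherited implementation.
contract MyToken is SuperchainERC20 {
    function name() public pure override returns (string memory) {
        return "My Token";
    }

    function symbol() public pure override returns (string memory) {
        return "MYT";
    }
}
```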
@@ -37,7 +42,7 @@ abstract contract SuperchainERC20 is ERC20, IERC7802, ISemver { _burn(_from, _amount); - emit CrosschainBurn(_from, _amount); + emit CrosschainBurn(_from, _amount, msg.sender); } /// @inheritdoc IERC165 diff --git a/packages/contracts-bedrock/src/L2/SuperchainTokenBridge.sol b/packages/contracts-bedrock/src/L2/SuperchainTokenBridge.sol index 104f8ef95043..fc8c3d961088 100644 --- a/packages/contracts-bedrock/src/L2/SuperchainTokenBridge.sol +++ b/packages/contracts-bedrock/src/L2/SuperchainTokenBridge.sol @@ -6,9 +6,9 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { ZeroAddress, Unauthorized } from "src/libraries/errors/CommonErrors.sol"; // Interfaces -import { ISuperchainERC20 } from "src/L2/interfaces/ISuperchainERC20.sol"; -import { IERC7802, IERC165 } from "src/L2/interfaces/IERC7802.sol"; -import { IL2ToL2CrossDomainMessenger } from "src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol"; +import { ISuperchainERC20 } from "interfaces/L2/ISuperchainERC20.sol"; +import { IERC7802, IERC165 } from "interfaces/L2/IERC7802.sol"; +import { IL2ToL2CrossDomainMessenger } from "interfaces/L2/IL2ToL2CrossDomainMessenger.sol"; /// @custom:proxied true /// @custom:predeploy 0x4200000000000000000000000000000000000028 @@ -46,8 +46,8 @@ contract SuperchainTokenBridge { address internal constant MESSENGER = Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER; /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.3 - string public constant version = "1.0.0-beta.3"; + /// @custom:semver 1.0.0-beta.4 + string public constant version = "1.0.0-beta.4"; /// @notice Sends tokens to a target address on another chain. /// @dev Tokens are burned on the source chain. diff --git a/packages/contracts-bedrock/src/L2/SuperchainWETH.sol b/packages/contracts-bedrock/src/L2/SuperchainWETH.sol index 29e179eba82c..ab6ff44a33ab 100644 --- a/packages/contracts-bedrock/src/L2/SuperchainWETH.sol +++ b/packages/contracts-bedrock/src/L2/SuperchainWETH.sol @@ -5,16 +5,18 @@ pragma solidity 0.8.15; import { WETH98 } from "src/universal/WETH98.sol"; // Libraries +import { NotCustomGasToken, Unauthorized, ZeroAddress } from "src/libraries/errors/CommonErrors.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { Preinstalls } from "src/libraries/Preinstalls.sol"; +import { SafeSend } from "src/universal/SafeSend.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; -import { IETHLiquidity } from "src/L2/interfaces/IETHLiquidity.sol"; -import { IERC7802, IERC165 } from "src/L2/interfaces/IERC7802.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IL2ToL2CrossDomainMessenger } from "interfaces/L2/IL2ToL2CrossDomainMessenger.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; +import { IETHLiquidity } from "interfaces/L2/IETHLiquidity.sol"; +import { IERC7802, IERC165 } from "interfaces/L2/IERC7802.sol"; import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import { Unauthorized, NotCustomGasToken } from "src/libraries/errors/CommonErrors.sol"; /// @custom:proxied true /// @custom:predeploy 0x4200000000000000000000000000000000000024 @@ -23,9 +25,26 @@ import { Unauthorized, NotCustomGasToken } from "src/libraries/errors/CommonErro /// within the superchain. SuperchainWETH can be converted into native ETH on chains that /// do not use a custom gas token. 
contract SuperchainWETH is WETH98, IERC7802, ISemver { + /// @notice Thrown when attempting to relay a message and the cross domain message sender is not SuperchainWETH. + error InvalidCrossDomainSender(); + + /// @notice Emitted when ETH is sent from one chain to another. + /// @param from Address of the sender. + /// @param to Address of the recipient. + /// @param amount Amount of ETH sent. + /// @param destination Chain ID of the destination chain. + event SendETH(address indexed from, address indexed to, uint256 amount, uint256 destination); + + /// @notice Emitted whenever ETH is successfully relayed on this chain. + /// @param from Address of the msg.sender of sendETH on the source chain. + /// @param to Address of the recipient. + /// @param amount Amount of ETH relayed. + /// @param source Chain ID of the source chain. + event RelayETH(address indexed from, address indexed to, uint256 amount, uint256 source); + /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.10 - string public constant version = "1.0.0-beta.10"; + /// @custom:semver 1.0.0-beta.13 + string public constant version = "1.0.0-beta.13"; /// @inheritdoc WETH98 function deposit() public payable override { @@ -69,12 +88,13 @@ contract SuperchainWETH is WETH98, IERC7802, ISemver { _mint(_to, _amount); - // Mint from ETHLiquidity contract. + // Withdraw from ETHLiquidity contract. if (!IL1Block(Predeploys.L1_BLOCK_ATTRIBUTES).isCustomGasToken()) { + // NOTE: 'mint' will soon change to 'withdraw'. IETHLiquidity(Predeploys.ETH_LIQUIDITY).mint(_amount); } - emit CrosschainMint(_to, _amount); + emit CrosschainMint(_to, _amount, msg.sender); } /// @notice Allows the SuperchainTokenBridge to burn tokens. @@ -85,12 +105,13 @@ contract SuperchainWETH is WETH98, IERC7802, ISemver { _burn(_from, _amount); - // Burn to ETHLiquidity contract. + // Deposit to ETHLiquidity contract. if (!IL1Block(Predeploys.L1_BLOCK_ATTRIBUTES).isCustomGasToken()) { + // NOTE: 'burn' will soon change to 'deposit'. IETHLiquidity(Predeploys.ETH_LIQUIDITY).burn{ value: _amount }(); } - emit CrosschainBurn(_from, _amount); + emit CrosschainBurn(_from, _amount, msg.sender); } /// @inheritdoc IERC165 @@ -98,4 +119,53 @@ contract SuperchainWETH is WETH98, IERC7802, ISemver { return _interfaceId == type(IERC7802).interfaceId || _interfaceId == type(IERC20).interfaceId || _interfaceId == type(IERC165).interfaceId; } + + /// @notice Sends ETH to some target address on another chain. + /// @param _to Address to send ETH to. + /// @param _chainId Chain ID of the destination chain. + /// @return msgHash_ Hash of the message sent. + function sendETH(address _to, uint256 _chainId) external payable returns (bytes32 msgHash_) { + if (_to == address(0)) revert ZeroAddress(); + + if (IL1Block(Predeploys.L1_BLOCK_ATTRIBUTES).isCustomGasToken()) { + revert NotCustomGasToken(); + } + + // NOTE: 'burn' will soon change to 'deposit'. + IETHLiquidity(Predeploys.ETH_LIQUIDITY).burn{ value: msg.value }(); + + msgHash_ = IL2ToL2CrossDomainMessenger(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER).sendMessage({ + _destination: _chainId, + _target: address(this), + _message: abi.encodeCall(this.relayETH, (msg.sender, _to, msg.value)) + }); + + emit SendETH(msg.sender, _to, msg.value, _chainId); + } + + /// @notice Relays ETH received from another chain. + /// @param _from Address of the msg.sender of sendETH on the source chain. + /// @param _to Address to relay ETH to. + /// @param _amount Amount of ETH to relay. 
+ function relayETH(address _from, address _to, uint256 _amount) external { + if (msg.sender != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) revert Unauthorized(); + + (address crossDomainMessageSender, uint256 source) = + IL2ToL2CrossDomainMessenger(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER).crossDomainMessageContext(); + + if (crossDomainMessageSender != address(this)) revert InvalidCrossDomainSender(); + + if (IL1Block(Predeploys.L1_BLOCK_ATTRIBUTES).isCustomGasToken()) { + // Since ETH is not the native asset on custom gas token chains, send SuperchainWETH to the recipient. + _mint(_to, _amount); + } else { + // NOTE: 'mint' will soon change to 'withdraw'. + IETHLiquidity(Predeploys.ETH_LIQUIDITY).mint(_amount); + + // This is a forced ETH send to the recipient, the recipient should NOT expect to be called. + new SafeSend{ value: _amount }(payable(_to)); + } + + emit RelayETH(_from, _to, _amount, source); + } } diff --git a/packages/contracts-bedrock/src/L2/WETH.sol b/packages/contracts-bedrock/src/L2/WETH.sol index dacd62c36de9..5dc716fca569 100644 --- a/packages/contracts-bedrock/src/L2/WETH.sol +++ b/packages/contracts-bedrock/src/L2/WETH.sol @@ -8,14 +8,14 @@ import { WETH98 } from "src/universal/WETH98.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; /// @title WETH contract that reads the name and symbol from the L1Block contract. /// Allows for nice rendering of token names for chains using custom gas token. contract WETH is WETH98, ISemver { - /// @custom:semver 1.1.0-beta.3 - string public constant version = "1.1.0-beta.3"; + /// @custom:semver 1.1.0-beta.4 + string public constant version = "1.1.0-beta.4"; /// @notice Returns the name of the wrapped native asset. Will be "Wrapped Ether" /// if the native asset is Ether. 
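The SuperchainWETH hunks above add a `sendETH`/`relayETH` pair: `sendETH` locks the attached ETH into ETHLiquidity, emits `SendETH`, and dispatches a `relayETH` message to the destination chain via the L2ToL2CrossDomainMessenger. A hedged sketch of a caller; `EthForwarder` and its local interface are hypothetical (the old ISuperchainWETH interface file is removed immediately below), and the predeploy address comes from the `@custom:predeploy` tag above:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;

/// @dev Minimal local interface for this sketch only.
interface ISuperchainWETHSendETH {
    function sendETH(address _to, uint256 _chainId) external payable returns (bytes32 msgHash_);
}

/// @dev Hypothetical caller of the new SuperchainWETH flow.
contract EthForwarder {
    /// @notice SuperchainWETH predeploy, per the @custom:predeploy tag above.
    address internal constant SUPERCHAIN_WETH = 0x4200000000000000000000000000000000000024;

    /// @notice Sends the attached ETH to `_to` on chain `_chainId`; delivery on the
    ///         destination chain happens through SuperchainWETH.relayETH.
    function forward(address _to, uint256 _chainId) external payable returns (bytes32 msgHash_) {
        msgHash_ = ISuperchainWETHSendETH(SUPERCHAIN_WETH).sendETH{ value: msg.value }(_to, _chainId);
    }
}
```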
diff --git a/packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol b/packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol deleted file mode 100644 index fa10b237dbb5..000000000000 --- a/packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -import { IWETH98 } from "src/universal/interfaces/IWETH98.sol"; -import { IERC7802 } from "src/L2/interfaces/IERC7802.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; - -interface ISuperchainWETH is IWETH98, IERC7802, ISemver { - error Unauthorized(); - error NotCustomGasToken(); - - function balanceOf(address src) external view returns (uint256); - function withdraw(uint256 _amount) external; - function supportsInterface(bytes4 _interfaceId) external view returns (bool); - - function __constructor__() external; -} diff --git a/packages/contracts-bedrock/src/cannon/MIPS.sol b/packages/contracts-bedrock/src/cannon/MIPS.sol index 5efab0ce98f1..b1fad0b788c8 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS.sol @@ -1,14 +1,17 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IPreimageOracle } from "./interfaces/IPreimageOracle.sol"; +// Libraries import { MIPSInstructions as ins } from "src/cannon/libraries/MIPSInstructions.sol"; import { MIPSSyscalls as sys } from "src/cannon/libraries/MIPSSyscalls.sol"; import { MIPSState as st } from "src/cannon/libraries/MIPSState.sol"; import { MIPSMemory } from "src/cannon/libraries/MIPSMemory.sol"; import { InvalidRMWInstruction } from "src/cannon/libraries/CannonErrors.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; + /// @title MIPS /// @notice The MIPS contract emulates a single MIPS instruction. /// Note that delay slots are isolated instructions: @@ -44,8 +47,8 @@ contract MIPS is ISemver { } /// @notice The semantic version of the MIPS contract. - /// @custom:semver 1.2.1-beta.7 - string public constant version = "1.2.1-beta.7"; + /// @custom:semver 1.2.1-beta.10 + string public constant version = "1.2.1-beta.10"; /// @notice The preimage oracle contract. 
IPreimageOracle internal immutable ORACLE; @@ -180,7 +183,7 @@ contract MIPS is ISemver { }); (v0, v1, state.preimageOffset, state.memRoot,,) = sys.handleSysRead(args); } else if (syscall_no == sys.SYS_WRITE) { - (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite({ + sys.SysWriteParams memory args = sys.SysWriteParams({ _a0: a0, _a1: a1, _a2: a2, @@ -189,6 +192,7 @@ contract MIPS is ISemver { _proofOffset: MIPSMemory.memoryProofOffset(STEP_PROOF_OFFSET, 1), _memRoot: state.memRoot }); + (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite(args); } else if (syscall_no == sys.SYS_FCNTL) { (v0, v1) = sys.handleSysFcntl(a0, a1); } diff --git a/packages/contracts-bedrock/src/cannon/MIPS2.sol b/packages/contracts-bedrock/src/cannon/MIPS2.sol index 1d73473fde24..e2af829be6e9 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS2.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS2.sol @@ -1,8 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IPreimageOracle } from "./interfaces/IPreimageOracle.sol"; +// Libraries import { MIPSMemory } from "src/cannon/libraries/MIPSMemory.sol"; import { MIPSSyscalls as sys } from "src/cannon/libraries/MIPSSyscalls.sol"; import { MIPSState as st } from "src/cannon/libraries/MIPSState.sol"; @@ -12,6 +11,10 @@ import { InvalidMemoryProof, InvalidRMWInstruction, InvalidSecondMemoryProof } from "src/cannon/libraries/CannonErrors.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; + /// @title MIPS2 /// @notice The MIPS2 contract emulates a single MIPS instruction. /// It differs from MIPS.sol in that it supports multi-threading. @@ -60,8 +63,8 @@ contract MIPS2 is ISemver { } /// @notice The semantic version of the MIPS2 contract. - /// @custom:semver 1.0.0-beta.22 - string public constant version = "1.0.0-beta.22"; + /// @custom:semver 1.0.0-beta.25 + string public constant version = "1.0.0-beta.25"; /// @notice The preimage oracle contract. 
IPreimageOracle internal immutable ORACLE; @@ -459,7 +462,7 @@ contract MIPS2 is ISemver { // Encapsulate execution to avoid stack-too-deep error (v0, v1) = execSysRead(state, args); } else if (syscall_no == sys.SYS_WRITE) { - (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite({ + sys.SysWriteParams memory args = sys.SysWriteParams({ _a0: a0, _a1: a1, _a2: a2, @@ -468,6 +471,7 @@ contract MIPS2 is ISemver { _proofOffset: MIPSMemory.memoryProofOffset(MEM_PROOF_OFFSET, 1), _memRoot: state.memRoot }); + (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite(args); } else if (syscall_no == sys.SYS_FCNTL) { (v0, v1) = sys.handleSysFcntl(a0, a1); } else if (syscall_no == sys.SYS_GETTID) { diff --git a/packages/contracts-bedrock/src/cannon/MIPS64.sol b/packages/contracts-bedrock/src/cannon/MIPS64.sol index 53dd0649405d..51e60ba37cea 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS64.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS64.sol @@ -1,8 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IPreimageOracle } from "./interfaces/IPreimageOracle.sol"; +// Libraries import { MIPS64Memory } from "src/cannon/libraries/MIPS64Memory.sol"; import { MIPS64Syscalls as sys } from "src/cannon/libraries/MIPS64Syscalls.sol"; import { MIPS64State as st } from "src/cannon/libraries/MIPS64State.sol"; @@ -13,6 +12,10 @@ import { InvalidMemoryProof, InvalidRMWInstruction, InvalidSecondMemoryProof } from "src/cannon/libraries/CannonErrors.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; + /// @title MIPS64 /// @notice The MIPS64 contract emulates a single MIPS instruction. /// It differs from MIPS.sol in that it supports MIPS64 instructions and multi-tasking. @@ -64,8 +67,8 @@ contract MIPS64 is ISemver { } /// @notice The semantic version of the MIPS64 contract. - /// @custom:semver 1.0.0-beta.4 - string public constant version = "1.0.0-beta.4"; + /// @custom:semver 1.0.0-beta.7 + string public constant version = "1.0.0-beta.7"; /// @notice The preimage oracle contract. 
IPreimageOracle internal immutable ORACLE; @@ -501,7 +504,7 @@ contract MIPS64 is ISemver { // Encapsulate execution to avoid stack-too-deep error (v0, v1) = execSysRead(state, args); } else if (syscall_no == sys.SYS_WRITE) { - (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite({ + sys.SysWriteParams memory args = sys.SysWriteParams({ _a0: a0, _a1: a1, _a2: a2, @@ -510,6 +513,7 @@ contract MIPS64 is ISemver { _proofOffset: MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 1), _memRoot: state.memRoot }); + (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite(args); } else if (syscall_no == sys.SYS_FCNTL) { (v0, v1) = sys.handleSysFcntl(a0, a1); } else if (syscall_no == sys.SYS_GETTID) { diff --git a/packages/contracts-bedrock/src/cannon/PreimageOracle.sol b/packages/contracts-bedrock/src/cannon/PreimageOracle.sol index 7ac7c33d99d2..b45a4ac4edd2 100644 --- a/packages/contracts-bedrock/src/cannon/PreimageOracle.sol +++ b/packages/contracts-bedrock/src/cannon/PreimageOracle.sol @@ -26,7 +26,7 @@ import { import { LPPMetaData } from "src/cannon/libraries/CannonTypes.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title PreimageOracle /// @notice A contract for storing permissioned pre-images. @@ -51,8 +51,8 @@ contract PreimageOracle is ISemver { uint256 public constant PRECOMPILE_CALL_RESERVED_GAS = 100_000; /// @notice The semantic version of the Preimage Oracle contract. - /// @custom:semver 1.1.3-beta.6 - string public constant version = "1.1.3-beta.6"; + /// @custom:semver 1.1.3-beta.8 + string public constant version = "1.1.3-beta.8"; //////////////////////////////////////////////////////////////// // Authorized Preimage Parts // diff --git a/packages/contracts-bedrock/src/cannon/libraries/CannonErrors.sol b/packages/contracts-bedrock/src/cannon/libraries/CannonErrors.sol index 3649852cec6f..dd0e78a3a33a 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/CannonErrors.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/CannonErrors.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; /// @notice Thrown when a passed part offset is out of bounds. 
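The MIPS, MIPS2, and MIPS64 hunks above switch `handleSysWrite` from seven positional arguments to a single `SysWriteParams` struct (defined in the MIPSSyscalls/MIPS64Syscalls hunks below), the same struct-argument style the read path already uses to avoid stack-too-deep errors. A hedged, self-contained sketch of the new call shape against the 32-bit library; `SysWriteHarness` is hypothetical and uses placeholder values for the preimage fields, which the stdout path ignores:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;

import { MIPSSyscalls as sys } from "src/cannon/libraries/MIPSSyscalls.sol";

/// @dev Hypothetical harness; real callers (MIPS.sol, MIPS2.sol above) populate the
///      fields from VM state and calldata memory proofs.
contract SysWriteHarness {
    function writeToStdout(uint32 _byteCount)
        external
        pure
        returns (uint32 v0_, uint32 v1_, bytes32 newPreimageKey_, uint32 newPreimageOffset_)
    {
        // For stdout, handleSysWrite reports the full byte count as written and returns
        // the preimage key/offset it was given unchanged (placeholders here).
        sys.SysWriteParams memory args = sys.SysWriteParams({
            _a0: sys.FD_STDOUT,
            _a1: 0,
            _a2: _byteCount,
            _preimageKey: bytes32(0),
            _preimageOffset: 0,
            _proofOffset: 0,
            _memRoot: bytes32(0)
        });
        (v0_, v1_, newPreimageKey_, newPreimageOffset_) = sys.handleSysWrite(args);
    }
}
```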
error PartOffsetOOB(); diff --git a/packages/contracts-bedrock/src/cannon/libraries/CannonTypes.sol b/packages/contracts-bedrock/src/cannon/libraries/CannonTypes.sol index 2e7c50ed862d..26d0a17edae4 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/CannonTypes.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/CannonTypes.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; using LPPMetadataLib for LPPMetaData global; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Arch.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Arch.sol index 34a8d39f5d42..a1d689e731db 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Arch.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Arch.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; library MIPS64Arch { uint64 internal constant WORD_SIZE = 64; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol index d24ec036e819..6191cdfbe0a4 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; +// Libraries import { MIPS64Memory } from "src/cannon/libraries/MIPS64Memory.sol"; import { MIPS64State as st } from "src/cannon/libraries/MIPS64State.sol"; import { MIPS64Arch as arch } from "src/cannon/libraries/MIPS64Arch.sol"; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Memory.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Memory.sol index 9ed97396e10f..2f77fcd599c4 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Memory.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Memory.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; +// Libraries import { InvalidMemoryProof } from "src/cannon/libraries/CannonErrors.sol"; library MIPS64Memory { diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol index 09678148f3e6..c7102dea0fdd 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; +// Libraries import { InvalidExitedValue } from "src/cannon/libraries/CannonErrors.sol"; library MIPS64State { diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol index 10954c64748e..a5b8201ca655 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol @@ -1,9 +1,10 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; +// Libraries import { MIPS64Memory } from "src/cannon/libraries/MIPS64Memory.sol"; import { MIPS64State as st } from "src/cannon/libraries/MIPS64State.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; import { PreimageKeyLib } from "src/cannon/PreimageKeyLib.sol"; import { MIPS64Arch as 
arch } from "src/cannon/libraries/MIPS64Arch.sol"; @@ -29,6 +30,23 @@ library MIPS64Syscalls { bytes32 memRoot; } + /// @custom:field _a0 The file descriptor. + /// @custom:field _a1 The memory address to read from. + /// @custom:field _a2 The number of bytes to read. + /// @custom:field _preimageKey The current preimaageKey. + /// @custom:field _preimageOffset The current preimageOffset. + /// @custom:field _proofOffset The offset of the memory proof in calldata. + /// @custom:field _memRoot The current memory root. + struct SysWriteParams { + uint64 _a0; + uint64 _a1; + uint64 _a2; + bytes32 _preimageKey; + uint64 _preimageOffset; + uint256 _proofOffset; + bytes32 _memRoot; + } + uint64 internal constant U64_MASK = 0xFFffFFffFFffFFff; uint64 internal constant PAGE_ADDR_MASK = 4095; uint64 internal constant PAGE_SIZE = 4096; @@ -308,26 +326,11 @@ library MIPS64Syscalls { } /// @notice Like a Linux write syscall. Splits unaligned writes into aligned writes. - /// @param _a0 The file descriptor. - /// @param _a1 The memory address to read from. - /// @param _a2 The number of bytes to read. - /// @param _preimageKey The current preimaageKey. - /// @param _preimageOffset The current preimageOffset. - /// @param _proofOffset The offset of the memory proof in calldata. - /// @param _memRoot The current memory root. /// @return v0_ The number of bytes written, or -1 on error. /// @return v1_ The error code, or 0 if empty. /// @return newPreimageKey_ The new preimageKey. /// @return newPreimageOffset_ The new preimageOffset. - function handleSysWrite( - uint64 _a0, - uint64 _a1, - uint64 _a2, - bytes32 _preimageKey, - uint64 _preimageOffset, - uint256 _proofOffset, - bytes32 _memRoot - ) + function handleSysWrite(SysWriteParams memory _args) internal pure returns (uint64 v0_, uint64 v1_, bytes32 newPreimageKey_, uint64 newPreimageOffset_) @@ -337,20 +340,22 @@ library MIPS64Syscalls { // returns: v0_ = written, v1_ = err code v0_ = uint64(0); v1_ = uint64(0); - newPreimageKey_ = _preimageKey; - newPreimageOffset_ = _preimageOffset; + newPreimageKey_ = _args._preimageKey; + newPreimageOffset_ = _args._preimageOffset; - if (_a0 == FD_STDOUT || _a0 == FD_STDERR || _a0 == FD_HINT_WRITE) { - v0_ = _a2; // tell program we have written everything + if (_args._a0 == FD_STDOUT || _args._a0 == FD_STDERR || _args._a0 == FD_HINT_WRITE) { + v0_ = _args._a2; // tell program we have written everything } // pre-image oracle - else if (_a0 == FD_PREIMAGE_WRITE) { + else if (_args._a0 == FD_PREIMAGE_WRITE) { // mask the addr to align it to 4 bytes - uint64 mem = MIPS64Memory.readMem(_memRoot, _a1 & arch.ADDRESS_MASK, _proofOffset); - bytes32 key = _preimageKey; + uint64 mem = MIPS64Memory.readMem(_args._memRoot, _args._a1 & arch.ADDRESS_MASK, _args._proofOffset); + bytes32 key = _args._preimageKey; // Construct pre-image key from memory // We use assembly for more precise ops, and no var count limit + uint64 _a1 = _args._a1; + uint64 _a2 = _args._a2; assembly { let alignment := and(_a1, EXT_MASK) // the read might not start at an aligned address let space := sub(WORD_SIZE_BYTES, alignment) // remaining space in memory word @@ -360,11 +365,12 @@ library MIPS64Syscalls { mem := and(shr(mul(sub(space, _a2), 8), mem), mask) // align value to right, mask it key := or(key, mem) // insert into key } + _args._a2 = _a2; // Write pre-image key to oracle newPreimageKey_ = key; newPreimageOffset_ = 0; // reset offset, to read new pre-image data from the start - v0_ = _a2; + v0_ = _args._a2; } else { v0_ = U64_MASK; v1_ = 
EBADF; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSInstructions.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSInstructions.sol index fa4d1451b54b..70ed5e20c3bc 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSInstructions.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSInstructions.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; +// Libraries import { MIPSMemory } from "src/cannon/libraries/MIPSMemory.sol"; import { MIPSState as st } from "src/cannon/libraries/MIPSState.sol"; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSMemory.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSMemory.sol index 015955954b5a..1d3942f70509 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSMemory.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSMemory.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; +// Libraries import { InvalidMemoryProof } from "src/cannon/libraries/CannonErrors.sol"; library MIPSMemory { diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol index f9631e29e082..b2982b5b16af 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; +// Libraries import { InvalidExitedValue } from "src/cannon/libraries/CannonErrors.sol"; library MIPSState { diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol index ec0cfdded894..8fa62dbbad78 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol @@ -1,9 +1,10 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; +// Libraries import { MIPSMemory } from "src/cannon/libraries/MIPSMemory.sol"; import { MIPSState as st } from "src/cannon/libraries/MIPSState.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; import { PreimageKeyLib } from "src/cannon/PreimageKeyLib.sol"; library MIPSSyscalls { @@ -28,6 +29,23 @@ library MIPSSyscalls { bytes32 memRoot; } + /// @custom:field _a0 The file descriptor. + /// @custom:field _a1 The memory address to read from. + /// @custom:field _a2 The number of bytes to read. + /// @custom:field _preimageKey The current preimaageKey. + /// @custom:field _preimageOffset The current preimageOffset. + /// @custom:field _proofOffset The offset of the memory proof in calldata. + /// @custom:field _memRoot The current memory root. + struct SysWriteParams { + uint32 _a0; + uint32 _a1; + uint32 _a2; + bytes32 _preimageKey; + uint32 _preimageOffset; + uint256 _proofOffset; + bytes32 _memRoot; + } + uint32 internal constant SYS_MMAP = 4090; uint32 internal constant SYS_BRK = 4045; uint32 internal constant SYS_CLONE = 4120; @@ -298,26 +316,11 @@ library MIPSSyscalls { } /// @notice Like a Linux write syscall. Splits unaligned writes into aligned writes. - /// @param _a0 The file descriptor. - /// @param _a1 The memory address to read from. - /// @param _a2 The number of bytes to read. - /// @param _preimageKey The current preimaageKey. 
- /// @param _preimageOffset The current preimageOffset. - /// @param _proofOffset The offset of the memory proof in calldata. - /// @param _memRoot The current memory root. /// @return v0_ The number of bytes written, or -1 on error. /// @return v1_ The error code, or 0 if empty. /// @return newPreimageKey_ The new preimageKey. /// @return newPreimageOffset_ The new preimageOffset. - function handleSysWrite( - uint32 _a0, - uint32 _a1, - uint32 _a2, - bytes32 _preimageKey, - uint32 _preimageOffset, - uint256 _proofOffset, - bytes32 _memRoot - ) + function handleSysWrite(SysWriteParams memory _args) internal pure returns (uint32 v0_, uint32 v1_, bytes32 newPreimageKey_, uint32 newPreimageOffset_) @@ -327,20 +330,22 @@ library MIPSSyscalls { // returns: v0_ = written, v1_ = err code v0_ = uint32(0); v1_ = uint32(0); - newPreimageKey_ = _preimageKey; - newPreimageOffset_ = _preimageOffset; + newPreimageKey_ = _args._preimageKey; + newPreimageOffset_ = _args._preimageOffset; - if (_a0 == FD_STDOUT || _a0 == FD_STDERR || _a0 == FD_HINT_WRITE) { - v0_ = _a2; // tell program we have written everything + if (_args._a0 == FD_STDOUT || _args._a0 == FD_STDERR || _args._a0 == FD_HINT_WRITE) { + v0_ = _args._a2; // tell program we have written everything } // pre-image oracle - else if (_a0 == FD_PREIMAGE_WRITE) { + else if (_args._a0 == FD_PREIMAGE_WRITE) { // mask the addr to align it to 4 bytes - uint32 mem = MIPSMemory.readMem(_memRoot, _a1 & 0xFFffFFfc, _proofOffset); - bytes32 key = _preimageKey; + uint32 mem = MIPSMemory.readMem(_args._memRoot, _args._a1 & 0xFFffFFfc, _args._proofOffset); + bytes32 key = _args._preimageKey; // Construct pre-image key from memory // We use assembly for more precise ops, and no var count limit + uint32 _a1 = _args._a1; + uint32 _a2 = _args._a2; assembly { let alignment := and(_a1, 3) // the read might not start at an aligned address let space := sub(4, alignment) // remaining space in memory word @@ -350,11 +355,12 @@ library MIPSSyscalls { mem := and(shr(mul(sub(space, _a2), 8), mem), mask) // align value to right, mask it key := or(key, mem) // insert into key } + _args._a2 = _a2; // Write pre-image key to oracle newPreimageKey_ = key; newPreimageOffset_ = 0; // reset offset, to read new pre-image data from the start - v0_ = _a2; + v0_ = _args._a2; } else { v0_ = 0xFFffFFff; v1_ = EBADF; diff --git a/packages/contracts-bedrock/src/dispute/AnchorStateRegistry.sol b/packages/contracts-bedrock/src/dispute/AnchorStateRegistry.sol index a1b1531523f4..0f48b9f2a4e9 100644 --- a/packages/contracts-bedrock/src/dispute/AnchorStateRegistry.sol +++ b/packages/contracts-bedrock/src/dispute/AnchorStateRegistry.sol @@ -10,11 +10,11 @@ import { Unauthorized } from "src/libraries/errors/CommonErrors.sol"; import { UnregisteredGame, InvalidGameStatus } from "src/dispute/lib/Errors.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { 
ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; /// @custom:proxied true /// @title AnchorStateRegistry @@ -30,8 +30,8 @@ contract AnchorStateRegistry is Initializable, ISemver { } /// @notice Semantic version. - /// @custom:semver 2.0.1-beta.4 - string public constant version = "2.0.1-beta.4"; + /// @custom:semver 2.0.1-beta.5 + string public constant version = "2.0.1-beta.5"; /// @notice DisputeGameFactory address. IDisputeGameFactory internal immutable DISPUTE_GAME_FACTORY; diff --git a/packages/contracts-bedrock/src/dispute/DelayedWETH.sol b/packages/contracts-bedrock/src/dispute/DelayedWETH.sol index 3438c22e3679..f3966d8ac89f 100644 --- a/packages/contracts-bedrock/src/dispute/DelayedWETH.sol +++ b/packages/contracts-bedrock/src/dispute/DelayedWETH.sol @@ -6,8 +6,8 @@ import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/O import { WETH98 } from "src/universal/WETH98.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; /// @custom:proxied true /// @title DelayedWETH @@ -32,8 +32,8 @@ contract DelayedWETH is OwnableUpgradeable, WETH98, ISemver { event Unwrap(address indexed src, uint256 wad); /// @notice Semantic version. - /// @custom:semver 1.2.0-beta.3 - string public constant version = "1.2.0-beta.3"; + /// @custom:semver 1.2.0-beta.4 + string public constant version = "1.2.0-beta.4"; /// @notice Returns a withdrawal request for the given address. mapping(address => mapping(address => WithdrawalRequest)) public withdrawals; diff --git a/packages/contracts-bedrock/src/dispute/DisputeGameFactory.sol b/packages/contracts-bedrock/src/dispute/DisputeGameFactory.sol index 4a512f0ca2aa..583f432f3e0c 100644 --- a/packages/contracts-bedrock/src/dispute/DisputeGameFactory.sol +++ b/packages/contracts-bedrock/src/dispute/DisputeGameFactory.sol @@ -10,8 +10,8 @@ import { GameType, Claim, GameId, Timestamp, Hash, LibGameId } from "src/dispute import { NoImplementation, IncorrectBondAmount, GameAlreadyExists } from "src/dispute/lib/Errors.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; /// @custom:proxied true /// @title DisputeGameFactory @@ -49,8 +49,8 @@ contract DisputeGameFactory is OwnableUpgradeable, ISemver { } /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.3 - string public constant version = "1.0.1-beta.3"; + /// @custom:semver 1.0.1-beta.4 + string public constant version = "1.0.1-beta.4"; /// @notice `gameImpls` is a mapping that maps `GameType`s to their respective /// `IDisputeGame` implementations. 
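The `handleSysWrite` hunks above (in both `MIPS64Syscalls` and `MIPSSyscalls`) collapse seven positional arguments into a single `SysWriteParams` memory struct, with 64-bit fields in the former and 32-bit fields in the latter. The call sites in the MIPS VM contracts are not part of this excerpt, so the following is only a minimal sketch of how a caller might adapt to the new signature; the wrapper contract, function name, and argument sources are hypothetical.

    // SPDX-License-Identifier: MIT
    pragma solidity ^0.8.0;

    import { MIPSSyscalls as sys } from "src/cannon/libraries/MIPSSyscalls.sol";

    /// @notice Hypothetical caller used only to illustrate the struct-based signature;
    ///         the real call site lives in the MIPS VM contract and is not shown here.
    contract SysWriteCallerSketch {
        function exampleWrite(
            uint32 _a0,
            uint32 _a1,
            uint32 _a2,
            bytes32 _preimageKey,
            uint32 _preimageOffset,
            uint256 _proofOffset,
            bytes32 _memRoot
        )
            external
            pure
            returns (uint32 v0_, uint32 v1_, bytes32 newPreimageKey_, uint32 newPreimageOffset_)
        {
            // Every former positional argument now travels in one memory struct, so the
            // same values reach the library with fewer stack variables live at the call site.
            (v0_, v1_, newPreimageKey_, newPreimageOffset_) = sys.handleSysWrite(
                sys.SysWriteParams({
                    _a0: _a0,
                    _a1: _a1,
                    _a2: _a2,
                    _preimageKey: _preimageKey,
                    _preimageOffset: _preimageOffset,
                    _proofOffset: _proofOffset,
                    _memRoot: _memRoot
                })
            );
        }
    }
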
diff --git a/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol b/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol index 2bd5ec67e96c..02a2cee3ca83 100644 --- a/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol @@ -55,10 +55,10 @@ import { } from "src/dispute/lib/Errors.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IBigStepper, IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IBigStepper, IPreimageOracle } from "interfaces/dispute/IBigStepper.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; /// @title FaultDisputeGame /// @notice An implementation of the `IFaultDisputeGame` interface. @@ -86,6 +86,21 @@ contract FaultDisputeGame is Clone, ISemver { address counteredBy; } + /// @notice Parameters for creating a new FaultDisputeGame. We place this into a struct to + /// avoid stack-too-deep errors when compiling without the optimizer enabled. + struct GameConstructorParams { + GameType gameType; + Claim absolutePrestate; + uint256 maxGameDepth; + uint256 splitDepth; + Duration clockExtension; + Duration maxClockDuration; + IBigStepper vm; + IDelayedWETH weth; + IAnchorStateRegistry anchorStateRegistry; + uint256 l2ChainId; + } + //////////////////////////////////////////////////////////////// // Events // //////////////////////////////////////////////////////////////// @@ -146,8 +161,8 @@ contract FaultDisputeGame is Clone, ISemver { uint256 internal constant HEADER_BLOCK_NUMBER_INDEX = 8; /// @notice Semantic version. - /// @custom:semver 1.3.1-beta.7 - string public constant version = "1.3.1-beta.7"; + /// @custom:semver 1.3.1-beta.9 + string public constant version = "1.3.1-beta.9"; /// @notice The starting timestamp of the game Timestamp public createdAt; @@ -189,69 +204,52 @@ contract FaultDisputeGame is Clone, ISemver { /// @notice The latest finalized output root, serving as the anchor for output bisection. OutputRoot public startingOutputRoot; - /// @param _gameType The type ID of the game. - /// @param _absolutePrestate The absolute prestate of the instruction trace. - /// @param _maxGameDepth The maximum depth of bisection. - /// @param _splitDepth The final depth of the output bisection portion of the game. - /// @param _clockExtension The clock extension to perform when the remaining duration is less than the extension. - /// @param _maxClockDuration The maximum amount of time that may accumulate on a team's chess clock. - /// @param _vm An onchain VM that performs single instruction steps on an FPP trace. - /// @param _weth WETH contract for holding ETH. - /// @param _anchorStateRegistry The contract that stores the anchor state for each game type. - /// @param _l2ChainId Chain ID of the L2 network this contract argues about. - constructor( - GameType _gameType, - Claim _absolutePrestate, - uint256 _maxGameDepth, - uint256 _splitDepth, - Duration _clockExtension, - Duration _maxClockDuration, - IBigStepper _vm, - IDelayedWETH _weth, - IAnchorStateRegistry _anchorStateRegistry, - uint256 _l2ChainId - ) { + /// @param _params Parameters for creating a new FaultDisputeGame. 
+ constructor(GameConstructorParams memory _params) { // The max game depth may not be greater than `LibPosition.MAX_POSITION_BITLEN - 1`. - if (_maxGameDepth > LibPosition.MAX_POSITION_BITLEN - 1) revert MaxDepthTooLarge(); + if (_params.maxGameDepth > LibPosition.MAX_POSITION_BITLEN - 1) revert MaxDepthTooLarge(); // The split depth plus one cannot be greater than or equal to the max game depth. We add // an additional depth to the split depth to avoid a bug in trace ancestor lookup. We know // that the case where the split depth is the max value for uint256 is equivalent to the // second check though we do need to check it explicitly to avoid an overflow. - if (_splitDepth == type(uint256).max || _splitDepth + 1 >= _maxGameDepth) revert InvalidSplitDepth(); + if (_params.splitDepth == type(uint256).max || _params.splitDepth + 1 >= _params.maxGameDepth) { + revert InvalidSplitDepth(); + } // The split depth cannot be 0 or 1 to stay in bounds of clock extension arithmetic. - if (_splitDepth < 2) revert InvalidSplitDepth(); + if (_params.splitDepth < 2) revert InvalidSplitDepth(); // The PreimageOracle challenge period must fit into uint64 so we can safely use it here. // Runtime check was added instead of changing the ABI since the contract is already // deployed in production. We perform the same check within the PreimageOracle for the // benefit of developers but also perform this check here defensively. - if (_vm.oracle().challengePeriod() > type(uint64).max) revert InvalidChallengePeriod(); + if (_params.vm.oracle().challengePeriod() > type(uint64).max) revert InvalidChallengePeriod(); // Determine the maximum clock extension which is either the split depth extension or the // maximum game depth extension depending on the configuration of these contracts. - uint256 splitDepthExtension = uint256(_clockExtension.raw()) * 2; - uint256 maxGameDepthExtension = uint256(_clockExtension.raw()) + uint256(_vm.oracle().challengePeriod()); + uint256 splitDepthExtension = uint256(_params.clockExtension.raw()) * 2; + uint256 maxGameDepthExtension = + uint256(_params.clockExtension.raw()) + uint256(_params.vm.oracle().challengePeriod()); uint256 maxClockExtension = Math.max(splitDepthExtension, maxGameDepthExtension); // The maximum clock extension must fit into a uint64. if (maxClockExtension > type(uint64).max) revert InvalidClockExtension(); // The maximum clock extension may not be greater than the maximum clock duration. - if (uint64(maxClockExtension) > _maxClockDuration.raw()) revert InvalidClockExtension(); + if (uint64(maxClockExtension) > _params.maxClockDuration.raw()) revert InvalidClockExtension(); // Set up initial game state. - GAME_TYPE = _gameType; - ABSOLUTE_PRESTATE = _absolutePrestate; - MAX_GAME_DEPTH = _maxGameDepth; - SPLIT_DEPTH = _splitDepth; - CLOCK_EXTENSION = _clockExtension; - MAX_CLOCK_DURATION = _maxClockDuration; - VM = _vm; - WETH = _weth; - ANCHOR_STATE_REGISTRY = _anchorStateRegistry; - L2_CHAIN_ID = _l2ChainId; + GAME_TYPE = _params.gameType; + ABSOLUTE_PRESTATE = _params.absolutePrestate; + MAX_GAME_DEPTH = _params.maxGameDepth; + SPLIT_DEPTH = _params.splitDepth; + CLOCK_EXTENSION = _params.clockExtension; + MAX_CLOCK_DURATION = _params.maxClockDuration; + VM = _params.vm; + WETH = _params.weth; + ANCHOR_STATE_REGISTRY = _params.anchorStateRegistry; + L2_CHAIN_ID = _params.l2ChainId; } /// @notice Initializes the contract. 
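With the constructor hunk above, `FaultDisputeGame` now takes a single `GameConstructorParams` struct, which the added natspec attributes to avoiding stack-too-deep errors when compiling without the optimizer. The sketch below shows how a deployer would build the struct under the new ABI; the helper contract and all literal values are placeholders, not values taken from this change.

    // SPDX-License-Identifier: MIT
    pragma solidity 0.8.15;

    import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol";
    import { GameType, Claim, Duration } from "src/dispute/lib/Types.sol";
    import { IBigStepper } from "interfaces/dispute/IBigStepper.sol";
    import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol";
    import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol";

    /// @notice Hypothetical deployment helper; every literal below is a placeholder.
    contract DeployFaultDisputeGameSketch {
        function deploy(
            IBigStepper _vm,
            IDelayedWETH _weth,
            IAnchorStateRegistry _registry
        )
            external
            returns (FaultDisputeGame game_)
        {
            // The ten former constructor arguments are packed into one struct and
            // passed as a single memory parameter.
            game_ = new FaultDisputeGame(
                FaultDisputeGame.GameConstructorParams({
                    gameType: GameType.wrap(0),
                    absolutePrestate: Claim.wrap(bytes32(0)),
                    maxGameDepth: 73,
                    splitDepth: 30,
                    clockExtension: Duration.wrap(3 hours),
                    maxClockDuration: Duration.wrap(84 hours), // 3.5 days
                    vm: _vm,
                    weth: _weth,
                    anchorStateRegistry: _registry,
                    l2ChainId: 10
                })
            );
        }
    }

The `PermissionedDisputeGame` hunk that follows makes the matching change on the inheriting side: it accepts the same struct and forwards it via `FaultDisputeGame(_params)`, keeping only `_proposer` and `_challenger` as positional arguments.
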
diff --git a/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol b/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol index 373498f55bfa..de907695d006 100644 --- a/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol @@ -5,14 +5,9 @@ pragma solidity 0.8.15; import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol"; // Libraries -import { GameType, Claim, Duration } from "src/dispute/lib/Types.sol"; +import { Claim } from "src/dispute/lib/Types.sol"; import { BadAuth } from "src/dispute/lib/Errors.sol"; -// Interfaces -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; - /// @title PermissionedDisputeGame /// @notice PermissionedDisputeGame is a contract that inherits from `FaultDisputeGame`, and contains two roles: /// - The `challenger` role, which is allowed to challenge a dispute. @@ -36,44 +31,15 @@ contract PermissionedDisputeGame is FaultDisputeGame { _; } - /// @param _gameType The type ID of the game. - /// @param _absolutePrestate The absolute prestate of the instruction trace. - /// @param _maxGameDepth The maximum depth of bisection. - /// @param _splitDepth The final depth of the output bisection portion of the game. - /// @param _clockExtension The clock extension to perform when the remaining duration is less than the extension. - /// @param _maxClockDuration The maximum amount of time that may accumulate on a team's chess clock. - /// @param _vm An onchain VM that performs single instruction steps on an FPP trace. - /// @param _weth WETH contract for holding ETH. - /// @param _anchorStateRegistry The contract that stores the anchor state for each game type. - /// @param _l2ChainId Chain ID of the L2 network this contract argues about. + /// @param _params Parameters for creating a new FaultDisputeGame. /// @param _proposer Address that is allowed to create instances of this contract. /// @param _challenger Address that is allowed to challenge instances of this contract. 
constructor( - GameType _gameType, - Claim _absolutePrestate, - uint256 _maxGameDepth, - uint256 _splitDepth, - Duration _clockExtension, - Duration _maxClockDuration, - IBigStepper _vm, - IDelayedWETH _weth, - IAnchorStateRegistry _anchorStateRegistry, - uint256 _l2ChainId, + GameConstructorParams memory _params, address _proposer, address _challenger ) - FaultDisputeGame( - _gameType, - _absolutePrestate, - _maxGameDepth, - _splitDepth, - _clockExtension, - _maxClockDuration, - _vm, - _weth, - _anchorStateRegistry, - _l2ChainId - ) + FaultDisputeGame(_params) { PROPOSER = _proposer; CHALLENGER = _challenger; diff --git a/packages/contracts-bedrock/src/dispute/lib/Errors.sol b/packages/contracts-bedrock/src/dispute/lib/Errors.sol index 6e95bfa5a342..0dd4af187505 100644 --- a/packages/contracts-bedrock/src/dispute/lib/Errors.sol +++ b/packages/contracts-bedrock/src/dispute/lib/Errors.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.15; +// Libraries import { GameType, Hash, Claim } from "src/dispute/lib/LibUDT.sol"; //////////////////////////////////////////////////////////////// diff --git a/packages/contracts-bedrock/src/dispute/lib/LibUDT.sol b/packages/contracts-bedrock/src/dispute/lib/LibUDT.sol index bfc110f09526..780738a79743 100644 --- a/packages/contracts-bedrock/src/dispute/lib/LibUDT.sol +++ b/packages/contracts-bedrock/src/dispute/lib/LibUDT.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.15; +// Libraries import { Position } from "src/dispute/lib/LibPosition.sol"; using LibClaim for Claim global; diff --git a/packages/contracts-bedrock/src/dispute/lib/Types.sol b/packages/contracts-bedrock/src/dispute/lib/Types.sol index 70df7b7912ba..74106888fb05 100644 --- a/packages/contracts-bedrock/src/dispute/lib/Types.sol +++ b/packages/contracts-bedrock/src/dispute/lib/Types.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.15; +// Libraries import { Position, Hash, diff --git a/packages/contracts-bedrock/src/governance/GovernanceToken.sol b/packages/contracts-bedrock/src/governance/GovernanceToken.sol index 6fb1041555a0..3843c3241b2b 100644 --- a/packages/contracts-bedrock/src/governance/GovernanceToken.sol +++ b/packages/contracts-bedrock/src/governance/GovernanceToken.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; import { ERC20Burnable } from "@openzeppelin/contracts/token/ERC20/extensions/ERC20Burnable.sol"; import { ERC20Votes, ERC20Permit } from "@openzeppelin/contracts/token/ERC20/extensions/ERC20Votes.sol"; diff --git a/packages/contracts-bedrock/src/governance/MintManager.sol b/packages/contracts-bedrock/src/governance/MintManager.sol index 43ef618ba051..0f58e391c512 100644 --- a/packages/contracts-bedrock/src/governance/MintManager.sol +++ b/packages/contracts-bedrock/src/governance/MintManager.sol @@ -5,7 +5,7 @@ pragma solidity 0.8.15; import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; // Interfaces -import { IGovernanceToken } from "src/governance/interfaces/IGovernanceToken.sol"; +import { IGovernanceToken } from "interfaces/governance/IGovernanceToken.sol"; /// @title MintManager /// @notice Set as `owner` of the governance token and responsible for the token inflation diff --git a/packages/contracts-bedrock/src/legacy/AddressManager.sol b/packages/contracts-bedrock/src/legacy/AddressManager.sol index daaac412e0a0..e3b2e9274cc9 100644 --- 
a/packages/contracts-bedrock/src/legacy/AddressManager.sol +++ b/packages/contracts-bedrock/src/legacy/AddressManager.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; /// @custom:legacy true diff --git a/packages/contracts-bedrock/src/legacy/DeployerWhitelist.sol b/packages/contracts-bedrock/src/legacy/DeployerWhitelist.sol index a7a6313edf4b..05a7798aeca3 100644 --- a/packages/contracts-bedrock/src/legacy/DeployerWhitelist.sol +++ b/packages/contracts-bedrock/src/legacy/DeployerWhitelist.sol @@ -1,7 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @custom:legacy true /// @custom:proxied true @@ -41,8 +42,8 @@ contract DeployerWhitelist is ISemver { } /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.1 - string public constant version = "1.1.1-beta.1"; + /// @custom:semver 1.1.1-beta.3 + string public constant version = "1.1.1-beta.3"; /// @notice Adds or removes an address from the deployment whitelist. /// @param _deployer Address to update permissions for. diff --git a/packages/contracts-bedrock/src/legacy/L1BlockNumber.sol b/packages/contracts-bedrock/src/legacy/L1BlockNumber.sol index 19a595a3fad3..6e0e33fa9487 100644 --- a/packages/contracts-bedrock/src/legacy/L1BlockNumber.sol +++ b/packages/contracts-bedrock/src/legacy/L1BlockNumber.sol @@ -5,8 +5,8 @@ pragma solidity 0.8.15; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; /// @custom:legacy true /// @custom:proxied true @@ -18,8 +18,8 @@ import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; /// contract instead. contract L1BlockNumber is ISemver { /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.2 - string public constant version = "1.1.1-beta.2"; + /// @custom:semver 1.1.1-beta.3 + string public constant version = "1.1.1-beta.3"; /// @notice Returns the L1 block number. 
receive() external payable { diff --git a/packages/contracts-bedrock/src/legacy/L1ChugSplashProxy.sol b/packages/contracts-bedrock/src/legacy/L1ChugSplashProxy.sol index 28125a23f90f..a2a62707c6ee 100644 --- a/packages/contracts-bedrock/src/legacy/L1ChugSplashProxy.sol +++ b/packages/contracts-bedrock/src/legacy/L1ChugSplashProxy.sol @@ -1,8 +1,11 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Libraries import { Constants } from "src/libraries/Constants.sol"; -import { IL1ChugSplashDeployer } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; + +// Interfaces +import { IL1ChugSplashDeployer } from "interfaces/legacy/IL1ChugSplashProxy.sol"; /// @custom:legacy true /// @title L1ChugSplashProxy diff --git a/packages/contracts-bedrock/src/legacy/LegacyMessagePasser.sol b/packages/contracts-bedrock/src/legacy/LegacyMessagePasser.sol index b90d1263f7a9..bdc81ad2839e 100644 --- a/packages/contracts-bedrock/src/legacy/LegacyMessagePasser.sol +++ b/packages/contracts-bedrock/src/legacy/LegacyMessagePasser.sol @@ -1,7 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @custom:legacy true /// @custom:proxied true @@ -14,8 +15,8 @@ contract LegacyMessagePasser is ISemver { mapping(bytes32 => bool) public sentMessages; /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.1 - string public constant version = "1.1.1-beta.1"; + /// @custom:semver 1.1.1-beta.3 + string public constant version = "1.1.1-beta.3"; /// @notice Passes a message to L1. /// @param _message Message to pass to L1. diff --git a/packages/contracts-bedrock/src/legacy/LegacyMintableERC20.sol b/packages/contracts-bedrock/src/legacy/LegacyMintableERC20.sol index b3b588258242..8efd0bf0c493 100644 --- a/packages/contracts-bedrock/src/legacy/LegacyMintableERC20.sol +++ b/packages/contracts-bedrock/src/legacy/LegacyMintableERC20.sol @@ -1,7 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +// Interfaces import { ILegacyMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; /// @title LegacyMintableERC20 diff --git a/packages/contracts-bedrock/src/legacy/ResolvedDelegateProxy.sol b/packages/contracts-bedrock/src/legacy/ResolvedDelegateProxy.sol index 3005456570ff..d4e4a96f7ff1 100644 --- a/packages/contracts-bedrock/src/legacy/ResolvedDelegateProxy.sol +++ b/packages/contracts-bedrock/src/legacy/ResolvedDelegateProxy.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { AddressManager } from "src/legacy/AddressManager.sol"; /// @custom:legacy true diff --git a/packages/contracts-bedrock/src/libraries/Arithmetic.sol b/packages/contracts-bedrock/src/libraries/Arithmetic.sol index dfd47274f7f9..140affaa718d 100644 --- a/packages/contracts-bedrock/src/libraries/Arithmetic.sol +++ b/packages/contracts-bedrock/src/libraries/Arithmetic.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +// Libraries import { SignedMath } from "@openzeppelin/contracts/utils/math/SignedMath.sol"; import { FixedPointMathLib } from "@rari-capital/solmate/src/utils/FixedPointMathLib.sol"; diff --git a/packages/contracts-bedrock/src/libraries/Constants.sol b/packages/contracts-bedrock/src/libraries/Constants.sol index 1cbd61d21a5e..6dcf3611956a 100644 --- a/packages/contracts-bedrock/src/libraries/Constants.sol 
+++ b/packages/contracts-bedrock/src/libraries/Constants.sol @@ -1,7 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +// Interfaces +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; /// @title Constants /// @notice Constants is a library for storing constants. Simple! Don't put everything in here, just diff --git a/packages/contracts-bedrock/src/libraries/Encoding.sol b/packages/contracts-bedrock/src/libraries/Encoding.sol index 84d5f732f5f6..5aa4ee7d3d8a 100644 --- a/packages/contracts-bedrock/src/libraries/Encoding.sol +++ b/packages/contracts-bedrock/src/libraries/Encoding.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +// Libraries import { Types } from "src/libraries/Types.sol"; import { Hashing } from "src/libraries/Hashing.sol"; import { RLPWriter } from "src/libraries/rlp/RLPWriter.sol"; diff --git a/packages/contracts-bedrock/src/libraries/GasPayingToken.sol b/packages/contracts-bedrock/src/libraries/GasPayingToken.sol index 37c06840bd59..bf53367476b5 100644 --- a/packages/contracts-bedrock/src/libraries/GasPayingToken.sol +++ b/packages/contracts-bedrock/src/libraries/GasPayingToken.sol @@ -1,9 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +// Libraries +import { LibString } from "@solady/utils/LibString.sol"; import { Storage } from "src/libraries/Storage.sol"; import { Constants } from "src/libraries/Constants.sol"; -import { LibString } from "@solady/utils/LibString.sol"; /// @title IGasToken /// @notice Implemented by contracts that are aware of the custom gas token used diff --git a/packages/contracts-bedrock/src/libraries/Hashing.sol b/packages/contracts-bedrock/src/libraries/Hashing.sol index 0f0f15678f97..b736ad9e4b7e 100644 --- a/packages/contracts-bedrock/src/libraries/Hashing.sol +++ b/packages/contracts-bedrock/src/libraries/Hashing.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +// Libraries import { Types } from "src/libraries/Types.sol"; import { Encoding } from "src/libraries/Encoding.sol"; diff --git a/packages/contracts-bedrock/src/libraries/rlp/RLPReader.sol b/packages/contracts-bedrock/src/libraries/rlp/RLPReader.sol index 559c53391f70..f342658cb45c 100644 --- a/packages/contracts-bedrock/src/libraries/rlp/RLPReader.sol +++ b/packages/contracts-bedrock/src/libraries/rlp/RLPReader.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.8; +// Libraries import { EmptyItem, UnexpectedString, @@ -8,7 +9,7 @@ import { ContentLengthMismatch, InvalidHeader, UnexpectedList -} from "./RLPErrors.sol"; +} from "src/libraries/rlp/RLPErrors.sol"; /// @custom:attribution https://github.com/hamdiallam/Solidity-RLP /// @title RLPReader diff --git a/packages/contracts-bedrock/src/libraries/trie/MerkleTrie.sol b/packages/contracts-bedrock/src/libraries/trie/MerkleTrie.sol index fa500894b165..cbbf9fdf3d65 100644 --- a/packages/contracts-bedrock/src/libraries/trie/MerkleTrie.sol +++ b/packages/contracts-bedrock/src/libraries/trie/MerkleTrie.sol @@ -1,8 +1,9 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { Bytes } from "../Bytes.sol"; -import { RLPReader } from "../rlp/RLPReader.sol"; +// Libraries +import { Bytes } from "src/libraries/Bytes.sol"; +import { RLPReader } from "src/libraries/rlp/RLPReader.sol"; /// @title MerkleTrie /// @notice MerkleTrie is a small library for verifying standard Ethereum Merkle-Patricia trie diff --git 
a/packages/contracts-bedrock/src/libraries/trie/SecureMerkleTrie.sol b/packages/contracts-bedrock/src/libraries/trie/SecureMerkleTrie.sol index e8eab17917a1..56992fba9972 100644 --- a/packages/contracts-bedrock/src/libraries/trie/SecureMerkleTrie.sol +++ b/packages/contracts-bedrock/src/libraries/trie/SecureMerkleTrie.sol @@ -1,7 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { MerkleTrie } from "./MerkleTrie.sol"; +// Libraries +import { MerkleTrie } from "src/libraries/trie/MerkleTrie.sol"; /// @title SecureMerkleTrie /// @notice SecureMerkleTrie is a thin wrapper around the MerkleTrie library that hashes the input diff --git a/packages/contracts-bedrock/src/periphery/AssetReceiver.sol b/packages/contracts-bedrock/src/periphery/AssetReceiver.sol index ce59d8ef753e..0ee2d52a29bc 100644 --- a/packages/contracts-bedrock/src/periphery/AssetReceiver.sol +++ b/packages/contracts-bedrock/src/periphery/AssetReceiver.sol @@ -1,9 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +// Contracts import { ERC20 } from "@rari-capital/solmate/src/tokens/ERC20.sol"; import { ERC721 } from "@rari-capital/solmate/src/tokens/ERC721.sol"; -import { Transactor } from "./Transactor.sol"; +import { Transactor } from "src/periphery/Transactor.sol"; /// @title AssetReceiver /// @notice AssetReceiver is a minimal contract for receiving funds assets in the form of either diff --git a/packages/contracts-bedrock/src/periphery/Transactor.sol b/packages/contracts-bedrock/src/periphery/Transactor.sol index 01c8cc384ad1..168367b4109e 100644 --- a/packages/contracts-bedrock/src/periphery/Transactor.sol +++ b/packages/contracts-bedrock/src/periphery/Transactor.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +// Contracts import { Owned } from "@rari-capital/solmate/src/auth/Owned.sol"; /// @title Transactor diff --git a/packages/contracts-bedrock/src/periphery/TransferOnion.sol b/packages/contracts-bedrock/src/periphery/TransferOnion.sol index db473a1828ce..292eb9c34232 100644 --- a/packages/contracts-bedrock/src/periphery/TransferOnion.sol +++ b/packages/contracts-bedrock/src/periphery/TransferOnion.sol @@ -1,8 +1,11 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { ReentrancyGuard } from "@openzeppelin/contracts/security/ReentrancyGuard.sol"; import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +// Libraries import { SafeERC20 } from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol"; /// @title TransferOnion diff --git a/packages/contracts-bedrock/src/periphery/drippie/Drippie.sol b/packages/contracts-bedrock/src/periphery/drippie/Drippie.sol index 014694813e46..c7240d015a97 100644 --- a/packages/contracts-bedrock/src/periphery/drippie/Drippie.sol +++ b/packages/contracts-bedrock/src/periphery/drippie/Drippie.sol @@ -1,8 +1,11 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { AssetReceiver } from "../AssetReceiver.sol"; -import { IDripCheck } from "./IDripCheck.sol"; +// Contracts +import { AssetReceiver } from "src/periphery/AssetReceiver.sol"; + +// Interfaces +import { IDripCheck } from "src/periphery/drippie/IDripCheck.sol"; /// @title Drippie /// @notice Drippie is a system for managing automated contract interactions. 
A specific interaction diff --git a/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckBalanceLow.sol b/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckBalanceLow.sol index ff7121051c76..d889a4fa4422 100644 --- a/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckBalanceLow.sol +++ b/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckBalanceLow.sol @@ -1,7 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { IDripCheck } from "../IDripCheck.sol"; +// Interfaces +import { IDripCheck } from "src/periphery/drippie/IDripCheck.sol"; /// @title CheckBalanceLow /// @notice DripCheck for checking if an account's balance is below a given threshold. diff --git a/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckGelatoLow.sol b/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckGelatoLow.sol index a5ef463f89e6..a3c79b3fc367 100644 --- a/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckGelatoLow.sol +++ b/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckGelatoLow.sol @@ -1,8 +1,9 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { IDripCheck } from "../IDripCheck.sol"; -import { IGelatoTreasury } from "src/vendor/interfaces/IGelatoTreasury.sol"; +// Interfaces +import { IGelatoTreasury } from "interfaces/vendor/IGelatoTreasury.sol"; +import { IDripCheck } from "src/periphery/drippie/IDripCheck.sol"; /// @title CheckGelatoLow /// @notice DripCheck for checking if an account's Gelato ETH balance is below some threshold. diff --git a/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckSecrets.sol b/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckSecrets.sol index 5cfc4251b456..f255c2e6964e 100644 --- a/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckSecrets.sol +++ b/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckSecrets.sol @@ -1,7 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { IDripCheck } from "../IDripCheck.sol"; +// Interfaces +import { IDripCheck } from "src/periphery/drippie/IDripCheck.sol"; /// @title CheckSecrets /// @notice DripCheck that checks if specific secrets exist (or not). Supports having a secret that diff --git a/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckTrue.sol b/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckTrue.sol index 1ce7138945c3..18d4956f8e69 100644 --- a/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckTrue.sol +++ b/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckTrue.sol @@ -1,7 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { IDripCheck } from "../IDripCheck.sol"; +// Interfaces +import { IDripCheck } from "src/periphery/drippie/IDripCheck.sol"; /// @title CheckTrue /// @notice DripCheck that always returns true. 
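Across the `legacy/`, `libraries/`, and `periphery/` hunks above the pattern is consistent: imports are grouped under `// Contracts`, `// Libraries`, and `// Interfaces` banner comments, relative imports are replaced with remapping-rooted `src/...` paths, and interface imports move from `src/*/interfaces/` to the top-level `interfaces/` tree. The remapping configuration itself is not part of this excerpt, so the sketch below only shows what a file header following the convention looks like; the contract is hypothetical, the imports are shown purely for grouping (and are otherwise unused), and it assumes the package's Foundry remappings resolve `src/` and `interfaces/` the way these hunks do.

    // SPDX-License-Identifier: MIT
    pragma solidity 0.8.15;

    // Contracts
    import { AssetReceiver } from "src/periphery/AssetReceiver.sol";

    // Libraries
    import { SafeCall } from "src/libraries/SafeCall.sol";

    // Interfaces
    import { ISemver } from "interfaces/universal/ISemver.sol";

    /// @notice Hypothetical contract used only to illustrate the import layout.
    contract ImportConventionSketch is ISemver {
        /// @notice Semantic version.
        string public constant version = "0.0.1";
    }
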
diff --git a/packages/contracts-bedrock/src/periphery/faucet/Faucet.sol b/packages/contracts-bedrock/src/periphery/faucet/Faucet.sol index cce6a83b3e13..66cab547317b 100644 --- a/packages/contracts-bedrock/src/periphery/faucet/Faucet.sol +++ b/packages/contracts-bedrock/src/periphery/faucet/Faucet.sol @@ -1,10 +1,15 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { IFaucetAuthModule } from "src/periphery/faucet/authmodules/IFaucetAuthModule.sol"; -import { SafeCall } from "src/libraries/SafeCall.sol"; +// Contracts import { SafeSend } from "src/universal/SafeSend.sol"; +// Libraries +import { SafeCall } from "src/libraries/SafeCall.sol"; + +// Interfaces +import { IFaucetAuthModule } from "src/periphery/faucet/authmodules/IFaucetAuthModule.sol"; + /// @title Faucet /// @notice Faucet contract that drips ETH to users. contract Faucet { diff --git a/packages/contracts-bedrock/src/periphery/faucet/authmodules/AdminFaucetAuthModule.sol b/packages/contracts-bedrock/src/periphery/faucet/authmodules/AdminFaucetAuthModule.sol index 115810d9d139..c9077943610a 100644 --- a/packages/contracts-bedrock/src/periphery/faucet/authmodules/AdminFaucetAuthModule.sol +++ b/packages/contracts-bedrock/src/periphery/faucet/authmodules/AdminFaucetAuthModule.sol @@ -1,10 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { EIP712 } from "@openzeppelin/contracts/utils/cryptography/draft-EIP712.sol"; import { SignatureChecker } from "@openzeppelin/contracts/utils/cryptography/SignatureChecker.sol"; -import { IFaucetAuthModule } from "./IFaucetAuthModule.sol"; -import { Faucet } from "../Faucet.sol"; + +// Interfaces +import { IFaucetAuthModule } from "src/periphery/faucet/authmodules/IFaucetAuthModule.sol"; +import { Faucet } from "src/periphery/faucet/Faucet.sol"; /// @title AdminFaucetAuthModule /// @notice FaucetAuthModule that allows an admin to sign off on a given faucet drip. Takes an admin diff --git a/packages/contracts-bedrock/src/periphery/faucet/authmodules/IFaucetAuthModule.sol b/packages/contracts-bedrock/src/periphery/faucet/authmodules/IFaucetAuthModule.sol index a94071dd2bef..b4d91febf4a1 100644 --- a/packages/contracts-bedrock/src/periphery/faucet/authmodules/IFaucetAuthModule.sol +++ b/packages/contracts-bedrock/src/periphery/faucet/authmodules/IFaucetAuthModule.sol @@ -1,7 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { Faucet } from "../Faucet.sol"; +// Contracts +import { Faucet } from "src/periphery/faucet/Faucet.sol"; /// @title IFaucetAuthModule /// @notice Interface for faucet authentication modules. diff --git a/packages/contracts-bedrock/src/periphery/op-nft/AttestationStation.sol b/packages/contracts-bedrock/src/periphery/op-nft/AttestationStation.sol deleted file mode 100644 index 4d15862d4358..000000000000 --- a/packages/contracts-bedrock/src/periphery/op-nft/AttestationStation.sol +++ /dev/null @@ -1,59 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { ISemver } from "src/universal/interfaces/ISemver.sol"; - -/// @title AttestationStation -/// @author Optimism Collective -/// @author Gitcoin -/// @notice Where attestations live. -contract AttestationStation is ISemver { - /// @notice Struct representing data that is being attested. - /// @custom:field about Address for which the attestation is about. - /// @custom:field key A bytes32 key for the attestation. - /// @custom:field val The attestation as arbitrary bytes. 
- struct AttestationData { - address about; - bytes32 key; - bytes val; - } - - /// @notice Maps addresses to attestations. Creator => About => Key => Value. - mapping(address => mapping(address => mapping(bytes32 => bytes))) public attestations; - - /// @notice Emitted when Attestation is created. - /// @param creator Address that made the attestation. - /// @param about Address attestation is about. - /// @param key Key of the attestation. - /// @param val Value of the attestation. - event AttestationCreated(address indexed creator, address indexed about, bytes32 indexed key, bytes val); - - /// @notice Semantic version. - /// @custom:semver 1.2.1-beta.1 - string public constant version = "1.2.1-beta.1"; - - /// @notice Allows anyone to create an attestation. - /// @param _about Address that the attestation is about. - /// @param _key A key used to namespace the attestation. - /// @param _val An arbitrary value stored as part of the attestation. - function attest(address _about, bytes32 _key, bytes memory _val) public { - attestations[msg.sender][_about][_key] = _val; - - emit AttestationCreated(msg.sender, _about, _key, _val); - } - - /// @notice Allows anyone to create attestations. - /// @param _attestations An array of AttestationData structs. - function attest(AttestationData[] calldata _attestations) external { - uint256 length = _attestations.length; - for (uint256 i = 0; i < length;) { - AttestationData memory attestation = _attestations[i]; - - attest(attestation.about, attestation.key, attestation.val); - - unchecked { - ++i; - } - } - } -} diff --git a/packages/contracts-bedrock/src/periphery/op-nft/Optimist.sol b/packages/contracts-bedrock/src/periphery/op-nft/Optimist.sol deleted file mode 100644 index b15c0f00044c..000000000000 --- a/packages/contracts-bedrock/src/periphery/op-nft/Optimist.sol +++ /dev/null @@ -1,124 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { ERC721BurnableUpgradeable } from - "@openzeppelin/contracts-upgradeable/token/ERC721/extensions/ERC721BurnableUpgradeable.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; -import { OptimistAllowlist } from "src/periphery/op-nft/OptimistAllowlist.sol"; -import { Strings } from "@openzeppelin/contracts/utils/Strings.sol"; - -/// @author Optimism Collective -/// @author Gitcoin -/// @title Optimist -/// @notice A Soul Bound Token for real humans only(tm). -contract Optimist is ERC721BurnableUpgradeable, ISemver { - /// @notice Attestation key used by the attestor to attest the baseURI. - bytes32 public constant BASE_URI_ATTESTATION_KEY = bytes32("optimist.base-uri"); - - /// @notice Attestor who attests to baseURI. - address public immutable BASE_URI_ATTESTOR; - - /// @notice Address of the AttestationStation contract. - AttestationStation public immutable ATTESTATION_STATION; - - /// @notice Address of the OptimistAllowlist contract. - OptimistAllowlist public immutable OPTIMIST_ALLOWLIST; - - /// @notice Semantic version. - /// @custom:semver 2.1.1-beta.1 - string public constant version = "2.1.1-beta.1"; - - /// @param _name Token name. - /// @param _symbol Token symbol. - /// @param _baseURIAttestor Address of the baseURI attestor. - /// @param _attestationStation Address of the AttestationStation contract. 
- /// @param _optimistAllowlist Address of the OptimistAllowlist contract - constructor( - string memory _name, - string memory _symbol, - address _baseURIAttestor, - AttestationStation _attestationStation, - OptimistAllowlist _optimistAllowlist - ) { - BASE_URI_ATTESTOR = _baseURIAttestor; - ATTESTATION_STATION = _attestationStation; - OPTIMIST_ALLOWLIST = _optimistAllowlist; - initialize(_name, _symbol); - } - - /// @notice Initializes the Optimist contract. - /// @param _name Token name. - /// @param _symbol Token symbol. - function initialize(string memory _name, string memory _symbol) public initializer { - __ERC721_init(_name, _symbol); - __ERC721Burnable_init(); - } - - /// @notice Allows an address to mint an Optimist NFT. Token ID is the uint256 representation - /// of the recipient's address. Recipients must be permitted to mint, eventually anyone - /// will be able to mint. One token per address. - /// @param _recipient Address of the token recipient. - function mint(address _recipient) public { - require(isOnAllowList(_recipient), "Optimist: address is not on allowList"); - _safeMint(_recipient, tokenIdOfAddress(_recipient)); - } - - /// @notice Returns the baseURI for all tokens. - /// @return uri_ BaseURI for all tokens. - function baseURI() public view returns (string memory uri_) { - uri_ = string( - abi.encodePacked( - ATTESTATION_STATION.attestations(BASE_URI_ATTESTOR, address(this), bytes32("optimist.base-uri")) - ) - ); - } - - /// @notice Returns the token URI for a given token by ID - /// @param _tokenId Token ID to query. - /// @return uri_ Token URI for the given token by ID. - function tokenURI(uint256 _tokenId) public view virtual override returns (string memory uri_) { - uri_ = string( - abi.encodePacked( - baseURI(), - "/", - // Properly format the token ID as a 20 byte hex string (address). - Strings.toHexString(_tokenId, 20), - ".json" - ) - ); - } - - /// @notice Checks OptimistAllowlist to determine whether a given address is allowed to mint - /// the Optimist NFT. Since the Optimist NFT will also be used as part of the - /// Citizens House, mints are currently restricted. Eventually anyone will be able - /// to mint. - /// @return allowed_ Whether or not the address is allowed to mint yet. - function isOnAllowList(address _recipient) public view returns (bool allowed_) { - allowed_ = OPTIMIST_ALLOWLIST.isAllowedToMint(_recipient); - } - - /// @notice Returns the token ID for the token owned by a given address. This is the uint256 - /// representation of the given address. - /// @return Token ID for the token owned by the given address. - function tokenIdOfAddress(address _owner) public pure returns (uint256) { - return uint256(uint160(_owner)); - } - - /// @notice Disabled for the Optimist NFT (Soul Bound Token). - function approve(address, uint256) public pure override { - revert("Optimist: soul bound token"); - } - - /// @notice Disabled for the Optimist NFT (Soul Bound Token). - function setApprovalForAll(address, bool) public virtual override { - revert("Optimist: soul bound token"); - } - - /// @notice Prevents transfers of the Optimist NFT (Soul Bound Token). - /// @param _from Address of the token sender. - /// @param _to Address of the token recipient. 
- function _beforeTokenTransfer(address _from, address _to, uint256) internal virtual override { - require(_from == address(0) || _to == address(0), "Optimist: soul bound token"); - } -} diff --git a/packages/contracts-bedrock/src/periphery/op-nft/OptimistAllowlist.sol b/packages/contracts-bedrock/src/periphery/op-nft/OptimistAllowlist.sol deleted file mode 100644 index ffa46116a4c5..000000000000 --- a/packages/contracts-bedrock/src/periphery/op-nft/OptimistAllowlist.sol +++ /dev/null @@ -1,104 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; -import { OptimistConstants } from "src/periphery/op-nft/libraries/OptimistConstants.sol"; - -/// @title OptimistAllowlist -/// @notice Source of truth for whether an address is able to mint an Optimist NFT. -/// isAllowedToMint function checks various signals to return boolean value -/// for whether an address is eligible or not. -contract OptimistAllowlist is ISemver { - /// @notice Attestation key used by the AllowlistAttestor to manually add addresses to the - /// allowlist. - bytes32 public constant OPTIMIST_CAN_MINT_ATTESTATION_KEY = bytes32("optimist.can-mint"); - - /// @notice Attestation key used by Coinbase to issue attestations for Quest participants. - bytes32 public constant COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY = bytes32("coinbase.quest-eligible"); - - /// @notice Address of the AttestationStation contract. - AttestationStation public immutable ATTESTATION_STATION; - - /// @notice Attestor that issues 'optimist.can-mint' attestations. - address public immutable ALLOWLIST_ATTESTOR; - - /// @notice Attestor that issues 'coinbase.quest-eligible' attestations. - address public immutable COINBASE_QUEST_ATTESTOR; - - /// @notice Address of OptimistInviter contract that issues 'optimist.can-mint-from-invite' - /// attestations. - address public immutable OPTIMIST_INVITER; - - /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.1 - string public constant version = "1.1.1-beta.1"; - - /// @param _attestationStation Address of the AttestationStation contract. - /// @param _allowlistAttestor Address of the allowlist attestor. - /// @param _coinbaseQuestAttestor Address of the Coinbase Quest attestor. - /// @param _optimistInviter Address of the OptimistInviter contract. - constructor( - AttestationStation _attestationStation, - address _allowlistAttestor, - address _coinbaseQuestAttestor, - address _optimistInviter - ) { - ATTESTATION_STATION = _attestationStation; - ALLOWLIST_ATTESTOR = _allowlistAttestor; - COINBASE_QUEST_ATTESTOR = _coinbaseQuestAttestor; - OPTIMIST_INVITER = _optimistInviter; - } - - /// @notice Checks whether a given address is allowed to mint the Optimist NFT yet. Since the - /// Optimist NFT will also be used as part of the Citizens House, mints are currently - /// restricted. Eventually anyone will be able to mint. - /// Currently, address is allowed to mint if it satisfies any of the following: - /// 1) Has a valid 'optimist.can-mint' attestation from the allowlist attestor. - /// 2) Has a valid 'coinbase.quest-eligible' attestation from Coinbase Quest attestor - /// 3) Has a valid 'optimist.can-mint-from-invite' attestation from the OptimistInviter - /// contract. - /// @param _claimer Address to check. - /// @return allowed_ Whether or not the address is allowed to mint yet. 
- function isAllowedToMint(address _claimer) public view returns (bool allowed_) { - allowed_ = _hasAttestationFromAllowlistAttestor(_claimer) || _hasAttestationFromCoinbaseQuestAttestor(_claimer) - || _hasAttestationFromOptimistInviter(_claimer); - } - - /// @notice Checks whether an address has a valid 'optimist.can-mint' attestation from the - /// allowlist attestor. - /// @param _claimer Address to check. - /// @return valid_ Whether or not the address has a valid attestation. - function _hasAttestationFromAllowlistAttestor(address _claimer) internal view returns (bool valid_) { - // Expected attestation value is bytes32("true") - valid_ = _hasValidAttestation(ALLOWLIST_ATTESTOR, _claimer, OPTIMIST_CAN_MINT_ATTESTATION_KEY); - } - - /// @notice Checks whether an address has a valid attestation from the Coinbase attestor. - /// @param _claimer Address to check. - /// @return valid_ Whether or not the address has a valid attestation. - function _hasAttestationFromCoinbaseQuestAttestor(address _claimer) internal view returns (bool valid_) { - // Expected attestation value is bytes32("true") - valid_ = _hasValidAttestation(COINBASE_QUEST_ATTESTOR, _claimer, COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY); - } - - /// @notice Checks whether an address has a valid attestation from the OptimistInviter contract. - /// @param _claimer Address to check. - /// @return valid_ Whether or not the address has a valid attestation. - function _hasAttestationFromOptimistInviter(address _claimer) internal view returns (bool valid_) { - // Expected attestation value is the inviter's address - valid_ = _hasValidAttestation( - OPTIMIST_INVITER, _claimer, OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY - ); - } - - /// @notice Checks whether an address has a valid truthy attestation. - /// Any attestation val other than bytes32("") is considered truthy. - /// @param _creator Address that made the attestation. - /// @param _about Address attestation is about. - /// @param _key Key of the attestation. - /// @return valid_ Whether or not the address has a valid truthy attestation. - function _hasValidAttestation(address _creator, address _about, bytes32 _key) internal view returns (bool valid_) { - valid_ = ATTESTATION_STATION.attestations(_creator, _about, _key).length > 0; - } -} diff --git a/packages/contracts-bedrock/src/periphery/op-nft/OptimistInviter.sol b/packages/contracts-bedrock/src/periphery/op-nft/OptimistInviter.sol deleted file mode 100644 index ae0ab9d92657..000000000000 --- a/packages/contracts-bedrock/src/periphery/op-nft/OptimistInviter.sol +++ /dev/null @@ -1,235 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { OptimistConstants } from "src/periphery/op-nft/libraries/OptimistConstants.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; -import { SignatureChecker } from "@openzeppelin/contracts/utils/cryptography/SignatureChecker.sol"; -import { EIP712Upgradeable } from "@openzeppelin/contracts-upgradeable/utils/cryptography/draft-EIP712Upgradeable.sol"; - -/// @custom:upgradeable -/// @title OptimistInviter -/// @notice OptimistInviter issues "optimist.can-invite" and "optimist.can-mint-from-invite" -/// attestations. Accounts that have invites can issue signatures that allow other -/// accounts to claim an invite. The invitee uses a claim and reveal flow to claim the -/// invite to an address of their choosing. 
-/// -/// Parties involved: -/// 1) INVITE_GRANTER: trusted account that can allow accounts to issue invites -/// 2) issuer: account that is allowed to issue invites -/// 3) claimer: account that receives the invites -/// -/// Flow: -/// 1) INVITE_GRANTER calls _setInviteCount to allow an issuer to issue a certain number -/// of invites, and also creates a "optimist.can-invite" attestation for the issuer -/// 2) Off-chain, the issuer signs (EIP-712) a ClaimableInvite to produce a signature -/// 3) Off-chain, invite issuer sends the plaintext ClaimableInvite and the signature -/// to the recipient -/// 4) claimer chooses an address they want to receive the invite on -/// 5) claimer commits the hash of the address they want to receive the invite on and the -/// received signature keccak256(abi.encode(addressToReceiveTo, receivedSignature)) -/// using the commitInvite function -/// 6) claimer waits for the MIN_COMMITMENT_PERIOD to pass. -/// 7) claimer reveals the plaintext ClaimableInvite and the signature using the -/// claimInvite function, receiving the "optimist.can-mint-from-invite" attestation -contract OptimistInviter is ISemver, EIP712Upgradeable { - /// @notice Emitted when an invite is claimed. - /// @param issuer Address that issued the signature. - /// @param claimer Address that claimed the invite. - event InviteClaimed(address indexed issuer, address indexed claimer); - - /// @notice Version used for the EIP712 domain separator. This version is separated from the - /// contract semver because the EIP712 domain separator is used to sign messages, and - /// changing the domain separator invalidates all existing signatures. We should only - /// bump this version if we make a major change to the signature scheme. - string public constant EIP712_VERSION = "1.0.0"; - - /// @notice EIP712 typehash for the ClaimableInvite type. - bytes32 public constant CLAIMABLE_INVITE_TYPEHASH = keccak256("ClaimableInvite(address issuer,bytes32 nonce)"); - - /// @notice Attestation key for that signals that an account was allowed to issue invites - bytes32 public constant CAN_INVITE_ATTESTATION_KEY = bytes32("optimist.can-invite"); - - /// @notice Granter who can set accounts' invite counts. - address public immutable INVITE_GRANTER; - - /// @notice Address of the AttestationStation contract. - AttestationStation public immutable ATTESTATION_STATION; - - /// @notice Minimum age of a commitment (in seconds) before it can be revealed using - /// claimInvite. Currently set to 60 seconds. - /// - /// Prevents an attacker from front-running a commitment by taking the signature in the - /// claimInvite call and quickly committing and claiming it before the the claimer's - /// transaction succeeds. With this, frontrunning a commitment requires that an attacker - /// be able to prevent the honest claimer's claimInvite transaction from being included - /// for this long. - uint256 public constant MIN_COMMITMENT_PERIOD = 60; - - /// @notice Struct that represents a claimable invite that will be signed by the issuer. - /// @custom:field issuer Address that issued the signature. Reason this is explicitly included, - /// and not implicitly assumed to be the recovered address from the - /// signature is that the issuer may be using a ERC-1271 compatible - /// contract wallet, where the recovered address is not the same as the - /// issuer, or the signature is not an ECDSA signature at all. - /// @custom:field nonce Pseudorandom nonce to prevent replay attacks. 
- struct ClaimableInvite { - address issuer; - bytes32 nonce; - } - - /// @notice Maps from hashes to the timestamp when they were committed. - mapping(bytes32 => uint256) public commitmentTimestamps; - - /// @notice Maps from addresses to nonces to whether or not they have been used. - mapping(address => mapping(bytes32 => bool)) public usedNonces; - - /// @notice Maps from addresses to number of invites they have. - mapping(address => uint256) public inviteCounts; - - /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.1 - string public constant version = "1.1.1-beta.1"; - - /// @param _inviteGranter Address of the invite granter. - /// @param _attestationStation Address of the AttestationStation contract. - constructor(address _inviteGranter, AttestationStation _attestationStation) { - INVITE_GRANTER = _inviteGranter; - ATTESTATION_STATION = _attestationStation; - } - - /// @notice Initializes this contract, setting the EIP712 context. - /// Only update the EIP712_VERSION when there is a change to the signature scheme. - /// After the EIP712 version is changed, any signatures issued off-chain but not - /// claimed yet will no longer be accepted by the claimInvite function. Please make - /// sure to notify the issuers that they must re-issue their invite signatures. - /// @param _name Contract name. - function initialize(string memory _name) public initializer { - __EIP712_init(_name, EIP712_VERSION); - } - - /// @notice Allows invite granter to set the number of invites an address has. - /// @param _accounts An array of accounts to update the invite counts of. - /// @param _inviteCount Number of invites to set to. - function setInviteCounts(address[] calldata _accounts, uint256 _inviteCount) public { - // Only invite granter can grant invites - require(msg.sender == INVITE_GRANTER, "OptimistInviter: only invite granter can grant invites"); - - uint256 length = _accounts.length; - - AttestationStation.AttestationData[] memory attestations = new AttestationStation.AttestationData[](length); - - for (uint256 i; i < length;) { - // Set invite count for account to _inviteCount - inviteCounts[_accounts[i]] = _inviteCount; - - // Create an attestation for posterity that the account is allowed to create invites - attestations[i] = AttestationStation.AttestationData({ - about: _accounts[i], - key: CAN_INVITE_ATTESTATION_KEY, - val: bytes("true") - }); - - unchecked { - ++i; - } - } - - ATTESTATION_STATION.attest(attestations); - } - - /// @notice Allows anyone (but likely the claimer) to commit a received signature along with the - /// address to claim to. - /// - /// Before calling this function, the claimer should have received a signature from the - /// issuer off-chain. The claimer then calls this function with the hash of the - /// claimer's address and the received signature. This is necessary to prevent - /// front-running when the invitee is claiming the invite. Without a commit and reveal - /// scheme, anyone who is watching the mempool can take the signature being submitted - /// and front run the transaction to claim the invite to their own address. - /// - /// The same commitment can only be made once, and the function reverts if the - /// commitment has already been made. This prevents griefing where a malicious party can - /// prevent the original claimer from being able to claimInvite. - /// @param _commitment A hash of the claimer and signature concatenated. 
- /// keccak256(abi.encode(_claimer, _signature)) - function commitInvite(bytes32 _commitment) public { - // Check that the commitment hasn't already been made. This prevents griefing where - // a malicious party continuously re-submits the same commitment, preventing the original - // claimer from claiming their invite by resetting the minimum commitment period. - require(commitmentTimestamps[_commitment] == 0, "OptimistInviter: commitment already made"); - - commitmentTimestamps[_commitment] = block.timestamp; - } - - /// @notice Allows anyone to reveal a commitment and claim an invite. - /// The hash, keccak256(abi.encode(_claimer, _signature)), should have been already - /// committed using commitInvite. Before issuing the "optimist.can-mint-from-invite" - /// attestation, this function checks that - /// 1) the hash corresponding to the _claimer and the _signature was committed - /// 2) MIN_COMMITMENT_PERIOD has passed since the commitment was made. - /// 3) the _signature is signed correctly by the issuer - /// 4) the _signature hasn't already been used to claim an invite before - /// 5) the _signature issuer has not used up all of their invites - /// This function doesn't require that the _claimer is calling this function. - /// @param _claimer Address that will be granted the invite. - /// @param _claimableInvite ClaimableInvite struct containing the issuer and nonce. - /// @param _signature Signature signed over the claimable invite. - function claimInvite(address _claimer, ClaimableInvite calldata _claimableInvite, bytes memory _signature) public { - uint256 commitmentTimestamp = commitmentTimestamps[keccak256(abi.encode(_claimer, _signature))]; - - // Make sure the claimer and signature have been committed. - require(commitmentTimestamp > 0, "OptimistInviter: claimer and signature have not been committed yet"); - - // Check that MIN_COMMITMENT_PERIOD has passed since the commitment was made. - require( - commitmentTimestamp + MIN_COMMITMENT_PERIOD <= block.timestamp, - "OptimistInviter: minimum commitment period has not elapsed yet" - ); - - // Generate a EIP712 typed data hash to compare against the signature. - bytes32 digest = _hashTypedDataV4( - keccak256(abi.encode(CLAIMABLE_INVITE_TYPEHASH, _claimableInvite.issuer, _claimableInvite.nonce)) - ); - - // Uses SignatureChecker, which supports both regular ECDSA signatures from EOAs as well as - // ERC-1271 signatures from contract wallets or multi-sigs. This means that if the issuer - // wants to revoke a signature, they can use a smart contract wallet to issue the signature, - // then invalidate the signature after issuing it. - require( - SignatureChecker.isValidSignatureNow(_claimableInvite.issuer, digest, _signature), - "OptimistInviter: invalid signature" - ); - - // The issuer's signature commits to a nonce to prevent replay attacks. - // This checks that the nonce has not been used for this issuer before. The nonces are - // scoped to the issuer address, so the same nonce can be used by different issuers without - // clashing. - require( - usedNonces[_claimableInvite.issuer][_claimableInvite.nonce] == false, - "OptimistInviter: nonce has already been used" - ); - - // Set the nonce as used for the issuer so that it cannot be replayed. - usedNonces[_claimableInvite.issuer][_claimableInvite.nonce] = true; - - // Failing this check means that the issuer has used up all of their existing invites. 
- require(inviteCounts[_claimableInvite.issuer] > 0, "OptimistInviter: issuer has no invites"); - - // Reduce the issuer's invite count by 1. Can be unchecked because we check above that - // count is > 0. - unchecked { - --inviteCounts[_claimableInvite.issuer]; - } - - // Create the attestation that the claimer can mint from the issuer's invite. - // The invite issuer is included in the data of the attestation. - ATTESTATION_STATION.attest( - _claimer, - OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY, - abi.encode(_claimableInvite.issuer) - ); - - emit InviteClaimed(_claimableInvite.issuer, _claimer); - } -} diff --git a/packages/contracts-bedrock/src/periphery/op-nft/libraries/OptimistConstants.sol b/packages/contracts-bedrock/src/periphery/op-nft/libraries/OptimistConstants.sol deleted file mode 100644 index 225f77889495..000000000000 --- a/packages/contracts-bedrock/src/periphery/op-nft/libraries/OptimistConstants.sol +++ /dev/null @@ -1,9 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -/// @title OptimistConstants -/// @notice Library for storing Optimist related constants that are shared in multiple contracts. -library OptimistConstants { - /// @notice Attestation key issued by OptimistInviter allowing the attested account to mint. - bytes32 internal constant OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY = bytes32("optimist.can-mint-from-invite"); -} diff --git a/packages/contracts-bedrock/src/safe/DeputyGuardianModule.sol b/packages/contracts-bedrock/src/safe/DeputyGuardianModule.sol index 673cf2a1efb6..a742c452ef0b 100644 --- a/packages/contracts-bedrock/src/safe/DeputyGuardianModule.sol +++ b/packages/contracts-bedrock/src/safe/DeputyGuardianModule.sol @@ -10,12 +10,12 @@ import { Unauthorized } from "src/libraries/PortalErrors.sol"; import { GameType, Timestamp } from "src/dispute/lib/Types.sol"; // Interfaces -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title DeputyGuardianModule /// @notice This module is intended to be enabled on the Security Council Safe, which will own the Guardian role in the @@ -48,8 +48,8 @@ contract DeputyGuardianModule is ISemver { address internal immutable DEPUTY_GUARDIAN; /// @notice Semantic version. 
- /// @custom:semver 2.0.1-beta.4 - string public constant version = "2.0.1-beta.4"; + /// @custom:semver 2.0.1-beta.5 + string public constant version = "2.0.1-beta.5"; // Constructor to initialize the Safe and baseModule instances constructor(Safe _safe, ISuperchainConfig _superchainConfig, address _deputyGuardian) { diff --git a/packages/contracts-bedrock/src/safe/LivenessGuard.sol b/packages/contracts-bedrock/src/safe/LivenessGuard.sol index aa9a231a4b25..46c0072f7ba4 100644 --- a/packages/contracts-bedrock/src/safe/LivenessGuard.sol +++ b/packages/contracts-bedrock/src/safe/LivenessGuard.sol @@ -1,12 +1,17 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Safe import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; import { Guard as BaseGuard } from "safe-contracts/base/GuardManager.sol"; -import { SafeSigners } from "src/safe/SafeSigners.sol"; import { Enum } from "safe-contracts/common/Enum.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; + +// Libraries import { EnumerableSet } from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; +import { SafeSigners } from "src/safe/SafeSigners.sol"; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title LivenessGuard /// @notice This Guard contract is used to track the liveness of Safe owners. @@ -25,8 +30,8 @@ contract LivenessGuard is ISemver, BaseGuard { event OwnerRecorded(address owner); /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.2 - string public constant version = "1.0.1-beta.2"; + /// @custom:semver 1.0.1-beta.4 + string public constant version = "1.0.1-beta.4"; /// @notice The safe account for which this contract will be the guard. Safe internal immutable SAFE; diff --git a/packages/contracts-bedrock/src/safe/LivenessModule.sol b/packages/contracts-bedrock/src/safe/LivenessModule.sol index cd41c6e2dd53..a033507176cc 100644 --- a/packages/contracts-bedrock/src/safe/LivenessModule.sol +++ b/packages/contracts-bedrock/src/safe/LivenessModule.sol @@ -1,11 +1,16 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Safe import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; import { Enum } from "safe-contracts/common/Enum.sol"; import { OwnerManager } from "safe-contracts/base/OwnerManager.sol"; + +// Contracts import { LivenessGuard } from "src/safe/LivenessGuard.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title LivenessModule /// @notice This module is intended to be used in conjunction with the LivenessGuard. In the event @@ -53,8 +58,8 @@ contract LivenessModule is ISemver { uint256 internal constant GUARD_STORAGE_SLOT = 0x4a204f620c8c5ccdca3fd54d003badd85ba500436a431f0cbda4f558c93c34c8; /// @notice Semantic version. 
- /// @custom:semver 1.2.1-beta.1 - string public constant version = "1.2.1-beta.1"; + /// @custom:semver 1.2.1-beta.3 + string public constant version = "1.2.1-beta.3"; // Constructor to initialize the Safe and baseModule instances constructor( diff --git a/packages/contracts-bedrock/src/universal/CrossDomainMessenger.sol b/packages/contracts-bedrock/src/universal/CrossDomainMessenger.sol index 66c724d3eba9..85d801f0a1a6 100644 --- a/packages/contracts-bedrock/src/universal/CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/universal/CrossDomainMessenger.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Libraries import { Initializable } from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; import { SafeCall } from "src/libraries/SafeCall.sol"; import { Hashing } from "src/libraries/Hashing.sol"; diff --git a/packages/contracts-bedrock/src/universal/ERC721Bridge.sol b/packages/contracts-bedrock/src/universal/ERC721Bridge.sol index 52217fab713c..c989da56c7b5 100644 --- a/packages/contracts-bedrock/src/universal/ERC721Bridge.sol +++ b/packages/contracts-bedrock/src/universal/ERC721Bridge.sol @@ -1,10 +1,15 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { Address } from "@openzeppelin/contracts/utils/Address.sol"; +// Contracts import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol"; +// Libraries +import { Address } from "@openzeppelin/contracts/utils/Address.sol"; + +// Interfaces +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; + /// @title ERC721Bridge /// @notice ERC721Bridge is a base contract for the L1 and L2 ERC721 bridges. abstract contract ERC721Bridge is Initializable { diff --git a/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol b/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol index 34d601cbb65b..62aaf45b2fc3 100644 --- a/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol +++ b/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol @@ -1,14 +1,19 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; import { ERC20Permit } from "@openzeppelin/contracts/token/ERC20/extensions/draft-ERC20Permit.sol"; -import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; -import { IOptimismMintableERC20 } from "src/universal/interfaces/IOptimismMintableERC20.sol"; -import { ILegacyMintableERC20 } from "src/universal/interfaces/ILegacyMintableERC20.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; + +// Libraries import { Preinstalls } from "src/libraries/Preinstalls.sol"; +// Interfaces +import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IOptimismMintableERC20 } from "interfaces/universal/IOptimismMintableERC20.sol"; +import { ILegacyMintableERC20 } from "interfaces/universal/ILegacyMintableERC20.sol"; + /// @title OptimismMintableERC20 /// @notice OptimismMintableERC20 is a standard extension of the base ERC20 token contract designed /// to allow the StandardBridge contracts to mint and burn tokens. This makes it possible to @@ -42,8 +47,8 @@ contract OptimismMintableERC20 is ERC20Permit, ISemver { } /// @notice Semantic version. 
- /// @custom:semver 1.4.0-beta.2 - string public constant version = "1.4.0-beta.2"; + /// @custom:semver 1.4.0-beta.4 + string public constant version = "1.4.0-beta.4"; /// @notice Getter function for the permit2 address. It deterministically deployed /// so it will always be at the same address. It is also included as a preinstall, diff --git a/packages/contracts-bedrock/src/universal/OptimismMintableERC20Factory.sol b/packages/contracts-bedrock/src/universal/OptimismMintableERC20Factory.sol index d7a9cd3372cd..62d2de8e7f3c 100644 --- a/packages/contracts-bedrock/src/universal/OptimismMintableERC20Factory.sol +++ b/packages/contracts-bedrock/src/universal/OptimismMintableERC20Factory.sol @@ -1,10 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { OptimismMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +// Contracts import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol"; -import { IOptimismERC20Factory } from "src/L2/interfaces/IOptimismERC20Factory.sol"; +import { OptimismMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IOptimismERC20Factory } from "interfaces/L2/IOptimismERC20Factory.sol"; /// @custom:proxied true /// @custom:predeployed 0x4200000000000000000000000000000000000012 @@ -48,8 +51,8 @@ contract OptimismMintableERC20Factory is ISemver, Initializable, IOptimismERC20F /// the OptimismMintableERC20 token contract since this contract /// is responsible for deploying OptimismMintableERC20 contracts. /// @notice Semantic version. - /// @custom:semver 1.10.1-beta.4 - string public constant version = "1.10.1-beta.4"; + /// @custom:semver 1.10.1-beta.6 + string public constant version = "1.10.1-beta.6"; /// @notice Constructs the OptimismMintableERC20Factory contract. constructor() { diff --git a/packages/contracts-bedrock/src/universal/OptimismMintableERC721.sol b/packages/contracts-bedrock/src/universal/OptimismMintableERC721.sol index 9dd05e10d1fe..63d4eb29a140 100644 --- a/packages/contracts-bedrock/src/universal/OptimismMintableERC721.sol +++ b/packages/contracts-bedrock/src/universal/OptimismMintableERC721.sol @@ -1,11 +1,16 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { ERC721Enumerable } from "@openzeppelin/contracts/token/ERC721/extensions/ERC721Enumerable.sol"; import { ERC721 } from "@openzeppelin/contracts/token/ERC721/ERC721.sol"; + +// Libraries import { Strings } from "@openzeppelin/contracts/utils/Strings.sol"; -import { IOptimismMintableERC721 } from "src/universal/interfaces/IOptimismMintableERC721.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IOptimismMintableERC721 } from "interfaces/universal/IOptimismMintableERC721.sol"; /// @title OptimismMintableERC721 /// @notice This contract is the remote representation for some token that lives on another network, @@ -41,8 +46,8 @@ contract OptimismMintableERC721 is ERC721Enumerable, ISemver { } /// @notice Semantic version. - /// @custom:semver 1.3.1-beta.3 - string public constant version = "1.3.1-beta.3"; + /// @custom:semver 1.3.1-beta.5 + string public constant version = "1.3.1-beta.5"; /// @param _bridge Address of the bridge on this network. /// @param _remoteChainId Chain ID where the remote token is deployed. 
diff --git a/packages/contracts-bedrock/src/universal/OptimismMintableERC721Factory.sol b/packages/contracts-bedrock/src/universal/OptimismMintableERC721Factory.sol index 7350e0fae0de..aa137378fccd 100644 --- a/packages/contracts-bedrock/src/universal/OptimismMintableERC721Factory.sol +++ b/packages/contracts-bedrock/src/universal/OptimismMintableERC721Factory.sol @@ -1,8 +1,11 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { OptimismMintableERC721 } from "src/universal/OptimismMintableERC721.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title OptimismMintableERC721Factory /// @notice Factory contract for creating OptimismMintableERC721 contracts. @@ -25,8 +28,8 @@ contract OptimismMintableERC721Factory is ISemver { event OptimismMintableERC721Created(address indexed localToken, address indexed remoteToken, address deployer); /// @notice Semantic version. - /// @custom:semver 1.4.1-beta.4 - string public constant version = "1.4.1-beta.4"; + /// @custom:semver 1.4.1-beta.6 + string public constant version = "1.4.1-beta.6"; /// @notice The semver MUST be bumped any time that there is a change in /// the OptimismMintableERC721 token contract since this contract diff --git a/packages/contracts-bedrock/src/universal/Proxy.sol b/packages/contracts-bedrock/src/universal/Proxy.sol index b50b08efca83..4fd53dae06ba 100644 --- a/packages/contracts-bedrock/src/universal/Proxy.sol +++ b/packages/contracts-bedrock/src/universal/Proxy.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Libraries import { Constants } from "src/libraries/Constants.sol"; /// @title Proxy diff --git a/packages/contracts-bedrock/src/universal/ProxyAdmin.sol b/packages/contracts-bedrock/src/universal/ProxyAdmin.sol index dec119398c0f..9e7cd9082428 100644 --- a/packages/contracts-bedrock/src/universal/ProxyAdmin.sol +++ b/packages/contracts-bedrock/src/universal/ProxyAdmin.sol @@ -8,11 +8,11 @@ import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; import { Constants } from "src/libraries/Constants.sol"; // Interfaces -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { IL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; -import { IStaticL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; -import { IStaticERC1967Proxy } from "src/universal/interfaces/IStaticERC1967Proxy.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; +import { IStaticL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; +import { IStaticERC1967Proxy } from "interfaces/universal/IStaticERC1967Proxy.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; /// @title ProxyAdmin /// @notice This is an auxiliary contract meant to be assigned as the admin of an ERC1967 Proxy, diff --git a/packages/contracts-bedrock/src/universal/StandardBridge.sol b/packages/contracts-bedrock/src/universal/StandardBridge.sol index 57af2247a65a..ff01560ad1a8 100644 --- a/packages/contracts-bedrock/src/universal/StandardBridge.sol +++ b/packages/contracts-bedrock/src/universal/StandardBridge.sol @@ -1,18 +1,22 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { IERC20 } from 
"@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import { ERC165Checker } from "@openzeppelin/contracts/utils/introspection/ERC165Checker.sol"; +// Contracts +import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol"; + +// Libraries import { Address } from "@openzeppelin/contracts/utils/Address.sol"; +import { ERC165Checker } from "@openzeppelin/contracts/utils/introspection/ERC165Checker.sol"; import { SafeERC20 } from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol"; import { SafeCall } from "src/libraries/SafeCall.sol"; -import { IOptimismMintableERC20 } from "src/universal/interfaces/IOptimismMintableERC20.sol"; -import { ILegacyMintableERC20 } from "src/universal/interfaces/ILegacyMintableERC20.sol"; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { OptimismMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; -import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol"; import { Constants } from "src/libraries/Constants.sol"; +// Interfaces +import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import { IOptimismMintableERC20 } from "interfaces/universal/IOptimismMintableERC20.sol"; +import { ILegacyMintableERC20 } from "interfaces/universal/ILegacyMintableERC20.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; + /// @custom:upgradeable /// @title StandardBridge /// @notice StandardBridge is a base contract for the L1 and L2 standard ERC20 bridges. It handles @@ -294,7 +298,7 @@ abstract contract StandardBridge is Initializable { "StandardBridge: wrong remote token for Optimism Mintable ERC20 local token" ); - OptimismMintableERC20(_localToken).mint(_to, _amount); + IOptimismMintableERC20(_localToken).mint(_to, _amount); } else { deposits[_localToken][_remoteToken] = deposits[_localToken][_remoteToken] - _amount; IERC20(_localToken).safeTransfer(_to, _amount); @@ -364,7 +368,7 @@ abstract contract StandardBridge is Initializable { "StandardBridge: wrong remote token for Optimism Mintable ERC20 local token" ); - OptimismMintableERC20(_localToken).burn(_from, _amount); + IOptimismMintableERC20(_localToken).burn(_from, _amount); } else { IERC20(_localToken).safeTransferFrom(_from, address(this), _amount); deposits[_localToken][_remoteToken] = deposits[_localToken][_remoteToken] + _amount; diff --git a/packages/contracts-bedrock/src/universal/StorageSetter.sol b/packages/contracts-bedrock/src/universal/StorageSetter.sol index 5bd53a75b366..9656ca21c5d2 100644 --- a/packages/contracts-bedrock/src/universal/StorageSetter.sol +++ b/packages/contracts-bedrock/src/universal/StorageSetter.sol @@ -1,9 +1,12 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +// Libraries import { Storage } from "src/libraries/Storage.sol"; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; + /// @title StorageSetter /// @notice A simple contract that allows setting arbitrary storage slots. /// WARNING: this contract is not safe to be called by untrusted parties. @@ -16,8 +19,8 @@ contract StorageSetter is ISemver { } /// @notice Semantic version. - /// @custom:semver 1.2.1-beta.2 - string public constant version = "1.2.1-beta.2"; + /// @custom:semver 1.2.1-beta.4 + string public constant version = "1.2.1-beta.4"; /// @notice Stores a bytes32 `_value` at `_slot`. 
Any storage slots that /// are packed should be set through this interface. diff --git a/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol b/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol new file mode 100644 index 000000000000..4a9304935f5c --- /dev/null +++ b/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol @@ -0,0 +1,1651 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.25; + +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; +import { IBigStepper } from "interfaces/dispute/IBigStepper.sol"; + +/// @title RISCV +/// @notice The RISCV contract emulates a single RISCV hart cycle statelessly, using memory proofs to verify the +/// instruction and optional memory access' inclusion in the memory merkle root provided in the trusted +/// prestate witness. +/// @dev https://github.com/ethereum-optimism/asterisc +contract RISCV is IBigStepper { + /// @notice The preimage oracle contract. + IPreimageOracle public oracle; + + /// @notice The version of the contract. + /// @custom:semver 1.2.0-rc.1 + string public constant version = "1.2.0-rc.1"; + + /// @param _oracle The preimage oracle contract. + constructor(IPreimageOracle _oracle) { + oracle = _oracle; + } + + /// @inheritdoc IBigStepper + function step(bytes calldata _stateData, bytes calldata _proof, bytes32 _localContext) public returns (bytes32) { + assembly { + function revertWithCode(code) { + mstore(0, code) + revert(0, 0x20) + } + + function preimageOraclePos() -> out { + // slot of preimageOraclePos field + out := 0 + } + + // + // Yul64 - functions to do 64 bit math - see yul64.go + // + function u64Mask() -> out { + // max uint64 + out := shr(192, not(0)) // 256-64 = 192 + } + + function u32Mask() -> out { + out := U64(shr(toU256(224), not(0))) // 256-32 = 224 + } + + function toU64(v) -> out { + out := v + } + + function shortToU64(v) -> out { + out := v + } + + function shortToU256(v) -> out { + out := v + } + + function longToU256(v) -> out { + out := v + } + + function u256ToU64(v) -> out { + out := and(v, U256(u64Mask())) + } + + function u64ToU256(v) -> out { + out := v + } + + function mask32Signed64(v) -> out { + out := signExtend64(and64(v, u32Mask()), toU64(31)) + } + + function u64Mod() -> out { + // 1 << 64 + out := shl(toU256(64), toU256(1)) + } + + function u64TopBit() -> out { + // 1 << 63 + out := shl(toU256(63), toU256(1)) + } + + function signExtend64(v, bit) -> out { + switch and(v, shl(bit, 1)) + case 0 { + // fill with zeroes, by masking + out := U64(and(U256(v), shr(sub(toU256(63), bit), U256(u64Mask())))) + } + default { + // fill with ones, by or-ing + out := U64(or(U256(v), shl(bit, shr(bit, U256(u64Mask()))))) + } + } + + function signExtend64To256(v) -> out { + switch and(U256(v), u64TopBit()) + case 0 { out := v } + default { out := or(shl(toU256(64), not(0)), v) } + } + + function add64(x, y) -> out { + out := U64(mod(add(U256(x), U256(y)), u64Mod())) + } + + function sub64(x, y) -> out { + out := U64(mod(sub(U256(x), U256(y)), u64Mod())) + } + + function mul64(x, y) -> out { + out := u256ToU64(mul(U256(x), U256(y))) + } + + function div64(x, y) -> out { + out := u256ToU64(div(U256(x), U256(y))) + } + + function sdiv64(x, y) -> out { + // note: signed overflow semantics are the same between Go and EVM assembly + out := u256ToU64(sdiv(signExtend64To256(x), signExtend64To256(y))) + } + + function mod64(x, y) -> out { + out := U64(mod(U256(x), U256(y))) + } + + function smod64(x, y) -> out { + out := u256ToU64(smod(signExtend64To256(x), 
signExtend64To256(y))) + } + + function not64(x) -> out { + out := u256ToU64(not(U256(x))) + } + + function lt64(x, y) -> out { + out := U64(lt(U256(x), U256(y))) + } + + function gt64(x, y) -> out { + out := U64(gt(U256(x), U256(y))) + } + + function slt64(x, y) -> out { + out := U64(slt(signExtend64To256(x), signExtend64To256(y))) + } + + function sgt64(x, y) -> out { + out := U64(sgt(signExtend64To256(x), signExtend64To256(y))) + } + + function eq64(x, y) -> out { + out := U64(eq(U256(x), U256(y))) + } + + function iszero64(x) -> out { + out := iszero(U256(x)) + } + + function and64(x, y) -> out { + out := U64(and(U256(x), U256(y))) + } + + function or64(x, y) -> out { + out := U64(or(U256(x), U256(y))) + } + + function xor64(x, y) -> out { + out := U64(xor(U256(x), U256(y))) + } + + function shl64(x, y) -> out { + out := u256ToU64(shl(U256(x), U256(y))) + } + + function shr64(x, y) -> out { + out := U64(shr(U256(x), U256(y))) + } + + function sar64(x, y) -> out { + out := u256ToU64(sar(U256(x), signExtend64To256(y))) + } + + // type casts, no-op in yul + function b32asBEWord(v) -> out { + out := v + } + function beWordAsB32(v) -> out { + out := v + } + function U64(v) -> out { + out := v + } + function U256(v) -> out { + out := v + } + function toU256(v) -> out { + out := v + } + + // + // Bit hacking util + // + function bitlen(x) -> n { + if gt(x, sub(shl(128, 1), 1)) { + x := shr(128, x) + n := add(n, 128) + } + if gt(x, sub(shl(64, 1), 1)) { + x := shr(64, x) + n := add(n, 64) + } + if gt(x, sub(shl(32, 1), 1)) { + x := shr(32, x) + n := add(n, 32) + } + if gt(x, sub(shl(16, 1), 1)) { + x := shr(16, x) + n := add(n, 16) + } + if gt(x, sub(shl(8, 1), 1)) { + x := shr(8, x) + n := add(n, 8) + } + if gt(x, sub(shl(4, 1), 1)) { + x := shr(4, x) + n := add(n, 4) + } + if gt(x, sub(shl(2, 1), 1)) { + x := shr(2, x) + n := add(n, 2) + } + if gt(x, sub(shl(1, 1), 1)) { + x := shr(1, x) + n := add(n, 1) + } + if gt(x, 0) { n := add(n, 1) } + } + + function endianSwap(x) -> out { + for { let i := 0 } lt(i, 32) { i := add(i, 1) } { + out := or(shl(8, out), and(x, 0xff)) + x := shr(8, x) + } + } + + // + // State layout + // + function stateSizeMemRoot() -> out { + out := 32 + } + function stateSizePreimageKey() -> out { + out := 32 + } + function stateSizePreimageOffset() -> out { + out := 8 + } + function stateSizePC() -> out { + out := 8 + } + function stateSizeExitCode() -> out { + out := 1 + } + function stateSizeExited() -> out { + out := 1 + } + function stateSizeStep() -> out { + out := 8 + } + function stateSizeHeap() -> out { + out := 8 + } + function stateSizeLoadReservation() -> out { + out := 8 + } + function stateSizeRegisters() -> out { + out := mul(8, 32) + } + + function stateOffsetMemRoot() -> out { + out := 0 + } + function stateOffsetPreimageKey() -> out { + out := add(stateOffsetMemRoot(), stateSizeMemRoot()) + } + function stateOffsetPreimageOffset() -> out { + out := add(stateOffsetPreimageKey(), stateSizePreimageKey()) + } + function stateOffsetPC() -> out { + out := add(stateOffsetPreimageOffset(), stateSizePreimageOffset()) + } + function stateOffsetExitCode() -> out { + out := add(stateOffsetPC(), stateSizePC()) + } + function stateOffsetExited() -> out { + out := add(stateOffsetExitCode(), stateSizeExitCode()) + } + function stateOffsetStep() -> out { + out := add(stateOffsetExited(), stateSizeExited()) + } + function stateOffsetHeap() -> out { + out := add(stateOffsetStep(), stateSizeStep()) + } + function stateOffsetLoadReservation() -> out { + out := 
add(stateOffsetHeap(), stateSizeHeap()) + } + function stateOffsetRegisters() -> out { + out := add(stateOffsetLoadReservation(), stateSizeLoadReservation()) + } + function stateSize() -> out { + out := add(stateOffsetRegisters(), stateSizeRegisters()) + } + + // + // Initial EVM memory / calldata checks + // + if iszero(eq(mload(0x40), 0x80)) { + // expected memory check: no allocated memory (start after scratch + free-mem-ptr + zero slot = 0x80) + revert(0, 0) + } + if iszero(eq(_stateData.offset, 132)) { + // 32*4+4 = 132 expected state data offset + revert(0, 0) + } + if iszero(eq(calldataload(sub(_stateData.offset, 32)), stateSize())) { + // user-provided state size must match expected state size + revert(0, 0) + } + function paddedLen(v) -> out { + // padded to multiple of 32 bytes + let padding := mod(sub(32, mod(v, 32)), 32) + out := add(v, padding) + } + if iszero(eq(_proof.offset, add(add(_stateData.offset, paddedLen(stateSize())), 32))) { + // 132+stateSize+padding+32 = expected proof offset + revert(0, 0) + } + function proofContentOffset() -> out { + // since we can't reference proof.offset in functions, blame Yul + // 132+362+(32-362%32)+32=548 + out := 548 + } + if iszero(eq(_proof.offset, proofContentOffset())) { revert(0, 0) } + + // + // State loading + // + function memStateOffset() -> out { + out := 0x80 + } + // copy the state calldata into memory, so we can mutate it + mstore(0x40, add(memStateOffset(), stateSize())) // alloc, update free mem pointer + calldatacopy(memStateOffset(), _stateData.offset, stateSize()) // same format in memory as in calldata + + // + // State access + // + function readState(offset, length) -> out { + out := mload(add(memStateOffset(), offset)) // note: the state variables are all big-endian encoded + out := shr(shl(3, sub(32, length)), out) // shift-right to right-align data and reduce to desired length + } + function writeState(offset, length, data) { + let memOffset := add(memStateOffset(), offset) + // left-aligned mask of length bytes + let mask := shl(shl(3, sub(32, length)), not(0)) + let prev := mload(memOffset) + // align data to left + data := shl(shl(3, sub(32, length)), data) + // mask out data from previous word, and apply new data + let result := or(and(prev, not(mask)), data) + mstore(memOffset, result) + } + + function getMemRoot() -> out { + out := readState(stateOffsetMemRoot(), stateSizeMemRoot()) + } + function setMemRoot(v) { + writeState(stateOffsetMemRoot(), stateSizeMemRoot(), v) + } + + function getPreimageKey() -> out { + out := readState(stateOffsetPreimageKey(), stateSizePreimageKey()) + } + function setPreimageKey(k) { + writeState(stateOffsetPreimageKey(), stateSizePreimageKey(), k) + } + + function getPreimageOffset() -> out { + out := readState(stateOffsetPreimageOffset(), stateSizePreimageOffset()) + } + function setPreimageOffset(v) { + writeState(stateOffsetPreimageOffset(), stateSizePreimageOffset(), v) + } + + function getPC() -> out { + out := readState(stateOffsetPC(), stateSizePC()) + } + function setPC(v) { + writeState(stateOffsetPC(), stateSizePC(), v) + } + + function getExited() -> out { + out := readState(stateOffsetExited(), stateSizeExited()) + } + function setExited() { + writeState(stateOffsetExited(), stateSizeExited(), 1) + } + + function getExitCode() -> out { + out := readState(stateOffsetExitCode(), stateSizeExitCode()) + } + function setExitCode(v) { + writeState(stateOffsetExitCode(), stateSizeExitCode(), v) + } + + function getStep() -> out { + out := readState(stateOffsetStep(), 
stateSizeStep()) + } + function setStep(v) { + writeState(stateOffsetStep(), stateSizeStep(), v) + } + + function getHeap() -> out { + out := readState(stateOffsetHeap(), stateSizeHeap()) + } + function setHeap(v) { + writeState(stateOffsetHeap(), stateSizeHeap(), v) + } + + function getLoadReservation() -> out { + out := readState(stateOffsetLoadReservation(), stateSizeLoadReservation()) + } + function setLoadReservation(addr) { + writeState(stateOffsetLoadReservation(), stateSizeLoadReservation(), addr) + } + + function getRegister(reg) -> out { + if gt64(reg, toU64(31)) { revertWithCode(0xbad4e9) } // cannot load invalid register + + let offset := add64(toU64(stateOffsetRegisters()), mul64(reg, toU64(8))) + out := readState(offset, 8) + } + function setRegister(reg, v) { + if iszero64(reg) { + // reg 0 must stay 0 + // v is a HINT, but no hints are specified by standard spec, or used by us. + leave + } + if gt64(reg, toU64(31)) { revertWithCode(0xbad4e9) } // unknown register + + let offset := add64(toU64(stateOffsetRegisters()), mul64(reg, toU64(8))) + writeState(offset, 8, v) + } + + // + // State output + // + function vmStatus() -> status { + switch getExited() + case 1 { + switch getExitCode() + case 0 { status := 0 } + // VMStatusValid + case 1 { status := 1 } + // VMStatusInvalid + default { status := 2 } // VMStatusPanic + } + default { status := 3 } // VMStatusUnfinished + } + + function computeStateHash() -> out { + // Log the RISC-V state for debugging + log0(memStateOffset(), stateSize()) + + out := keccak256(memStateOffset(), stateSize()) + out := or(and(not(shl(248, 0xFF)), out), shl(248, vmStatus())) + } + + // + // Parse - functions to parse RISC-V instructions - see parse.go + // + function parseImmTypeI(instr) -> out { + out := signExtend64(shr64(toU64(20), instr), toU64(11)) + } + + function parseImmTypeS(instr) -> out { + out := + signExtend64( + or64(shl64(toU64(5), shr64(toU64(25), instr)), and64(shr64(toU64(7), instr), toU64(0x1F))), + toU64(11) + ) + } + + function parseImmTypeB(instr) -> out { + out := + signExtend64( + or64( + or64( + shl64(toU64(1), and64(shr64(toU64(8), instr), toU64(0xF))), + shl64(toU64(5), and64(shr64(toU64(25), instr), toU64(0x3F))) + ), + or64( + shl64(toU64(11), and64(shr64(toU64(7), instr), toU64(1))), + shl64(toU64(12), shr64(toU64(31), instr)) + ) + ), + toU64(12) + ) + } + + function parseImmTypeU(instr) -> out { + out := signExtend64(shr64(toU64(12), instr), toU64(19)) + } + + function parseImmTypeJ(instr) -> out { + out := + signExtend64( + or64( + or64( + and64(shr64(toU64(21), instr), shortToU64(0x3FF)), // 10 bits for index 0:9 + shl64(toU64(10), and64(shr64(toU64(20), instr), toU64(1))) // 1 bit for index 10 + ), + or64( + shl64(toU64(11), and64(shr64(toU64(12), instr), toU64(0xFF))), // 8 bits for index 11:18 + shl64(toU64(19), shr64(toU64(31), instr)) // 1 bit for index 19 + ) + ), + toU64(19) + ) + } + + function parseOpcode(instr) -> out { + out := and64(instr, toU64(0x7F)) + } + + function parseRd(instr) -> out { + out := and64(shr64(toU64(7), instr), toU64(0x1F)) + } + + function parseFunct3(instr) -> out { + out := and64(shr64(toU64(12), instr), toU64(0x7)) + } + + function parseRs1(instr) -> out { + out := and64(shr64(toU64(15), instr), toU64(0x1F)) + } + + function parseRs2(instr) -> out { + out := and64(shr64(toU64(20), instr), toU64(0x1F)) + } + + function parseFunct7(instr) -> out { + out := shr64(toU64(25), instr) + } + + // + // Memory functions + // + function proofOffset(proofIndex) -> offset { + // proof size: 
64-5+1=60 (a 64-bit mem-address branch to 32 byte leaf, incl leaf itself), all 32 bytes + offset := mul64(mul64(toU64(proofIndex), toU64(60)), toU64(32)) + offset := add64(offset, proofContentOffset()) + } + + function hashPair(a, b) -> h { + mstore(0, a) + mstore(0x20, b) + h := keccak256(0, 0x40) + } + + function getMemoryB32(addr, proofIndex) -> out { + if and64(addr, toU64(31)) { + // quick addr alignment check + revertWithCode(0xbad10ad0) // addr not aligned with 32 bytes + } + let offset := proofOffset(proofIndex) + let leaf := calldataload(offset) + offset := add64(offset, toU64(32)) + + let path := shr64(toU64(5), addr) // 32 bytes of memory per leaf + let node := leaf // starting from the leaf node, work back up by combining with siblings, to reconstruct + // the root + for { let i := 0 } lt(i, sub(64, 5)) { i := add(i, 1) } { + let sibling := calldataload(offset) + offset := add64(offset, toU64(32)) + switch and64(shr64(toU64(i), path), toU64(1)) + case 0 { node := hashPair(node, sibling) } + case 1 { node := hashPair(sibling, node) } + } + let memRoot := getMemRoot() + if iszero(eq(b32asBEWord(node), b32asBEWord(memRoot))) { + // verify the root matches + revertWithCode(0xbadf00d1) // bad memory proof + } + out := leaf + } + + // warning: setMemoryB32 does not verify the proof, + // it assumes the same memory proof has been verified with getMemoryB32 + function setMemoryB32(addr, v, proofIndex) { + if and64(addr, toU64(31)) { revertWithCode(0xbad10ad0) } // addr not aligned with 32 bytes + + let offset := proofOffset(proofIndex) + let leaf := v + offset := add64(offset, toU64(32)) + let path := shr64(toU64(5), addr) // 32 bytes of memory per leaf + let node := leaf // starting from the leaf node, work back up by combining with siblings, to reconstruct + // the root + for { let i := 0 } lt(i, sub(64, 5)) { i := add(i, 1) } { + let sibling := calldataload(offset) + offset := add64(offset, toU64(32)) + + switch and64(shr64(toU64(i), path), toU64(1)) + case 0 { node := hashPair(node, sibling) } + case 1 { node := hashPair(sibling, node) } + } + setMemRoot(node) // store new memRoot + } + + // load unaligned, optionally signed, little-endian, integer of 1 ... 
8 bytes from memory + function loadMem(addr, size, signed, proofIndexL, proofIndexR) -> out { + if gt(size, 8) { revertWithCode(0xbad512e0) } // cannot load more than 8 bytes + // load/verify left part + let leftAddr := and64(addr, not64(toU64(31))) + let left := b32asBEWord(getMemoryB32(leftAddr, proofIndexL)) + let alignment := sub64(addr, leftAddr) + + let right := 0 + let rightAddr := and64(add64(addr, sub64(size, toU64(1))), not64(toU64(31))) + let leftShamt := sub64(sub64(toU64(32), alignment), size) + let rightShamt := toU64(0) + if iszero64(eq64(leftAddr, rightAddr)) { + // if unaligned, use second proof for the right part + if eq(proofIndexR, 0xff) { revertWithCode(0xbad22220) } // unexpected need for right-side proof in + // loadMem + // load/verify right part + right := b32asBEWord(getMemoryB32(rightAddr, proofIndexR)) + // left content is aligned to right of 32 bytes + leftShamt := toU64(0) + rightShamt := sub64(sub64(toU64(64), alignment), size) + } + + let addr_ := addr + let size_ := size + // left: prepare for byte-taking by right-aligning + left := shr(u64ToU256(shl64(toU64(3), leftShamt)), left) + // right: right-align for byte-taking by right-aligning + right := shr(u64ToU256(shl64(toU64(3), rightShamt)), right) + // loop: + for { let i := 0 } lt(i, size_) { i := add(i, 1) } { + // translate to reverse byte lookup, since we are reading little-endian memory, and need the highest + // byte first. + // effAddr := (addr + size - 1 - i) &^ 31 + let effAddr := and64(sub64(sub64(add64(addr_, size_), toU64(1)), toU64(i)), not64(toU64(31))) + // take a byte from either left or right, depending on the effective address + let b := toU256(0) + switch eq64(effAddr, leftAddr) + case 1 { + b := and(left, toU256(0xff)) + left := shr(toU256(8), left) + } + case 0 { + b := and(right, toU256(0xff)) + right := shr(toU256(8), right) + } + // append it to the output + out := or64(shl64(toU64(8), out), u256ToU64(b)) + } + + if signed { + let signBitShift := sub64(shl64(toU64(3), size_), toU64(1)) + out := signExtend64(out, signBitShift) + } + } + + // Splits the value into a left and a right part, each with a mask (identify data) and a patch (diff + // content). 
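+            // Worked example (illustrative): a store of size 8 at an address whose alignment is 28
+            // spans two leaves. The value's four low bytes land in the last four byte positions of
+            // the left 32-byte word and its four high bytes in the first four byte positions of the
+            // right word; leftMask/rightMask cover exactly those positions, so every other byte of
+            // both words is preserved when the patches are applied.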
+ function leftAndRight(alignment, size, value) -> leftMask, rightMask, leftPatch, rightPatch { + let start := alignment + let end := add64(alignment, size) + for { let i := 0 } lt(i, 64) { i := add(i, 1) } { + let index := toU64(i) + let leftSide := lt64(index, toU64(32)) + switch leftSide + case 1 { + leftPatch := shl(8, leftPatch) + leftMask := shl(8, leftMask) + } + case 0 { + rightPatch := shl(8, rightPatch) + rightMask := shl(8, rightMask) + } + if and64(eq64(lt64(index, start), toU64(0)), lt64(index, end)) { + // if alignment <= i < alignment+size + let b := and(shr(u64ToU256(shl64(toU64(3), sub64(index, alignment))), value), toU256(0xff)) + switch leftSide + case 1 { + leftPatch := or(leftPatch, b) + leftMask := or(leftMask, toU256(0xff)) + } + case 0 { + rightPatch := or(rightPatch, b) + rightMask := or(rightMask, toU256(0xff)) + } + } + } + } + + function storeMemUnaligned(addr, size, value, proofIndexL, proofIndexR) { + if gt(size, 32) { revertWithCode(0xbad512e1) } // cannot store more than 32 bytes + + let leftAddr := and64(addr, not64(toU64(31))) + let rightAddr := and64(add64(addr, sub64(size, toU64(1))), not64(toU64(31))) + let alignment := sub64(addr, leftAddr) + let leftMask, rightMask, leftPatch, rightPatch := leftAndRight(alignment, size, value) + + // load the left base + let left := b32asBEWord(getMemoryB32(leftAddr, proofIndexL)) + // apply the left patch + left := or(and(left, not(leftMask)), leftPatch) + // write the left + setMemoryB32(leftAddr, beWordAsB32(left), proofIndexL) + + // if aligned: nothing more to do here + if eq64(leftAddr, rightAddr) { leave } + if eq(proofIndexR, 0xff) { revertWithCode(0xbad22221) } // unexpected need for right-side proof in + // storeMem + // load the right base (with updated mem root) + let right := b32asBEWord(getMemoryB32(rightAddr, proofIndexR)) + // apply the right patch + right := or(and(right, not(rightMask)), rightPatch) + // write the right (with updated mem root) + setMemoryB32(rightAddr, beWordAsB32(right), proofIndexR) + } + + function storeMem(addr, size, value, proofIndexL, proofIndexR) { + storeMemUnaligned(addr, size, u64ToU256(value), proofIndexL, proofIndexR) + } + + // + // Preimage oracle interactions + // + function writePreimageKey(addr, count) -> out { + // adjust count down, so we only have to read a single 32 byte leaf of memory + let alignment := and64(addr, toU64(31)) + let maxData := sub64(toU64(32), alignment) + if gt64(count, maxData) { count := maxData } + + let dat := b32asBEWord(getMemoryB32(sub64(addr, alignment), 1)) + // shift out leading bits + dat := shl(u64ToU256(shl64(toU64(3), alignment)), dat) + // shift to right end, remove trailing bits + dat := shr(u64ToU256(shl64(toU64(3), sub64(toU64(32), count))), dat) + + let bits := shl(toU256(3), u64ToU256(count)) + + let preImageKey := getPreimageKey() + + // Append to key content by bit-shifting + let key := b32asBEWord(preImageKey) + key := shl(bits, key) + key := or(key, dat) + + // We reset the pre-image value offset back to 0 (the right part of the merkle pair) + setPreimageKey(beWordAsB32(key)) + setPreimageOffset(toU64(0)) + out := count + } + + function readPreimagePart(key, offset) -> dat, datlen { + let addr := sload(preimageOraclePos()) // calling Oracle.readPreimage(bytes32,uint256) + let memPtr := mload(0x40) // get pointer to free memory for preimage interactions + mstore(memPtr, shl(224, 0xe03110e1)) // (32-4)*8=224: right-pad the function selector, and then store it + // as prefix + mstore(add(memPtr, 0x04), key) + 
mstore(add(memPtr, 0x24), offset) + let res := call(gas(), addr, 0, memPtr, 0x44, 0x00, 0x40) // output into scratch space + if res { + // 1 on success + dat := mload(0x00) + datlen := mload(0x20) + leave + } + revertWithCode(0xbadf00d0) + } + + // Original implementation is at src/cannon/PreimageKeyLib.sol + // but it cannot be used because this is inside assembly block + function localize(preImageKey, localContext_) -> localizedKey { + // Grab the current free memory pointer to restore later. + let ptr := mload(0x40) + // Store the local data key and caller next to each other in memory for hashing. + mstore(0, preImageKey) + mstore(0x20, caller()) + mstore(0x40, localContext_) + // Localize the key with the above `localize` operation. + localizedKey := or(and(keccak256(0, 0x60), not(shl(248, 0xFF))), shl(248, 1)) + // Restore the free memory pointer. + mstore(0x40, ptr) + } + + function readPreimageValue(addr, count, localContext_) -> out { + let preImageKey := getPreimageKey() + let offset := getPreimageOffset() + // If the preimage key is a local key, localize it in the context of the caller. + let preImageKeyPrefix := shr(248, preImageKey) // 256-8=248 + if eq(preImageKeyPrefix, 1) { preImageKey := localize(preImageKey, localContext_) } + // make call to pre-image oracle contract + let pdatB32, pdatlen := readPreimagePart(preImageKey, offset) + if iszero64(pdatlen) { + // EOF + out := toU64(0) + leave + } + let alignment := and64(addr, toU64(31)) // how many bytes addr is offset from being left-aligned + let maxData := sub64(toU64(32), alignment) // higher alignment leaves less room for data this step + if gt64(count, maxData) { count := maxData } + if gt64(count, pdatlen) { + // cannot read more than pdatlen + count := pdatlen + } + + let addr_ := addr + let count_ := count + let bits := shl64(toU64(3), sub64(toU64(32), count_)) // 32-count, in bits + let mask := not(sub(shl(u64ToU256(bits), toU256(1)), toU256(1))) // left-aligned mask for count bytes + let alignmentBits := u64ToU256(shl64(toU64(3), alignment)) + mask := shr(alignmentBits, mask) // mask of count bytes, shifted by alignment + let pdat := shr(alignmentBits, b32asBEWord(pdatB32)) // pdat, shifted by alignment + + // update pre-image reader with updated offset + let newOffset := add64(offset, count_) + setPreimageOffset(newOffset) + + out := count_ + + let node := getMemoryB32(sub64(addr_, alignment), 1) + let dat := and(b32asBEWord(node), not(mask)) // keep old bytes outside of mask + dat := or(dat, and(pdat, mask)) // fill with bytes from pdat + setMemoryB32(sub64(addr_, alignment), beWordAsB32(dat), 1) + } + + // + // Syscall handling + // + function sysCall(localContext_) { + let a7 := getRegister(toU64(17)) + switch a7 + case 93 { + // exit the calling thread. No multi-thread support yet, so just exit. + let a0 := getRegister(toU64(10)) + setExitCode(and(a0, 0xff)) + setExited() + // program stops here, no need to change registers. + } + case 94 { + // exit-group + let a0 := getRegister(toU64(10)) + setExitCode(and(a0, 0xff)) + setExited() + } + case 214 { + // brk + // Go sys_linux_riscv64 runtime will only ever call brk(NULL), i.e. first argument (register a0) set + // to 0. 
+ + // brk(0) changes nothing about the memory, and returns the current page break + let v := shl64(toU64(30), toU64(1)) // set program break at 1 GiB + setRegister(toU64(10), v) + setRegister(toU64(11), toU64(0)) // no error + } + case 222 { + // mmap + // A0 = addr (hint) + let addr := getRegister(toU64(10)) + // A1 = n (length) + let length := getRegister(toU64(11)) + // A2 = prot (memory protection type, can ignore) + // A3 = flags (shared with other process and or written back to file) + let flags := getRegister(toU64(13)) + // A4 = fd (file descriptor, can ignore because we support anon memory only) + let fd := getRegister(toU64(14)) + // A5 = offset (offset in file, we don't support any non-anon memory, so we can ignore this) + + let errCode := 0 + // ensure MAP_ANONYMOUS is set and fd == -1 + switch or(iszero(and(flags, 0x20)), not(eq(fd, u64Mask()))) + case 1 { + addr := u64Mask() + errCode := toU64(0x4d) + } + default { + switch addr + case 0 { + // No hint, allocate it ourselves, by as much as the requested length. + // Increase the length to align it with desired page size if necessary. + let align := and64(length, shortToU64(4095)) + if align { length := add64(length, sub64(shortToU64(4096), align)) } + let prevHeap := getHeap() + addr := prevHeap + setHeap(add64(prevHeap, length)) // increment heap with length + } + default { + // allow hinted memory address (leave it in A0 as return argument) + } + } + + setRegister(toU64(10), addr) + setRegister(toU64(11), errCode) + } + case 63 { + // read + let fd := getRegister(toU64(10)) // A0 = fd + let addr := getRegister(toU64(11)) // A1 = *buf addr + let count := getRegister(toU64(12)) // A2 = count + let n := 0 + let errCode := 0 + switch fd + case 0 { + // stdin + n := toU64(0) // never read anything from stdin + errCode := toU64(0) + } + case 3 { + // hint-read + // say we read it all, to continue execution after reading the hint-write ack response + n := count + errCode := toU64(0) + } + case 5 { + // preimage read + n := readPreimageValue(addr, count, localContext_) + errCode := toU64(0) + } + default { + n := u64Mask() // -1 (reading error) + errCode := toU64(0x4d) // EBADF + } + setRegister(toU64(10), n) + setRegister(toU64(11), errCode) + } + case 64 { + // write + let fd := getRegister(toU64(10)) // A0 = fd + let addr := getRegister(toU64(11)) // A1 = *buf addr + let count := getRegister(toU64(12)) // A2 = count + let n := 0 + let errCode := 0 + switch fd + case 1 { + // stdout + n := count // write completes fully in single instruction step + errCode := toU64(0) + } + case 2 { + // stderr + n := count // write completes fully in single instruction step + errCode := toU64(0) + } + case 4 { + // hint-write + n := count + errCode := toU64(0) + } + case 6 { + // pre-image key-write + n := writePreimageKey(addr, count) + errCode := toU64(0) // no error + } + default { + // any other file, including (3) hint read (5) preimage read + n := u64Mask() // -1 (writing error) + errCode := toU64(0x4d) // EBADF + } + setRegister(toU64(10), n) + setRegister(toU64(11), errCode) + } + case 25 { + // fcntl - file descriptor manipulation / info lookup + let fd := getRegister(toU64(10)) // A0 = fd + let cmd := getRegister(toU64(11)) // A1 = cmd + let out := 0 + let errCode := 0 + switch cmd + case 0x1 { + // F_GETFD: get file descriptor flags + switch fd + case 0 { + // stdin + out := toU64(0) // no flag set + } + case 1 { + // stdout + out := toU64(0) // no flag set + } + case 2 { + // stderr + out := toU64(0) // no flag set + } + case 3 { + 
// hint-read + out := toU64(0) // no flag set + } + case 4 { + // hint-write + out := toU64(0) // no flag set + } + case 5 { + // pre-image read + out := toU64(0) // no flag set + } + case 6 { + // pre-image write + out := toU64(0) // no flag set + } + default { + out := u64Mask() + errCode := toU64(0x4d) //EBADF + } + } + case 0x3 { + // F_GETFL: get file descriptor flags + switch fd + case 0 { + // stdin + out := toU64(0) // O_RDONLY + } + case 1 { + // stdout + out := toU64(1) // O_WRONLY + } + case 2 { + // stderr + out := toU64(1) // O_WRONLY + } + case 3 { + // hint-read + out := toU64(0) // O_RDONLY + } + case 4 { + // hint-write + out := toU64(1) // O_WRONLY + } + case 5 { + // pre-image read + out := toU64(0) // O_RDONLY + } + case 6 { + // pre-image write + out := toU64(1) // O_WRONLY + } + default { + out := u64Mask() + errCode := toU64(0x4d) // EBADF + } + } + default { + // no other commands: don't allow changing flags, duplicating FDs, etc. + out := u64Mask() + errCode := toU64(0x16) // EINVAL (cmd not recognized by this kernel) + } + setRegister(toU64(10), out) + setRegister(toU64(11), errCode) // EBADF + } + case 56 { + // openat - the Go linux runtime will try to open optional /sys/kernel files for performance hints + setRegister(toU64(10), u64Mask()) + setRegister(toU64(11), toU64(0xd)) // EACCES - no access allowed + } + case 113 { + // clock_gettime + let addr := getRegister(toU64(11)) // addr of timespec struct + // write 1337s + 42ns as time + let value := or(shortToU256(1337), shl(shortToU256(64), toU256(42))) + storeMemUnaligned(addr, toU64(16), value, 1, 2) + setRegister(toU64(10), toU64(0)) + setRegister(toU64(11), toU64(0)) + } + case 220 { + // clone - not supported + setRegister(toU64(10), toU64(1)) + setRegister(toU64(11), toU64(0)) + } + case 163 { + // getrlimit + let res := getRegister(toU64(10)) + let addr := getRegister(toU64(11)) + switch res + case 0x7 { + // RLIMIT_NOFILE + // first 8 bytes: soft limit. 1024 file handles max open + // second 8 bytes: hard limit + storeMemUnaligned( + addr, toU64(16), or(shortToU256(1024), shl(toU256(64), shortToU256(1024))), 1, 2 + ) + setRegister(toU64(10), toU64(0)) + setRegister(toU64(11), toU64(0)) + } + default { revertWithCode(0xf0012) } // unrecognized resource limit lookup + } + case 261 { + // prlimit64 -- unsupported, we have getrlimit, is prlimit64 even called? 
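+                    // Note: reverting here (and for futex/nanosleep below), rather than taking the
+                    // no-op default path used for other unrecognized calls, presumably surfaces an
+                    // unexpected prlimit64 call instead of silently mis-emulating it; getrlimit
+                    // above is the supported path for resource-limit lookups.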
+ revertWithCode(0xf001ca11) // unsupported system call + } + case 422 { + // futex - not supported, for now + revertWithCode(0xf001ca11) // unsupported system call + } + case 101 { + // nanosleep - not supported, for now + revertWithCode(0xf001ca11) // unsupported system call + } + default { + // Ignore(no-op) unsupported system calls + setRegister(toU64(10), toU64(0)) + setRegister(toU64(11), toU64(0)) + } + } + + // + // Instruction execution + // + if getExited() { + // early exit if we can + mstore(0, computeStateHash()) + return(0, 0x20) + } + setStep(add64(getStep(), toU64(1))) + + let _pc := getPC() + let instr := loadMem(_pc, toU64(4), false, 0, 0xff) // raw instruction + + // these fields are ignored if not applicable to the instruction type / opcode + let opcode := parseOpcode(instr) + let rd := parseRd(instr) // destination register index + let funct3 := parseFunct3(instr) + let rs1 := parseRs1(instr) // source register 1 index + let rs2 := parseRs2(instr) // source register 2 index + let funct7 := parseFunct7(instr) + + switch opcode + case 0x03 { + let pc_ := _pc + // 000_0011: memory loading + // LB, LH, LW, LD, LBU, LHU, LWU + let imm := parseImmTypeI(instr) + let signed := iszero64(and64(funct3, toU64(4))) // 4 = 100 -> bitflag + let size := shl64(and64(funct3, toU64(3)), toU64(1)) // 3 = 11 -> 1, 2, 4, 8 bytes size + let rs1Value := getRegister(rs1) + let memIndex := add64(rs1Value, signExtend64(imm, toU64(11))) + let rdValue := loadMem(memIndex, size, signed, 1, 2) + setRegister(rd, rdValue) + setPC(add64(pc_, toU64(4))) + } + case 0x23 { + let pc_ := _pc + // 010_0011: memory storing + // SB, SH, SW, SD + let imm := parseImmTypeS(instr) + let size := shl64(funct3, toU64(1)) + let value := getRegister(rs2) + let rs1Value := getRegister(rs1) + let memIndex := add64(rs1Value, signExtend64(imm, toU64(11))) + storeMem(memIndex, size, value, 1, 2) + setPC(add64(pc_, toU64(4))) + } + case 0x63 { + // 110_0011: branching + let rs1Value := getRegister(rs1) + let rs2Value := getRegister(rs2) + let branchHit := toU64(0) + switch funct3 + case 0 { + // 000 = BEQ + branchHit := eq64(rs1Value, rs2Value) + } + case 1 { + // 001 = BNE + branchHit := and64(not64(eq64(rs1Value, rs2Value)), toU64(1)) + } + case 4 { + // 100 = BLT + branchHit := slt64(rs1Value, rs2Value) + } + case 5 { + // 101 = BGE + branchHit := and64(not64(slt64(rs1Value, rs2Value)), toU64(1)) + } + case 6 { + // 110 = BLTU + branchHit := lt64(rs1Value, rs2Value) + } + case 7 { + // 111 := BGEU + branchHit := and64(not64(lt64(rs1Value, rs2Value)), toU64(1)) + } + switch branchHit + case 0 { _pc := add64(_pc, toU64(4)) } + default { + let imm := parseImmTypeB(instr) + // imm12 is a signed offset, in multiples of 2 bytes. + // So it's really 13 bits with a hardcoded 0 bit. 
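That is, the B-type immediate is a 13-bit signed quantity whose lowest bit is implicitly zero, so a taken branch can move the PC by roughly ±4 KiB in 2-byte steps. A small Go illustration of sign-extending such a 13-bit offset (illustrative only; the exact bit shuffling lives in parseImmTypeB, which is not shown here):

```go
package main

import "fmt"

// signExtend13 treats the low 13 bits of x as a signed branch offset
// (bit 0 is always zero in a real B-type encoding) and sign-extends it.
func signExtend13(x uint64) int64 {
	return int64(x<<51) >> 51
}

func main() {
	fmt.Println(signExtend13(0x0008)) // +8
	fmt.Println(signExtend13(0x1FF8)) // -8
	fmt.Println(signExtend13(0x0FFE)) // +4094, the largest forward offset
}
```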
+ _pc := add64(_pc, imm) + } + // not like the other opcodes: nothing to write to rd register, and PC has already changed + setPC(_pc) + } + case 0x13 { + // 001_0011: immediate arithmetic and logic + let rs1Value := getRegister(rs1) + let imm := parseImmTypeI(instr) + let rdValue := 0 + switch funct3 + case 0 { + // 000 = ADDI + rdValue := add64(rs1Value, imm) + } + case 1 { + // 001 = SLLI + rdValue := shl64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode + } + case 2 { + // 010 = SLTI + rdValue := slt64(rs1Value, imm) + } + case 3 { + // 011 = SLTIU + rdValue := lt64(rs1Value, imm) + } + case 4 { + // 100 = XORI + rdValue := xor64(rs1Value, imm) + } + case 5 { + // 101 = SR~ + switch shr64(toU64(6), imm) + // in rv64i the top 6 bits select the shift type + case 0x00 { + // 000000 = SRLI + rdValue := shr64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode + } + case 0x10 { + // 010000 = SRAI + rdValue := sar64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode + } + } + case 6 { + // 110 = ORI + rdValue := or64(rs1Value, imm) + } + case 7 { + // 111 = ANDI + rdValue := and64(rs1Value, imm) + } + setRegister(rd, rdValue) + setPC(add64(_pc, toU64(4))) + } + case 0x1B { + // 001_1011: immediate arithmetic and logic signed 32 bit + let rs1Value := getRegister(rs1) + let imm := parseImmTypeI(instr) + let rdValue := 0 + switch funct3 + case 0 { + // 000 = ADDIW + rdValue := mask32Signed64(add64(rs1Value, imm)) + } + case 1 { + // 001 = SLLIW + rdValue := mask32Signed64(shl64(and64(imm, toU64(0x1F)), rs1Value)) + } + case 5 { + // 101 = SR~ + let shamt := and64(imm, toU64(0x1F)) + switch shr64(toU64(5), imm) + // top 7 bits select the shift type + case 0x00 { + // 0000000 = SRLIW + rdValue := signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), toU64(31)) + } + case 0x20 { + // 0100000 = SRAIW + rdValue := signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(toU64(31), shamt)) + } + } + setRegister(rd, rdValue) + setPC(add64(_pc, toU64(4))) + } + case 0x33 { + // 011_0011: register arithmetic and logic + let rs1Value := getRegister(rs1) + let rs2Value := getRegister(rs2) + let rdValue := 0 + switch funct7 + case 1 { + // RV M extension + switch funct3 + case 0 { + // 000 = MUL: signed x signed + rdValue := mul64(rs1Value, rs2Value) + } + case 1 { + // 001 = MULH: upper bits of signed x signed + rdValue := + u256ToU64(shr(toU256(64), mul(signExtend64To256(rs1Value), signExtend64To256(rs2Value)))) + } + case 2 { + // 010 = MULHSU: upper bits of signed x unsigned + rdValue := u256ToU64(shr(toU256(64), mul(signExtend64To256(rs1Value), u64ToU256(rs2Value)))) + } + case 3 { + // 011 = MULHU: upper bits of unsigned x unsigned + rdValue := u256ToU64(shr(toU256(64), mul(u64ToU256(rs1Value), u64ToU256(rs2Value)))) + } + case 4 { + // 100 = DIV + switch rs2Value + case 0 { rdValue := u64Mask() } + default { rdValue := sdiv64(rs1Value, rs2Value) } + } + case 5 { + // 101 = DIVU + switch rs2Value + case 0 { rdValue := u64Mask() } + default { rdValue := div64(rs1Value, rs2Value) } + } + case 6 { + // 110 = REM + switch rs2Value + case 0 { rdValue := rs1Value } + default { rdValue := smod64(rs1Value, rs2Value) } + } + case 7 { + // 111 = REMU + switch rs2Value + case 0 { rdValue := rs1Value } + default { rdValue := mod64(rs1Value, rs2Value) } + } + } + default { + switch funct3 + case 0 { + // 000 = ADD/SUB + switch funct7 + case 0x00 { + // 0000000 = ADD + rdValue := add64(rs1Value, rs2Value) + } + case 0x20 { + // 0100000 = SUB + rdValue := 
sub64(rs1Value, rs2Value) + } + } + case 1 { + // 001 = SLL + rdValue := shl64(and64(rs2Value, toU64(0x3F)), rs1Value) // only the low 6 bits are consider in + // RV6VI + } + case 2 { + // 010 = SLT + rdValue := slt64(rs1Value, rs2Value) + } + case 3 { + // 011 = SLTU + rdValue := lt64(rs1Value, rs2Value) + } + case 4 { + // 100 = XOR + rdValue := xor64(rs1Value, rs2Value) + } + case 5 { + // 101 = SR~ + switch funct7 + case 0x00 { + // 0000000 = SRL + rdValue := shr64(and64(rs2Value, toU64(0x3F)), rs1Value) // logical: fill with zeroes + } + case 0x20 { + // 0100000 = SRA + rdValue := sar64(and64(rs2Value, toU64(0x3F)), rs1Value) // arithmetic: sign bit is extended + } + } + case 6 { + // 110 = OR + rdValue := or64(rs1Value, rs2Value) + } + case 7 { + // 111 = AND + rdValue := and64(rs1Value, rs2Value) + } + } + setRegister(rd, rdValue) + setPC(add64(_pc, toU64(4))) + } + case 0x3B { + // 011_1011: register arithmetic and logic in 32 bits + let rs1Value := getRegister(rs1) + let rs2Value := getRegister(rs2) + let rdValue := 0 + switch funct7 + case 1 { + // RV M extension + switch funct3 + case 0 { + // 000 = MULW + rdValue := mask32Signed64(mul64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) + } + case 4 { + // 100 = DIVW + switch rs2Value + case 0 { rdValue := u64Mask() } + default { + rdValue := mask32Signed64(sdiv64(mask32Signed64(rs1Value), mask32Signed64(rs2Value))) + } + } + case 5 { + // 101 = DIVUW + switch rs2Value + case 0 { rdValue := u64Mask() } + default { + rdValue := mask32Signed64(div64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) + } + } + case 6 { + // 110 = REMW + switch rs2Value + case 0 { rdValue := mask32Signed64(rs1Value) } + default { + rdValue := mask32Signed64(smod64(mask32Signed64(rs1Value), mask32Signed64(rs2Value))) + } + } + case 7 { + // 111 = REMUW + switch rs2Value + case 0 { rdValue := mask32Signed64(rs1Value) } + default { + rdValue := mask32Signed64(mod64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) + } + } + } + default { + switch funct3 + case 0 { + // 000 = ADDW/SUBW + switch funct7 + case 0x00 { + // 0000000 = ADDW + rdValue := mask32Signed64(add64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) + } + case 0x20 { + // 0100000 = SUBW + rdValue := mask32Signed64(sub64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) + } + } + case 1 { + // 001 = SLLW + rdValue := mask32Signed64(shl64(and64(rs2Value, toU64(0x1F)), rs1Value)) + } + case 5 { + // 101 = SR~ + let shamt := and64(rs2Value, toU64(0x1F)) + switch funct7 + case 0x00 { + // 0000000 = SRLW + rdValue := signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), toU64(31)) + } + case 0x20 { + // 0100000 = SRAW + rdValue := signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(toU64(31), shamt)) + } + } + } + setRegister(rd, rdValue) + setPC(add64(_pc, toU64(4))) + } + case 0x37 { + // 011_0111: LUI = Load upper immediate + let imm := parseImmTypeU(instr) + let rdValue := shl64(toU64(12), imm) + setRegister(rd, rdValue) + setPC(add64(_pc, toU64(4))) + } + case 0x17 { + // 001_0111: AUIPC = Add upper immediate to PC + let imm := parseImmTypeU(instr) + let rdValue := add64(_pc, signExtend64(shl64(toU64(12), imm), toU64(31))) + setRegister(rd, rdValue) + setPC(add64(_pc, toU64(4))) + } + case 0x6F { + // 110_1111: JAL = Jump and link + let imm := parseImmTypeJ(instr) + let rdValue := add64(_pc, toU64(4)) + setRegister(rd, rdValue) + setPC(add64(_pc, signExtend64(shl64(toU64(1), imm), toU64(20)))) // signed offset in multiples of 2 + // bytes 
(last bit is there, but ignored) + } + case 0x67 { + // 110_0111: JALR = Jump and link register + let rs1Value := getRegister(rs1) + let imm := parseImmTypeI(instr) + let rdValue := add64(_pc, toU64(4)) + setRegister(rd, rdValue) + setPC(and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))) // least + // significant bit is set to 0 + } + case 0x73 { + // 111_0011: environment things + switch funct3 + case 0 { + // 000 = ECALL/EBREAK + switch shr64(toU64(20), instr) + // I-type, top 12 bits + case 0 { + // imm12 = 000000000000 ECALL + sysCall(_localContext) + setPC(add64(_pc, toU64(4))) + } + default { + // imm12 = 000000000001 EBREAK + setPC(add64(_pc, toU64(4))) // ignore breakpoint + } + } + default { + // CSR instructions + setRegister(rd, toU64(0)) // ignore CSR instructions + setPC(add64(_pc, toU64(4))) + } + } + case 0x2F { + // 010_1111: RV{32,64}A and RV{32,64}A atomic operations extension + // acquire and release bits: + // aq := and64(shr64(toU64(1), funct7), toU64(1)) + // rl := and64(funct7, toU64(1)) + // if none set: unordered + // if aq is set: no following mem ops observed before acquire mem op + // if rl is set: release mem op not observed before earlier mem ops + // if both set: sequentially consistent + // These are no-op here because there is no pipeline of mem ops to acquire/release. + + // 0b010 == RV32A W variants + // 0b011 == RV64A D variants + let size := shl64(funct3, toU64(1)) + if or(lt64(size, toU64(4)), gt64(size, toU64(8))) { revertWithCode(0xbada70) } // bad AMO size + + let addr := getRegister(rs1) + if and64(addr, toU64(3)) { + // quick addr alignment check + revertWithCode(0xbad10ad0) // addr not aligned with 4 bytes + } + + let op := shr64(toU64(2), funct7) + switch op + case 0x2 { + // 00010 = LR = Load Reserved + let v := loadMem(addr, size, true, 1, 2) + setRegister(rd, v) + setLoadReservation(addr) + } + case 0x3 { + // 00011 = SC = Store Conditional + let rdValue := toU64(1) + if eq64(addr, getLoadReservation()) { + let rs2Value := getRegister(rs2) + storeMem(addr, size, rs2Value, 1, 2) + rdValue := toU64(0) + } + setRegister(rd, rdValue) + setLoadReservation(toU64(0)) + } + default { + // AMO: Atomic Memory Operation + let rs2Value := getRegister(rs2) + if eq64(size, toU64(4)) { rs2Value := mask32Signed64(rs2Value) } + let value := rs2Value + let v := loadMem(addr, size, true, 1, 2) + let rdValue := v + switch op + case 0x0 { + // 00000 = AMOADD = add + v := add64(v, value) + } + case 0x1 { + // 00001 = AMOSWAP + v := value + } + case 0x4 { + // 00100 = AMOXOR = xor + v := xor64(v, value) + } + case 0x8 { + // 01000 = AMOOR = or + v := or64(v, value) + } + case 0xc { + // 01100 = AMOAND = and + v := and64(v, value) + } + case 0x10 { + // 10000 = AMOMIN = min signed + if slt64(value, v) { v := value } + } + case 0x14 { + // 10100 = AMOMAX = max signed + if sgt64(value, v) { v := value } + } + case 0x18 { + // 11000 = AMOMINU = min unsigned + if lt64(value, v) { v := value } + } + case 0x1c { + // 11100 = AMOMAXU = max unsigned + if gt64(value, v) { v := value } + } + default { revertWithCode(0xf001a70) } // unknown atomic operation + + storeMem(addr, size, v, 1, 3) // after overwriting 1, proof 2 is no longer valid + setRegister(rd, rdValue) + } + setPC(add64(_pc, toU64(4))) + } + case 0x0F { + // 000_1111: fence + // Used to impose additional ordering constraints; flushing the mem operation pipeline. + // This VM doesn't have a pipeline, nor additional harts, so this is a no-op. 
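The atomic block above always places the pre-existing memory word in rd and writes the combined value back, using a fresh memory-proof index for the store because the earlier one is invalidated (per the comment on that line). A compact Go sketch of the value combination keyed by op = funct7 >> 2, with signed comparisons for MIN/MAX and unsigned for MINU/MAXU, mirroring the cases above (a sketch of the semantics only, not the asterisc implementation):

```go
package main

import "fmt"

// amo applies the atomic-memory-operation selected by op (funct7 >> 2 above)
// to the old memory value and rs2, returning the value to write back.
// The old value itself is what ends up in rd.
func amo(op, old, rs2 uint64) uint64 {
	switch op {
	case 0x00: // AMOADD
		return old + rs2
	case 0x01: // AMOSWAP
		return rs2
	case 0x04: // AMOXOR
		return old ^ rs2
	case 0x08: // AMOOR
		return old | rs2
	case 0x0c: // AMOAND
		return old & rs2
	case 0x10: // AMOMIN (signed)
		if int64(rs2) < int64(old) {
			return rs2
		}
		return old
	case 0x14: // AMOMAX (signed)
		if int64(rs2) > int64(old) {
			return rs2
		}
		return old
	case 0x18: // AMOMINU (unsigned)
		if rs2 < old {
			return rs2
		}
		return old
	case 0x1c: // AMOMAXU (unsigned)
		if rs2 > old {
			return rs2
		}
		return old
	}
	panic("unknown atomic operation")
}

func main() {
	fmt.Println(amo(0x00, 5, 7))                 // AMOADD: 12
	fmt.Println(int64(amo(0x10, ^uint64(0), 3))) // AMOMIN(-1, 3): -1
}
```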
+ // FENCE / FENCE.TSO / FENCE.I all no-op: there's nothing to synchronize. + setPC(add64(_pc, toU64(4))) + } + case 0x07 { + // FLW/FLD: floating point load word/double + setPC(add64(_pc, toU64(4))) // no-op this. + } + case 0x27 { + // FSW/FSD: floating point store word/double + setPC(add64(_pc, toU64(4))) // no-op this. + } + case 0x53 { + // FADD etc. no-op is enough to pass Go runtime check + setPC(add64(_pc, toU64(4))) // no-op this. + } + default { revertWithCode(0xf001c0de) } // unknown instruction opcode + + mstore(0, computeStateHash()) + return(0, 0x20) + } + } +} diff --git a/packages/contracts-bedrock/src/vendor/eas/EAS.sol b/packages/contracts-bedrock/src/vendor/eas/EAS.sol index 1cebdc819343..dd8840789d50 100644 --- a/packages/contracts-bedrock/src/vendor/eas/EAS.sol +++ b/packages/contracts-bedrock/src/vendor/eas/EAS.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.19; import { Address } from "@openzeppelin/contracts/utils/Address.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { EIP1271Verifier } from "src/vendor/eas/eip1271/EIP1271Verifier.sol"; import { ISchemaResolver } from "src/vendor/eas/resolver/ISchemaResolver.sol"; @@ -80,8 +80,8 @@ contract EAS is IEAS, ISemver, EIP1271Verifier { uint256[MAX_GAP - 3] private __gap; /// @notice Semantic version. - /// @custom:semver 1.4.1-beta.1 - string public constant version = "1.4.1-beta.1"; + /// @custom:semver 1.4.1-beta.2 + string public constant version = "1.4.1-beta.2"; /// @dev Creates a new EAS instance. constructor() EIP1271Verifier("EAS", "1.3.0") { } diff --git a/packages/contracts-bedrock/src/vendor/eas/SchemaRegistry.sol b/packages/contracts-bedrock/src/vendor/eas/SchemaRegistry.sol index 1adca3d6c3e9..98f87c35b53c 100644 --- a/packages/contracts-bedrock/src/vendor/eas/SchemaRegistry.sol +++ b/packages/contracts-bedrock/src/vendor/eas/SchemaRegistry.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.19; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; import { ISchemaResolver } from "src/vendor/eas/resolver/ISchemaResolver.sol"; import { EMPTY_UID, MAX_GAP } from "src/vendor/eas/Common.sol"; import { ISchemaRegistry, SchemaRecord } from "src/vendor/eas/ISchemaRegistry.sol"; @@ -20,8 +20,8 @@ contract SchemaRegistry is ISchemaRegistry, ISemver { uint256[MAX_GAP - 1] private __gap; /// @notice Semantic version. - /// @custom:semver 1.3.1-beta.1 - string public constant version = "1.3.1-beta.1"; + /// @custom:semver 1.3.1-beta.2 + string public constant version = "1.3.1-beta.2"; /// @inheritdoc ISchemaRegistry function register(string calldata schema, ISchemaResolver resolver, bool revocable) external returns (bytes32) { diff --git a/packages/contracts-bedrock/src/vendor/eas/resolver/ISchemaResolver.sol b/packages/contracts-bedrock/src/vendor/eas/resolver/ISchemaResolver.sol index 51aac1ece23b..d2089f83af86 100644 --- a/packages/contracts-bedrock/src/vendor/eas/resolver/ISchemaResolver.sol +++ b/packages/contracts-bedrock/src/vendor/eas/resolver/ISchemaResolver.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { Attestation } from "../Common.sol"; +import { Attestation } from "src/vendor/eas/Common.sol"; /// @title ISchemaResolver /// @notice The interface of an optional schema resolver. 
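One structural note on the RISCV.sol step function above: every path ends with `mstore(0, computeStateHash())` followed by `return(0, 0x20)`, so a single onchain step consumes a pre-state and yields only the 32-byte post-state hash. A minimal Go sketch of that single-step shape (the State fields, the hash, and the instruction effect are placeholders for illustration, not the contract's real packed encoding or keccak256 hashing):

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// State is a stand-in for the VM state the contract hashes via
// computeStateHash(); the real encoding lives in the contract.
type State struct {
	PC, Step uint64
	Exited   bool
}

func hashState(s State) [32]byte {
	var buf [17]byte
	binary.BigEndian.PutUint64(buf[0:8], s.PC)
	binary.BigEndian.PutUint64(buf[8:16], s.Step)
	if s.Exited {
		buf[16] = 1
	}
	return sha256.Sum256(buf[:])
}

// step mirrors the contract's control flow: an exited VM returns its state
// hash unchanged; otherwise the step counter advances, one instruction
// executes, and the post-state hash is returned.
func step(s State) ([32]byte, State) {
	if s.Exited {
		return hashState(s), s
	}
	s.Step++
	s.PC += 4 // stand-in for decoding and executing one instruction
	return hashState(s), s
}

func main() {
	s := State{PC: 0x1000}
	for i := 0; i < 3; i++ {
		var h [32]byte
		h, s = step(s)
		fmt.Printf("step=%d pc=%#x stateHash=%x...\n", s.Step, s.PC, h[:4])
	}
}
```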
diff --git a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol index a2ab917a0d2b..ab0ca82c61fd 100644 --- a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol +++ b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol @@ -6,7 +6,7 @@ import { ChallengeStatus, Challenge, CommitmentType -} from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; +} from "interfaces/L1/IDataAvailabilityChallenge.sol"; import { computeCommitmentKeccak256 } from "src/L1/DataAvailabilityChallenge.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; import { Preinstalls } from "src/libraries/Preinstalls.sol"; @@ -58,6 +58,7 @@ contract DataAvailabilityChallengeTest is CommonTest { // EntryPoint will revert if using amount > type(uint112).max. vm.assume(sender != Preinstalls.EntryPoint_v060); vm.assume(sender != address(dataAvailabilityChallenge)); + vm.assume(sender != deploy.mustGetAddress("DataAvailabilityChallenge")); vm.assume(sender.balance == 0); vm.deal(sender, amount); diff --git a/packages/contracts-bedrock/test/L1/DelayedVetoable.t.sol b/packages/contracts-bedrock/test/L1/DelayedVetoable.t.sol deleted file mode 100644 index a0dd2d8c138e..000000000000 --- a/packages/contracts-bedrock/test/L1/DelayedVetoable.t.sol +++ /dev/null @@ -1,262 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { Test } from "forge-std/Test.sol"; -import { IDelayedVetoable } from "src/L1/interfaces/IDelayedVetoable.sol"; -import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; - -contract DelayedVetoable_Init is Test { - error Unauthorized(address expected, address actual); - error ForwardingEarly(); - - event Initiated(bytes32 indexed callHash, bytes data); - event Forwarded(bytes32 indexed callHash, bytes data); - event Vetoed(bytes32 indexed callHash, bytes data); - - address target; - address initiator; - address vetoer; - uint256 operatingDelay = 14 days; - IDelayedVetoable delayedVetoable; - - function setUp() public { - initiator = makeAddr("initiator"); - vetoer = makeAddr("vetoer"); - target = makeAddr("target"); - vm.deal(initiator, 10000 ether); - vm.deal(vetoer, 10000 ether); - - delayedVetoable = IDelayedVetoable( - DeployUtils.create1({ - _name: "DelayedVetoable", - _args: DeployUtils.encodeConstructor( - abi.encodeCall(IDelayedVetoable.__constructor__, (vetoer, initiator, address(target), operatingDelay)) - ) - }) - ); - - // Most tests will use the operating delay, so we call as the initiator with null data - // to set the delay. For tests that need to use the initial zero delay, we'll modify the - // value in storage. - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(hex""); - assertTrue(success); - } - - /// @dev This function is used to prevent initiating the delay unintentionally. - /// It should only be used on tests prior to the delay being activated. - /// @param data The data to be used in the call. - function assumeNonzeroData(bytes memory data) internal pure { - vm.assume(data.length > 0); - } - - /// @dev This function is used to ensure that the data does not clash with the queuedAt function selector. - /// @param data The data to be used in the call. 
- function assumeNoClash(bytes calldata data) internal pure { - if (data.length >= 4) { - vm.assume(bytes4(data[0:4]) != bytes4(keccak256("queuedAt(bytes32)"))); - } - } -} - -contract DelayedVetoable_Getters_Test is DelayedVetoable_Init { - /// @dev The getters return the expected values when called by the zero address. - function test_getters_succeeds() external { - vm.startPrank(address(0)); - assertEq(delayedVetoable.initiator(), initiator); - assertEq(delayedVetoable.vetoer(), vetoer); - assertEq(delayedVetoable.target(), target); - assertEq(delayedVetoable.delay(), operatingDelay); - assertEq(delayedVetoable.queuedAt(keccak256(abi.encode(0))), 0); - } -} - -contract DelayedVetoable_Getters_TestFail is DelayedVetoable_Init { - /// @dev Check that getter calls from unauthorized entities will revert. - function test_getters_notZeroAddress_reverts() external { - vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, initiator, address(this))); - delayedVetoable.initiator(); - vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, initiator, address(this))); - delayedVetoable.vetoer(); - vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, initiator, address(this))); - delayedVetoable.target(); - vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, initiator, address(this))); - delayedVetoable.delay(); - vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, initiator, address(this))); - delayedVetoable.queuedAt(keccak256(abi.encode(0))); - } -} - -contract DelayedVetoable_HandleCall_Test is DelayedVetoable_Init { - /// @dev A call can be initiated by the initiator. - function testFuzz_handleCall_initiation_succeeds(bytes calldata data) external { - assumeNoClash(data); - vm.expectEmit(true, false, false, true, address(delayedVetoable)); - emit Initiated(keccak256(data), data); - - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(data); - assertTrue(success); - } - - /// @dev The delay is inititially set to zero and the call is immediately forwarded. - function testFuzz_handleCall_initialForwardingImmediately_succeeds( - bytes calldata inData, - bytes calldata outData - ) - external - { - assumeNonzeroData(inData); - assumeNoClash(inData); - - // Reset the delay to zero - vm.store(address(delayedVetoable), bytes32(uint256(0)), bytes32(uint256(0))); - - vm.mockCall(target, inData, outData); - vm.expectEmit(true, false, false, true, address(delayedVetoable)); - vm.expectCall({ callee: target, data: inData }); - emit Forwarded(keccak256(inData), inData); - vm.prank(initiator); - (bool success, bytes memory returnData) = address(delayedVetoable).call(inData); - assertTrue(success); - assertEq(returnData, outData); - - // Check that the callHash is not stored for future forwarding - bytes32 callHash = keccak256(inData); - vm.prank(address(0)); - assertEq(delayedVetoable.queuedAt(callHash), 0); - } - - /// @dev Calls are not forwarded until the delay has passed. 
- function testFuzz_handleCall_forwardingWithDelay_succeeds(bytes calldata data) external { - assumeNonzeroData(data); - assumeNoClash(data); - - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(data); - - // Check that the call is in the _queuedAt mapping - bytes32 callHash = keccak256(data); - vm.prank(address(0)); - assertEq(delayedVetoable.queuedAt(callHash), block.timestamp); - - vm.warp(block.timestamp + operatingDelay); - vm.expectEmit(true, false, false, true, address(delayedVetoable)); - emit Forwarded(keccak256(data), data); - - vm.expectCall({ callee: target, data: data }); - (success,) = address(delayedVetoable).call(data); - assertTrue(success); - } -} - -contract DelayedVetoable_HandleCall_TestFail is DelayedVetoable_Init { - /// @dev Only the initiator can initiate a call. - function test_handleCall_unauthorizedInitiation_reverts() external { - vm.expectRevert(abi.encodeWithSelector(IDelayedVetoable.Unauthorized.selector, initiator, address(this))); - (bool revertsAsExpected,) = address(delayedVetoable).call(hex"00001234"); - assertTrue(revertsAsExpected); - } - - /// @dev The call cannot be forwarded until the delay has passed. - function testFuzz_handleCall_forwardingTooSoon_reverts(bytes calldata data) external { - assumeNoClash(data); - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(data); - assertTrue(success); - - vm.expectRevert(IDelayedVetoable.ForwardingEarly.selector); - (bool revertsAsExpected,) = address(delayedVetoable).call(data); - assertTrue(revertsAsExpected); - } - - /// @dev The call cannot be forwarded a second time. - function testFuzz_handleCall_forwardingTwice_reverts(bytes calldata data) external { - assumeNoClash(data); - - // Initiate the call - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(data); - assertTrue(success); - - vm.warp(block.timestamp + operatingDelay); - vm.expectEmit(true, false, false, true, address(delayedVetoable)); - emit Forwarded(keccak256(data), data); - - // Forward the call - vm.expectCall({ callee: target, data: data }); - (success,) = address(delayedVetoable).call(data); - assertTrue(success); - - // Attempt to forward the same call again. - vm.expectRevert(abi.encodeWithSelector(IDelayedVetoable.Unauthorized.selector, initiator, address(this))); - (bool revertsAsExpected,) = address(delayedVetoable).call(data); - assertTrue(revertsAsExpected); - } - - /// @dev If the target reverts, it is bubbled up. 
- function testFuzz_handleCall_forwardingTargetReverts_reverts( - bytes calldata inData, - bytes calldata outData - ) - external - { - assumeNoClash(inData); - - // Initiate the call - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(inData); - assertTrue(success); - - vm.warp(block.timestamp + operatingDelay); - vm.expectEmit(true, false, false, true, address(delayedVetoable)); - emit Forwarded(keccak256(inData), inData); - - vm.mockCallRevert(target, inData, outData); - - // Forward the call - vm.expectRevert(outData); - (bool revertsAsExpected,) = address(delayedVetoable).call(inData); - assertTrue(revertsAsExpected); - } - - function testFuzz_handleCall_forwardingTargetRetValue_succeeds( - bytes calldata inData, - bytes calldata outData - ) - external - { - assumeNoClash(inData); - - // Initiate the call - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(inData); - assertTrue(success); - - vm.warp(block.timestamp + operatingDelay); - vm.expectEmit(true, false, false, true, address(delayedVetoable)); - emit Forwarded(keccak256(inData), inData); - - vm.mockCall(target, inData, outData); - - // Forward the call - (bool success2, bytes memory retData) = address(delayedVetoable).call(inData); - assertTrue(success2); - assertEq(keccak256(retData), keccak256(outData)); - } - - /// @dev A test documenting the single instance in which the contract is not 'transparent' to the initiator. - function testFuzz_handleCall_queuedAtClash_reverts() external { - // This will get us calldata with the same function selector as the queuedAt function, but - // with the incorrect input data length. - bytes memory inData = abi.encodePacked(keccak256("queuedAt(bytes32)")); - - // Reset the delay to zero - vm.store(address(delayedVetoable), bytes32(uint256(0)), bytes32(uint256(0))); - - vm.prank(initiator); - vm.expectRevert(bytes("")); - (bool revertsAsExpected,) = address(delayedVetoable).call(inData); - assertTrue(revertsAsExpected); - } -} diff --git a/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol index 23c1365e9156..d7c5dc29b348 100644 --- a/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol @@ -4,6 +4,7 @@ pragma solidity 0.8.15; // Testing utilities import { CommonTest } from "test/setup/CommonTest.sol"; import { Reverter } from "test/mocks/Callers.sol"; +import { stdError } from "forge-std/StdError.sol"; // Libraries import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; @@ -12,10 +13,10 @@ import { Hashing } from "src/libraries/Hashing.sol"; import { Encoding } from "src/libraries/Encoding.sol"; // Target contract dependencies -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; contract L1CrossDomainMessenger_Test is CommonTest { /// @dev The receiver address @@ -173,21 +174,93 @@ contract L1CrossDomainMessenger_Test is CommonTest { 
assertEq(l1CrossDomainMessenger.failedMessages(hash), false); } - /// @dev Tests that relayMessage reverts if attempting to relay a message - /// sent to an L1 system contract. - function test_relayMessage_toSystemContract_reverts() external { - // set the target to be the OptimismPortal - address target = address(optimismPortal); + /// @dev Tests that relayMessage reverts if caller is optimismPortal and the value sent does not match the amount + function test_relayMessage_fromOtherMessengerValueMismatch_reverts() external { + address target = alice; address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER; bytes memory message = hex"1111"; + // set the value of op.l2Sender() to be the L2CrossDomainMessenger. + vm.store(address(optimismPortal), bytes32(senderSlotIndex), bytes32(abi.encode(sender))); + + // correctly sending as OptimismPortal but amount does not match msg.value + vm.deal(address(optimismPortal), 10 ether); + vm.prank(address(optimismPortal)); + vm.expectRevert(stdError.assertionError); + l1CrossDomainMessenger.relayMessage{ value: 10 ether }( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 9 ether, 0, message + ); + } + + /// @dev Tests that relayMessage reverts if a failed message is attempted to be replayed via the optimismPortal + function test_relayMessage_fromOtherMessengerFailedMessageReplay_reverts() external { + address target = alice; + address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER; + bytes memory message = hex"1111"; + + // set the value of op.l2Sender() to be the L2 Cross Domain Messenger. + vm.store(address(optimismPortal), bytes32(senderSlotIndex), bytes32(abi.encode(sender))); + + // make a failed message + vm.etch(target, hex"fe"); vm.prank(address(optimismPortal)); - vm.expectRevert("CrossDomainMessenger: message cannot be replayed"); l1CrossDomainMessenger.relayMessage( Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 0, 0, message ); - vm.store(address(optimismPortal), 0, bytes32(abi.encode(sender))); + // cannot replay messages when optimism portal is msg.sender + vm.prank(address(optimismPortal)); + vm.expectRevert(stdError.assertionError); + l1CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 0, 0, message + ); + } + + /// @dev Tests that relayMessage reverts if attempting to relay a message + /// with l1CrossDomainMessenger as the target + function test_relayMessage_toSelf_reverts() external { + address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER; + bytes memory message = hex"1111"; + + vm.store(address(optimismPortal), bytes32(senderSlotIndex), bytes32(abi.encode(sender))); + + vm.prank(address(optimismPortal)); + vm.expectRevert("CrossDomainMessenger: cannot send message to blocked system address"); + l1CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), + sender, + address(l1CrossDomainMessenger), + 0, + 0, + message + ); + } + + /// @dev Tests that relayMessage reverts if attempting to relay a message + /// with optimismPortal as the target + function test_relayMessage_toOptimismPortal_reverts() external { + address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER; + bytes memory message = hex"1111"; + + vm.store(address(optimismPortal), bytes32(senderSlotIndex), bytes32(abi.encode(sender))); + + vm.prank(address(optimismPortal)); + vm.expectRevert("CrossDomainMessenger: cannot send message to blocked system address"); + l1CrossDomainMessenger.relayMessage( + 
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, address(optimismPortal), 0, 0, message + ); + } + + /// @dev Tests that the relayMessage function reverts if the message called by non-optimismPortal but not a failed + /// message + function test_relayMessage_relayingNewMessageByExternalUser_reverts() external { + address target = address(alice); + address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER; + bytes memory message = hex"1111"; + + vm.store(address(optimismPortal), bytes32(senderSlotIndex), bytes32(abi.encode(sender))); + + vm.prank(bob); vm.expectRevert("CrossDomainMessenger: message cannot be replayed"); l1CrossDomainMessenger.relayMessage( Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 0, 0, message diff --git a/packages/contracts-bedrock/test/L1/L1ERC721Bridge.t.sol b/packages/contracts-bedrock/test/L1/L1ERC721Bridge.t.sol index 3949bf30f4eb..88913a76ba37 100644 --- a/packages/contracts-bedrock/test/L1/L1ERC721Bridge.t.sol +++ b/packages/contracts-bedrock/test/L1/L1ERC721Bridge.t.sol @@ -11,10 +11,10 @@ import { ERC721 } from "@openzeppelin/contracts/token/ERC721/ERC721.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IL2ERC721Bridge } from "src/L2/interfaces/IL2ERC721Bridge.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IL2ERC721Bridge } from "interfaces/L2/IL2ERC721Bridge.sol"; /// @dev Test ERC721 contract. contract TestERC721 is ERC721 { @@ -237,6 +237,14 @@ contract L1ERC721Bridge_Test is CommonTest { assertEq(localToken.ownerOf(tokenId), alice); } + /// @dev Tests that `bridgeERC721To` reverts if the to address is the zero address. + function test_bridgeERC721To_toZeroAddress_reverts() external { + // Bridge the token. + vm.prank(bob); + vm.expectRevert("ERC721Bridge: nft recipient cannot be address(0)"); + l1ERC721Bridge.bridgeERC721To(address(localToken), address(remoteToken), address(0), tokenId, 1234, hex"5678"); + } + /// @dev Tests that the ERC721 bridge successfully finalizes a withdrawal. function test_finalizeBridgeERC721_succeeds() external { // Bridge the token. 
diff --git a/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol b/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol index 97ef01262ab6..4cc1b8f35994 100644 --- a/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol +++ b/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol @@ -14,10 +14,10 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; // Interfaces -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; contract L1StandardBridge_Getter_Test is CommonTest { /// @dev Test that the accessors return the correct initialized values. diff --git a/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol b/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol index 490ae07c927d..e4d53ddd9e78 100644 --- a/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol +++ b/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol @@ -15,7 +15,7 @@ import { Proxy } from "src/universal/Proxy.sol"; // Target contract import { L2OutputOracle } from "src/L1/L2OutputOracle.sol"; -import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; +import { IL2OutputOracle } from "interfaces/L1/IL2OutputOracle.sol"; contract L2OutputOracle_TestBase is CommonTest { function setUp() public override { @@ -99,6 +99,31 @@ contract L2OutputOracle_getter_Test is L2OutputOracle_TestBase { l2OutputOracle.getL2Output(nextOutputIndex + 1); } + /// @dev Tests that `getL2OutputAfter` of an L2 block number returns the L2 output of the `getL2OutputIndexAfter` of + /// that block number. + function test_getL2OutputAfter_succeeds() external { + uint8 iterations = 5; + + Types.OutputProposal memory output; + Types.OutputProposal memory expectedOutput; + + for (uint8 i; i < iterations; i++) { + proposeAnotherOutput(); + } + + uint256 latestBlockNumber = l2OutputOracle.latestBlockNumber(); + for (uint8 i = iterations - 1; i > 0; i--) { + uint256 index = l2OutputOracle.getL2OutputIndexAfter(latestBlockNumber); + output = l2OutputOracle.getL2OutputAfter(latestBlockNumber); + expectedOutput = l2OutputOracle.getL2Output(index); + assertEq(output.outputRoot, expectedOutput.outputRoot); + assertEq(output.timestamp, expectedOutput.timestamp); + assertEq(output.l2BlockNumber, expectedOutput.l2BlockNumber); + + latestBlockNumber -= l2OutputOracle.SUBMISSION_INTERVAL(); + } + } + /// @dev Tests that `getL2OutputIndexAfter` returns the correct value /// when the input is the exact block number of the proposal. 
function test_getL2OutputIndexAfter_sameBlock_succeeds() external { diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index 5b2260fce992..418eebd9e64b 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -7,16 +7,19 @@ import { DeployOPChainInput } from "scripts/deploy/DeployOPChain.s.sol"; import { DeployOPChain_TestBase } from "test/opcm/DeployOPChain.t.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; // Exposes internal functions for testing. contract OPContractsManager_Harness is OPContractsManager { constructor( ISuperchainConfig _superchainConfig, - IProtocolVersions _protocolVersions + IProtocolVersions _protocolVersions, + string memory _l1ContractsRelease, + Blueprints memory _blueprints, + Implementations memory _implementations ) - OPContractsManager(_superchainConfig, _protocolVersions) + OPContractsManager(_superchainConfig, _protocolVersions, _l1ContractsRelease, _blueprints, _implementations) { } function chainIdToBatchInboxAddress_exposed(uint256 l2ChainId) public pure returns (address) { @@ -49,7 +52,7 @@ contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase { doi.set(doi.basefeeScalar.selector, basefeeScalar); doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opcmProxy.selector, address(opcm)); + doi.set(doi.opcm.selector, address(opcm)); doi.set(doi.gasLimit.selector, gasLimit); doi.set(doi.disputeGameType.selector, disputeGameType); @@ -116,12 +119,17 @@ contract OPContractsManager_InternalMethods_Test is Test { function setUp() public { ISuperchainConfig superchainConfigProxy = ISuperchainConfig(makeAddr("superchainConfig")); IProtocolVersions protocolVersionsProxy = IProtocolVersions(makeAddr("protocolVersions")); + OPContractsManager.Blueprints memory emptyBlueprints; + OPContractsManager.Implementations memory emptyImpls; vm.etch(address(superchainConfigProxy), hex"01"); vm.etch(address(protocolVersionsProxy), hex"01"); opcmHarness = new OPContractsManager_Harness({ _superchainConfig: superchainConfigProxy, - _protocolVersions: protocolVersionsProxy + _protocolVersions: protocolVersionsProxy, + _l1ContractsRelease: "dev", + _blueprints: emptyBlueprints, + _implementations: emptyImpls }); } diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol index f19e7ca6f5c2..b575fdacff41 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol @@ -22,11 +22,11 @@ import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; import "src/libraries/PortalErrors.sol"; // Interfaces -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IResourceMetering } 
from "interfaces/L1/IResourceMetering.sol"; +import { IL2OutputOracle } from "interfaces/L1/IL2OutputOracle.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; contract OptimismPortal_Test is CommonTest { address depositor; @@ -201,7 +201,6 @@ contract OptimismPortal_Test is CommonTest { } /// @dev Tests that `depositTransaction` succeeds when msg.sender == tx.origin and non-custom gas is used. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_senderIsOrigin_succeeds( address _to, uint256 _mint, @@ -227,7 +226,6 @@ contract OptimismPortal_Test is CommonTest { } /// @dev Tests that `depositTransaction` succeeds when msg.sender != tx.origin and non-custom gas is used. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_senderNotOrigin_succeeds( address _to, uint256 _mint, @@ -310,7 +308,6 @@ contract OptimismPortal_Test is CommonTest { } /// @dev Tests that `depositTransaction` succeeds for an EOA. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_eoa_succeeds( address _to, uint64 _gasLimit, @@ -355,7 +352,6 @@ contract OptimismPortal_Test is CommonTest { } /// @dev Tests that `depositTransaction` succeeds for a contract. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_contract_succeeds( address _to, uint64 _gasLimit, @@ -1006,7 +1002,10 @@ contract OptimismPortal_FinalizeWithdrawal_Test is CommonTest { ) ); - uint256 bobBalanceBefore = address(bob).balance; + // Fund the portal so that we can withdraw ETH. + vm.store(address(optimismPortal), bytes32(uint256(61)), bytes32(uint256(0xFFFFFFFF))); + vm.deal(address(optimismPortal), 0xFFFFFFFF); + uint256 bobBalanceBefore = bob.balance; vm.expectEmit(true, true, true, true); emit WithdrawalProven(_withdrawalHash_noData, alice, bob); @@ -1019,7 +1018,69 @@ contract OptimismPortal_FinalizeWithdrawal_Test is CommonTest { emit WithdrawalFinalized(_withdrawalHash_noData, true); optimismPortal.finalizeWithdrawalTransaction(_defaultTx_noData); - assertEq(address(bob).balance, bobBalanceBefore + 100); + assertEq(bob.balance, bobBalanceBefore + 100); + } + + /// @dev Tests that `finalizeWithdrawalTransaction` succeeds when _tx.data is empty and with a custom gas token. + function test_finalizeWithdrawalTransaction_noTxDataNonEtherGasToken_succeeds() external { + Types.WithdrawalTransaction memory _defaultTx_noData = Types.WithdrawalTransaction({ + nonce: 0, + sender: alice, + target: bob, + value: 100, + gasLimit: 100_000, + data: hex"" + }); + // Get withdrawal proof data we can use for testing. + ( + bytes32 _stateRoot_noData, + bytes32 _storageRoot_noData, + bytes32 _outputRoot_noData, + bytes32 _withdrawalHash_noData, + bytes[] memory _withdrawalProof_noData + ) = ffi.getProveWithdrawalTransactionInputs(_defaultTx_noData); + // Setup a dummy output root proof for reuse. + Types.OutputRootProof memory _outputRootProof_noData = Types.OutputRootProof({ + version: bytes32(uint256(0)), + stateRoot: _stateRoot_noData, + messagePasserStorageRoot: _storageRoot_noData, + latestBlockhash: bytes32(uint256(0)) + }); + + // Configure the oracle to return the output root we've prepared. 
+ vm.mockCall( + address(l2OutputOracle), + abi.encodePacked(IL2OutputOracle.getL2Output.selector), + abi.encode( + Types.OutputProposal( + _outputRoot_noData, + l2OutputOracle.getL2Output(_proposedOutputIndex).timestamp, + uint128(_proposedBlockNumber) + ) + ) + ); + + // Fund the portal so that we can withdraw ETH. + vm.store(address(optimismPortal), bytes32(uint256(61)), bytes32(uint256(0xFFFFFFFF))); + deal(address(L1Token), address(optimismPortal), 0xFFFFFFFF); + // modify the gas token to be non ether + vm.mockCall( + address(systemConfig), abi.encodeCall(systemConfig.gasPayingToken, ()), abi.encode(address(L1Token), 18) + ); + uint256 bobBalanceBefore = L1Token.balanceOf(bob); + + vm.expectEmit(true, true, true, true); + emit WithdrawalProven(_withdrawalHash_noData, alice, bob); + optimismPortal.proveWithdrawalTransaction( + _defaultTx_noData, _proposedOutputIndex, _outputRootProof_noData, _withdrawalProof_noData + ); + + vm.warp(block.timestamp + l2OutputOracle.FINALIZATION_PERIOD_SECONDS() + 1); + vm.expectEmit(true, true, false, true); + emit WithdrawalFinalized(_withdrawalHash_noData, true); + optimismPortal.finalizeWithdrawalTransaction(_defaultTx_noData); + + assertEq(L1Token.balanceOf(bob), bobBalanceBefore + 100); } /// @dev Tests that `finalizeWithdrawalTransaction` reverts if the finalization period @@ -1149,7 +1210,6 @@ contract OptimismPortal_FinalizeWithdrawal_Test is CommonTest { } /// @dev Tests that `finalizeWithdrawalTransaction` succeeds. - /// forge-config: ciheavy.fuzz.runs = 8192 function testDiff_finalizeWithdrawalTransaction_succeeds( address _sender, address _target, @@ -1272,7 +1332,6 @@ contract OptimismPortalResourceFuzz_Test is CommonTest { uint256 constant MAX_GAS_LIMIT = 30_000_000; /// @dev Test that various values of the resource metering config will not break deposits. - /// forge-config: ciheavy.fuzz.runs = 10000 function testFuzz_systemConfigDeposit_succeeds( uint32 _maxResourceLimit, uint8 _elasticityMultiplier, @@ -1291,8 +1350,13 @@ contract OptimismPortalResourceFuzz_Test is CommonTest { uint64 gasLimit = systemConfig.gasLimit(); // Bound resource config + _systemTxMaxGas = uint32(bound(_systemTxMaxGas, 0, gasLimit - 21000)); _maxResourceLimit = uint32(bound(_maxResourceLimit, 21000, MAX_GAS_LIMIT / 8)); + _maxResourceLimit = uint32(bound(_maxResourceLimit, 21000, gasLimit - _systemTxMaxGas)); + _maximumBaseFee = uint128(bound(_maximumBaseFee, 1, type(uint128).max)); + _minimumBaseFee = uint32(bound(_minimumBaseFee, 0, _maximumBaseFee - 1)); _gasLimit = uint64(bound(_gasLimit, 21000, _maxResourceLimit)); + _gasLimit = uint64(bound(_gasLimit, 0, gasLimit)); _prevBaseFee = uint128(bound(_prevBaseFee, 0, 3 gwei)); _prevBoughtGas = uint64(bound(_prevBoughtGas, 0, _maxResourceLimit - _gasLimit)); _blockDiff = uint8(bound(_blockDiff, 0, 3)); @@ -1300,11 +1364,16 @@ contract OptimismPortalResourceFuzz_Test is CommonTest { _elasticityMultiplier = uint8(bound(_elasticityMultiplier, 1, type(uint8).max)); // Prevent values that would cause reverts - vm.assume(gasLimit >= _gasLimit); - vm.assume(_minimumBaseFee < _maximumBaseFee); vm.assume(uint256(_maxResourceLimit) + uint256(_systemTxMaxGas) <= gasLimit); vm.assume(((_maxResourceLimit / _elasticityMultiplier) * _elasticityMultiplier) == _maxResourceLimit); + // Although we typically want to limit the usage of vm.assume, we've constructed the above + // bounds to satisfy the assumptions listed in this specific section. 
These assumptions + // serve only to act as an additional sanity check on top of the bounds and should not + // result in an unnecessary number of test rejections. + vm.assume(gasLimit >= _gasLimit); + vm.assume(_minimumBaseFee < _maximumBaseFee); + // Base fee can increase quickly and mean that we can't buy the amount of gas we want. // Here we add a VM assumption to bound the potential increase. // Compute the maximum possible increase in base fee. @@ -1407,7 +1476,6 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T } /// @dev Tests that `depositERC20Transaction` succeeds when msg.sender == tx.origin. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositERC20Transaction_senderIsOrigin_succeeds( address _to, uint256 _mint, @@ -1433,7 +1501,6 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T } /// @dev Tests that `depositERC20Transaction` succeeds when msg.sender != tx.origin. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositERC20Transaction_senderNotOrigin_succeeds( address _to, uint256 _mint, @@ -1632,7 +1699,6 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T } /// @dev Tests that `depositTransaction` succeeds when a custom gas token is used but the msg.value is zero. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_customGasTokenWithNoValueAndSenderIsOrigin_succeeds( address _to, uint256 _value, @@ -1656,7 +1722,6 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T } /// @dev Tests that `depositTransaction` succeeds when a custom gas token is used but the msg.value is zero. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_customGasTokenWithNoValueAndSenderNotOrigin_succeeds( address _to, uint256 _value, diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol index d252609e5eea..830323936a60 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol @@ -23,12 +23,12 @@ import "src/dispute/lib/Types.sol"; import "src/libraries/PortalErrors.sol"; // Interfaces -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; contract OptimismPortal2_Test is CommonTest { address depositor; @@ -211,7 +211,6 @@ contract OptimismPortal2_Test is CommonTest { } /// @dev Tests that `depositTransaction` succeeds for an EOA. 
- /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_eoa_succeeds( address _to, uint64 _gasLimit, @@ -256,7 +255,6 @@ contract OptimismPortal2_Test is CommonTest { } /// @dev Tests that `depositTransaction` succeeds for a contract. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_contract_succeeds( address _to, uint64 _gasLimit, @@ -802,6 +800,169 @@ contract OptimismPortal2_FinalizeWithdrawal_Test is CommonTest { assert(address(bob).balance == bobBalanceBefore + 100); } + /// @dev Tests that `finalizeWithdrawalTransaction` reverts if the target reverts and caller is the + /// ESTIMATION_ADDRESS. + function test_finalizeWithdrawalTransaction_targetFailsAndCallerIsEstimationAddress_reverts() external { + vm.etch(bob, hex"fe"); // Contract with just the invalid opcode. + + vm.prank(alice); + vm.expectEmit(true, true, true, true); + emit WithdrawalProven(_withdrawalHash, alice, bob); + optimismPortal2.proveWithdrawalTransaction(_defaultTx, _proposedGameIndex, _outputRootProof, _withdrawalProof); + + // Warp and resolve the dispute game. + game.resolveClaim(0, 0); + game.resolve(); + vm.warp(block.timestamp + optimismPortal2.proofMaturityDelaySeconds() + 1 seconds); + + vm.startPrank(alice, Constants.ESTIMATION_ADDRESS); + vm.expectRevert(GasEstimation.selector); + optimismPortal2.finalizeWithdrawalTransaction(_defaultTx); + } + + /// @dev Tests that `finalizeWithdrawalTransaction` succeeds when _tx.data is empty. + function test_finalizeWithdrawalTransaction_noTxData_succeeds() external { + Types.WithdrawalTransaction memory _defaultTx_noData = Types.WithdrawalTransaction({ + nonce: 0, + sender: alice, + target: bob, + value: 100, + gasLimit: 100_000, + data: hex"" + }); + // Get withdrawal proof data we can use for testing. + ( + bytes32 _stateRoot_noData, + bytes32 _storageRoot_noData, + bytes32 _outputRoot_noData, + bytes32 _withdrawalHash_noData, + bytes[] memory _withdrawalProof_noData + ) = ffi.getProveWithdrawalTransactionInputs(_defaultTx_noData); + // Setup a dummy output root proof for reuse. + Types.OutputRootProof memory _outputRootProof_noData = Types.OutputRootProof({ + version: bytes32(uint256(0)), + stateRoot: _stateRoot_noData, + messagePasserStorageRoot: _storageRoot_noData, + latestBlockhash: bytes32(uint256(0)) + }); + uint256 _proposedBlockNumber_noData = 0xFF; + IFaultDisputeGame game_noData = IFaultDisputeGame( + payable( + address( + disputeGameFactory.create( + optimismPortal2.respectedGameType(), + Claim.wrap(_outputRoot_noData), + abi.encode(_proposedBlockNumber_noData) + ) + ) + ) + ); + uint256 _proposedGameIndex_noData = disputeGameFactory.gameCount() - 1; + // Warp beyond the chess clocks and finalize the game. + vm.warp(block.timestamp + game_noData.maxClockDuration().raw() + 1 seconds); + // Fund the portal so that we can withdraw ETH. + vm.store(address(optimismPortal2), bytes32(uint256(61)), bytes32(uint256(0xFFFFFFFF))); + vm.deal(address(optimismPortal2), 0xFFFFFFFF); + + uint256 bobBalanceBefore = bob.balance; + + vm.expectEmit(address(optimismPortal2)); + emit WithdrawalProven(_withdrawalHash_noData, alice, bob); + vm.expectEmit(address(optimismPortal2)); + emit WithdrawalProvenExtension1(_withdrawalHash_noData, address(this)); + optimismPortal2.proveWithdrawalTransaction({ + _tx: _defaultTx_noData, + _disputeGameIndex: _proposedGameIndex_noData, + _outputRootProof: _outputRootProof_noData, + _withdrawalProof: _withdrawalProof_noData + }); + + // Warp and resolve the dispute game. 
+ game_noData.resolveClaim(0, 0); + game_noData.resolve(); + vm.warp(block.timestamp + optimismPortal2.proofMaturityDelaySeconds() + 1 seconds); + + vm.expectEmit(true, true, false, true); + emit WithdrawalFinalized(_withdrawalHash_noData, true); + optimismPortal2.finalizeWithdrawalTransaction(_defaultTx_noData); + + assert(bob.balance == bobBalanceBefore + 100); + } + + /// @dev Tests that `finalizeWithdrawalTransaction` succeeds when _tx.data is empty and with a custom gas token. + function test_finalizeWithdrawalTransaction_noTxDataNonEtherGasToken_succeeds() external { + Types.WithdrawalTransaction memory _defaultTx_noData = Types.WithdrawalTransaction({ + nonce: 0, + sender: alice, + target: bob, + value: 100, + gasLimit: 100_000, + data: hex"" + }); + // Get withdrawal proof data we can use for testing. + ( + bytes32 _stateRoot_noData, + bytes32 _storageRoot_noData, + bytes32 _outputRoot_noData, + bytes32 _withdrawalHash_noData, + bytes[] memory _withdrawalProof_noData + ) = ffi.getProveWithdrawalTransactionInputs(_defaultTx_noData); + // Setup a dummy output root proof for reuse. + Types.OutputRootProof memory _outputRootProof_noData = Types.OutputRootProof({ + version: bytes32(uint256(0)), + stateRoot: _stateRoot_noData, + messagePasserStorageRoot: _storageRoot_noData, + latestBlockhash: bytes32(uint256(0)) + }); + uint256 _proposedBlockNumber_noData = 0xFF; + IFaultDisputeGame game_noData = IFaultDisputeGame( + payable( + address( + disputeGameFactory.create( + optimismPortal2.respectedGameType(), + Claim.wrap(_outputRoot_noData), + abi.encode(_proposedBlockNumber_noData) + ) + ) + ) + ); + uint256 _proposedGameIndex_noData = disputeGameFactory.gameCount() - 1; + // Warp beyond the chess clocks and finalize the game. + vm.warp(block.timestamp + game_noData.maxClockDuration().raw() + 1 seconds); + // Fund the portal so that we can withdraw ETH. + vm.store(address(optimismPortal2), bytes32(uint256(61)), bytes32(uint256(0xFFFFFFFF))); + deal(address(L1Token), address(optimismPortal2), 0xFFFFFFFF); + + // modify the gas token to be non ether + vm.mockCall( + address(systemConfig), abi.encodeCall(systemConfig.gasPayingToken, ()), abi.encode(address(L1Token), 18) + ); + + uint256 bobBalanceBefore = L1Token.balanceOf(bob); + + vm.expectEmit(address(optimismPortal2)); + emit WithdrawalProven(_withdrawalHash_noData, alice, bob); + vm.expectEmit(address(optimismPortal2)); + emit WithdrawalProvenExtension1(_withdrawalHash_noData, address(this)); + optimismPortal2.proveWithdrawalTransaction({ + _tx: _defaultTx_noData, + _disputeGameIndex: _proposedGameIndex_noData, + _outputRootProof: _outputRootProof_noData, + _withdrawalProof: _withdrawalProof_noData + }); + + // Warp and resolve the dispute game. + game_noData.resolveClaim(0, 0); + game_noData.resolve(); + vm.warp(block.timestamp + optimismPortal2.proofMaturityDelaySeconds() + 1 seconds); + + vm.expectEmit(true, true, false, true); + emit WithdrawalFinalized(_withdrawalHash_noData, true); + optimismPortal2.finalizeWithdrawalTransaction(_defaultTx_noData); + + assert(L1Token.balanceOf(bob) == bobBalanceBefore + 100); + } + /// @dev Tests that `finalizeWithdrawalTransaction` succeeds. function test_finalizeWithdrawalTransaction_provenWithdrawalHashEther_succeeds() external { uint256 bobBalanceBefore = address(bob).balance; @@ -1163,7 +1324,6 @@ contract OptimismPortal2_FinalizeWithdrawal_Test is CommonTest { } /// @dev Tests that `finalizeWithdrawalTransaction` succeeds. 
- /// forge-config: ciheavy.fuzz.runs = 8192 function testDiff_finalizeWithdrawalTransaction_succeeds( address _sender, address _target, @@ -1447,7 +1607,6 @@ contract OptimismPortal2_ResourceFuzz_Test is CommonTest { } /// @dev Test that various values of the resource metering config will not break deposits. - /// forge-config: ciheavy.fuzz.runs = 10000 function testFuzz_systemConfigDeposit_succeeds( uint32 _maxResourceLimit, uint8 _elasticityMultiplier, @@ -1466,8 +1625,13 @@ contract OptimismPortal2_ResourceFuzz_Test is CommonTest { uint64 gasLimit = systemConfig.gasLimit(); // Bound resource config + _systemTxMaxGas = uint32(bound(_systemTxMaxGas, 0, gasLimit - 21000)); _maxResourceLimit = uint32(bound(_maxResourceLimit, 21000, MAX_GAS_LIMIT / 8)); + _maxResourceLimit = uint32(bound(_maxResourceLimit, 21000, gasLimit - _systemTxMaxGas)); + _maximumBaseFee = uint128(bound(_maximumBaseFee, 1, type(uint128).max)); + _minimumBaseFee = uint32(bound(_minimumBaseFee, 0, _maximumBaseFee - 1)); _gasLimit = uint64(bound(_gasLimit, 21000, _maxResourceLimit)); + _gasLimit = uint64(bound(_gasLimit, 0, gasLimit)); _prevBaseFee = uint128(bound(_prevBaseFee, 0, 3 gwei)); _prevBoughtGas = uint64(bound(_prevBoughtGas, 0, _maxResourceLimit - _gasLimit)); _blockDiff = uint8(bound(_blockDiff, 0, 3)); @@ -1475,12 +1639,16 @@ contract OptimismPortal2_ResourceFuzz_Test is CommonTest { _elasticityMultiplier = uint8(bound(_elasticityMultiplier, 1, type(uint8).max)); // Prevent values that would cause reverts - vm.assume(gasLimit >= _gasLimit); - vm.assume(_minimumBaseFee < _maximumBaseFee); - vm.assume(_baseFeeMaxChangeDenominator > 1); vm.assume(uint256(_maxResourceLimit) + uint256(_systemTxMaxGas) <= gasLimit); vm.assume(((_maxResourceLimit / _elasticityMultiplier) * _elasticityMultiplier) == _maxResourceLimit); + // Although we typically want to limit the usage of vm.assume, we've constructed the above + // bounds to satisfy the assumptions listed in this specific section. These assumptions + // serve only to act as an additional sanity check on top of the bounds and should not + // result in an unnecessary number of test rejections. + vm.assume(gasLimit >= _gasLimit); + vm.assume(_minimumBaseFee < _maximumBaseFee); + // Base fee can increase quickly and mean that we can't buy the amount of gas we want. // Here we add a VM assumption to bound the potential increase. // Compute the maximum possible increase in base fee. @@ -1575,7 +1743,7 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal ); // Deposit the token into the portal - optimismPortal.depositERC20Transaction(_to, _mint, _value, _gasLimit, _isCreation, _data); + optimismPortal2.depositERC20Transaction(_to, _mint, _value, _gasLimit, _isCreation, _data); // Assert final balance equals the deposited amount assertEq(token.balanceOf(address(optimismPortal2)), _mint); @@ -1583,7 +1751,6 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal } /// @dev Tests that `depositERC20Transaction` succeeds when msg.sender == tx.origin. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositERC20Transaction_senderIsOrigin_succeeds( address _to, uint256 _mint, @@ -1609,7 +1776,6 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal } /// @dev Tests that `depositERC20Transaction` succeeds when msg.sender != tx.origin. 
- /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositERC20Transaction_senderNotOrigin_succeeds( address _to, uint256 _mint, @@ -1657,7 +1823,7 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal ); // Mock the token balance - vm.mockCall(address(token), abi.encodeCall(token.balanceOf, (address(optimismPortal))), abi.encode(0)); + vm.mockCall(address(token), abi.encodeCall(token.balanceOf, (address(optimismPortal2))), abi.encode(0)); // Call minimumGasLimit(0) before vm.expectRevert to ensure vm.expectRevert is for depositERC20Transaction uint64 gasLimit = optimismPortal2.minimumGasLimit(0); @@ -1723,7 +1889,7 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal ); // Deposit the token into the portal - optimismPortal2.depositERC20Transaction(address(0), _amount, 0, optimismPortal.minimumGasLimit(0), false, ""); + optimismPortal2.depositERC20Transaction(address(0), _amount, 0, optimismPortal2.minimumGasLimit(0), false, ""); // Check that the balance has been correctly updated assertEq(optimismPortal2.balance(), _amount); @@ -1742,7 +1908,7 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal // Deposit the token into the portal optimismPortal2.depositERC20Transaction( - address(bob), _defaultTx.value, 0, optimismPortal.minimumGasLimit(0), false, "" + address(bob), _defaultTx.value, 0, optimismPortal2.minimumGasLimit(0), false, "" ); assertEq(optimismPortal2.balance(), _defaultTx.value); @@ -1817,7 +1983,6 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal } /// @dev Tests that `depositTransaction` succeeds when a custom gas token is used but the msg.value is zero. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_customGasTokenWithNoValueAndSenderIsOrigin_succeeds( address _to, uint256 _value, @@ -1841,7 +2006,6 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal } /// @dev Tests that `depositTransaction` succeeds when a custom gas token is used but the msg.value is zero. 
- /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_customGasTokenWithNoValueAndSenderNotOrigin_succeeds( address _to, uint256 _value, diff --git a/packages/contracts-bedrock/test/L1/OptimismPortalInterop.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortalInterop.t.sol index bd6d3b5d7de9..0c08cab3e2d1 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortalInterop.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortalInterop.t.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities +// Testing import { CommonTest } from "test/setup/CommonTest.sol"; // Libraries @@ -9,13 +9,9 @@ import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import "src/libraries/PortalErrors.sol"; -// Target contract dependencies -import "src/libraries/PortalErrors.sol"; -import { OptimismPortalInterop } from "src/L1/OptimismPortalInterop.sol"; -import { L1BlockInterop, ConfigType } from "src/L2/L1BlockInterop.sol"; - // Interfaces -import { IOptimismPortalInterop } from "src/L1/interfaces/IOptimismPortalInterop.sol"; +import { IL1BlockInterop, ConfigType } from "interfaces/L2/IL1BlockInterop.sol"; +import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; contract OptimismPortalInterop_Test is CommonTest { /// @notice Marked virtual to be overridden in @@ -35,7 +31,7 @@ contract OptimismPortalInterop_Test is CommonTest { _mint: 0, _gasLimit: 200_000, _isCreation: false, - _data: abi.encodeCall(L1BlockInterop.setConfig, (ConfigType.SET_GAS_PAYING_TOKEN, _value)) + _data: abi.encodeCall(IL1BlockInterop.setConfig, (ConfigType.SET_GAS_PAYING_TOKEN, _value)) }); vm.prank(address(_optimismPortalInterop().systemConfig())); @@ -58,7 +54,7 @@ contract OptimismPortalInterop_Test is CommonTest { _mint: 0, _gasLimit: 200_000, _isCreation: false, - _data: abi.encodeCall(L1BlockInterop.setConfig, (ConfigType.ADD_DEPENDENCY, _value)) + _data: abi.encodeCall(IL1BlockInterop.setConfig, (ConfigType.ADD_DEPENDENCY, _value)) }); vm.prank(address(_optimismPortalInterop().systemConfig())); @@ -81,7 +77,7 @@ contract OptimismPortalInterop_Test is CommonTest { _mint: 0, _gasLimit: 200_000, _isCreation: false, - _data: abi.encodeCall(L1BlockInterop.setConfig, (ConfigType.REMOVE_DEPENDENCY, _value)) + _data: abi.encodeCall(IL1BlockInterop.setConfig, (ConfigType.REMOVE_DEPENDENCY, _value)) }); vm.prank(address(_optimismPortalInterop().systemConfig())); diff --git a/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol b/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol index fc6ea447d231..28d9ef2b0b83 100644 --- a/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol +++ b/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol @@ -6,8 +6,8 @@ import { CommonTest } from "test/setup/CommonTest.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Interfaces -import { IProxy } from "src/universal/interfaces/IProxy.sol"; -import { IProtocolVersions, ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; +import { IProtocolVersions, ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol"; contract ProtocolVersions_Init is CommonTest { event ConfigUpdate(uint256 indexed version, IProtocolVersions.UpdateType indexed updateType, bytes data); diff --git a/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol 
b/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol index 18f5ba82283f..d49aa2337bbf 100644 --- a/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol +++ b/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol @@ -11,7 +11,7 @@ import { ResourceMetering } from "src/L1/ResourceMetering.sol"; import { Constants } from "src/libraries/Constants.sol"; // Interfaces -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; contract MeterUser is ResourceMetering { ResourceMetering.ResourceConfig public innerConfig; @@ -201,20 +201,21 @@ contract ResourceMetering_Test is Test { function testFuzz_meter_largeBlockDiff_succeeds(uint64 _amount, uint256 _blockDiff) external { // This test fails if the following line is commented out. // At 12 seconds per block, this number is effectively unreachable. - vm.assume(_blockDiff < 433576281058164217753225238677900874458691); + _blockDiff = uint256(bound(_blockDiff, 0, 433576281058164217753225238677900874458690)); ResourceMetering.ResourceConfig memory rcfg = meter.resourceConfig(); uint64 target = uint64(rcfg.maxResourceLimit) / uint64(rcfg.elasticityMultiplier); uint64 elasticityMultiplier = uint64(rcfg.elasticityMultiplier); - vm.assume(_amount < target * elasticityMultiplier); + _amount = uint64(bound(_amount, 0, target * elasticityMultiplier)); + vm.roll(initialBlockNum + _blockDiff); meter.use(_amount); } function testFuzz_meter_useGas_succeeds(uint64 _amount) external { (, uint64 prevBoughtGas,) = meter.params(); - vm.assume(prevBoughtGas + _amount <= meter.resourceConfig().maxResourceLimit); + _amount = uint64(bound(_amount, 0, meter.resourceConfig().maxResourceLimit - prevBoughtGas)); meter.use(_amount); diff --git a/packages/contracts-bedrock/test/L1/SuperchainConfig.t.sol b/packages/contracts-bedrock/test/L1/SuperchainConfig.t.sol index 2772ec0c2a3c..ed51e019acaf 100644 --- a/packages/contracts-bedrock/test/L1/SuperchainConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SuperchainConfig.t.sol @@ -4,10 +4,10 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; // Target contract dependencies -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; // Target contract -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; diff --git a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol index a6311c02b1e4..f7cea088bcf4 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol @@ -13,9 +13,9 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { GasPayingToken } from "src/libraries/GasPayingToken.sol"; // Interfaces -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; contract SystemConfig_Init is CommonTest { event ConfigUpdate(uint256 indexed version, ISystemConfig.UpdateType indexed updateType, bytes 
data); @@ -255,6 +255,19 @@ contract SystemConfig_Init_ResourceConfig is SystemConfig_Init { _initializeWithResourceConfig(config, "SystemConfig: gas limit too low"); } + /// @dev Tests that `setResourceConfig` reverts if the elasticity multiplier is 0. + function test_setResourceConfig_elasticityMultiplierIs0_reverts() external { + IResourceMetering.ResourceConfig memory config = IResourceMetering.ResourceConfig({ + maxResourceLimit: 20_000_000, + elasticityMultiplier: 0, + baseFeeMaxChangeDenominator: 8, + systemTxMaxGas: 1_000_000, + minimumBaseFee: 1 gwei, + maximumBaseFee: 2 gwei + }); + _initializeWithResourceConfig(config, "SystemConfig: elasticity multiplier cannot be 0"); + } + /// @dev Tests that `setResourceConfig` reverts if the elasticity multiplier /// and max resource limit are configured such that there is a loss of precision. function test_setResourceConfig_badPrecision_reverts() external { @@ -372,12 +385,21 @@ contract SystemConfig_Init_CustomGasToken is SystemConfig_Init { // don't use multicall3's address vm.assume(_token != MULTICALL3_ADDRESS); - vm.assume(bytes(_name).length <= 32); - vm.assume(bytes(_symbol).length <= 32); + // Using vm.assume() would cause too many test rejections. + string memory name = _name; + if (bytes(_name).length > 32) { + name = _name[:32]; + } + + // Using vm.assume() would cause too many test rejections. + string memory symbol = _symbol; + if (bytes(_symbol).length > 32) { + symbol = _symbol[:32]; + } vm.mockCall(_token, abi.encodeCall(token.decimals, ()), abi.encode(18)); - vm.mockCall(_token, abi.encodeCall(token.name, ()), abi.encode(_name)); - vm.mockCall(_token, abi.encodeCall(token.symbol, ()), abi.encode(_symbol)); + vm.mockCall(_token, abi.encodeCall(token.name, ()), abi.encode(name)); + vm.mockCall(_token, abi.encodeCall(token.symbol, ()), abi.encode(symbol)); cleanStorageAndInit(_token); @@ -390,8 +412,8 @@ contract SystemConfig_Init_CustomGasToken is SystemConfig_Init { assertEq(systemConfig.gasPayingTokenSymbol(), "ETH"); } else { assertEq(addr, _token); - assertEq(systemConfig.gasPayingTokenName(), _name); - assertEq(systemConfig.gasPayingTokenSymbol(), _symbol); + assertEq(systemConfig.gasPayingTokenName(), name); + assertEq(systemConfig.gasPayingTokenSymbol(), symbol); } } @@ -542,7 +564,7 @@ contract SystemConfig_Setters_TestFail is SystemConfig_Init { /// @dev Tests that `setEIP1559Params` reverts if the elasticity is zero.
function test_setEIP1559Params_zeroElasticity_reverts(uint32 _denominator) external { - vm.assume(_denominator >= 1); + _denominator = uint32(bound(_denominator, 1, type(uint32).max)); vm.prank(systemConfig.owner()); vm.expectRevert("SystemConfig: elasticity must be >= 1"); systemConfig.setEIP1559Params({ _denominator: _denominator, _elasticity: 0 }); diff --git a/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol b/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol index 426dba30c72a..8fd6daeed846 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol @@ -6,7 +6,6 @@ import { CommonTest } from "test/setup/CommonTest.sol"; // Contracts import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; -import { ConfigType } from "src/L2/L1BlockInterop.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; @@ -14,9 +13,10 @@ import { StaticConfig } from "src/libraries/StaticConfig.sol"; import { GasPayingToken } from "src/libraries/GasPayingToken.sol"; // Interfaces -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { ISystemConfigInterop } from "src/L1/interfaces/ISystemConfigInterop.sol"; -import { IOptimismPortalInterop } from "src/L1/interfaces/IOptimismPortalInterop.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { ISystemConfigInterop } from "interfaces/L1/ISystemConfigInterop.sol"; +import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; +import { ConfigType } from "interfaces/L2/IL1BlockInterop.sol"; contract SystemConfigInterop_Test is CommonTest { /// @notice Marked virtual to be overridden in @@ -26,6 +26,19 @@ contract SystemConfigInterop_Test is CommonTest { super.setUp(); } + /// @dev Tests that when the decimals is not 18, initialization reverts. + function test_initialize_decimalsIsNot18_reverts(uint8 decimals) external { + vm.assume(decimals != 18); + address _token = address(L1Token); + + vm.mockCall(_token, abi.encodeCall(ERC20.name, ()), abi.encode("Token")); + vm.mockCall(_token, abi.encodeCall(ERC20.symbol, ()), abi.encode("TKN")); + vm.mockCall(_token, abi.encodeCall(ERC20.decimals, ()), abi.encode(decimals)); + + vm.expectRevert("SystemConfig: bad decimals of gas paying token"); + _cleanStorageAndInit(_token); + } + /// @dev Tests that the gas paying token can be set. function testFuzz_setGasPayingToken_succeeds( address _token, @@ -38,12 +51,21 @@ contract SystemConfigInterop_Test is CommonTest { vm.assume(_token != address(0)); vm.assume(_token != Constants.ETHER); - vm.assume(bytes(_name).length <= 32); - vm.assume(bytes(_symbol).length <= 32); + // Using vm.assume() would cause too many test rejections. + string memory name = _name; + if (bytes(_name).length > 32) { + name = _name[:32]; + } + + // Using vm.assume() would cause too many test rejections. 
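+        // Editorial note: truncating to 32 bytes (instead of rejecting the run with vm.assume) keeps the fuzzed string within the bytes32 width that `GasPayingToken.sanitize()` below works with.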
+ string memory symbol = _symbol; + if (bytes(_symbol).length > 32) { + symbol = _symbol[:32]; + } vm.mockCall(_token, abi.encodeCall(ERC20.decimals, ()), abi.encode(18)); - vm.mockCall(_token, abi.encodeCall(ERC20.name, ()), abi.encode(_name)); - vm.mockCall(_token, abi.encodeCall(ERC20.symbol, ()), abi.encode(_symbol)); + vm.mockCall(_token, abi.encodeCall(ERC20.name, ()), abi.encode(name)); + vm.mockCall(_token, abi.encodeCall(ERC20.symbol, ()), abi.encode(symbol)); vm.expectCall( address(optimismPortal), @@ -54,8 +76,8 @@ StaticConfig.encodeSetGasPayingToken({ _token: _token, _decimals: 18, - _name: GasPayingToken.sanitize(_name), - _symbol: GasPayingToken.sanitize(_symbol) + _name: GasPayingToken.sanitize(name), + _symbol: GasPayingToken.sanitize(symbol) }) ) ) diff --git a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol index 25b6e711c1ec..100019034df6 100644 --- a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol +++ b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol @@ -20,7 +20,7 @@ import { NotDepositor, InteropStartAlreadySet } from "src/L2/CrossL2Inbox.sol"; -import { IL1BlockInterop } from "src/L2/interfaces/IL1BlockInterop.sol"; +import { IL1BlockInterop } from "interfaces/L2/IL1BlockInterop.sol"; /// @title CrossL2InboxWithModifiableTransientStorage /// @dev CrossL2Inbox contract with methods to modify the transient storage. diff --git a/packages/contracts-bedrock/test/L2/GasPriceOracle.t.sol b/packages/contracts-bedrock/test/L2/GasPriceOracle.t.sol index 0a1d97c9351e..f013325ef7e7 100644 --- a/packages/contracts-bedrock/test/L2/GasPriceOracle.t.sol +++ b/packages/contracts-bedrock/test/L2/GasPriceOracle.t.sol @@ -117,6 +117,26 @@ contract GasPriceOracleBedrock_Test is GasPriceOracle_Test { vm.expectRevert("GasPriceOracle: Fjord can only be activated after Ecotone"); gasPriceOracle.setFjord(); } + + /// @dev Tests that `getL1Fee` returns the expected value when both fjord and ecotone are not active + function test_getL1Fee_whenFjordAndEcotoneNotActive_succeeds() external { + vm.store(address(gasPriceOracle), bytes32(uint256(0)), bytes32(0)); + bytes memory data = hex"1111"; + + uint256 price = gasPriceOracle.getL1Fee(data); + assertEq(price, 28_600); // ((((16 * data.length(i.e 2)) + (68 * 16)) + l1FeeOverhead(i.e. 310)) * + // l1BaseFee(i.e. 2M) * + // l1FeeScalar(i.e.
10)) / 1e6 + } + + /// @dev Tests that `getL1GasUsed` returns the expected value when both fjord and ecotone are not active + function test_getL1GasUsed_whenFjordAndEcotoneNotActive_succeeds() external { + vm.store(address(gasPriceOracle), bytes32(uint256(0)), bytes32(0)); + bytes memory data = hex"1111"; + + uint256 gas = gasPriceOracle.getL1GasUsed(data); + assertEq(gas, 1_430); // 1398 + (16 * data.length(i.e 2)) + } } contract GasPriceOracleEcotone_Test is GasPriceOracle_Test { diff --git a/packages/contracts-bedrock/test/L2/L1Block.t.sol b/packages/contracts-bedrock/test/L2/L1Block.t.sol index 762553a2ff2f..d3e3b7d02e49 100644 --- a/packages/contracts-bedrock/test/L2/L1Block.t.sol +++ b/packages/contracts-bedrock/test/L2/L1Block.t.sol @@ -169,31 +169,40 @@ contract L1BlockCustomGasToken_Test is L1BlockTest { function testFuzz_setGasPayingToken_succeeds( address _token, uint8 _decimals, - string memory _name, - string memory _symbol + string calldata _name, + string calldata _symbol ) external { vm.assume(_token != address(0)); vm.assume(_token != Constants.ETHER); - vm.assume(bytes(_name).length <= 32); - vm.assume(bytes(_symbol).length <= 32); - bytes32 name = bytes32(abi.encodePacked(_name)); - bytes32 symbol = bytes32(abi.encodePacked(_symbol)); + // Using vm.assume() would cause too many test rejections. + string memory name = _name; + if (bytes(_name).length > 32) { + name = _name[:32]; + } + bytes32 b32name = bytes32(abi.encodePacked(name)); + + // Using vm.assume() would cause too many test rejections. + string memory symbol = _symbol; + if (bytes(_symbol).length > 32) { + symbol = _symbol[:32]; + } + bytes32 b32symbol = bytes32(abi.encodePacked(symbol)); vm.expectEmit(address(l1Block)); - emit GasPayingTokenSet({ token: _token, decimals: _decimals, name: name, symbol: symbol }); + emit GasPayingTokenSet({ token: _token, decimals: _decimals, name: b32name, symbol: b32symbol }); vm.prank(depositor); - l1Block.setGasPayingToken({ _token: _token, _decimals: _decimals, _name: name, _symbol: symbol }); + l1Block.setGasPayingToken({ _token: _token, _decimals: _decimals, _name: b32name, _symbol: b32symbol }); (address token, uint8 decimals) = l1Block.gasPayingToken(); assertEq(token, _token); assertEq(decimals, _decimals); - assertEq(_name, l1Block.gasPayingTokenName()); - assertEq(_symbol, l1Block.gasPayingTokenSymbol()); + assertEq(name, l1Block.gasPayingTokenName()); + assertEq(symbol, l1Block.gasPayingTokenSymbol()); assertTrue(l1Block.isCustomGasToken()); } diff --git a/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol b/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol index c8779342c3bf..a5e086c86d58 100644 --- a/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol +++ b/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol @@ -1,17 +1,17 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities +// Testing import { CommonTest } from "test/setup/CommonTest.sol"; // Libraries import { StaticConfig } from "src/libraries/StaticConfig.sol"; - -// Target contract dependencies -import { L1BlockInterop, ConfigType } from "src/L2/L1BlockInterop.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import "src/libraries/L1BlockErrors.sol"; +// Interfaces +import { IL1BlockInterop, ConfigType } from "interfaces/L2/IL1BlockInterop.sol"; + contract L1BlockInteropTest is CommonTest { event GasPayingTokenSet(address indexed token, uint8 indexed decimals, bytes32 name, bytes32 symbol); event DependencyAdded(uint256 indexed chainId); @@ -199,8 
+199,8 @@ contract L1BlockInteropTest is CommonTest { } /// @dev Returns the L1BlockInterop instance. - function _l1BlockInterop() internal view returns (L1BlockInterop) { - return L1BlockInterop(address(l1Block)); + function _l1BlockInterop() internal view returns (IL1BlockInterop) { + return IL1BlockInterop(address(l1Block)); } } @@ -261,7 +261,7 @@ contract L1BlockInteropSetL1BlockValuesInterop_Test is L1BlockInteropTest { vm.prank(_l1BlockInterop().DEPOSITOR_ACCOUNT()); (bool success,) = address(l1Block).call( - abi.encodePacked(L1BlockInterop.setL1BlockValuesInterop.selector, setValuesEcotoneCalldata) + abi.encodePacked(IL1BlockInterop.setL1BlockValuesInterop.selector, setValuesEcotoneCalldata) ); assertTrue(success, "function call failed"); diff --git a/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol index 131851783c79..33b8e0bfb881 100644 --- a/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol @@ -5,6 +5,7 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; import { Reverter } from "test/mocks/Callers.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; +import { stdError } from "forge-std/StdError.sol"; // Libraries import { Hashing } from "src/libraries/Hashing.sol"; @@ -13,8 +14,8 @@ import { Types } from "src/libraries/Types.sol"; import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; // Interfaces -import { IL2CrossDomainMessenger } from "src/L2/interfaces/IL2CrossDomainMessenger.sol"; -import { IL2ToL1MessagePasser } from "src/L2/interfaces/IL2ToL1MessagePasser.sol"; +import { IL2CrossDomainMessenger } from "interfaces/L2/IL2CrossDomainMessenger.sol"; +import { IL2ToL1MessagePasser } from "interfaces/L2/IL2ToL1MessagePasser.sol"; contract L2CrossDomainMessenger_Test is CommonTest { /// @dev Receiver address for testing @@ -148,17 +149,103 @@ contract L2CrossDomainMessenger_Test is CommonTest { assertEq(l2CrossDomainMessenger.failedMessages(hash), false); } - /// @dev Tests that `relayMessage` reverts if attempting to relay - /// a message sent to an L1 system contract. 
- function test_relayMessage_toSystemContract_reverts() external { - address target = address(l2ToL1MessagePasser); + /// @dev Tests that relayMessage reverts if the value sent does not match the amount + function test_relayMessage_fromOtherMessengerValueMismatch_reverts() external { + // set the target to be alice + address target = alice; address sender = address(l1CrossDomainMessenger); address caller = AddressAliasHelper.applyL1ToL2Alias(address(l1CrossDomainMessenger)); bytes memory message = hex"1111"; + // cannot send a message where the amount inputted does not match the msg.value + vm.deal(caller, 10 ether); vm.prank(caller); + vm.expectRevert(stdError.assertionError); + l2CrossDomainMessenger.relayMessage{ value: 10 ether }( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 9 ether, 0, message + ); + } + + /// @dev Tests that relayMessage reverts if a failed message is attempted to be replayed and the caller is the other + /// messenger + function test_relayMessage_fromOtherMessengerFailedMessageReplay_reverts() external { + // set the target to be alice + address target = alice; + address sender = address(l1CrossDomainMessenger); + address caller = AddressAliasHelper.applyL1ToL2Alias(address(l1CrossDomainMessenger)); + bytes memory message = hex"1111"; + + // make a failed message + vm.etch(target, hex"fe"); + vm.prank(caller); + l2CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 0, 0, message + ); + + // cannot replay messages when the caller is the other messenger + vm.prank(caller); + vm.expectRevert(stdError.assertionError); + l2CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 0, 0, message + ); + } + + /// @dev Tests that relayMessage reverts if attempting to relay a message + /// sent to self + function test_relayMessage_toSelf_reverts() external { + address sender = address(l1CrossDomainMessenger); + address caller = AddressAliasHelper.applyL1ToL2Alias(address(l1CrossDomainMessenger)); + bytes memory message = hex"1111"; + + vm.store(address(optimismPortal), bytes32(0), bytes32(abi.encode(sender))); + + vm.prank(caller); + vm.expectRevert("CrossDomainMessenger: cannot send message to blocked system address"); + l2CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), + sender, + address(l2CrossDomainMessenger), + 0, + 0, + message + ); + } + + /// @dev Tests that relayMessage reverts if attempting to relay a message + /// sent to the l2ToL1MessagePasser address + function test_relayMessage_toL2ToL1MessagePasser_reverts() external { + address sender = address(l1CrossDomainMessenger); + address caller = AddressAliasHelper.applyL1ToL2Alias(address(l1CrossDomainMessenger)); + bytes memory message = hex"1111"; + + vm.store(address(optimismPortal), bytes32(0), bytes32(abi.encode(sender))); + + vm.prank(caller); + vm.expectRevert("CrossDomainMessenger: cannot send message to blocked system address"); + l2CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), + sender, + address(l2ToL1MessagePasser), + 0, + 0, + message + ); + } + + /// @dev Tests that the relayMessage function reverts if the message called by non-optimismPortal but not a failed + /// message + function test_relayMessage_relayingNewMessageByExternalUser_reverts() external { + address target = address(alice); + address sender = address(l1CrossDomainMessenger); + bytes memory message 
= hex"1111"; + + vm.store(address(optimismPortal), bytes32(0), bytes32(abi.encode(sender))); + + vm.prank(bob); vm.expectRevert("CrossDomainMessenger: message cannot be replayed"); - l1CrossDomainMessenger.relayMessage(Encoding.encodeVersionedNonce(0, 1), sender, target, 0, 0, message); + l2CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 0, 0, message + ); } /// @dev Tests that `relayMessage` correctly resets the `xDomainMessageSender` diff --git a/packages/contracts-bedrock/test/L2/L2ERC721Bridge.t.sol b/packages/contracts-bedrock/test/L2/L2ERC721Bridge.t.sol index 5e7798824c8d..1fbb2cae0b88 100644 --- a/packages/contracts-bedrock/test/L2/L2ERC721Bridge.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ERC721Bridge.t.sol @@ -9,8 +9,8 @@ import { ERC721 } from "@openzeppelin/contracts/token/ERC721/ERC721.sol"; import { OptimismMintableERC721 } from "src/universal/OptimismMintableERC721.sol"; // Interfaces -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IL2ERC721Bridge } from "src/L2/interfaces/IL2ERC721Bridge.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IL2ERC721Bridge } from "interfaces/L2/IL2ERC721Bridge.sol"; contract TestERC721 is ERC721 { constructor() ERC721("Test", "TST") { } @@ -227,6 +227,14 @@ contract L2ERC721Bridge_Test is CommonTest { assertEq(localToken.ownerOf(tokenId), alice); } + /// @dev Tests that `bridgeERC721To` reverts if the to address is the zero address. + function test_bridgeERC721To_toZeroAddress_reverts() external { + // Bridge the token. + vm.prank(bob); + vm.expectRevert("ERC721Bridge: nft recipient cannot be address(0)"); + l2ERC721Bridge.bridgeERC721To(address(localToken), address(remoteToken), address(0), tokenId, 1234, hex"5678"); + } + /// @dev Tests that `finalizeBridgeERC721` correctly finalizes a bridged token. function test_finalizeBridgeERC721_succeeds() external { // Bridge the token. 
diff --git a/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol b/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol index 86100b6e4d40..f7b61083e249 100644 --- a/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol +++ b/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol @@ -16,10 +16,10 @@ import { Hashing } from "src/libraries/Hashing.sol"; import { Types } from "src/libraries/Types.sol"; // Interfaces -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { IStandardBridge } from "src/universal/interfaces/IStandardBridge.sol"; -import { IL2ToL1MessagePasser } from "src/L2/interfaces/IL2ToL1MessagePasser.sol"; -import { IL2StandardBridge } from "src/L2/interfaces/IL2StandardBridge.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { IStandardBridge } from "interfaces/universal/IStandardBridge.sol"; +import { IL2ToL1MessagePasser } from "interfaces/L2/IL2ToL1MessagePasser.sol"; +import { IL2StandardBridge } from "interfaces/L2/IL2StandardBridge.sol"; contract L2StandardBridge_Test is CommonTest { using stdStorage for StdStorage; @@ -34,6 +34,7 @@ contract L2StandardBridge_Test is CommonTest { assertEq(address(impl.messenger()), Predeploys.L2_CROSS_DOMAIN_MESSENGER, "constructor zero check messenger"); assertEq(address(impl.OTHER_BRIDGE()), address(0), "constructor zero check OTHER_BRIDGE"); assertEq(address(impl.otherBridge()), address(0), "constructor zero check otherBridge"); + assertEq(address(impl.l1TokenBridge()), address(0), "constructor zero check l1TokenBridge"); } /// @dev Tests that the bridge is initialized correctly. @@ -378,6 +379,12 @@ contract L2StandardBridge_BridgeERC20_Test is PreBridgeERC20 { assertEq(L2Token.balanceOf(alice), 0); } + function test_bridgeERC20_isNotCorrectTokenPair_reverts() external { + vm.expectRevert("StandardBridge: wrong remote token for Optimism Mintable ERC20 local token"); + vm.prank(alice, alice); + l2StandardBridge.bridgeERC20(address(L2Token), address(BadL1Token), 100, 1000, hex""); + } + function test_withdrawLegacyERC20_succeeds() external { _preBridgeERC20({ _isLegacy: true, _l2Token: address(LegacyL2Token) }); l2StandardBridge.withdraw(address(LegacyL2Token), 100, 1000, hex""); @@ -659,3 +666,43 @@ contract L2StandardBridge_FinalizeBridgeETH_Test is CommonTest { l2StandardBridge.finalizeBridgeETH(alice, alice, 1, hex""); } } + +contract L2StandardBridge_FinalizeBridgeERC20_Test is CommonTest { + /// @dev Tests that `finalizeBridgeERC20` succeeds. 
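+    /// The messenger is pranked and `xDomainMessageSender` is mocked to return the other bridge, so the bridge's cross-domain sender check passes (editorial summary of the setup below).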
+ function test_finalizeBridgeERC20_succeeds() external { + address messenger = address(l2StandardBridge.messenger()); + address localToken = address(L2Token); + address remoteToken = address(L1Token); + vm.mockCall( + messenger, + abi.encodeCall(ICrossDomainMessenger.xDomainMessageSender, ()), + abi.encode(address(l2StandardBridge.OTHER_BRIDGE())) + ); + deal(localToken, messenger, 100, true); + vm.prank(messenger); + + vm.expectEmit(true, true, true, true); + emit DepositFinalized(remoteToken, localToken, alice, alice, 100, hex""); + + vm.expectEmit(true, true, true, true); + emit ERC20BridgeFinalized(localToken, remoteToken, alice, alice, 100, hex""); + + l2StandardBridge.finalizeBridgeERC20(localToken, remoteToken, alice, alice, 100, hex""); + } + + function test_finalizeBridgeERC20_isNotCorrectTokenPair_reverts() external { + address messenger = address(l2StandardBridge.messenger()); + address localToken = address(L2Token); + address remoteToken = address(BadL1Token); + vm.mockCall( + messenger, + abi.encodeCall(ICrossDomainMessenger.xDomainMessageSender, ()), + abi.encode(address(l2StandardBridge.OTHER_BRIDGE())) + ); + deal(localToken, messenger, 100, true); + vm.prank(messenger); + + vm.expectRevert("StandardBridge: wrong remote token for Optimism Mintable ERC20 local token"); + l2StandardBridge.finalizeBridgeERC20(localToken, remoteToken, alice, alice, 100, hex""); + } +} diff --git a/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol b/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol index 30212d7ad622..a57c38a644d7 100644 --- a/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol +++ b/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol @@ -5,13 +5,13 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; // Interfaces -import { IMintableAndBurnableERC20 } from "src/L2/interfaces/IMintableAndBurnableERC20.sol"; -import { IL2StandardBridgeInterop } from "src/L2/interfaces/IL2StandardBridgeInterop.sol"; +import { IMintableAndBurnableERC20 } from "interfaces/L2/IMintableAndBurnableERC20.sol"; +import { IL2StandardBridgeInterop } from "interfaces/L2/IL2StandardBridgeInterop.sol"; import { IERC20Metadata } from "@openzeppelin/contracts/token/ERC20/extensions/IERC20Metadata.sol"; import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; -import { IOptimismMintableERC20 } from "src/universal/interfaces/IOptimismMintableERC20.sol"; +import { IOptimismMintableERC20 } from "interfaces/universal/IOptimismMintableERC20.sol"; import { ILegacyMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; -import { IOptimismERC20Factory } from "src/L2/interfaces/IOptimismERC20Factory.sol"; +import { IOptimismERC20Factory } from "interfaces/L2/IOptimismERC20Factory.sol"; contract L2StandardBridgeInterop_Test is CommonTest { /// @notice Emitted when a conversion is made. 
diff --git a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol index 1e5e04edc25b..3b4314853690 100644 --- a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol @@ -10,7 +10,6 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { Hashing } from "src/libraries/Hashing.sol"; // Target contract -import { CrossL2Inbox, Identifier } from "src/L2/CrossL2Inbox.sol"; import { L2ToL2CrossDomainMessenger, NotEntered, @@ -22,9 +21,14 @@ import { MessageTargetL2ToL2CrossDomainMessenger, MessageAlreadyRelayed, ReentrantCall, - TargetCallFailed + TargetCallFailed, + IDependencySet, + InvalidChainId } from "src/L2/L2ToL2CrossDomainMessenger.sol"; +// Interfaces +import { ICrossL2Inbox, Identifier } from "interfaces/L2/ICrossL2Inbox.sol"; + /// @title L2ToL2CrossDomainMessengerWithModifiableTransientStorage /// @dev L2ToL2CrossDomainMessenger contract with methods to modify the transient storage. /// This is used to test the transient storage of L2ToL2CrossDomainMessenger. @@ -85,6 +89,13 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure that the target contract is not CrossL2Inbox or L2ToL2CrossDomainMessenger vm.assume(_target != Predeploys.CROSS_L2_INBOX && _target != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); + // Mock the call over the `isInDependencySet` function to return true + vm.mockCall( + Predeploys.L1_BLOCK_ATTRIBUTES, + abi.encodeCall(IDependencySet.isInDependencySet, (_destination)), + abi.encode(true) + ); + // Get the current message nonce uint256 messageNonce = l2ToL2CrossDomainMessenger.messageNonce(); @@ -193,6 +204,34 @@ contract L2ToL2CrossDomainMessengerTest is Test { }); } + /// @notice Tests the `sendMessage` function reverts when the `destination` is not in the dependency set. + function testFuzz_sendMessage_notInDependencySet_reverts( + uint256 _destination, + address _target, + bytes calldata _message + ) + external + { + // Ensure the destination is not the same as the source, otherwise the function will revert + vm.assume(_destination != block.chainid); + + // Ensure that the target contract is not CrossL2Inbox or L2ToL2CrossDomainMessenger + vm.assume(_target != Predeploys.CROSS_L2_INBOX && _target != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); + + // Mock the call over the `isInDependencySet` function to return false + vm.mockCall( + Predeploys.L1_BLOCK_ATTRIBUTES, + abi.encodeCall(IDependencySet.isInDependencySet, (_destination)), + abi.encode(false) + ); + + // Expect a revert with the InvalidChainId selector + vm.expectRevert(InvalidChainId.selector); + + // Call `sendMessage` with a destination that is not in the dependency set to provoke revert + l2ToL2CrossDomainMessenger.sendMessage(_destination, _target, _message); + } + /// @dev Tests that the `relayMessage` function succeeds and emits the correct RelayedMessage event. 
function testFuzz_relayMessage_succeeds( uint256 _source, @@ -227,7 +266,7 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeCall(CrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), + data: abi.encodeCall(ICrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), returnData: "" }); @@ -271,7 +310,7 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeCall(CrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), + data: abi.encodeCall(ICrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), returnData: "" }); @@ -334,7 +373,7 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeCall(CrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), + data: abi.encodeCall(ICrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), returnData: "" }); @@ -415,7 +454,7 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeCall(CrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), + data: abi.encodeCall(ICrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), returnData: "" }); @@ -497,7 +536,7 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeCall(CrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), + data: abi.encodeCall(ICrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), returnData: "" }); @@ -536,7 +575,7 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeCall(CrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), + data: abi.encodeCall(ICrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), returnData: "" }); @@ -578,7 +617,7 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeCall(CrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), + data: abi.encodeCall(ICrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), returnData: "" }); @@ -628,7 +667,7 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeCall(CrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), + data: abi.encodeCall(ICrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), returnData: "" }); @@ -678,7 +717,7 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeCall(CrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), + data: abi.encodeCall(ICrossL2Inbox.validateMessage, (id, keccak256(sentMessage))), returnData: "" }); diff --git a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol index d5b999a922e5..80ee2e620228 100644 --- a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol +++ 
b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol @@ -11,14 +11,14 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { Initializable } from "@openzeppelin/contracts-v5/proxy/utils/Initializable.sol"; import { IERC165 } from "@openzeppelin/contracts-v5/utils/introspection/IERC165.sol"; import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import { IERC7802 } from "src/L2/interfaces/IERC7802.sol"; +import { IERC7802 } from "interfaces/L2/IERC7802.sol"; import { IBeacon } from "@openzeppelin/contracts-v5/proxy/beacon/IBeacon.sol"; import { BeaconProxy } from "@openzeppelin/contracts-v5/proxy/beacon/BeaconProxy.sol"; import { Unauthorized } from "src/libraries/errors/CommonErrors.sol"; import { Preinstalls } from "src/libraries/Preinstalls.sol"; // Target contract -import { IOptimismSuperchainERC20 } from "src/L2/interfaces/IOptimismSuperchainERC20.sol"; +import { IOptimismSuperchainERC20 } from "interfaces/L2/IOptimismSuperchainERC20.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; /// @title OptimismSuperchainERC20Test diff --git a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Factory.t.sol b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Factory.t.sol index d8d7f86f26a2..a2f7125fc218 100644 --- a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Factory.t.sol +++ b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Factory.t.sol @@ -8,7 +8,7 @@ import { CommonTest } from "test/setup/CommonTest.sol"; import { CREATE3, Bytes32AddressLib } from "@rari-capital/solmate/src/utils/CREATE3.sol"; // Target contract -import { IOptimismSuperchainERC20 } from "src/L2/interfaces/IOptimismSuperchainERC20.sol"; +import { IOptimismSuperchainERC20 } from "interfaces/L2/IOptimismSuperchainERC20.sol"; import { IERC20Metadata } from "@openzeppelin/contracts/interfaces/IERC20Metadata.sol"; /// @title OptimismSuperchainERC20FactoryTest diff --git a/packages/contracts-bedrock/test/L2/Preinstalls.t.sol b/packages/contracts-bedrock/test/L2/Preinstalls.t.sol index 5eec3f811dec..d0ead497834a 100644 --- a/packages/contracts-bedrock/test/L2/Preinstalls.t.sol +++ b/packages/contracts-bedrock/test/L2/Preinstalls.t.sol @@ -4,7 +4,7 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; import { Preinstalls } from "src/libraries/Preinstalls.sol"; import { Bytes } from "src/libraries/Bytes.sol"; -import { IEIP712 } from "src/universal/interfaces/IEIP712.sol"; +import { IEIP712 } from "interfaces/universal/IEIP712.sol"; /// @title PreinstallsTest contract PreinstallsTest is CommonTest { diff --git a/packages/contracts-bedrock/test/L2/SequencerFeeVault.t.sol b/packages/contracts-bedrock/test/L2/SequencerFeeVault.t.sol index ca8c3806b38b..dc27fbbad9c9 100644 --- a/packages/contracts-bedrock/test/L2/SequencerFeeVault.t.sol +++ b/packages/contracts-bedrock/test/L2/SequencerFeeVault.t.sol @@ -7,7 +7,7 @@ import { Reverter } from "test/mocks/Callers.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Contracts -import { ISequencerFeeVault } from "src/L2/interfaces/ISequencerFeeVault.sol"; +import { ISequencerFeeVault } from "interfaces/L2/ISequencerFeeVault.sol"; // Libraries import { Hashing } from "src/libraries/Hashing.sol"; diff --git a/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol b/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol index 87f723a9345b..788792a4defd 100644 --- a/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol +++ 
b/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol @@ -9,9 +9,9 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; // Target contract import { SuperchainERC20 } from "src/L2/SuperchainERC20.sol"; -import { IERC7802, IERC165 } from "src/L2/interfaces/IERC7802.sol"; +import { IERC7802, IERC165 } from "interfaces/L2/IERC7802.sol"; import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import { ISuperchainERC20 } from "src/L2/interfaces/ISuperchainERC20.sol"; +import { ISuperchainERC20 } from "interfaces/L2/ISuperchainERC20.sol"; import { MockSuperchainERC20Implementation } from "test/mocks/SuperchainERC20Implementation.sol"; /// @title SuperchainERC20Test @@ -62,7 +62,7 @@ contract SuperchainERC20Test is Test { // Look for the emit of the `CrosschainMint` event vm.expectEmit(address(superchainERC20)); - emit IERC7802.CrosschainMint(_to, _amount); + emit IERC7802.CrosschainMint(_to, _amount, SUPERCHAIN_TOKEN_BRIDGE); // Call the `mint` function with the bridge caller vm.prank(SUPERCHAIN_TOKEN_BRIDGE); @@ -105,7 +105,7 @@ contract SuperchainERC20Test is Test { // Look for the emit of the `CrosschainBurn` event vm.expectEmit(address(superchainERC20)); - emit IERC7802.CrosschainBurn(_from, _amount); + emit IERC7802.CrosschainBurn(_from, _amount, SUPERCHAIN_TOKEN_BRIDGE); // Call the `burn` function with the bridge caller vm.prank(SUPERCHAIN_TOKEN_BRIDGE); diff --git a/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol b/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol index 3c39e8b1792c..2a63961ce414 100644 --- a/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol +++ b/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol @@ -6,14 +6,14 @@ import { CommonTest } from "test/setup/CommonTest.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; -import { IL2ToL2CrossDomainMessenger } from "src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol"; +import { IL2ToL2CrossDomainMessenger } from "interfaces/L2/IL2ToL2CrossDomainMessenger.sol"; // Target contract -import { ISuperchainTokenBridge } from "src/L2/interfaces/ISuperchainTokenBridge.sol"; -import { ISuperchainERC20 } from "src/L2/interfaces/ISuperchainERC20.sol"; -import { IOptimismSuperchainERC20Factory } from "src/L2/interfaces/IOptimismSuperchainERC20Factory.sol"; +import { ISuperchainTokenBridge } from "interfaces/L2/ISuperchainTokenBridge.sol"; +import { ISuperchainERC20 } from "interfaces/L2/ISuperchainERC20.sol"; +import { IOptimismSuperchainERC20Factory } from "interfaces/L2/IOptimismSuperchainERC20Factory.sol"; import { IERC20 } from "@openzeppelin/contracts/interfaces/IERC20.sol"; -import { IERC7802 } from "src/L2/interfaces/IERC7802.sol"; +import { IERC7802 } from "interfaces/L2/IERC7802.sol"; /// @title SuperchainTokenBridgeTest /// @notice Contract for testing the SuperchainTokenBridge contract. 
diff --git a/packages/contracts-bedrock/test/L2/SuperchainWETH.t.sol b/packages/contracts-bedrock/test/L2/SuperchainWETH.t.sol index e342a53b6026..bc59c76c116f 100644 --- a/packages/contracts-bedrock/test/L2/SuperchainWETH.t.sol +++ b/packages/contracts-bedrock/test/L2/SuperchainWETH.t.sol @@ -6,14 +6,15 @@ import { CommonTest } from "test/setup/CommonTest.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; -import { NotCustomGasToken } from "src/libraries/errors/CommonErrors.sol"; +import { NotCustomGasToken, Unauthorized, ZeroAddress } from "src/libraries/errors/CommonErrors.sol"; import { Preinstalls } from "src/libraries/Preinstalls.sol"; // Interfaces -import { IETHLiquidity } from "src/L2/interfaces/IETHLiquidity.sol"; -import { ISuperchainWETH } from "src/L2/interfaces/ISuperchainWETH.sol"; -import { IERC7802, IERC165 } from "src/L2/interfaces/IERC7802.sol"; +import { IETHLiquidity } from "interfaces/L2/IETHLiquidity.sol"; +import { ISuperchainWETH } from "interfaces/L2/ISuperchainWETH.sol"; +import { IERC7802, IERC165 } from "interfaces/L2/IERC7802.sol"; import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import { IL2ToL2CrossDomainMessenger } from "interfaces/L2/IL2ToL2CrossDomainMessenger.sol"; /// @title SuperchainWETH_Test /// @notice Contract for testing the SuperchainWETH contract. @@ -28,10 +29,14 @@ contract SuperchainWETH_Test is CommonTest { event Withdrawal(address indexed src, uint256 wad); /// @notice Emitted when a crosschain transfer mints tokens. - event CrosschainMint(address indexed to, uint256 amount); + event CrosschainMint(address indexed to, uint256 amount, address indexed sender); /// @notice Emitted when a crosschain transfer burns tokens. - event CrosschainBurn(address indexed from, uint256 amount); + event CrosschainBurn(address indexed from, uint256 amount, address indexed sender); + + event SendETH(address indexed from, address indexed to, uint256 amount, uint256 destination); + + event RelayETH(address indexed from, address indexed to, uint256 amount, uint256 source); address internal constant ZERO_ADDRESS = address(0); @@ -162,7 +167,7 @@ contract SuperchainWETH_Test is CommonTest { // Look for the emit of the `CrosschainMint` event vm.expectEmit(address(superchainWeth)); - emit CrosschainMint(_to, _amount); + emit CrosschainMint(_to, _amount, Predeploys.SUPERCHAIN_TOKEN_BRIDGE); // Mock the `isCustomGasToken` function to return false _mockAndExpect(address(l1Block), abi.encodeCall(l1Block.isCustomGasToken, ()), abi.encode(false)); @@ -195,7 +200,7 @@ contract SuperchainWETH_Test is CommonTest { // Look for the emit of the `CrosschainMint` event vm.expectEmit(address(superchainWeth)); - emit CrosschainMint(_to, _amount); + emit CrosschainMint(_to, _amount, Predeploys.SUPERCHAIN_TOKEN_BRIDGE); // Mock the `isCustomGasToken` function to return false _mockAndExpect(address(l1Block), abi.encodeCall(l1Block.isCustomGasToken, ()), abi.encode(true)); @@ -248,7 +253,7 @@ contract SuperchainWETH_Test is CommonTest { // Look for the emit of the `CrosschainBurn` event vm.expectEmit(address(superchainWeth)); - emit CrosschainBurn(_from, _amount); + emit CrosschainBurn(_from, _amount, Predeploys.SUPERCHAIN_TOKEN_BRIDGE); // Mock the `isCustomGasToken` function to return false _mockAndExpect(address(l1Block), abi.encodeCall(l1Block.isCustomGasToken, ()), abi.encode(false)); @@ -290,7 +295,7 @@ contract SuperchainWETH_Test is CommonTest { // Look for the emit of the `CrosschainBurn` event 
vm.expectEmit(address(superchainWeth)); - emit CrosschainBurn(_from, _amount); + emit CrosschainBurn(_from, _amount, Predeploys.SUPERCHAIN_TOKEN_BRIDGE); // Expect to not call the `burn` function in the `ETHLiquidity` contract vm.expectCall(Predeploys.ETH_LIQUIDITY, abi.encodeCall(IETHLiquidity.burn, ()), 0); @@ -324,8 +329,8 @@ contract SuperchainWETH_Test is CommonTest { /// @notice Test that the internal mint function reverts to protect against accidentally changing the visibility. function testFuzz_calling_internalMintFunction_reverts(address _caller, address _to, uint256 _amount) public { // Arrange - bytes memory _calldata = abi.encodeWithSignature("_mint(address,uint256)", _to, _amount); // nosemgrep: - // sol-style-use-abi-encodecall + // nosemgrep: sol-style-use-abi-encodecall + bytes memory _calldata = abi.encodeWithSignature("_mint(address,uint256)", _to, _amount); vm.expectRevert(bytes("")); // Act @@ -339,8 +344,8 @@ contract SuperchainWETH_Test is CommonTest { /// @notice Test that the mint function reverts to protect against accidentally changing the visibility. function testFuzz_calling_mintFunction_reverts(address _caller, address _to, uint256 _amount) public { // Arrange - bytes memory _calldata = abi.encodeWithSignature("mint(address,uint256)", _to, _amount); // nosemgrep: - // sol-style-use-abi-encodecall + // nosemgrep: sol-style-use-abi-encodecall + bytes memory _calldata = abi.encodeWithSignature("mint(address,uint256)", _to, _amount); vm.expectRevert(bytes("")); // Act @@ -354,8 +359,8 @@ contract SuperchainWETH_Test is CommonTest { /// @notice Test that the internal burn function reverts to protect against accidentally changing the visibility. function testFuzz_calling_internalBurnFunction_reverts(address _caller, address _from, uint256 _amount) public { // Arrange - bytes memory _calldata = abi.encodeWithSignature("_burn(address,uint256)", _from, _amount); // nosemgrep: - // sol-style-use-abi-encodecall + // nosemgrep: sol-style-use-abi-encodecall + bytes memory _calldata = abi.encodeWithSignature("_burn(address,uint256)", _from, _amount); vm.expectRevert(bytes("")); // Act @@ -369,8 +374,8 @@ contract SuperchainWETH_Test is CommonTest { /// @notice Test that the burn function reverts to protect against accidentally changing the visibility. function testFuzz_calling_burnFuunction_reverts(address _caller, address _from, uint256 _amount) public { // Arrange - bytes memory _calldata = abi.encodeWithSignature("burn(address,uint256)", _from, _amount); // nosemgrep: - // sol-style-use-abi-encodecall + // nosemgrep: sol-style-use-abi-encodecall + bytes memory _calldata = abi.encodeWithSignature("burn(address,uint256)", _from, _amount); vm.expectRevert(bytes("")); // Act @@ -475,4 +480,207 @@ contract SuperchainWETH_Test is CommonTest { vm.assume(_interfaceId != type(IERC20).interfaceId); assertFalse(superchainWeth.supportsInterface(_interfaceId)); } + + /// @notice Tests the `sendETH` function reverts when the address `_to` is zero. + function testFuzz_sendETH_zeroAddressTo_reverts(address _sender, uint256 _amount, uint256 _chainId) public { + // Expect the revert with `ZeroAddress` selector + vm.expectRevert(ZeroAddress.selector); + + vm.deal(_sender, _amount); + vm.prank(_sender); + // Call the `sendETH` function with the zero address as `_to` + superchainWeth.sendETH{ value: _amount }(ZERO_ADDRESS, _chainId); + } + + /// @notice Tests the `sendETH` function burns the sender ETH, sends the message, and emits the `SendETH` + /// event. 
+ function testFuzz_sendETH_fromNonCustomGasTokenChain_succeeds( + address _sender, + address _to, + uint256 _amount, + uint256 _chainId, + bytes32 _msgHash + ) + external + { + // Assume + vm.assume(_sender != address(ethLiquidity)); + vm.assume(_sender != ZERO_ADDRESS); + vm.assume(_to != ZERO_ADDRESS); + _amount = bound(_amount, 0, type(uint248).max - 1); + + // Arrange + vm.deal(_sender, _amount); + _mockAndExpect(address(l1Block), abi.encodeCall(l1Block.isCustomGasToken, ()), abi.encode(false)); + + // Get the total balance of `_sender` before the send to compare later on the assertions + uint256 _senderBalanceBefore = _sender.balance; + + // Look for the emit of the `SendETH` event + vm.expectEmit(address(superchainWeth)); + emit SendETH(_sender, _to, _amount, _chainId); + + // Expect the call to the `burn` function in the `ETHLiquidity` contract + vm.expectCall(Predeploys.ETH_LIQUIDITY, abi.encodeCall(IETHLiquidity.burn, ()), 1); + + // Mock the call over the `sendMessage` function and expect it to be called properly + bytes memory _message = abi.encodeCall(superchainWeth.relayETH, (_sender, _to, _amount)); + _mockAndExpect( + Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, + abi.encodeCall(IL2ToL2CrossDomainMessenger.sendMessage, (_chainId, address(superchainWeth), _message)), + abi.encode(_msgHash) + ); + + // Call the `sendETH` function + vm.prank(_sender); + bytes32 _returnedMsgHash = superchainWeth.sendETH{ value: _amount }(_to, _chainId); + + // Check the message hash was generated correctly + assertEq(_msgHash, _returnedMsgHash); + + // Check the total supply and balance of `_sender` after the send were updated correctly + assertEq(_sender.balance, _senderBalanceBefore - _amount); + } + + /// @notice Tests the `sendETH` function reverts when called on a custom gas token chain. + function testFuzz_sendETH_fromCustomGasTokenChain_fails( + address _sender, + address _to, + uint256 _amount, + uint256 _chainId + ) + external + { + // Assume + vm.assume(_sender != ZERO_ADDRESS); + vm.assume(_to != ZERO_ADDRESS); + _amount = bound(_amount, 0, type(uint248).max - 1); + + // Arrange + vm.deal(_sender, _amount); + _mockAndExpect(address(l1Block), abi.encodeCall(l1Block.isCustomGasToken, ()), abi.encode(true)); + + // Call the `sendETH` function + vm.prank(_sender); + vm.expectRevert(NotCustomGasToken.selector); + superchainWeth.sendETH{ value: _amount }(_to, _chainId); + } + + /// @notice Tests the `relayETH` function reverts when the caller is not the L2ToL2CrossDomainMessenger. + function testFuzz_relayETH_notMessenger_reverts(address _caller, address _to, uint256 _amount) public { + // Ensure the caller is not the messenger + vm.assume(_caller != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); + + // Expect the revert with `Unauthorized` selector + vm.expectRevert(Unauthorized.selector); + + // Call the `relayETH` function with the non-messenger caller + vm.prank(_caller); + superchainWeth.relayETH(_caller, _to, _amount); + } + + /// @notice Tests the `relayETH` function reverts when the `crossDomainMessageSender` that sent the message is not + /// the same SuperchainWETH. 
+ function testFuzz_relayETH_notCrossDomainSender_reverts( + address _crossDomainMessageSender, + uint256 _source, + address _to, + uint256 _amount + ) + public + { + vm.assume(_crossDomainMessageSender != address(superchainWeth)); + + // Mock the call over the `crossDomainMessageContext` function setting a wrong sender + vm.mockCall( + Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, + abi.encodeCall(IL2ToL2CrossDomainMessenger.crossDomainMessageContext, ()), + abi.encode(_crossDomainMessageSender, _source) + ); + + // Expect the revert with `InvalidCrossDomainSender` selector + vm.expectRevert(ISuperchainWETH.InvalidCrossDomainSender.selector); + + // Call the `relayETH` function with the sender caller + vm.prank(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); + superchainWeth.relayETH(_crossDomainMessageSender, _to, _amount); + } + + /// @notice Tests the `relayETH` function succeeds and sends SuperchainWETH to the recipient on a custom gas token + /// chain. + function testFuzz_relayETH_fromCustomGasTokenChain_succeeds( + address _from, + address _to, + uint256 _amount, + uint256 _source + ) + public + { + // Assume + vm.assume(_to != ZERO_ADDRESS); + _amount = bound(_amount, 0, type(uint248).max - 1); + + // Get the balance of `_to` before the mint to compare later on the assertions + uint256 _toBalanceBefore = superchainWeth.balanceOf(_to); + + // Look for the emit of the `Transfer` event + vm.expectEmit(address(superchainWeth)); + emit Transfer(ZERO_ADDRESS, _to, _amount); + + // Look for the emit of the `RelayETH` event + vm.expectEmit(address(superchainWeth)); + emit RelayETH(_from, _to, _amount, _source); + + // Arrange + _mockAndExpect(address(l1Block), abi.encodeCall(l1Block.isCustomGasToken, ()), abi.encode(true)); + _mockAndExpect( + Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, + abi.encodeCall(IL2ToL2CrossDomainMessenger.crossDomainMessageContext, ()), + abi.encode(address(superchainWeth), _source) + ); + // Expect to not call the `mint` function in the `ETHLiquidity` contract + vm.expectCall(Predeploys.ETH_LIQUIDITY, abi.encodeCall(IETHLiquidity.mint, (_amount)), 0); + + // Act + vm.prank(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); + superchainWeth.relayETH(_from, _to, _amount); + + // Check the total supply and balance of `_to` after the mint were updated correctly + assertEq(superchainWeth.balanceOf(_to), _toBalanceBefore + _amount); + assertEq(superchainWeth.totalSupply(), 0); + assertEq(address(superchainWeth).balance, 0); + } + + /// @notice Tests the `relayETH` function relays the proper amount of ETH and emits the `RelayETH` event. 
+ function testFuzz_relayETH_succeeds(address _from, address _to, uint256 _amount, uint256 _source) public { + // Assume + vm.assume(_to != ZERO_ADDRESS); + assumePayable(_to); + _amount = bound(_amount, 0, type(uint248).max - 1); + + // Arrange + vm.deal(address(superchainWeth), _amount); + vm.deal(Predeploys.ETH_LIQUIDITY, _amount); + _mockAndExpect(address(l1Block), abi.encodeCall(l1Block.isCustomGasToken, ()), abi.encode(false)); + _mockAndExpect( + Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, + abi.encodeCall(IL2ToL2CrossDomainMessenger.crossDomainMessageContext, ()), + abi.encode(address(superchainWeth), _source) + ); + + uint256 _toBalanceBefore = _to.balance; + + // Look for the emit of the `RelayETH` event + vm.expectEmit(address(superchainWeth)); + emit RelayETH(_from, _to, _amount, _source); + + // Expect the call to the `mint` function in the `ETHLiquidity` contract + vm.expectCall(Predeploys.ETH_LIQUIDITY, abi.encodeCall(IETHLiquidity.mint, (_amount)), 1); + + // Call the `RelayETH` function with the messenger caller + vm.prank(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); + superchainWeth.relayETH(_from, _to, _amount); + + assertEq(_to.balance, _toBalanceBefore + _amount); + } } diff --git a/packages/contracts-bedrock/test/actors/FaultDisputeActors.sol b/packages/contracts-bedrock/test/actors/FaultDisputeActors.sol index 20f18eddc3c0..b2e7a1d64d18 100644 --- a/packages/contracts-bedrock/test/actors/FaultDisputeActors.sol +++ b/packages/contracts-bedrock/test/actors/FaultDisputeActors.sol @@ -8,7 +8,7 @@ import { CommonBase } from "forge-std/Base.sol"; import "src/dispute/lib/Types.sol"; // Interfaces -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; /// @title GameSolver /// @notice The `GameSolver` contract is a contract that can produce an array of available diff --git a/packages/contracts-bedrock/test/cannon/MIPS.t.sol b/packages/contracts-bedrock/test/cannon/MIPS.t.sol index 62100f31cfbe..aa9830981216 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS.t.sol @@ -14,8 +14,8 @@ import { InvalidExitedValue, InvalidMemoryProof } from "src/cannon/libraries/Can import "src/dispute/lib/Types.sol"; // Interfaces -import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; +import { IMIPS } from "interfaces/cannon/IMIPS.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; contract MIPS_Test is CommonTest { IMIPS internal mips; diff --git a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol index 2cc0519ea5bf..07c5883c17bc 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol @@ -14,8 +14,8 @@ import { InvalidExitedValue, InvalidMemoryProof, InvalidSecondMemoryProof } from import "src/dispute/lib/Types.sol"; // Interfaces -import { IMIPS2 } from "src/cannon/interfaces/IMIPS2.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; +import { IMIPS2 } from "interfaces/cannon/IMIPS2.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; contract ThreadStack { bytes32 internal constant EMPTY_THREAD_ROOT = hex"ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"; @@ -789,8 +789,9 @@ contract MIPS2_Test is CommonTest { 
vm.expectRevert(InvalidMemoryProof.selector); mips.step(encodeState(state), bytes.concat(threadWitness, invalidInsnAndMemProof, memProof2), 0); - (, bytes memory invalidMemProof2) = - ffi.getCannonMemoryProof2(pc, insn, timespecAddr, secs + 1, timespecAddr + 4); + uint32 _secs = secs + 1; + uint32 _timespecAddr = timespecAddr + 4; + (, bytes memory invalidMemProof2) = ffi.getCannonMemoryProof2(pc, insn, timespecAddr, _secs, _timespecAddr); vm.expectRevert(InvalidSecondMemoryProof.selector); mips.step(encodeState(state), bytes.concat(threadWitness, insnAndMemProof, invalidMemProof2), 0); } @@ -2766,31 +2767,21 @@ contract MIPS2_Test is CommonTest { } function encodeState(IMIPS2.State memory _state) internal pure returns (bytes memory) { - // Split up encoding to get around stack-too-deep error - return abi.encodePacked(encodeStateA(_state), encodeStateB(_state)); - } - - function encodeStateA(IMIPS2.State memory _state) internal pure returns (bytes memory) { - return abi.encodePacked( + bytes memory a = abi.encodePacked( _state.memRoot, _state.preimageKey, _state.preimageOffset, _state.heap, _state.llReservationStatus, - _state.llAddress, - _state.llOwnerThread, - _state.exitCode, - _state.exited, - _state.step, - _state.stepsSinceLastContextSwitch, - _state.wakeup, - _state.traverseRight, - _state.leftThreadStack + _state.llAddress ); - } - - function encodeStateB(IMIPS2.State memory _state) internal pure returns (bytes memory) { - return abi.encodePacked(_state.rightThreadStack, _state.nextThreadID); + bytes memory b = abi.encodePacked( + _state.llOwnerThread, _state.exitCode, _state.exited, _state.step, _state.stepsSinceLastContextSwitch + ); + bytes memory c = abi.encodePacked( + _state.wakeup, _state.traverseRight, _state.leftThreadStack, _state.rightThreadStack, _state.nextThreadID + ); + return abi.encodePacked(a, b, c); } function copyState(IMIPS2.State memory _state) internal pure returns (IMIPS2.State memory out_) { diff --git a/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol b/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol index 1c4d41728fcc..d6c7c520a5f1 100644 --- a/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol +++ b/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol @@ -16,7 +16,7 @@ import "src/cannon/libraries/CannonErrors.sol"; import "src/cannon/libraries/CannonTypes.sol"; // Interfaces -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; contract PreimageOracle_Test is Test { IPreimageOracle oracle; @@ -889,7 +889,6 @@ contract PreimageOracle_LargePreimageProposals_Test is Test { /// @notice Tests that squeezing a large preimage proposal after the challenge period has passed always succeeds and /// persists the correct data. - /// forge-config: ciheavy.fuzz.runs = 512 function testFuzz_squeezeLPP_succeeds(uint256 _numBlocks, uint32 _partOffset) public { _numBlocks = bound(_numBlocks, 1, 2 ** 8); _partOffset = uint32(bound(_partOffset, 0, _numBlocks * LibKeccak.BLOCK_SIZE_BYTES + 8 - 1)); @@ -1087,7 +1086,6 @@ contract PreimageOracle_LargePreimageProposals_Test is Test { /// @notice Tests that challenging the first divergence in a large preimage proposal at an arbitrary location /// in the leaf values always succeeds. 
- /// forge-config: ciheavy.fuzz.runs = 512 function testFuzz_challenge_arbitraryLocation_succeeds(uint256 _lastCorrectLeafIdx, uint256 _numBlocks) public { _numBlocks = bound(_numBlocks, 1, 2 ** 8); _lastCorrectLeafIdx = bound(_lastCorrectLeafIdx, 0, _numBlocks - 1); @@ -1140,7 +1138,6 @@ contract PreimageOracle_LargePreimageProposals_Test is Test { } /// @notice Tests that challenging the a divergence in a large preimage proposal at the first leaf always succeeds. - /// forge-config: ciheavy.fuzz.runs = 1024 function testFuzz_challengeFirst_succeeds(uint256 _numBlocks) public { _numBlocks = bound(_numBlocks, 1, 2 ** 8); diff --git a/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol b/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol index 6d7a9bea5134..1cbaf0c1eb25 100644 --- a/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol +++ b/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol @@ -64,6 +64,110 @@ contract DelayedWETH_Unlock_Test is DelayedWETH_Init { } contract DelayedWETH_Withdraw_Test is DelayedWETH_Init { + /// @dev Tests that withdrawing while unlocked and delay has passed is successful. + function test_withdraw_whileUnlocked_succeeds() public { + // Deposit some WETH. + vm.prank(alice); + delayedWeth.deposit{ value: 1 ether }(); + uint256 balance = address(alice).balance; + + // Unlock the withdrawal. + vm.prank(alice); + delayedWeth.unlock(alice, 1 ether); + + // Wait for the delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1); + + // Withdraw the WETH. + vm.expectEmit(true, true, false, false); + emit Withdrawal(address(alice), 1 ether); + vm.prank(alice); + delayedWeth.withdraw(1 ether); + assertEq(address(alice).balance, balance + 1 ether); + } + + /// @dev Tests that withdrawing when unlock was not called fails. + function test_withdraw_whileLocked_fails() public { + // Deposit some WETH. + vm.prank(alice); + delayedWeth.deposit{ value: 1 ether }(); + uint256 balance = address(alice).balance; + + // Withdraw fails when unlock not called. + vm.expectRevert("DelayedWETH: withdrawal not unlocked"); + vm.prank(alice); + delayedWeth.withdraw(0 ether); + assertEq(address(alice).balance, balance); + } + + /// @dev Tests that withdrawing while locked and delay has not passed fails. + function test_withdraw_whileLockedNotLongEnough_fails() public { + // Deposit some WETH. + vm.prank(alice); + delayedWeth.deposit{ value: 1 ether }(); + uint256 balance = address(alice).balance; + + // Call unlock. + vm.prank(alice); + delayedWeth.unlock(alice, 1 ether); + + // Wait for the delay, but not long enough. + vm.warp(block.timestamp + delayedWeth.delay() - 1); + + // Withdraw fails when delay not met. + vm.expectRevert("DelayedWETH: withdrawal delay not met"); + vm.prank(alice); + delayedWeth.withdraw(1 ether); + assertEq(address(alice).balance, balance); + } + + /// @dev Tests that withdrawing more than unlocked amount fails. + function test_withdraw_tooMuch_fails() public { + // Deposit some WETH. + vm.prank(alice); + delayedWeth.deposit{ value: 1 ether }(); + uint256 balance = address(alice).balance; + + // Unlock the withdrawal. + vm.prank(alice); + delayedWeth.unlock(alice, 1 ether); + + // Wait for the delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1); + + // Withdraw too much fails. + vm.expectRevert("DelayedWETH: insufficient unlocked withdrawal"); + vm.prank(alice); + delayedWeth.withdraw(2 ether); + assertEq(address(alice).balance, balance); + } + + /// @dev Tests that withdrawing while paused fails. 
+ function test_withdraw_whenPaused_fails() public { + // Deposit some WETH. + vm.prank(alice); + delayedWeth.deposit{ value: 1 ether }(); + + // Unlock the withdrawal. + vm.prank(alice); + delayedWeth.unlock(alice, 1 ether); + + // Wait for the delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1); + + // Pause the contract. + address guardian = optimismPortal.guardian(); + vm.prank(guardian); + superchainConfig.pause("identifier"); + + // Withdraw fails. + vm.expectRevert("DelayedWETH: contract is paused"); + vm.prank(alice); + delayedWeth.withdraw(1 ether); + } +} + +contract DelayedWETH_WithdrawFrom_Test is DelayedWETH_Init { /// @dev Tests that withdrawing while unlocked and delay has passed is successful. function test_withdraw_whileUnlocked_succeeds() public { // Deposit some WETH. diff --git a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol index b75809f1d798..f40a641994b0 100644 --- a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol +++ b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol @@ -13,8 +13,8 @@ import "src/dispute/lib/Types.sol"; import "src/dispute/lib/Errors.sol"; // Interfaces -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; contract DisputeGameFactory_Init is CommonTest { FakeClone fakeClone; diff --git a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol index 88e361a80b45..50fde1fad4cd 100644 --- a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol @@ -24,11 +24,11 @@ import "src/dispute/lib/Types.sol"; import "src/dispute/lib/Errors.sol"; // Interfaces -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IPreimageOracle } from "interfaces/dispute/IBigStepper.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; contract FaultDisputeGame_Init is DisputeGameFactory_Init { /// @dev The type of the game being tested. 
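For reference, the fixtures in the hunks below build the game implementation through the new `IFaultDisputeGame.GameConstructorParams` struct rather than ten positional constructor arguments. A minimal sketch of the call shape, reusing the illustrative values and in-scope fixture names from the test setup (the `params` and `initArgs` locals are hypothetical):

```solidity
// Sketch only: values mirror the FaultDisputeGame_Init fixture shown below, not production config.
IFaultDisputeGame.GameConstructorParams memory params = IFaultDisputeGame.GameConstructorParams({
    gameType: GAME_TYPE,
    absolutePrestate: absolutePrestate,
    maxGameDepth: 2 ** 3,
    splitDepth: 2 ** 2,
    clockExtension: Duration.wrap(3 hours),
    maxClockDuration: Duration.wrap(3.5 days),
    vm: _vm,
    weth: delayedWeth,
    anchorStateRegistry: anchorStateRegistry,
    l2ChainId: 10
});
// The pseudo-constructor is then encoded with the struct as its single argument.
bytes memory initArgs = abi.encodeCall(IFaultDisputeGame.__constructor__, (params));
```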
@@ -73,16 +73,18 @@ contract FaultDisputeGame_Init is DisputeGameFactory_Init { abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - 2 ** 3, - 2 ** 2, - Duration.wrap(3 hours), - Duration.wrap(3.5 days), - _vm, - delayedWeth, - anchorStateRegistry, - 10 + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: 2 ** 3, + splitDepth: 2 ** 2, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days), + vm: _vm, + weth: delayedWeth, + anchorStateRegistry: anchorStateRegistry, + l2ChainId: 10 + }) ) ) ) @@ -154,16 +156,18 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - _maxGameDepth, - _maxGameDepth + 1, - Duration.wrap(3 hours), - Duration.wrap(3.5 days), - alphabetVM, - IDelayedWETH(payable(address(0))), - IAnchorStateRegistry(address(0)), - 10 + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: _maxGameDepth, + splitDepth: _maxGameDepth + 1, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days), + vm: alphabetVM, + weth: IDelayedWETH(payable(address(0))), + anchorStateRegistry: IAnchorStateRegistry(address(0)), + l2ChainId: 10 + }) ) ) ) @@ -196,16 +200,18 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - 2 ** 3, - 2 ** 2, - Duration.wrap(3 hours), - Duration.wrap(3.5 days), - alphabetVM, - IDelayedWETH(payable(address(0))), - IAnchorStateRegistry(address(0)), - 10 + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: 2 ** 3, + splitDepth: 2 ** 2, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days), + vm: alphabetVM, + weth: IDelayedWETH(payable(address(0))), + anchorStateRegistry: IAnchorStateRegistry(address(0)), + l2ChainId: 10 + }) ) ) ) @@ -234,16 +240,18 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - maxGameDepth, - _splitDepth, - Duration.wrap(3 hours), - Duration.wrap(3.5 days), - alphabetVM, - IDelayedWETH(payable(address(0))), - IAnchorStateRegistry(address(0)), - 10 + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: maxGameDepth, + splitDepth: _splitDepth, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days), + vm: alphabetVM, + weth: IDelayedWETH(payable(address(0))), + anchorStateRegistry: IAnchorStateRegistry(address(0)), + l2ChainId: 10 + }) ) ) ) @@ -272,16 +280,18 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - 2 ** 3, - _splitDepth, - Duration.wrap(3 hours), - Duration.wrap(3.5 days), - alphabetVM, - IDelayedWETH(payable(address(0))), - IAnchorStateRegistry(address(0)), - 10 + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: 2 ** 3, + splitDepth: _splitDepth, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days), + vm: alphabetVM, + weth: IDelayedWETH(payable(address(0))), + anchorStateRegistry: IAnchorStateRegistry(address(0)), + l2ChainId: 10 + }) ) ) ) 
@@ -318,16 +328,18 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - 16, - 8, - Duration.wrap(_clockExtension), - Duration.wrap(_maxClockDuration), - alphabetVM, - IDelayedWETH(payable(address(0))), - IAnchorStateRegistry(address(0)), - 10 + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: 16, + splitDepth: 8, + clockExtension: Duration.wrap(_clockExtension), + maxClockDuration: Duration.wrap(_maxClockDuration), + vm: alphabetVM, + weth: IDelayedWETH(payable(address(0))), + anchorStateRegistry: IAnchorStateRegistry(address(0)), + l2ChainId: 10 + }) ) ) ) @@ -449,6 +461,17 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { gameProxy.initialize(); } + /// @dev Tests that startingOutputRoot and its getters are set correctly. + function test_startingOutputRootGetters_succeeds() public view { + (Hash root, uint256 l2BlockNumber) = gameProxy.startingOutputRoot(); + (Hash anchorRoot, uint256 anchorRootBlockNumber) = anchorStateRegistry.anchors(GAME_TYPE); + + assertEq(gameProxy.startingBlockNumber(), l2BlockNumber); + assertEq(gameProxy.startingBlockNumber(), anchorRootBlockNumber); + assertEq(Hash.unwrap(gameProxy.startingRootHash()), Hash.unwrap(root)); + assertEq(Hash.unwrap(gameProxy.startingRootHash()), Hash.unwrap(anchorRoot)); + } + /// @dev Tests that the user cannot control the first 4 bytes of the CWIA data, disallowing them to control the /// entrypoint when no calldata is provided to a call. function test_cwiaCalldata_userCannotControlSelector_succeeds() public { @@ -1986,6 +2009,18 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { assertEq(datLen, expectedLen); } + /// @dev Tests that if the game is not in progress, calling `getChallengerDuration` reverts + function test_getChallengerDuration_gameNotInProgress_reverts() public { + // resolve the game + vm.warp(block.timestamp + gameProxy.maxClockDuration().raw()); + + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + + vm.expectRevert(GameNotInProgress.selector); + gameProxy.getChallengerDuration(1); + } + /// @dev Static unit test asserting that resolveClaim isn't possible if there's time /// left for a counter. function test_resolution_lastSecondDisputes_succeeds() public { diff --git a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol index 698d8a0bd1f4..8c74bee750a6 100644 --- a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol @@ -14,9 +14,10 @@ import "src/dispute/lib/Types.sol"; import "src/dispute/lib/Errors.sol"; // Interfaces -import { IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; +import { IPreimageOracle } from "interfaces/dispute/IBigStepper.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; contract PermissionedDisputeGame_Init is DisputeGameFactory_Init { /// @dev The type of the game being tested.
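The permissioned game in the following hunk takes the same `GameConstructorParams` struct, followed by the proposer and challenger addresses. A minimal sketch, assuming `params` is the struct from the sketch above and `PROPOSER`/`CHALLENGER` are the test constants (the `permissionedArgs` local is hypothetical):

```solidity
// Sketch only: the permissioned variant appends the two permissioned actors to the shared params struct.
bytes memory permissionedArgs = abi.encodeCall(IPermissionedDisputeGame.__constructor__, (params, PROPOSER, CHALLENGER));
```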
@@ -67,16 +68,18 @@ contract PermissionedDisputeGame_Init is DisputeGameFactory_Init { abi.encodeCall( IPermissionedDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - 2 ** 3, - 2 ** 2, - Duration.wrap(3 hours), - Duration.wrap(3.5 days), - _vm, - _weth, - anchorStateRegistry, - 10, + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: 2 ** 3, + splitDepth: 2 ** 2, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days), + vm: _vm, + weth: _weth, + anchorStateRegistry: anchorStateRegistry, + l2ChainId: 10 + }), PROPOSER, CHALLENGER ) @@ -198,6 +201,61 @@ contract PermissionedDisputeGame_Test is PermissionedDisputeGame_Init { vm.stopPrank(); } + /// @dev Tests that step works properly. + function test_step_succeeds() public { + // Give the test contract some ether + vm.deal(CHALLENGER, 1_000 ether); + + vm.startPrank(CHALLENGER, CHALLENGER); + + // Make claims all the way down the tree. + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(1); + gameProxy.attack{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(2); + gameProxy.attack{ value: _getRequiredBond(2) }(disputed, 2, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(3); + gameProxy.attack{ value: _getRequiredBond(3) }(disputed, 3, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(4); + gameProxy.attack{ value: _getRequiredBond(4) }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC)); + (,,,, disputed,,) = gameProxy.claimData(5); + gameProxy.attack{ value: _getRequiredBond(5) }(disputed, 5, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(6); + gameProxy.attack{ value: _getRequiredBond(6) }(disputed, 6, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(7); + gameProxy.attack{ value: _getRequiredBond(7) }(disputed, 7, _dummyClaim()); + + // Verify game state before step + assertEq(uint256(gameProxy.status()), uint256(GameStatus.IN_PROGRESS)); + + gameProxy.addLocalData(LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER, 8, 0); + gameProxy.step(8, true, absolutePrestateData, hex""); + + vm.warp(block.timestamp + gameProxy.maxClockDuration().raw() + 1); + gameProxy.resolveClaim(8, 0); + gameProxy.resolveClaim(7, 0); + gameProxy.resolveClaim(6, 0); + gameProxy.resolveClaim(5, 0); + gameProxy.resolveClaim(4, 0); + gameProxy.resolveClaim(3, 0); + gameProxy.resolveClaim(2, 0); + gameProxy.resolveClaim(1, 0); + + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + + assertEq(uint256(gameProxy.status()), uint256(GameStatus.CHALLENGER_WINS)); + assertEq(gameProxy.resolvedAt().raw(), block.timestamp); + (, address counteredBy,,,,,) = gameProxy.claimData(0); + assertEq(counteredBy, CHALLENGER); + } + + /// @dev Helper to return a pseudo-random claim + function _dummyClaim() internal view returns (Claim) { + return Claim.wrap(keccak256(abi.encode(gasleft()))); + } + /// @dev Helper to get the required bond for the given claim index. 
function _getRequiredBond(uint256 _claimIndex) internal view returns (uint256 bond_) { (,,,,, Position parent,) = gameProxy.claimData(_claimIndex); diff --git a/packages/contracts-bedrock/test/dispute/WETH98.t.sol b/packages/contracts-bedrock/test/dispute/WETH98.t.sol index b26cd927f12c..f207248be19c 100644 --- a/packages/contracts-bedrock/test/dispute/WETH98.t.sol +++ b/packages/contracts-bedrock/test/dispute/WETH98.t.sol @@ -5,7 +5,7 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; // Contracts -import { IWETH98 } from "src/universal/interfaces/IWETH98.sol"; +import { IWETH98 } from "interfaces/universal/IWETH98.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract WETH98_Test is Test { diff --git a/packages/contracts-bedrock/test/governance/MintManager.t.sol b/packages/contracts-bedrock/test/governance/MintManager.t.sol index c29e25602834..04255474d7a7 100644 --- a/packages/contracts-bedrock/test/governance/MintManager.t.sol +++ b/packages/contracts-bedrock/test/governance/MintManager.t.sol @@ -5,8 +5,8 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; // Interfaces -import { IGovernanceToken } from "src/governance/interfaces/IGovernanceToken.sol"; -import { IMintManager } from "src/governance/interfaces/IMintManager.sol"; +import { IGovernanceToken } from "interfaces/governance/IGovernanceToken.sol"; +import { IMintManager } from "interfaces/governance/IMintManager.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract MintManager_Initializer is CommonTest { diff --git a/packages/contracts-bedrock/test/invariants/CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/invariants/CrossDomainMessenger.t.sol index 3bf4ad3f7b3a..06df8d1ea076 100644 --- a/packages/contracts-bedrock/test/invariants/CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/invariants/CrossDomainMessenger.t.sol @@ -3,8 +3,8 @@ pragma solidity 0.8.15; import { StdUtils } from "forge-std/StdUtils.sol"; import { Vm } from "forge-std/Vm.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { Constants } from "src/libraries/Constants.sol"; diff --git a/packages/contracts-bedrock/test/invariants/ETHLiquidity.t.sol b/packages/contracts-bedrock/test/invariants/ETHLiquidity.t.sol index 8d1221a792c1..4d6746810731 100644 --- a/packages/contracts-bedrock/test/invariants/ETHLiquidity.t.sol +++ b/packages/contracts-bedrock/test/invariants/ETHLiquidity.t.sol @@ -10,7 +10,7 @@ import { CommonTest } from "test/setup/CommonTest.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { IETHLiquidity } from "src/L2/interfaces/IETHLiquidity.sol"; +import { IETHLiquidity } from "interfaces/L2/IETHLiquidity.sol"; /// @title ETHLiquidity_User /// @notice Actor contract that interacts with the ETHLiquidity contract. 
Always pretends to be the diff --git a/packages/contracts-bedrock/test/invariants/FaultDisputeGame.t.sol b/packages/contracts-bedrock/test/invariants/FaultDisputeGame.t.sol index 94b46930ad49..705e21bd4cfc 100644 --- a/packages/contracts-bedrock/test/invariants/FaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/invariants/FaultDisputeGame.t.sol @@ -11,7 +11,7 @@ import "src/dispute/lib/Types.sol"; import "src/dispute/lib/Errors.sol"; // Interfaces -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; contract FaultDisputeGame_Solvency_Invariant is FaultDisputeGame_Init { Claim internal constant ROOT_CLAIM = Claim.wrap(bytes32(uint256(10))); diff --git a/packages/contracts-bedrock/test/invariants/L2OutputOracle.t.sol b/packages/contracts-bedrock/test/invariants/L2OutputOracle.t.sol index d7d6914841cd..fce298d2ceba 100644 --- a/packages/contracts-bedrock/test/invariants/L2OutputOracle.t.sol +++ b/packages/contracts-bedrock/test/invariants/L2OutputOracle.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; -import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; +import { IL2OutputOracle } from "interfaces/L1/IL2OutputOracle.sol"; import { Vm } from "forge-std/Vm.sol"; contract L2OutputOracle_Proposer { diff --git a/packages/contracts-bedrock/test/invariants/OptimismPortal.t.sol b/packages/contracts-bedrock/test/invariants/OptimismPortal.t.sol index ae754cd0e96b..42bf52e1de83 100644 --- a/packages/contracts-bedrock/test/invariants/OptimismPortal.t.sol +++ b/packages/contracts-bedrock/test/invariants/OptimismPortal.t.sol @@ -4,9 +4,9 @@ pragma solidity 0.8.15; import { StdUtils } from "forge-std/Test.sol"; import { Vm } from "forge-std/Vm.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; import { ResourceMetering } from "src/L1/ResourceMetering.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; import { Constants } from "src/libraries/Constants.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; diff --git a/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol index 5e0e866dcfb1..0a870bc651f3 100644 --- a/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol @@ -17,9 +17,9 @@ import "src/dispute/lib/Types.sol"; import "src/libraries/PortalErrors.sol"; // Interfaces -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; contract OptimismPortal2_Depositor is StdUtils, ResourceMetering { Vm internal vm; diff --git a/packages/contracts-bedrock/test/invariants/ResourceMetering.t.sol b/packages/contracts-bedrock/test/invariants/ResourceMetering.t.sol index 4652f9b9e36e..49793f2adf14 100644 --- 
a/packages/contracts-bedrock/test/invariants/ResourceMetering.t.sol +++ b/packages/contracts-bedrock/test/invariants/ResourceMetering.t.sol @@ -8,7 +8,7 @@ import { StdInvariant } from "forge-std/StdInvariant.sol"; import { Arithmetic } from "src/libraries/Arithmetic.sol"; import { ResourceMetering } from "src/L1/ResourceMetering.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; import { Constants } from "src/libraries/Constants.sol"; import { InvariantTest } from "test/invariants/InvariantTest.sol"; diff --git a/packages/contracts-bedrock/test/invariants/SuperchainWETH.t.sol b/packages/contracts-bedrock/test/invariants/SuperchainWETH.t.sol index 24ffc0a57963..bb6ee569da14 100644 --- a/packages/contracts-bedrock/test/invariants/SuperchainWETH.t.sol +++ b/packages/contracts-bedrock/test/invariants/SuperchainWETH.t.sol @@ -7,7 +7,7 @@ import { Vm } from "forge-std/Vm.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; // Interfaces -import { ISuperchainWETH } from "src/L2/interfaces/ISuperchainWETH.sol"; +import { ISuperchainWETH } from "interfaces/L2/ISuperchainWETH.sol"; /// @title SuperchainWETH_User /// @notice Actor contract that interacts with the SuperchainWETH contract. diff --git a/packages/contracts-bedrock/test/invariants/SystemConfig.t.sol b/packages/contracts-bedrock/test/invariants/SystemConfig.t.sol index 5b0b300abda0..68add058f60d 100644 --- a/packages/contracts-bedrock/test/invariants/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/invariants/SystemConfig.t.sol @@ -2,8 +2,8 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; import { Constants } from "src/libraries/Constants.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; diff --git a/packages/contracts-bedrock/test/kontrol/README.md b/packages/contracts-bedrock/test/kontrol/README.md index 7e4dc5a2809a..cf809eb65b48 100644 --- a/packages/contracts-bedrock/test/kontrol/README.md +++ b/packages/contracts-bedrock/test/kontrol/README.md @@ -48,10 +48,9 @@ The directory is structured as follows ### Installation -1. `cd` to the root of this repo. -2. Install Foundry by running `just install-foundry`. This installs `foundryup`, the foundry toolchain installer, then installs the required foundry version. -3. Install Kontrol by running `just install-kontrol`. This installs `kup`, the package manager for RV tools, then installs the required kontrol version. -4. Install Docker. +1. Make sure that the dependencies for the Optimism Monorepo are installed with `mise`. +1. Install [`kup`](https://github.com/runtimeverification/k/releases/tag/v7.1.180). +1. Use `kup` to [install `kontrol`](https://github.com/runtimeverification/kontrol?tab=readme-ov-file#fast-installation) ## Usage @@ -91,7 +90,7 @@ Use the [`run-kontrol.sh`](./scripts/run-kontrol.sh) script to runs the proofs i The `run-kontrol.sh` script supports three modes of proof execution: - `container`: Runs the proofs using the same Docker image used in CI. This is the default execution mode—if no arguments are provided, the proofs will be executed in this mode. 
-- `local`: Runs the proofs with your local Kontrol install, and enforces that the Kontrol version matches the one used in CI, which is specified in [`versions.json`](../../../../versions.json). +- `local`: Runs the proofs with your local Kontrol install, and enforces that the Kontrol version matches the one used in CI, which is specified in [`mise.toml`](../../../../mise.toml). - `dev`: Run the proofs with your local Kontrol install, without enforcing any version in particular. The intended use case is proof development and related matters. It also supports two methods for specifying which tests to execute: @@ -122,8 +121,8 @@ Write your proof in a `.k.sol` file in the [`proofs`](./proofs/) folder, which i To reference the correct addresses for writing the tests, first import the signatures as in this example: ```solidity -import { IOptimismPortal as OptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IOptimismPortal as OptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { ISuperchainConfig as SuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; ``` Declare the correspondent variables and cast the correct signatures to the correct addresses: diff --git a/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol index a04defe5d9ec..80adc430211f 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol @@ -3,8 +3,8 @@ pragma solidity ^0.8.13; import { DeploymentSummaryFaultProofs } from "./utils/DeploymentSummaryFaultProofs.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; -import { IL1CrossDomainMessenger as L1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; -import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IL1CrossDomainMessenger as L1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; +import { ISuperchainConfig as SuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; contract L1CrossDomainMessengerKontrol is DeploymentSummaryFaultProofs, KontrolUtils { L1CrossDomainMessenger l1CrossDomainMessenger; diff --git a/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol index 5e45e3e3a9fd..6a86fbd637bf 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol @@ -3,9 +3,9 @@ pragma solidity ^0.8.13; import { DeploymentSummaryFaultProofs } from "./utils/DeploymentSummaryFaultProofs.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; -import { IL1ERC721Bridge as L1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ICrossDomainMessenger as CrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { IL1ERC721Bridge as L1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { ISuperchainConfig as SuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ICrossDomainMessenger as CrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; contract 
L1ERC721BridgeKontrol is DeploymentSummaryFaultProofs, KontrolUtils { L1ERC721Bridge l1ERC721Bridge; diff --git a/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol index b5f8793426e6..d25e57ae288c 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol @@ -3,9 +3,9 @@ pragma solidity ^0.8.13; import { DeploymentSummaryFaultProofs } from "./utils/DeploymentSummaryFaultProofs.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; -import { IL1StandardBridge as L1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; -import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ICrossDomainMessenger as CrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { IL1StandardBridge as L1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; +import { ISuperchainConfig as SuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ICrossDomainMessenger as CrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; contract L1StandardBridgeKontrol is DeploymentSummaryFaultProofs, KontrolUtils { L1StandardBridge l1standardBridge; diff --git a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol index f0cf6cac7734..15bdc2b33dad 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol @@ -4,8 +4,8 @@ pragma solidity ^0.8.13; import { DeploymentSummary } from "./utils/DeploymentSummary.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; import { Types } from "src/libraries/Types.sol"; -import { IOptimismPortal as OptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IOptimismPortal as OptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { ISuperchainConfig as SuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import "src/libraries/PortalErrors.sol"; contract OptimismPortalKontrol is DeploymentSummary, KontrolUtils { diff --git a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol index d561b8b85092..1d16f1f99cb4 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol @@ -4,8 +4,8 @@ pragma solidity ^0.8.13; import { DeploymentSummaryFaultProofs } from "./utils/DeploymentSummaryFaultProofs.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; import { Types } from "src/libraries/Types.sol"; -import { IOptimismPortal as OptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IOptimismPortal as OptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { ISuperchainConfig as SuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import "src/libraries/PortalErrors.sol"; contract OptimismPortal2Kontrol is DeploymentSummaryFaultProofs, KontrolUtils { diff --git a/packages/contracts-bedrock/test/kontrol/scripts/common.sh 
b/packages/contracts-bedrock/test/kontrol/scripts/common.sh index 3b99c3e50624..c301b28a9bd8 100644 --- a/packages/contracts-bedrock/test/kontrol/scripts/common.sh +++ b/packages/contracts-bedrock/test/kontrol/scripts/common.sh @@ -11,7 +11,7 @@ usage_run_kontrol() { echo "" 1>&2 echo "Execution modes:" echo " container Run in docker container. Reproduce CI execution. (Default)" 1>&2 - echo " local Run locally, enforces registered versions.json version for better reproducibility. (Recommended)" 1>&2 + echo " local Run locally, enforces registered mise.toml version for better reproducibility. (Recommended)" 1>&2 echo " dev Run locally, does NOT enforce registered version. (Useful for developing with new versions and features)" 1>&2 echo "" 1>&2 echo "Tests executed:" @@ -28,7 +28,7 @@ usage_make_summary() { echo "" 1>&2 echo "Execution modes:" echo " container Run in docker container. Reproduce CI execution. (Default)" 1>&2 - echo " local Run locally, enforces registered versions.json version for better reproducibility. (Recommended)" 1>&2 + echo " local Run locally, enforces registered mise.toml version for better reproducibility. (Recommended)" 1>&2 echo " dev Run locally, does NOT enforce registered version. (Useful for developing with new versions and features)" 1>&2 exit 0 } @@ -43,7 +43,7 @@ export CONTAINER_NAME=kontrol-tests if [ "$KONTROL_FP_DEPLOYMENT" = true ]; then export CONTAINER_NAME=kontrol-fp-tests fi -KONTROLRC=$(jq -r .kontrol < "$WORKSPACE_DIR/../../versions.json") +KONTROLRC=$(yq '.tools.kontrol' "$WORKSPACE_DIR/../../mise.toml") export KONTROL_RELEASE=$KONTROLRC export LOCAL=false export SCRIPT_TESTS=false diff --git a/packages/contracts-bedrock/test/legacy/DeployerWhitelist.t.sol b/packages/contracts-bedrock/test/legacy/DeployerWhitelist.t.sol index 3f140e9a0725..2a0f47920532 100644 --- a/packages/contracts-bedrock/test/legacy/DeployerWhitelist.t.sol +++ b/packages/contracts-bedrock/test/legacy/DeployerWhitelist.t.sol @@ -5,11 +5,16 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; // Target contract -import { IDeployerWhitelist } from "src/legacy/interfaces/IDeployerWhitelist.sol"; +import { IDeployerWhitelist } from "interfaces/legacy/IDeployerWhitelist.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract DeployerWhitelist_Test is Test { IDeployerWhitelist list; + address owner = address(12345); + + event OwnerChanged(address oldOwner, address newOwner); + event WhitelistDisabled(address oldOwner); + event WhitelistStatusChanged(address deployer, bool whitelisted); /// @dev Sets up the test suite. function setUp() public { @@ -27,10 +32,102 @@ contract DeployerWhitelist_Test is Test { } /// @dev Tests that `setOwner` correctly sets the contract owner. - function test_storageSlots_succeeds() external { - vm.prank(list.owner()); - list.setOwner(address(1)); + function test_setOwner_succeeds(address _owner) external { + vm.store(address(list), bytes32(uint256(0)), bytes32(uint256(uint160(owner)))); + assertEq(list.owner(), owner); + _owner = address(uint160(bound(uint160(_owner), 1, type(uint160).max))); + + vm.prank(owner); + vm.expectEmit(true, true, true, true); + emit OwnerChanged(owner, _owner); + list.setOwner(_owner); + + assertEq(list.owner(), _owner); + } + + /// @dev Tests that `setOwner` reverts when the caller is not the owner. 
+ function test_setOwner_callerNotOwner_reverts(address _caller, address _owner) external { + vm.store(address(list), bytes32(uint256(0)), bytes32(uint256(uint160(owner)))); + assertEq(list.owner(), owner); + + vm.assume(_caller != owner); + + vm.prank(_caller); + vm.expectRevert(bytes("DeployerWhitelist: function can only be called by the owner of this contract")); + list.setOwner(_owner); + } + + /// @dev Tests that `setOwner` reverts when the new owner is the zero address. + function test_setOwner_zeroAddress_reverts() external { + vm.store(address(list), bytes32(uint256(0)), bytes32(uint256(uint160(owner)))); + assertEq(list.owner(), owner); + + vm.prank(owner); + vm.expectRevert(bytes("DeployerWhitelist: can only be disabled via enableArbitraryContractDeployment")); + list.setOwner(address(0)); + } + + /// @dev Tests that `enableArbitraryContractDeployment` correctly disables the whitelist. + function test_enableArbitraryContractDeployment_succeeds() external { + vm.store(address(list), bytes32(uint256(0)), bytes32(uint256(uint160(owner)))); + assertEq(list.owner(), owner); + + vm.prank(owner); + vm.expectEmit(true, true, true, true); + emit WhitelistDisabled(owner); + list.enableArbitraryContractDeployment(); + + assertEq(list.owner(), address(0)); + + // Any address is allowed to deploy contracts even if they are not whitelisted + assertEq(list.whitelist(address(1)), false); + assertEq(list.isDeployerAllowed(address(1)), true); + } + + /// @dev Tests that `enableArbitraryContractDeployment` reverts when the caller is not the owner. + function test_enableArbitraryContractDeployment_callerNotOwner_reverts(address _caller) external { + vm.store(address(list), bytes32(uint256(0)), bytes32(uint256(uint160(owner)))); + assertEq(list.owner(), owner); + + vm.assume(_caller != owner); + + vm.prank(_caller); + vm.expectRevert(bytes("DeployerWhitelist: function can only be called by the owner of this contract")); + list.enableArbitraryContractDeployment(); + } + + /// @dev Tests that `setWhitelistedDeployer` correctly sets the whitelist status of a deployer. + function test_setWhitelistedDeployer_succeeds(address _deployer, bool _isWhitelisted) external { + vm.store(address(list), bytes32(uint256(0)), bytes32(uint256(uint160(owner)))); + assertEq(list.owner(), owner); + + vm.prank(owner); + vm.expectEmit(true, true, true, true); + emit WhitelistStatusChanged(_deployer, _isWhitelisted); + list.setWhitelistedDeployer(_deployer, _isWhitelisted); + + assertEq(list.whitelist(_deployer), _isWhitelisted); + + // _deployer is whitelisted or not (and arbitrary contract deployment is not enabled) + assertNotEq(list.owner(), address(0)); + assertEq(list.isDeployerAllowed(_deployer), _isWhitelisted); + } + + /// @dev Tests that `setWhitelistedDeployer` reverts when the caller is not the owner. 
+ function test_setWhitelistedDeployer_callerNotOwner_reverts( + address _caller, + address _deployer, + bool _isWhitelisted + ) + external + { + vm.store(address(list), bytes32(uint256(0)), bytes32(uint256(uint160(owner)))); + assertEq(list.owner(), owner); + + vm.assume(_caller != owner); - assertEq(bytes32(uint256(1)), vm.load(address(list), bytes32(uint256(0)))); + vm.prank(_caller); + vm.expectRevert(bytes("DeployerWhitelist: function can only be called by the owner of this contract")); + list.setWhitelistedDeployer(_deployer, _isWhitelisted); } } diff --git a/packages/contracts-bedrock/test/legacy/L1BlockNumber.t.sol b/packages/contracts-bedrock/test/legacy/L1BlockNumber.t.sol index 49758739d2da..2c9760270442 100644 --- a/packages/contracts-bedrock/test/legacy/L1BlockNumber.t.sol +++ b/packages/contracts-bedrock/test/legacy/L1BlockNumber.t.sol @@ -4,16 +4,16 @@ pragma solidity 0.8.15; // Testing import { Test } from "forge-std/Test.sol"; -// Contracts -import { IL1BlockNumber } from "src/legacy/interfaces/IL1BlockNumber.sol"; -import { L1Block } from "src/L2/L1Block.sol"; - // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; +// Interfaces +import { IL1BlockNumber } from "interfaces/legacy/IL1BlockNumber.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; + contract L1BlockNumberTest is Test { - L1Block lb; + IL1Block lb; IL1BlockNumber bn; uint64 constant number = 99; @@ -21,7 +21,7 @@ contract L1BlockNumberTest is Test { /// @dev Sets up the test suite. function setUp() external { vm.etch(Predeploys.L1_BLOCK_ATTRIBUTES, vm.getDeployedCode("L1Block.sol:L1Block")); - lb = L1Block(Predeploys.L1_BLOCK_ATTRIBUTES); + lb = IL1Block(Predeploys.L1_BLOCK_ATTRIBUTES); bn = IL1BlockNumber( DeployUtils.create1({ _name: "L1BlockNumber", @@ -49,7 +49,7 @@ contract L1BlockNumberTest is Test { /// @dev Tests that `fallback` is correctly dispatched. 
function test_fallback_succeeds() external { - (bool success, bytes memory ret) = address(bn).call(hex""); + (bool success, bytes memory ret) = address(bn).call(hex"11"); assertEq(success, true); assertEq(ret, abi.encode(number)); } diff --git a/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol b/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol new file mode 100644 index 000000000000..28d9e58ed8a7 --- /dev/null +++ b/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.15; + +// Testing utilities +import { Test } from "forge-std/Test.sol"; +import { VmSafe } from "forge-std/Vm.sol"; + +// Target contract +import { IL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +contract Owner { + bool public isUpgrading; + + function setIsUpgrading(bool _isUpgrading) public { + isUpgrading = _isUpgrading; + } +} + +contract Implementation { + function setCode(bytes memory) public pure returns (uint256) { + return 1; + } + + function setStorage(bytes32, bytes32) public pure returns (uint256) { + return 2; + } + + function setOwner(address) public pure returns (uint256) { + return 3; + } + + function getOwner() public pure returns (uint256) { + return 4; + } + + function getImplementation() public pure returns (uint256) { + return 5; + } +} + +contract L1ChugSplashProxy_Test is Test { + IL1ChugSplashProxy proxy; + address impl; + address owner = makeAddr("owner"); + address alice = makeAddr("alice"); + + function setUp() public { + proxy = IL1ChugSplashProxy( + DeployUtils.create1({ + _name: "L1ChugSplashProxy", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1ChugSplashProxy.__constructor__, (owner))) + }) + ); + vm.prank(owner); + assertEq(proxy.getOwner(), owner); + + vm.prank(owner); + proxy.setCode(type(Implementation).runtimeCode); + + vm.prank(owner); + impl = proxy.getImplementation(); + } + + /// @notice Tests that the owner can deploy a new implementation with a given runtime code + function test_setCode_whenOwner_succeeds() public { + vm.prank(owner); + proxy.setCode(hex"604260005260206000f3"); + + vm.prank(owner); + assertNotEq(proxy.getImplementation(), impl); + } + + /// @notice Tests that when not the owner, `setCode` delegatecalls the implementation + function test_setCode_whenNotOwner_works() public view { + uint256 ret = Implementation(address(proxy)).setCode(hex"604260005260206000f3"); + assertEq(ret, 1); + } + + /// @notice Tests that when the owner deploys the same bytecode as the existing implementation, + /// it does not deploy a new implementation + function test_setCode_whenOwnerSameBytecode_works() public { + vm.prank(owner); + proxy.setCode(type(Implementation).runtimeCode); + + // does not deploy new implementation + vm.prank(owner); + assertEq(proxy.getImplementation(), impl); + } + + /// @notice Tests that when the owner calls `setCode` with insufficient gas to complete the implementation + /// contract's deployment, it reverts. + /// @dev If this solc version/settings change and modifying this proves time consuming, we can just remove it. + function test_setCode_whenOwnerAndDeployOutOfGas_reverts() public { + // The values below are best gotten by removing the gas limit parameter from the call and running the test with + // a + // verbosity of `-vvvv` then setting the value to a few thousand gas lower than the gas used by the call. 
+ // A faster way to do this for forge coverage cases, is to comment out the optimizer and optimizer runs in + // the foundry.toml file and then run forge test. This is faster because forge test only compiles modified + // contracts unlike forge coverage. + uint256 gasLimit; + + // Because forge coverage always runs with the optimizer disabled, + // if forge coverage is run before testing this with forge test or forge snapshot, forge clean should be + // run first so that it recompiles the contracts using the foundry.toml optimizer settings. + if (vm.isContext(VmSafe.ForgeContext.Coverage)) { + gasLimit = 95_000; + } else if (vm.isContext(VmSafe.ForgeContext.Test) || vm.isContext(VmSafe.ForgeContext.Snapshot)) { + gasLimit = 65_000; + } else { + revert("SafeCall_Test: unknown context"); + } + + vm.prank(owner); + vm.expectRevert(bytes("L1ChugSplashProxy: code was not correctly deployed")); // Ran out of gas + proxy.setCode{ gas: gasLimit }( + hex"fefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" + ); + } + + /// @notice Tests that when the caller is not the owner and the implementation is not set, all calls reverts + function test_calls_whenNotOwnerNoImplementation_reverts() public { + proxy = IL1ChugSplashProxy( + DeployUtils.create1({ + _name: "L1ChugSplashProxy", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1ChugSplashProxy.__constructor__, (owner))) + }) + ); + + vm.expectRevert(bytes("L1ChugSplashProxy: implementation is not set yet")); + Implementation(address(proxy)).setCode(hex"604260005260206000f3"); + } + + /// @notice Tests that when the caller is not the owner but the owner has marked `isUpgrading` as true, the call + /// reverts + function test_calls_whenUpgrading_reverts() public { + Owner ownerContract = new Owner(); + vm.prank(owner); + proxy.setOwner(address(ownerContract)); + + ownerContract.setIsUpgrading(true); + + vm.expectRevert(bytes("L1ChugSplashProxy: system is currently being upgraded")); + Implementation(address(proxy)).setCode(hex"604260005260206000f3"); + } + + /// @notice Tests that the owner can set storage of the proxy + function test_setStorage_whenOwner_works() public { + vm.prank(owner); + proxy.setStorage(bytes32(0), bytes32(uint256(42))); + assertEq(vm.load(address(proxy), bytes32(0)), bytes32(uint256(42))); + } + + /// @notice Tests that when not the owner, `setStorage` delegatecalls the implementation + function test_setStorage_whenNotOwner_works() public view { + uint256 ret = Implementation(address(proxy)).setStorage(bytes32(0), bytes32(uint256(42))); + assertEq(ret, 2); + assertEq(vm.load(address(proxy), bytes32(0)), bytes32(uint256(0))); + } + + /// @notice Tests that the owner can set the owner of the proxy + function test_setOwner_whenOwner_works() public { + vm.prank(owner); + proxy.setOwner(alice); + + vm.prank(alice); + assertEq(proxy.getOwner(), alice); + } + + /// @notice Tests that when not the owner, `setOwner` delegatecalls the implementation + function test_setOwner_whenNotOwner_works() public { + uint256 ret = 
Implementation(address(proxy)).setOwner(alice); + assertEq(ret, 3); + + vm.prank(owner); + assertEq(proxy.getOwner(), owner); + } + + /// @notice Tests that the owner can get the owner of the proxy + function test_getOwner_whenOwner_works() public { + vm.prank(owner); + assertEq(proxy.getOwner(), owner); + } + + /// @notice Tests that when not the owner, `getOwner` delegatecalls the implementation + function test_getOwner_whenNotOwner_works() public view { + uint256 ret = Implementation(address(proxy)).getOwner(); + assertEq(ret, 4); + } + + /// @notice Tests that the owner can get the implementation of the proxy + function test_getImplementation_whenOwner_works() public { + vm.prank(owner); + assertEq(proxy.getImplementation(), impl); + } + + /// @notice Tests that when not the owner, `getImplementation` delegatecalls the implementation + function test_getImplementation_whenNotOwner_works() public view { + uint256 ret = Implementation(address(proxy)).getImplementation(); + assertEq(ret, 5); + } +} diff --git a/packages/contracts-bedrock/test/legacy/LegacyMintableERC20.t.sol b/packages/contracts-bedrock/test/legacy/LegacyMintableERC20.t.sol new file mode 100644 index 000000000000..07f43cf5a610 --- /dev/null +++ b/packages/contracts-bedrock/test/legacy/LegacyMintableERC20.t.sol @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Testing utilities +import { CommonTest } from "test/setup/CommonTest.sol"; + +import { LegacyMintableERC20 } from "src/legacy/LegacyMintableERC20.sol"; +import { ILegacyMintableERC20 } from "interfaces/universal/ILegacyMintableERC20.sol"; + +contract LegacyMintableERC20_Test is CommonTest { + LegacyMintableERC20 legacyMintableERC20; + + function setUp() public override { + super.setUp(); + + legacyMintableERC20 = new LegacyMintableERC20(address(l2StandardBridge), address(L1Token), "_L2Token_", "_L2T_"); + } + + /// @notice Tests that the constructor sets the correct values + function test_constructor_works() public view { + assertEq(legacyMintableERC20.l2Bridge(), address(l2StandardBridge)); + assertEq(legacyMintableERC20.l1Token(), address(L1Token)); + assertEq(legacyMintableERC20.name(), "_L2Token_"); + assertEq(legacyMintableERC20.symbol(), "_L2T_"); + assertEq(legacyMintableERC20.decimals(), 18); + } + + /// @notice Tests that the contract supports the correct interfaces + function test_supportsInterface_works() public view { + assertEq(legacyMintableERC20.supportsInterface(bytes4(keccak256("supportsInterface(bytes4)"))), true); + assertEq( + legacyMintableERC20.supportsInterface( + ILegacyMintableERC20.l1Token.selector ^ ILegacyMintableERC20.mint.selector + ^ ILegacyMintableERC20.burn.selector + ), + true + ); + } + + /// @notice Tests that the mint function works when called by the bridge + function test_mint_byBridge_succeeds() public { + vm.prank(address(l2StandardBridge)); + legacyMintableERC20.mint(address(this), 1000); + assertEq(legacyMintableERC20.balanceOf(address(this)), 1000); + } + + /// @notice Tests that the mint function fails when called by an address other than the bridge + function test_mint_byNonBridge_reverts() public { + vm.expectRevert(bytes("Only L2 Bridge can mint and burn")); + legacyMintableERC20.mint(address(this), 1000); + } + + /// @notice Tests that the burn function works when called by the bridge + function test_burn_byBridge_succeeds() public { + vm.prank(address(l2StandardBridge)); + legacyMintableERC20.mint(address(this), 1000); + + vm.prank(address(l2StandardBridge)); + 
legacyMintableERC20.burn(address(this), 1000); + assertEq(legacyMintableERC20.balanceOf(address(this)), 0); + } + + /// @notice Tests that the burn function fails when called by an address other than the bridge + function test_burn_byNonBridge_reverts() public { + vm.expectRevert(bytes("Only L2 Bridge can mint and burn")); + legacyMintableERC20.burn(address(this), 1000); + } +} diff --git a/packages/contracts-bedrock/test/legacy/ResolvedDelegateProxy.t.sol b/packages/contracts-bedrock/test/legacy/ResolvedDelegateProxy.t.sol index 5b1f40d55bd8..19fbdec8f7c1 100644 --- a/packages/contracts-bedrock/test/legacy/ResolvedDelegateProxy.t.sol +++ b/packages/contracts-bedrock/test/legacy/ResolvedDelegateProxy.t.sol @@ -5,10 +5,10 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; // Target contract dependencies -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; // Target contract -import { IResolvedDelegateProxy } from "src/legacy/interfaces/IResolvedDelegateProxy.sol"; +import { IResolvedDelegateProxy } from "interfaces/legacy/IResolvedDelegateProxy.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract ResolvedDelegateProxy_Test is Test { diff --git a/packages/contracts-bedrock/test/libraries/Encoding.t.sol b/packages/contracts-bedrock/test/libraries/Encoding.t.sol index a301fdd97b3a..277cce328dcd 100644 --- a/packages/contracts-bedrock/test/libraries/Encoding.t.sol +++ b/packages/contracts-bedrock/test/libraries/Encoding.t.sol @@ -71,6 +71,18 @@ contract Encoding_Test is CommonTest { assertEq(legacyEncoding, bedrockEncoding); } + /// @dev Tests that encodeCrossDomainMessage reverts if version is greater than 1. + function testFuzz_encodeCrossDomainMessage_versionGreaterThanOne_reverts(uint256 nonce) external { + // nonce >> 240 must be greater than 1 + uint256 minInvalidNonce = (uint256(type(uint240).max) + 1) * 2; + nonce = bound(nonce, minInvalidNonce, type(uint256).max); + + EncodingContract encoding = new EncodingContract(); + + vm.expectRevert(bytes("Encoding: unknown cross domain message version")); + encoding.encodeCrossDomainMessage(nonce, address(this), address(this), 1, 100, hex""); + } + /// @dev Tests deposit transaction encoding. function testDiff_encodeDepositTransaction_succeeds( address _from, @@ -94,3 +106,20 @@ contract Encoding_Test is CommonTest { assertEq(txn, _txn); } } + +contract EncodingContract { + function encodeCrossDomainMessage( + uint256 nonce, + address sender, + address target, + uint256 value, + uint256 gasLimit, + bytes memory data + ) + external + pure + returns (bytes memory) + { + return Encoding.encodeCrossDomainMessage(nonce, sender, target, value, gasLimit, data); + } +} diff --git a/packages/contracts-bedrock/test/libraries/SafeCall.t.sol b/packages/contracts-bedrock/test/libraries/SafeCall.t.sol index ffdf07b09b83..d05e952e44d9 100644 --- a/packages/contracts-bedrock/test/libraries/SafeCall.t.sol +++ b/packages/contracts-bedrock/test/libraries/SafeCall.t.sol @@ -3,6 +3,7 @@ pragma solidity 0.8.15; // Testing utilities import { Test } from "forge-std/Test.sol"; +import { VmSafe } from "forge-std/Vm.sol"; import { StdCheatsSafe } from "forge-std/StdCheats.sol"; // Target contract @@ -52,7 +53,7 @@ contract SafeCall_Test is Test { /// @dev Tests that the `send` function with value succeeds. 
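+    ///      Note: `bound` is used below instead of `vm.assume(_gas != 0)`, presumably so that a zero fuzz input is
+    ///      mapped into range instead of causing the run to be rejected.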
     function testFuzz_sendWithGas_succeeds(address _from, address _to, uint64 _gas, uint256 _value) external {
-        vm.assume(_gas != 0);
+        _gas = uint64(bound(_gas, 1, type(uint64).max));
         sendTest({ _from: _from, _to: _to, _gas: _gas, _value: _value });
     }
@@ -122,9 +123,29 @@ contract SafeCall_Test is Test {
         for (uint64 i = 40_000; i < 100_000; i++) {
             uint256 snapshot = vm.snapshot();
-            // 65_922 is the exact amount of gas required to make the safe call
-            // successfully.
-            if (i < 65_922) {
+            // The values below are best obtained by setting the value to a high number, running the test with
+            // verbosity `-vvv`, and then setting the value to the gas argument of the failed assertion.
+            // A faster way to do this for forge coverage cases is to comment out the optimizer and optimizer runs
+            // in the foundry.toml file and then run forge test. This is faster because forge test only recompiles
+            // modified contracts, unlike forge coverage.
+            uint256 expected;
+
+            // Because forge coverage always runs with the optimizer disabled, if forge coverage is run before
+            // testing this with forge test or forge snapshot, forge clean should be run first so that the
+            // contracts are recompiled with the foundry.toml optimizer settings.
+            if (vm.isContext(VmSafe.ForgeContext.Coverage)) {
+                // 66_290 is the exact amount of gas required to make the safe call
+                // successfully with the optimizer disabled (run via forge coverage).
+                expected = 66_290;
+            } else if (vm.isContext(VmSafe.ForgeContext.Test) || vm.isContext(VmSafe.ForgeContext.Snapshot)) {
+                // 65_922 is the exact amount of gas required to make the safe call
+                // successfully with the foundry.toml optimizer settings.
+                expected = 65_922;
+            } else {
+                revert("SafeCall_Test: unknown context");
+            }
+
+            if (i < expected) {
                 assertFalse(caller.makeSafeCall(i, 25_000));
             } else {
                 vm.expectCallMinGas(address(caller), 0, 25_000, abi.encodeCall(caller.setA, (1)));
@@ -142,9 +163,29 @@ contract SafeCall_Test is Test {
         for (uint64 i = 15_200_000; i < 15_300_000; i++) {
             uint256 snapshot = vm.snapshot();
-            // 15_278_621 is the exact amount of gas required to make the safe call
-            // successfully.
-            if (i < 15_278_621) {
+            // The values below are best obtained by setting the value to a high number, running the test with
+            // verbosity `-vvv`, and then setting the value to the gas argument of the failed assertion.
+            // A faster way to do this for forge coverage cases is to comment out the optimizer and optimizer runs
+            // in the foundry.toml file and then run forge test. This is faster because forge test only recompiles
+            // modified contracts, unlike forge coverage.
+            uint256 expected;
+
+            // Because forge coverage always runs with the optimizer disabled, if forge coverage is run before
+            // testing this with forge test or forge snapshot, forge clean should be run first so that the
+            // contracts are recompiled with the foundry.toml optimizer settings.
+            if (vm.isContext(VmSafe.ForgeContext.Coverage)) {
+                // 15_278_989 is the exact amount of gas required to make the safe call
+                // successfully with the optimizer disabled (run via forge coverage).
+                expected = 15_278_989;
+            } else if (vm.isContext(VmSafe.ForgeContext.Test) || vm.isContext(VmSafe.ForgeContext.Snapshot)) {
+                // 15_278_621 is the exact amount of gas required to make the safe call
+                // successfully with the foundry.toml optimizer settings.
+ expected = 15_278_621; + } else { + revert("SafeCall_Test: unknown context"); + } + + if (i < expected) { assertFalse(caller.makeSafeCall(i, 15_000_000)); } else { vm.expectCallMinGas(address(caller), 0, 15_000_000, abi.encodeCall(caller.setA, (1))); diff --git a/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol b/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol index 781ada343335..e6d04d971db5 100644 --- a/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol +++ b/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol @@ -3,6 +3,7 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; import { MerkleTrie } from "src/libraries/trie/MerkleTrie.sol"; +import { RLPReader } from "src/libraries/rlp/RLPReader.sol"; import { FFIInterface } from "test/setup/FFIInterface.sol"; import "src/libraries/rlp/RLPErrors.sol"; @@ -372,4 +373,143 @@ contract MerkleTrie_get_Test is Test { vm.expectRevert("MerkleTrie: ran out of proof elements"); MerkleTrie.get(key, proof, root); } + + /// @notice Tests that `get` reverts if a proof node has an unknown prefix + function test_get_unknownNodePrefix_reverts(uint8 prefix) external { + // bound it to only have prefixes where the first nibble is >= 4 + prefix = uint8(bound(prefix, 0x40, 0xff)); + // if the first nibble of the prefix is odd, make it even by adding 16 + if (((prefix / 16) % 2) == 1) { + unchecked { + prefix += 16; + } + // bound it again in case it overflowed + prefix = uint8(bound(prefix, 0x40, 0xff)); + } + + MerkleTrieWrapper wrapper = new MerkleTrieWrapper(); + + bytes memory key = abi.encodePacked( + keccak256(abi.encodePacked(bytes32(0xa15bc60c955c405d20d9149c709e2460f1c2d9a497496a7f46004d1772c3054c))) + ); + bytes[] memory proof = new bytes[](5); + proof[0] = + hex"f90211a085ed702d58e6a962ad0e785e5c9036e06d878fd065eb9669122447f6aee7957da05badb8cfd5a7493d928614730af6e14eabe2c93fbac93c853dde3270c446309da01de85a57c524ac56a5bd4bed0b0aa7d963e364ad930ea964d0a42631a77ded4da0fe3143892366faeb9fae1117b888263afe0f74e6c73555fee53a604bf7188431a0af2c79f0dddd15d6f62e3fa60d515c44d58444ad3915c7ca4bddb31c8f148d0ca08f37a2f9093a4aee39519f3a06fe4674cc670fbbbd7a5f4eb096310b7bc1fdc9a086bd12d2031d9714130c687e6822250fa24b3147824780bea96cf8a7406c8966a03e42538ba2da8adaa0eca4550ef62de4dabde8ca06b71ac1b276ff31b67a7655a04a439f7eb6a62c77ec921139925af3359f61d16e083076e0e425583289107d7da0c453a51991b5a4c6174ddff77c0b7d9cc86f05ffda6ff523e2365f19797c7a00a06f43b7b9a118264ab4b6c24d7d3de77f39071a298426bfc27180adfca57d590da0032e0db4dcf122d4bdb1d4ec3c5df5fabd3127bcefe412cb046b7f0d80d11c9fa0560c2b8c9466b8cb5ffd600f24ea0ed9838bfdab7870d505d4387c2468d3c498a0597996e939ff8c29c9e59dc47c111e760450a9c4fe2b065825762da2a1f32495a0e3411c9af104364230c49de5a4d0802c84df18beee9778673364e1747a875622a02a6928825356d8280f361a02285af30e73889f4e55ecb63ed85c8581e07061d680"; + proof[1] = + 
hex"f90211a0db246208c4cef45e9aeb9f1df1baa8572675bc453f7da538165a2bb9e6a4c416a0d26d82a9ddff901d2a1f9e18506120dec0e5b3c95549c5bff0efc355061ea73fa04f1cedbb5c7df7ee5cc3210980baf5537affb29c661c6a4eeb193bc42e7fbc74a0acea389e0cf577d0e13483d98b15c82618ac18b7cc4a479981e3e672ddd16867a0ef59a06aeea1eb5ba1313bbe1fa74ff264d84d7319ab6178213734b5b5efa9c1a08f85dc6001713d77aa4e12982dfdb28fd1c7dcc290d46f2749e8a7d67ba2a694a0f6698ff794881edc50340b75311de64ce3da5f97debcfdfd4d20de57ef3ba7eba0680071ce05e9c7915f731bac8b9673332d1d77ea1f7dadab36d9b233eea32ba4a035ad3686f436232360c2aa364c9f2aa2081318b9fb33cd1050d69ee46f791d62a03b495b3d65d9ae39680a0f835c1d1378d218f7b1fb88d2b2c6ac6ef916f09172a0a808d1e8c632d9a6cfeb3c2c123a58b5b3e1998d4bd02c5fb0f7c5d4ba1338e6a0369376e9152831135ff3a902c9740cf22951d67edd51bf0541565e379d7efc25a0cc26d7fa1c326bc14950e92d9de78e4ed8372ff9727dec34602f24057b3a9b30a0278081783022e748dc70874a72377038935c00c1f0a24bbb8cd0fc208d8b68f4a06c4e83593571b94d08cb78ece0de4920b02a650a47a16583f94c8fe35f724707a0cd7eb9d730e5138fd943200b577e7bbb827d010a50d19af2f4b19ef19658156d80"; + proof[2] = + hex"f90211a0065f58fbe63e8e3387e91047d5b7a4532e7d9de0abc47f04791aef27a140fdb5a0858beea29778551c01b0d3e542d707675856da9c3f1d065c845e55c24d77be89a0e90a410489eff6f4f8d70b0cce1fb1339117ec0f6f1db195a6cc410509a2ebaea078ba7fe504e8d01d57f6bee52c4938d779108e500b5923272441ed2964b8c45da0f0430ed9fa807e5fb6ace75f8766ea60009d8539e00006e359f5f7bc38a76596a0a98a7938db99a2d80abea6349de42bf2576c9e51cc715c85fbacab365ec16f5ba026fadc7d124a456c62ada928eaede3e80611e3e6f99041f7393f062e9e788c8ca0ca48cad1e00d22d6146173341a6060378e738be727a7265a923cf6bfd1f8b610a0f8a4aae21a78ac28e2a61f50396f9a80f6c8232fe4afa203160361c0962242baa09a1029479959fb29b4da7239393fd6ca20bc376d860324f429a51b0e0565a158a0eefb84d3943d680e176258dffe0104ac48c171a8574a811535256a2d8ba531dea062a3d709a2f70ba1970842c4f20a602291273d1f6022e7a14cde2afdcd51e795a0397e6b9b87012cd79cbd0bb7daa4cc43830a673d80b65fb88c0449140175d89ca0f8a4c73c0078cbd32961227910e3f9315bc587716062e39f66be19747ccf9b67a0ea4bdd1b187fdba273a8625f88f284994d19c38ec58651839852665717d953d9a0319ebf356f45da83c7f106f1fd3decbf15f651fad3389a0d279602cdea8ee11480"; + proof[3] = + hex"f8f1a069a092c7a950214e7e45b99012dc8ad112eab0fc94ae5ca9efbd6949068384f280a0b25c46db67ef7cf0c47bb400c31c85a26c5a204431527c964c8ecaf3d63e52cc80a01911a2a74db0d8d182447176e23f25556d1a1eaa0afad96453f2d64876ad88e480808080a04a0ca9e3bed1bc3e3c819384d19b6d5e523164a6520c4eb42e828a63ef730ae38080a03b598ed1b9269d4b05e2e75cfb54298d25437669870c919a59a147d2d256fdba80a0db2d655057c83107a73d086cfdd8fcc74739bb48c652eb0ce597178ecf96b39aa05c66ac392a761341b9c22b773ea19af311f34ef537640b9bb96842ec6ace913280"; + + proof[4] = bytes.concat( + hex"f69f", + bytes1(prefix), + hex"4dcf44e265ba93879b2da89e1b16ab48fc5eb8e31bc16b0612d6da8463f195942536c09e5f5691498805884fa37811be3b2bddb4" + ); + + bytes32 root; + (proof[0], proof[1], proof[2], proof[3], root) = rehashOtherElements(proof[4]); + + vm.expectRevert("MerkleTrie: received a node with an unknown prefix"); + wrapper.get(key, proof, root); + } + + /// @notice Tests that `get` reverts if a proof node is unparsable i.e list length is not 2 or 17 + function test_get_unparsableNode_reverts(uint8 listLen) external { + listLen = uint8(bound(listLen, 1, RLPReader.MAX_LIST_LENGTH)); + if (listLen == 2 || listLen == 17) { + listLen++; + } + + MerkleTrieWrapper wrapper = new MerkleTrieWrapper(); + + bytes memory key = abi.encodePacked( + keccak256(abi.encodePacked(bytes32(0xa15bc60c955c405d20d9149c709e2460f1c2d9a497496a7f46004d1772c3054c))) + 
); + bytes[] memory proof = new bytes[](5); + proof[0] = + hex"f90211a085ed702d58e6a962ad0e785e5c9036e06d878fd065eb9669122447f6aee7957da05badb8cfd5a7493d928614730af6e14eabe2c93fbac93c853dde3270c446309da01de85a57c524ac56a5bd4bed0b0aa7d963e364ad930ea964d0a42631a77ded4da0fe3143892366faeb9fae1117b888263afe0f74e6c73555fee53a604bf7188431a0af2c79f0dddd15d6f62e3fa60d515c44d58444ad3915c7ca4bddb31c8f148d0ca08f37a2f9093a4aee39519f3a06fe4674cc670fbbbd7a5f4eb096310b7bc1fdc9a086bd12d2031d9714130c687e6822250fa24b3147824780bea96cf8a7406c8966a03e42538ba2da8adaa0eca4550ef62de4dabde8ca06b71ac1b276ff31b67a7655a04a439f7eb6a62c77ec921139925af3359f61d16e083076e0e425583289107d7da0c453a51991b5a4c6174ddff77c0b7d9cc86f05ffda6ff523e2365f19797c7a00a06f43b7b9a118264ab4b6c24d7d3de77f39071a298426bfc27180adfca57d590da0032e0db4dcf122d4bdb1d4ec3c5df5fabd3127bcefe412cb046b7f0d80d11c9fa0560c2b8c9466b8cb5ffd600f24ea0ed9838bfdab7870d505d4387c2468d3c498a0597996e939ff8c29c9e59dc47c111e760450a9c4fe2b065825762da2a1f32495a0e3411c9af104364230c49de5a4d0802c84df18beee9778673364e1747a875622a02a6928825356d8280f361a02285af30e73889f4e55ecb63ed85c8581e07061d680"; + proof[1] = + hex"f90211a0db246208c4cef45e9aeb9f1df1baa8572675bc453f7da538165a2bb9e6a4c416a0d26d82a9ddff901d2a1f9e18506120dec0e5b3c95549c5bff0efc355061ea73fa04f1cedbb5c7df7ee5cc3210980baf5537affb29c661c6a4eeb193bc42e7fbc74a0acea389e0cf577d0e13483d98b15c82618ac18b7cc4a479981e3e672ddd16867a0ef59a06aeea1eb5ba1313bbe1fa74ff264d84d7319ab6178213734b5b5efa9c1a08f85dc6001713d77aa4e12982dfdb28fd1c7dcc290d46f2749e8a7d67ba2a694a0f6698ff794881edc50340b75311de64ce3da5f97debcfdfd4d20de57ef3ba7eba0680071ce05e9c7915f731bac8b9673332d1d77ea1f7dadab36d9b233eea32ba4a035ad3686f436232360c2aa364c9f2aa2081318b9fb33cd1050d69ee46f791d62a03b495b3d65d9ae39680a0f835c1d1378d218f7b1fb88d2b2c6ac6ef916f09172a0a808d1e8c632d9a6cfeb3c2c123a58b5b3e1998d4bd02c5fb0f7c5d4ba1338e6a0369376e9152831135ff3a902c9740cf22951d67edd51bf0541565e379d7efc25a0cc26d7fa1c326bc14950e92d9de78e4ed8372ff9727dec34602f24057b3a9b30a0278081783022e748dc70874a72377038935c00c1f0a24bbb8cd0fc208d8b68f4a06c4e83593571b94d08cb78ece0de4920b02a650a47a16583f94c8fe35f724707a0cd7eb9d730e5138fd943200b577e7bbb827d010a50d19af2f4b19ef19658156d80"; + proof[2] = + hex"f90211a0065f58fbe63e8e3387e91047d5b7a4532e7d9de0abc47f04791aef27a140fdb5a0858beea29778551c01b0d3e542d707675856da9c3f1d065c845e55c24d77be89a0e90a410489eff6f4f8d70b0cce1fb1339117ec0f6f1db195a6cc410509a2ebaea078ba7fe504e8d01d57f6bee52c4938d779108e500b5923272441ed2964b8c45da0f0430ed9fa807e5fb6ace75f8766ea60009d8539e00006e359f5f7bc38a76596a0a98a7938db99a2d80abea6349de42bf2576c9e51cc715c85fbacab365ec16f5ba026fadc7d124a456c62ada928eaede3e80611e3e6f99041f7393f062e9e788c8ca0ca48cad1e00d22d6146173341a6060378e738be727a7265a923cf6bfd1f8b610a0f8a4aae21a78ac28e2a61f50396f9a80f6c8232fe4afa203160361c0962242baa09a1029479959fb29b4da7239393fd6ca20bc376d860324f429a51b0e0565a158a0eefb84d3943d680e176258dffe0104ac48c171a8574a811535256a2d8ba531dea062a3d709a2f70ba1970842c4f20a602291273d1f6022e7a14cde2afdcd51e795a0397e6b9b87012cd79cbd0bb7daa4cc43830a673d80b65fb88c0449140175d89ca0f8a4c73c0078cbd32961227910e3f9315bc587716062e39f66be19747ccf9b67a0ea4bdd1b187fdba273a8625f88f284994d19c38ec58651839852665717d953d9a0319ebf356f45da83c7f106f1fd3decbf15f651fad3389a0d279602cdea8ee11480"; + proof[3] = + 
hex"f8f1a069a092c7a950214e7e45b99012dc8ad112eab0fc94ae5ca9efbd6949068384f280a0b25c46db67ef7cf0c47bb400c31c85a26c5a204431527c964c8ecaf3d63e52cc80a01911a2a74db0d8d182447176e23f25556d1a1eaa0afad96453f2d64876ad88e480808080a04a0ca9e3bed1bc3e3c819384d19b6d5e523164a6520c4eb42e828a63ef730ae38080a03b598ed1b9269d4b05e2e75cfb54298d25437669870c919a59a147d2d256fdba80a0db2d655057c83107a73d086cfdd8fcc74739bb48c652eb0ce597178ecf96b39aa05c66ac392a761341b9c22b773ea19af311f34ef537640b9bb96842ec6ace913280"; + proof[4] = + hex"f69f204dcf44e265ba93879b2da89e1b16ab48fc5eb8e31bc16b0612d6da8463f195942536c09e5f5691498805884fa37811be3b2bddb4"; // Correct + // leaf node + + bytes32 root = keccak256(proof[0]); + + // Should not revert + wrapper.get(key, proof, root); + + if (listLen > 3) { + // Node with list > 3 + proof[4] = + hex"f8379f204dcf44e265ba93879b2da89e1b16ab48fc5eb8e31bc16b0612d6da8463f195942536c09e5f5691498805884fa37811be3b2bddb480"; + for (uint256 i; i < listLen - 3; i++) { + proof[4] = bytes.concat(proof[4], hex"80"); + } + proof[4][1] = bytes1(uint8(proof[4][1]) + (listLen - 3)); + // rehash all proof elements and insert it into the proof element above it + (proof[0], proof[1], proof[2], proof[3], root) = rehashOtherElements(proof[4]); + + vm.expectRevert("MerkleTrie: received an unparseable node"); + wrapper.get(key, proof, root); + } else if (listLen == 1) { + // Node with list of 1 + proof[4] = hex"e09f204dcf44e265ba93879b2da89e1b16ab48fc5eb8e31bc16b0612d6da8463f1"; + // rehash all proof elements and insert it into the proof element above it + (proof[0], proof[1], proof[2], proof[3], root) = rehashOtherElements(proof[4]); + + vm.expectRevert("MerkleTrie: received an unparseable node"); + wrapper.get(key, proof, root); + } else if (listLen == 3) { + // Node with list of 3 + proof[4] = + hex"f79f204dcf44e265ba93879b2da89e1b16ab48fc5eb8e31bc16b0612d6da8463f195942536c09e5f5691498805884fa37811be3b2bddb480"; + // rehash all proof elements and insert it into the proof element above it + (proof[0], proof[1], proof[2], proof[3], root) = rehashOtherElements(proof[4]); + + vm.expectRevert("MerkleTrie: received an unparseable node"); + wrapper.get(key, proof, root); + } + } + + function rehashOtherElements(bytes memory _proof4) + private + pure + returns (bytes memory proof0_, bytes memory proof1_, bytes memory proof2_, bytes memory proof3_, bytes32 root_) + { + // rehash all proof elements and insert it into the proof element above it + proof3_ = bytes.concat( + hex"f8f1a069a092c7a950214e7e45b99012dc8ad112eab0fc94ae5ca9efbd6949068384f280a0b25c46db67ef7cf0c47bb400c31c85a26c5a204431527c964c8ecaf3d63e52cc80a0", + keccak256(_proof4), + hex"80808080a04a0ca9e3bed1bc3e3c819384d19b6d5e523164a6520c4eb42e828a63ef730ae38080a03b598ed1b9269d4b05e2e75cfb54298d25437669870c919a59a147d2d256fdba80a0db2d655057c83107a73d086cfdd8fcc74739bb48c652eb0ce597178ecf96b39aa05c66ac392a761341b9c22b773ea19af311f34ef537640b9bb96842ec6ace913280" + ); + proof2_ = bytes.concat( + hex"f90211a0065f58fbe63e8e3387e91047d5b7a4532e7d9de0abc47f04791aef27a140fdb5a0858beea29778551c01b0d3e542d707675856da9c3f1d065c845e55c24d77be89a0e90a410489eff6f4f8d70b0cce1fb1339117ec0f6f1db195a6cc410509a2ebaea078ba7fe504e8d01d57f6bee52c4938d779108e500b5923272441ed2964b8c45da0f0430ed9fa807e5fb6ace75f8766ea60009d8539e00006e359f5f7bc38a76596a0a98a7938db99a2d80abea6349de42bf2576c9e51cc715c85fbacab365ec16f5ba0", + keccak256(proof3_), + 
hex"a0ca48cad1e00d22d6146173341a6060378e738be727a7265a923cf6bfd1f8b610a0f8a4aae21a78ac28e2a61f50396f9a80f6c8232fe4afa203160361c0962242baa09a1029479959fb29b4da7239393fd6ca20bc376d860324f429a51b0e0565a158a0eefb84d3943d680e176258dffe0104ac48c171a8574a811535256a2d8ba531dea062a3d709a2f70ba1970842c4f20a602291273d1f6022e7a14cde2afdcd51e795a0397e6b9b87012cd79cbd0bb7daa4cc43830a673d80b65fb88c0449140175d89ca0f8a4c73c0078cbd32961227910e3f9315bc587716062e39f66be19747ccf9b67a0ea4bdd1b187fdba273a8625f88f284994d19c38ec58651839852665717d953d9a0319ebf356f45da83c7f106f1fd3decbf15f651fad3389a0d279602cdea8ee11480" + ); + proof1_ = bytes.concat( + hex"f90211a0db246208c4cef45e9aeb9f1df1baa8572675bc453f7da538165a2bb9e6a4c416a0d26d82a9ddff901d2a1f9e18506120dec0e5b3c95549c5bff0efc355061ea73fa04f1cedbb5c7df7ee5cc3210980baf5537affb29c661c6a4eeb193bc42e7fbc74a0acea389e0cf577d0e13483d98b15c82618ac18b7cc4a479981e3e672ddd16867a0ef59a06aeea1eb5ba1313bbe1fa74ff264d84d7319ab6178213734b5b5efa9c1a08f85dc6001713d77aa4e12982dfdb28fd1c7dcc290d46f2749e8a7d67ba2a694a0f6698ff794881edc50340b75311de64ce3da5f97debcfdfd4d20de57ef3ba7eba0680071ce05e9c7915f731bac8b9673332d1d77ea1f7dadab36d9b233eea32ba4a035ad3686f436232360c2aa364c9f2aa2081318b9fb33cd1050d69ee46f791d62a03b495b3d65d9ae39680a0f835c1d1378d218f7b1fb88d2b2c6ac6ef916f09172a0a808d1e8c632d9a6cfeb3c2c123a58b5b3e1998d4bd02c5fb0f7c5d4ba1338e6a0369376e9152831135ff3a902c9740cf22951d67edd51bf0541565e379d7efc25a0", + keccak256(proof2_), + hex"a0278081783022e748dc70874a72377038935c00c1f0a24bbb8cd0fc208d8b68f4a06c4e83593571b94d08cb78ece0de4920b02a650a47a16583f94c8fe35f724707a0cd7eb9d730e5138fd943200b577e7bbb827d010a50d19af2f4b19ef19658156d80" + ); + proof0_ = bytes.concat( + hex"f90211a085ed702d58e6a962ad0e785e5c9036e06d878fd065eb9669122447f6aee7957da05badb8cfd5a7493d928614730af6e14eabe2c93fbac93c853dde3270c446309da0", + keccak256(proof1_), + hex"a0fe3143892366faeb9fae1117b888263afe0f74e6c73555fee53a604bf7188431a0af2c79f0dddd15d6f62e3fa60d515c44d58444ad3915c7ca4bddb31c8f148d0ca08f37a2f9093a4aee39519f3a06fe4674cc670fbbbd7a5f4eb096310b7bc1fdc9a086bd12d2031d9714130c687e6822250fa24b3147824780bea96cf8a7406c8966a03e42538ba2da8adaa0eca4550ef62de4dabde8ca06b71ac1b276ff31b67a7655a04a439f7eb6a62c77ec921139925af3359f61d16e083076e0e425583289107d7da0c453a51991b5a4c6174ddff77c0b7d9cc86f05ffda6ff523e2365f19797c7a00a06f43b7b9a118264ab4b6c24d7d3de77f39071a298426bfc27180adfca57d590da0032e0db4dcf122d4bdb1d4ec3c5df5fabd3127bcefe412cb046b7f0d80d11c9fa0560c2b8c9466b8cb5ffd600f24ea0ed9838bfdab7870d505d4387c2468d3c498a0597996e939ff8c29c9e59dc47c111e760450a9c4fe2b065825762da2a1f32495a0e3411c9af104364230c49de5a4d0802c84df18beee9778673364e1747a875622a02a6928825356d8280f361a02285af30e73889f4e55ecb63ed85c8581e07061d680" + ); + root_ = keccak256(proof0_); + } +} + +contract MerkleTrieWrapper { + function get(bytes memory key, bytes[] memory proof, bytes32 root) external pure returns (bytes memory) { + return MerkleTrie.get(key, proof, root); + } } diff --git a/packages/contracts-bedrock/test/mocks/AlphabetVM.sol b/packages/contracts-bedrock/test/mocks/AlphabetVM.sol index 6ecf74e22868..ae024e5b2a12 100644 --- a/packages/contracts-bedrock/test/mocks/AlphabetVM.sol +++ b/packages/contracts-bedrock/test/mocks/AlphabetVM.sol @@ -6,7 +6,7 @@ import { PreimageKeyLib } from "src/cannon/PreimageKeyLib.sol"; import "src/dispute/lib/Types.sol"; // Interfaces -import { IBigStepper, IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; +import { IBigStepper, IPreimageOracle } from "interfaces/dispute/IBigStepper.sol"; 
/// @title AlphabetVM /// @dev A mock VM for the purpose of testing the dispute game infrastructure. Note that this only works diff --git a/packages/contracts-bedrock/test/mocks/OptimistInviterHelper.sol b/packages/contracts-bedrock/test/mocks/OptimistInviterHelper.sol deleted file mode 100644 index ebc2289f9c10..000000000000 --- a/packages/contracts-bedrock/test/mocks/OptimistInviterHelper.sol +++ /dev/null @@ -1,94 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -import { OptimistInviter } from "src/periphery/op-nft/OptimistInviter.sol"; -import { ECDSA } from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; - -/// @notice Simple helper contract that helps with testing flow and signature for -/// OptimistInviter contract. Made this a separate contract instead of including -/// in OptimistInviter.t.sol for reusability. -contract OptimistInviterHelper { - /// @notice EIP712 typehash for the ClaimableInvite type. - bytes32 public constant CLAIMABLE_INVITE_TYPEHASH = keccak256("ClaimableInvite(address issuer,bytes32 nonce)"); - - /// @notice EIP712 typehash for the EIP712Domain type that is included as part of the signature. - bytes32 public constant EIP712_DOMAIN_TYPEHASH = - keccak256("EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)"); - - /// @notice Address of OptimistInviter contract we are testing. - OptimistInviter public optimistInviter; - - /// @notice OptimistInviter contract name. Used to construct the EIP-712 domain. - string public name; - - /// @notice Keeps track of current nonce to generate new nonces for each invite. - uint256 public currentNonce; - - constructor(OptimistInviter _optimistInviter, string memory _name) { - optimistInviter = _optimistInviter; - name = _name; - } - - /// @notice Returns the hash of the struct ClaimableInvite. - /// @param _claimableInvite ClaimableInvite struct to hash. - /// @return EIP-712 typed struct hash. - function getClaimableInviteStructHash(OptimistInviter.ClaimableInvite memory _claimableInvite) - public - pure - returns (bytes32) - { - return keccak256(abi.encode(CLAIMABLE_INVITE_TYPEHASH, _claimableInvite.issuer, _claimableInvite.nonce)); - } - - /// @notice Returns a bytes32 nonce that should change everytime. In practice, people should use - /// pseudorandom nonces. - /// @return Nonce that should be used as part of ClaimableInvite. - function consumeNonce() public returns (bytes32) { - return bytes32(keccak256(abi.encode(currentNonce++))); - } - - /// @notice Returns a ClaimableInvite with the issuer and current nonce. - /// @param _issuer Issuer to include in the ClaimableInvite. - /// @return ClaimableInvite that can be hashed & signed. - function getClaimableInviteWithNewNonce(address _issuer) public returns (OptimistInviter.ClaimableInvite memory) { - return OptimistInviter.ClaimableInvite(_issuer, consumeNonce()); - } - - /// @notice Computes the EIP712 digest with default correct parameters. - /// @param _claimableInvite ClaimableInvite struct to hash. - /// @return EIP-712 compatible digest. - function getDigest(OptimistInviter.ClaimableInvite calldata _claimableInvite) public view returns (bytes32) { - return getDigestWithEIP712Domain( - _claimableInvite, - bytes(name), - bytes(optimistInviter.EIP712_VERSION()), - block.chainid, - address(optimistInviter) - ); - } - - /// @notice Computes the EIP712 digest with the given domain parameters. - /// Used for testing that different domain parameters fail. - /// @param _claimableInvite ClaimableInvite struct to hash. 
- /// @param _name Contract name to use in the EIP712 domain. - /// @param _version Contract version to use in the EIP712 domain. - /// @param _chainid Chain ID to use in the EIP712 domain. - /// @param _verifyingContract Address to use in the EIP712 domain. - /// @return EIP-712 compatible digest. - function getDigestWithEIP712Domain( - OptimistInviter.ClaimableInvite calldata _claimableInvite, - bytes memory _name, - bytes memory _version, - uint256 _chainid, - address _verifyingContract - ) - public - pure - returns (bytes32) - { - bytes32 domainSeparator = keccak256( - abi.encode(EIP712_DOMAIN_TYPEHASH, keccak256(_name), keccak256(_version), _chainid, _verifyingContract) - ); - return ECDSA.toTypedDataHash(domainSeparator, getClaimableInviteStructHash(_claimableInvite)); - } -} diff --git a/packages/contracts-bedrock/test/opcm/DeployAltDA.t.sol b/packages/contracts-bedrock/test/opcm/DeployAltDA.t.sol index d7cdc69f37e7..a23708e427ac 100644 --- a/packages/contracts-bedrock/test/opcm/DeployAltDA.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployAltDA.t.sol @@ -4,9 +4,9 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; import { DeployAltDAInput, DeployAltDAOutput, DeployAltDA } from "scripts/deploy/DeployAltDA.s.sol"; -import { IDataAvailabilityChallenge } from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; -import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IDataAvailabilityChallenge } from "interfaces/L1/IDataAvailabilityChallenge.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract DeployAltDAInput_Test is Test { diff --git a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol index 0282ea0b3d90..584aa59a9c2b 100644 --- a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol @@ -4,22 +4,22 @@ pragma solidity 0.8.15; import { Test, stdStorage, StdStorage } from "forge-std/Test.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; -import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; +import { IMIPS } from "interfaces/cannon/IMIPS.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; -import { IL1ERC721Bridge } from 
"src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; -import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; -import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; +import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; import { DeployImplementationsInput, @@ -61,7 +61,7 @@ contract DeployImplementationsInput_Test is Test { dii.disputeGameFinalityDelaySeconds(); vm.expectRevert("DeployImplementationsInput: not set"); - dii.release(); + dii.l1ContractsRelease(); vm.expectRevert("DeployImplementationsInput: not set"); dii.superchainConfigProxy(); @@ -69,23 +69,9 @@ contract DeployImplementationsInput_Test is Test { vm.expectRevert("DeployImplementationsInput: not set"); dii.protocolVersionsProxy(); - vm.expectRevert("DeployImplementationsInput: not set"); - dii.opcmProxyOwner(); - vm.expectRevert("DeployImplementationsInput: not set"); dii.standardVersionsToml(); } - - function test_opcmProxyOwner_whenNotSet_reverts() public { - vm.expectRevert("DeployImplementationsInput: not set"); - dii.opcmProxyOwner(); - } - - function test_opcmProxyOwner_succeeds() public { - dii.set(dii.opcmProxyOwner.selector, address(msg.sender)); - address opcmProxyOwner = dii.opcmProxyOwner(); - assertEq(address(msg.sender), address(opcmProxyOwner), "100"); - } } contract DeployImplementationsOutput_Test is Test { @@ -96,17 +82,7 @@ contract DeployImplementationsOutput_Test is Test { } function test_set_succeeds() public { - IProxy proxy = IProxy( - DeployUtils.create1({ - _name: "Proxy", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (address(0)))) - }) - ); - address opcmImpl = address(makeAddr("opcmImpl")); - vm.prank(address(0)); - proxy.upgradeTo(opcmImpl); - - OPContractsManager opcmProxy = OPContractsManager(address(proxy)); + OPContractsManager opcm = OPContractsManager(address(makeAddr("opcm"))); IOptimismPortal2 optimismPortalImpl = IOptimismPortal2(payable(makeAddr("optimismPortalImpl"))); IDelayedWETH delayedWETHImpl = IDelayedWETH(payable(makeAddr("delayedWETHImpl"))); IPreimageOracle preimageOracleSingleton = IPreimageOracle(makeAddr("preimageOracleSingleton")); @@ -120,8 +96,7 @@ contract DeployImplementationsOutput_Test is Test { IOptimismMintableERC20Factory(makeAddr("optimismMintableERC20FactoryImpl")); IDisputeGameFactory disputeGameFactoryImpl = IDisputeGameFactory(makeAddr("disputeGameFactoryImpl")); - vm.etch(address(opcmProxy), address(opcmProxy).code); - vm.etch(address(opcmImpl), hex"01"); + vm.etch(address(opcm), hex"01"); vm.etch(address(optimismPortalImpl), hex"01"); vm.etch(address(delayedWETHImpl), hex"01"); vm.etch(address(preimageOracleSingleton), hex"01"); @@ -132,7 +107,7 @@ contract DeployImplementationsOutput_Test is Test { vm.etch(address(l1StandardBridgeImpl), hex"01"); 
vm.etch(address(optimismMintableERC20FactoryImpl), hex"01"); vm.etch(address(disputeGameFactoryImpl), hex"01"); - dio.set(dio.opcmProxy.selector, address(opcmProxy)); + dio.set(dio.opcm.selector, address(opcm)); dio.set(dio.optimismPortalImpl.selector, address(optimismPortalImpl)); dio.set(dio.delayedWETHImpl.selector, address(delayedWETHImpl)); dio.set(dio.preimageOracleSingleton.selector, address(preimageOracleSingleton)); @@ -144,7 +119,7 @@ contract DeployImplementationsOutput_Test is Test { dio.set(dio.optimismMintableERC20FactoryImpl.selector, address(optimismMintableERC20FactoryImpl)); dio.set(dio.disputeGameFactoryImpl.selector, address(disputeGameFactoryImpl)); - assertEq(address(opcmProxy), address(dio.opcmProxy()), "50"); + assertEq(address(opcm), address(dio.opcm()), "50"); assertEq(address(optimismPortalImpl), address(dio.optimismPortalImpl()), "100"); assertEq(address(delayedWETHImpl), address(dio.delayedWETHImpl()), "200"); assertEq(address(preimageOracleSingleton), address(dio.preimageOracleSingleton()), "300"); @@ -273,7 +248,7 @@ contract DeployImplementations_Test is Test { function test_deployImplementation_succeeds() public { string memory deployContractsRelease = "dev-release"; - dii.set(dii.release.selector, deployContractsRelease); + dii.set(dii.l1ContractsRelease.selector, deployContractsRelease); deployImplementations.deploySystemConfigImpl(dii, dio); assertTrue(address(0) != address(dio.systemConfigImpl())); } @@ -282,7 +257,7 @@ contract DeployImplementations_Test is Test { // All hardcoded addresses below are taken from the superchain-registry config: // https://github.com/ethereum-optimism/superchain-registry/blob/be65d22f8128cf0c4e5b4e1f677daf86843426bf/validation/standard/standard-versions.toml#L11 string memory testRelease = "op-contracts/v1.6.0"; - dii.set(dii.release.selector, testRelease); + dii.set(dii.l1ContractsRelease.selector, testRelease); deployImplementations.deploySystemConfigImpl(dii, dio); address srSystemConfigImpl = address(0xF56D96B2535B932656d3c04Ebf51baBff241D886); @@ -335,71 +310,6 @@ contract DeployImplementations_Test is Test { assertEq(srDisputeGameFactoryImpl, address(dio.disputeGameFactoryImpl())); } - function test_deploy_atNonExistentRelease_reverts() public { - string memory unknownRelease = "op-contracts/v0.0.0"; - dii.set(dii.release.selector, unknownRelease); - - bytes memory expectedErr = - bytes(string.concat("DeployImplementations: failed to deploy release ", unknownRelease)); - - vm.expectRevert(expectedErr); - deployImplementations.deploySystemConfigImpl(dii, dio); - - vm.expectRevert(expectedErr); - deployImplementations.deployL1CrossDomainMessengerImpl(dii, dio); - - vm.expectRevert(expectedErr); - deployImplementations.deployL1ERC721BridgeImpl(dii, dio); - - vm.expectRevert(expectedErr); - deployImplementations.deployL1StandardBridgeImpl(dii, dio); - - vm.expectRevert(expectedErr); - deployImplementations.deployOptimismMintableERC20FactoryImpl(dii, dio); - - // TODO: Uncomment the code below when OPContractsManager is deployed based on release. Superchain-registry - // doesn't contain OPContractsManager yet. 
- // dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); - // dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); - // vm.etch(address(superchainConfigProxy), hex"01"); - // vm.etch(address(protocolVersionsProxy), hex"01"); - // vm.expectRevert(expectedErr); - // deployImplementations.deployOPContractsManagerImpl(dii, dio); - - dii.set(dii.proofMaturityDelaySeconds.selector, 1); - dii.set(dii.disputeGameFinalityDelaySeconds.selector, 2); - vm.expectRevert(expectedErr); - deployImplementations.deployOptimismPortalImpl(dii, dio); - - dii.set(dii.withdrawalDelaySeconds.selector, 1); - vm.expectRevert(expectedErr); - deployImplementations.deployDelayedWETHImpl(dii, dio); - - dii.set(dii.minProposalSizeBytes.selector, 1); - dii.set(dii.challengePeriodSeconds.selector, 2); - vm.expectRevert(expectedErr); - deployImplementations.deployPreimageOracleSingleton(dii, dio); - - address preImageOracleSingleton = makeAddr("preImageOracleSingleton"); - vm.etch(address(preImageOracleSingleton), hex"01"); - dio.set(dio.preimageOracleSingleton.selector, preImageOracleSingleton); - vm.expectRevert(expectedErr); - deployImplementations.deployMipsSingleton(dii, dio); - - vm.expectRevert(expectedErr); // fault proof contracts don't exist at this release - deployImplementations.deployDisputeGameFactoryImpl(dii, dio); - } - - function test_deploy_noContractExistsAtRelease_reverts() public { - string memory unknownRelease = "op-contracts/v1.3.0"; - dii.set(dii.release.selector, unknownRelease); - bytes memory expectedErr = - bytes(string.concat("DeployImplementations: failed to deploy release ", unknownRelease)); - - vm.expectRevert(expectedErr); // fault proof contracts don't exist at this release - deployImplementations.deployDisputeGameFactoryImpl(dii, dio); - } - function testFuzz_run_memory_succeeds(bytes32 _seed) public { withdrawalDelaySeconds = uint256(hash(_seed, 0)); minProposalSizeBytes = uint256(hash(_seed, 1)); @@ -409,7 +319,7 @@ contract DeployImplementations_Test is Test { string memory release = string(bytes.concat(hash(_seed, 5))); protocolVersionsProxy = IProtocolVersions(address(uint160(uint256(hash(_seed, 7))))); - // Must configure the ProxyAdmin contract which is used to upgrade the OPCM's proxy contract. + // Must configure the ProxyAdmin contract. 
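+        // Note: with this change the OPCM itself is no longer deployed behind a proxy (see the opcmProxy -> opcm
+        // rename above), so this ProxyAdmin is presumably only needed for the superchain contracts.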
IProxyAdmin superchainProxyAdmin = IProxyAdmin( DeployUtils.create1({ _name: "ProxyAdmin", @@ -439,10 +349,9 @@ contract DeployImplementations_Test is Test { dii.set(dii.proofMaturityDelaySeconds.selector, proofMaturityDelaySeconds); dii.set(dii.disputeGameFinalityDelaySeconds.selector, disputeGameFinalityDelaySeconds); dii.set(dii.mipsVersion.selector, 1); - dii.set(dii.release.selector, release); + dii.set(dii.l1ContractsRelease.selector, release); dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); - dii.set(dii.opcmProxyOwner.selector, msg.sender); deployImplementations.run(dii, dio); @@ -453,10 +362,9 @@ contract DeployImplementations_Test is Test { assertEq(proofMaturityDelaySeconds, dii.proofMaturityDelaySeconds(), "400"); assertEq(disputeGameFinalityDelaySeconds, dii.disputeGameFinalityDelaySeconds(), "500"); assertEq(1, dii.mipsVersion(), "512"); - assertEq(release, dii.release(), "525"); + assertEq(release, dii.l1ContractsRelease(), "525"); assertEq(address(superchainConfigProxy), address(dii.superchainConfigProxy()), "550"); assertEq(address(protocolVersionsProxy), address(dii.protocolVersionsProxy()), "575"); - assertEq(msg.sender, dii.opcmProxyOwner(), "580"); // Architecture assertions. assertEq(address(dio.mipsSingleton().oracle()), address(dio.preimageOracleSingleton()), "600"); @@ -475,7 +383,7 @@ contract DeployImplementations_Test is Test { dii.set(dii.disputeGameFinalityDelaySeconds.selector, disputeGameFinalityDelaySeconds); dii.set(dii.mipsVersion.selector, 1); string memory release = "dev-release"; - dii.set(dii.release.selector, release); + dii.set(dii.l1ContractsRelease.selector, release); dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); diff --git a/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol new file mode 100644 index 000000000000..959b9c7031f9 --- /dev/null +++ b/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +import { Test } from "forge-std/Test.sol"; +import { DeployOPCM, DeployOPCMInput, DeployOPCMOutput } from "scripts/deploy/DeployOPCM.s.sol"; +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; + +contract DeployOPCMInput_Test is Test { + DeployOPCMInput dii; + string release = "1.0.0"; + + function setUp() public { + dii = new DeployOPCMInput(); + } + + function test_getters_whenNotSet_reverts() public { + vm.expectRevert("DeployOPCMInput: not set"); + dii.superchainConfig(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.protocolVersions(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.l1ContractsRelease(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.addressManagerBlueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.proxyBlueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.proxyAdminBlueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.l1ChugSplashProxyBlueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.resolvedDelegateProxyBlueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.anchorStateRegistryBlueprint(); + + 
vm.expectRevert("DeployOPCMInput: not set"); + dii.permissionedDisputeGame1Blueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.permissionedDisputeGame2Blueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.l1ERC721BridgeImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.optimismPortalImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.systemConfigImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.optimismMintableERC20FactoryImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.l1CrossDomainMessengerImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.l1StandardBridgeImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.disputeGameFactoryImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.delayedWETHImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.mipsImpl(); + } + + // Below setter tests are split into two parts to avoid stack too deep errors + + function test_set_part1_succeeds() public { + ISuperchainConfig superchainConfig = ISuperchainConfig(makeAddr("superchainConfig")); + IProtocolVersions protocolVersions = IProtocolVersions(makeAddr("protocolVersions")); + address addressManagerBlueprint = makeAddr("addressManagerBlueprint"); + address proxyBlueprint = makeAddr("proxyBlueprint"); + address proxyAdminBlueprint = makeAddr("proxyAdminBlueprint"); + address l1ChugSplashProxyBlueprint = makeAddr("l1ChugSplashProxyBlueprint"); + address resolvedDelegateProxyBlueprint = makeAddr("resolvedDelegateProxyBlueprint"); + address anchorStateRegistryBlueprint = makeAddr("anchorStateRegistryBlueprint"); + address permissionedDisputeGame1Blueprint = makeAddr("permissionedDisputeGame1Blueprint"); + address permissionedDisputeGame2Blueprint = makeAddr("permissionedDisputeGame2Blueprint"); + + dii.set(dii.superchainConfig.selector, address(superchainConfig)); + dii.set(dii.protocolVersions.selector, address(protocolVersions)); + dii.set(dii.l1ContractsRelease.selector, release); + dii.set(dii.addressManagerBlueprint.selector, addressManagerBlueprint); + dii.set(dii.proxyBlueprint.selector, proxyBlueprint); + dii.set(dii.proxyAdminBlueprint.selector, proxyAdminBlueprint); + dii.set(dii.l1ChugSplashProxyBlueprint.selector, l1ChugSplashProxyBlueprint); + dii.set(dii.resolvedDelegateProxyBlueprint.selector, resolvedDelegateProxyBlueprint); + dii.set(dii.anchorStateRegistryBlueprint.selector, anchorStateRegistryBlueprint); + dii.set(dii.permissionedDisputeGame1Blueprint.selector, permissionedDisputeGame1Blueprint); + dii.set(dii.permissionedDisputeGame2Blueprint.selector, permissionedDisputeGame2Blueprint); + + assertEq(address(dii.superchainConfig()), address(superchainConfig), "50"); + assertEq(address(dii.protocolVersions()), address(protocolVersions), "100"); + assertEq(dii.l1ContractsRelease(), release, "150"); + assertEq(dii.addressManagerBlueprint(), addressManagerBlueprint, "200"); + assertEq(dii.proxyBlueprint(), proxyBlueprint, "250"); + assertEq(dii.proxyAdminBlueprint(), proxyAdminBlueprint, "300"); + assertEq(dii.l1ChugSplashProxyBlueprint(), l1ChugSplashProxyBlueprint, "350"); + assertEq(dii.resolvedDelegateProxyBlueprint(), resolvedDelegateProxyBlueprint, "400"); + assertEq(dii.anchorStateRegistryBlueprint(), anchorStateRegistryBlueprint, "450"); + assertEq(dii.permissionedDisputeGame1Blueprint(), permissionedDisputeGame1Blueprint, "500"); + assertEq(dii.permissionedDisputeGame2Blueprint(), permissionedDisputeGame2Blueprint, "550"); + } + + function test_set_part2_succeeds() 
public { + address l1ERC721BridgeImpl = makeAddr("l1ERC721BridgeImpl"); + address optimismPortalImpl = makeAddr("optimismPortalImpl"); + address systemConfigImpl = makeAddr("systemConfigImpl"); + address optimismMintableERC20FactoryImpl = makeAddr("optimismMintableERC20FactoryImpl"); + address l1CrossDomainMessengerImpl = makeAddr("l1CrossDomainMessengerImpl"); + address l1StandardBridgeImpl = makeAddr("l1StandardBridgeImpl"); + address disputeGameFactoryImpl = makeAddr("disputeGameFactoryImpl"); + address delayedWETHImpl = makeAddr("delayedWETHImpl"); + address mipsImpl = makeAddr("mipsImpl"); + + dii.set(dii.l1ERC721BridgeImpl.selector, l1ERC721BridgeImpl); + dii.set(dii.optimismPortalImpl.selector, optimismPortalImpl); + dii.set(dii.systemConfigImpl.selector, systemConfigImpl); + dii.set(dii.optimismMintableERC20FactoryImpl.selector, optimismMintableERC20FactoryImpl); + dii.set(dii.l1CrossDomainMessengerImpl.selector, l1CrossDomainMessengerImpl); + dii.set(dii.l1StandardBridgeImpl.selector, l1StandardBridgeImpl); + dii.set(dii.disputeGameFactoryImpl.selector, disputeGameFactoryImpl); + dii.set(dii.delayedWETHImpl.selector, delayedWETHImpl); + dii.set(dii.mipsImpl.selector, mipsImpl); + + assertEq(dii.l1ERC721BridgeImpl(), l1ERC721BridgeImpl, "600"); + assertEq(dii.optimismPortalImpl(), optimismPortalImpl, "650"); + assertEq(dii.systemConfigImpl(), systemConfigImpl, "700"); + assertEq(dii.optimismMintableERC20FactoryImpl(), optimismMintableERC20FactoryImpl, "750"); + assertEq(dii.l1CrossDomainMessengerImpl(), l1CrossDomainMessengerImpl, "800"); + assertEq(dii.l1StandardBridgeImpl(), l1StandardBridgeImpl, "850"); + assertEq(dii.disputeGameFactoryImpl(), disputeGameFactoryImpl, "900"); + assertEq(dii.delayedWETHImpl(), delayedWETHImpl, "950"); + assertEq(dii.mipsImpl(), mipsImpl, "1000"); + } + + function test_set_withZeroAddress_reverts() public { + vm.expectRevert("DeployOPCMInput: cannot set zero address"); + dii.set(dii.superchainConfig.selector, address(0)); + } + + function test_set_withEmptyString_reverts() public { + vm.expectRevert("DeployOPCMInput: cannot set empty string"); + dii.set(dii.l1ContractsRelease.selector, ""); + } + + function test_set_withInvalidSelector_reverts() public { + vm.expectRevert("DeployOPCMInput: unknown selector"); + dii.set(bytes4(0xdeadbeef), address(1)); + } + + function test_set_withInvalidStringSelector_reverts() public { + vm.expectRevert("DeployOPCMInput: unknown selector"); + dii.set(bytes4(0xdeadbeef), "test"); + } +} + +contract DeployOPCMOutput_Test is Test { + DeployOPCMOutput doo; + + function setUp() public { + doo = new DeployOPCMOutput(); + } + + function test_getters_whenNotSet_reverts() public { + vm.expectRevert("DeployOPCMOutput: not set"); + doo.opcm(); + } + + function test_set_succeeds() public { + OPContractsManager opcm = OPContractsManager(makeAddr("opcm")); + vm.etch(address(opcm), hex"01"); + + doo.set(doo.opcm.selector, address(opcm)); + + assertEq(address(doo.opcm()), address(opcm), "50"); + } + + function test_set_withZeroAddress_reverts() public { + vm.expectRevert("DeployOPCMOutput: cannot set zero address"); + doo.set(doo.opcm.selector, address(0)); + } + + function test_set_withInvalidSelector_reverts() public { + vm.expectRevert("DeployOPCMOutput: unknown selector"); + doo.set(bytes4(0xdeadbeef), makeAddr("test")); + } +} + +contract DeployOPCMTest is Test { + DeployOPCM deployOPCM; + DeployOPCMInput doi; + DeployOPCMOutput doo; + + ISuperchainConfig superchainConfigProxy = 
ISuperchainConfig(makeAddr("superchainConfigProxy")); + IProtocolVersions protocolVersionsProxy = IProtocolVersions(makeAddr("protocolVersionsProxy")); + + function setUp() public virtual { + deployOPCM = new DeployOPCM(); + (doi, doo) = deployOPCM.etchIOContracts(); + } + + function test_run_succeeds() public { + doi.set(doi.superchainConfig.selector, address(superchainConfigProxy)); + doi.set(doi.protocolVersions.selector, address(protocolVersionsProxy)); + doi.set(doi.l1ContractsRelease.selector, "1.0.0"); + + // Set and etch blueprints + doi.set(doi.addressManagerBlueprint.selector, makeAddr("addressManagerBlueprint")); + doi.set(doi.proxyBlueprint.selector, makeAddr("proxyBlueprint")); + doi.set(doi.proxyAdminBlueprint.selector, makeAddr("proxyAdminBlueprint")); + doi.set(doi.l1ChugSplashProxyBlueprint.selector, makeAddr("l1ChugSplashProxyBlueprint")); + doi.set(doi.resolvedDelegateProxyBlueprint.selector, makeAddr("resolvedDelegateProxyBlueprint")); + doi.set(doi.anchorStateRegistryBlueprint.selector, makeAddr("anchorStateRegistryBlueprint")); + doi.set(doi.permissionedDisputeGame1Blueprint.selector, makeAddr("permissionedDisputeGame1Blueprint")); + doi.set(doi.permissionedDisputeGame2Blueprint.selector, makeAddr("permissionedDisputeGame2Blueprint")); + + // Set and etch implementations + doi.set(doi.l1ERC721BridgeImpl.selector, makeAddr("l1ERC721BridgeImpl")); + doi.set(doi.optimismPortalImpl.selector, makeAddr("optimismPortalImpl")); + doi.set(doi.systemConfigImpl.selector, makeAddr("systemConfigImpl")); + doi.set(doi.optimismMintableERC20FactoryImpl.selector, makeAddr("optimismMintableERC20FactoryImpl")); + doi.set(doi.l1CrossDomainMessengerImpl.selector, makeAddr("l1CrossDomainMessengerImpl")); + doi.set(doi.l1StandardBridgeImpl.selector, makeAddr("l1StandardBridgeImpl")); + doi.set(doi.disputeGameFactoryImpl.selector, makeAddr("disputeGameFactoryImpl")); + doi.set(doi.delayedWETHImpl.selector, makeAddr("delayedWETHImpl")); + doi.set(doi.mipsImpl.selector, makeAddr("mipsImpl")); + + // Etch all addresses with dummy bytecode + vm.etch(address(doi.superchainConfig()), hex"01"); + vm.etch(address(doi.protocolVersions()), hex"01"); + + vm.etch(doi.addressManagerBlueprint(), hex"01"); + vm.etch(doi.proxyBlueprint(), hex"01"); + vm.etch(doi.proxyAdminBlueprint(), hex"01"); + vm.etch(doi.l1ChugSplashProxyBlueprint(), hex"01"); + vm.etch(doi.resolvedDelegateProxyBlueprint(), hex"01"); + vm.etch(doi.anchorStateRegistryBlueprint(), hex"01"); + vm.etch(doi.permissionedDisputeGame1Blueprint(), hex"01"); + vm.etch(doi.permissionedDisputeGame2Blueprint(), hex"01"); + + vm.etch(doi.l1ERC721BridgeImpl(), hex"01"); + vm.etch(doi.optimismPortalImpl(), hex"01"); + vm.etch(doi.systemConfigImpl(), hex"01"); + vm.etch(doi.optimismMintableERC20FactoryImpl(), hex"01"); + vm.etch(doi.l1CrossDomainMessengerImpl(), hex"01"); + vm.etch(doi.l1StandardBridgeImpl(), hex"01"); + vm.etch(doi.disputeGameFactoryImpl(), hex"01"); + vm.etch(doi.delayedWETHImpl(), hex"01"); + vm.etch(doi.mipsImpl(), hex"01"); + + deployOPCM.run(doi, doo); + + assertNotEq(address(doo.opcm()), address(0)); + + // sanity check to ensure that the OPCM is validated + deployOPCM.assertValidOpcm(doi, doo); + } +} diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index 5249ded41cc9..5ac899579d65 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -13,19 +13,19 @@ import { import { 
DeployOPChainInput, DeployOPChain, DeployOPChainOutput } from "scripts/deploy/DeployOPChain.s.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; -import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; -import { IL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; -import { IResolvedDelegateProxy } from "src/legacy/interfaces/IResolvedDelegateProxy.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; +import { IL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; +import { IResolvedDelegateProxy } from "interfaces/legacy/IResolvedDelegateProxy.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IProtocolVersions, ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions, ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; import { Claim, Duration, GameType, GameTypes, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; @@ -39,10 +39,10 @@ contract DeployOPChainInput_Test is Test { address unsafeBlockSigner = makeAddr("unsafeBlockSigner"); address proposer = makeAddr("proposer"); address challenger = makeAddr("challenger"); + address opcm = makeAddr("opcm"); uint32 basefeeScalar = 100; uint32 blobBaseFeeScalar = 200; uint256 l2ChainId = 300; - OPContractsManager opcm = OPContractsManager(makeAddr("opcm")); string saltMixer = "saltMixer"; function setUp() public { @@ -60,9 +60,8 @@ contract DeployOPChainInput_Test is Test { doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); doi.set(doi.allowCustomDisputeParameters.selector, true); - - (IProxy opcmProxy) = DeployUtils.buildERC1967ProxyWithImpl("opcmProxy"); - doi.set(doi.opcmProxy.selector, address(opcmProxy)); + doi.set(doi.opcm.selector, opcm); + vm.etch(opcm, hex"01"); // Compare the default inputs to the getter methods. 
assertEq(opChainProxyAdminOwner, doi.opChainProxyAdminOwner(), "200"); @@ -74,7 +73,7 @@ contract DeployOPChainInput_Test is Test { assertEq(basefeeScalar, doi.basefeeScalar(), "800"); assertEq(blobBaseFeeScalar, doi.blobBaseFeeScalar(), "900"); assertEq(l2ChainId, doi.l2ChainId(), "1000"); - assertEq(address(opcmProxy), address(doi.opcmProxy()), "1100"); + assertEq(opcm, address(doi.opcm()), "1100"); assertEq(true, doi.allowCustomDisputeParameters(), "1200"); } @@ -396,7 +395,7 @@ contract DeployOPChain_TestBase is Test { dii.set(dii.proofMaturityDelaySeconds.selector, proofMaturityDelaySeconds); dii.set(dii.disputeGameFinalityDelaySeconds.selector, disputeGameFinalityDelaySeconds); dii.set(dii.mipsVersion.selector, 1); - dii.set(dii.release.selector, release); + dii.set(dii.l1ContractsRelease.selector, release); dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); // End users of the DeployImplementations contract will need to set the `standardVersionsToml`. @@ -404,7 +403,7 @@ contract DeployOPChain_TestBase is Test { string.concat(vm.projectRoot(), "/test/fixtures/standard-versions.toml"); string memory standardVersionsToml = vm.readFile(standardVersionsTomlPath); dii.set(dii.standardVersionsToml.selector, standardVersionsToml); - dii.set(dii.opcmProxyOwner.selector, address(1)); + deployImplementations.run(dii, dio); // Deploy DeployOpChain, but defer populating the input values to the test suites inheriting this contract. @@ -412,7 +411,7 @@ contract DeployOPChain_TestBase is Test { (doi, doo) = deployOPChain.etchIOContracts(); // Set the OPContractsManager input for DeployOPChain. - opcm = dio.opcmProxy(); + opcm = dio.opcm(); } // See the function of the same name in the `DeployImplementations_Test` contract of @@ -466,7 +465,7 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { doi.set(doi.basefeeScalar.selector, basefeeScalar); doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opcmProxy.selector, address(opcm)); // Not fuzzed since it must be an actual instance. 
+ doi.set(doi.opcm.selector, address(opcm)); doi.set(doi.saltMixer.selector, saltMixer); doi.set(doi.gasLimit.selector, gasLimit); doi.set(doi.disputeGameType.selector, disputeGameType); @@ -559,7 +558,7 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { doi.set(doi.basefeeScalar.selector, basefeeScalar); doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opcmProxy.selector, address(opcm)); + doi.set(doi.opcm.selector, address(opcm)); doi.set(doi.saltMixer.selector, saltMixer); doi.set(doi.gasLimit.selector, gasLimit); doi.set(doi.disputeGameType.selector, disputeGameType); diff --git a/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol b/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol index 924957cc1800..93c3c0c9344a 100644 --- a/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol @@ -7,7 +7,7 @@ import { stdToml } from "forge-std/StdToml.sol"; import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; import { Proxy } from "src/universal/Proxy.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { IProtocolVersions, ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; +import { IProtocolVersions, ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol"; import { DeploySuperchainInput, DeploySuperchain, DeploySuperchainOutput } from "scripts/deploy/DeploySuperchain.s.sol"; contract DeploySuperchainInput_Test is Test { diff --git a/packages/contracts-bedrock/test/periphery/drippie/Drippie.t.sol b/packages/contracts-bedrock/test/periphery/drippie/Drippie.t.sol index 0e6755b0e63c..c5791e06868b 100644 --- a/packages/contracts-bedrock/test/periphery/drippie/Drippie.t.sol +++ b/packages/contracts-bedrock/test/periphery/drippie/Drippie.t.sol @@ -16,10 +16,6 @@ import { SimpleStorage } from "test/mocks/SimpleStorage.sol"; contract TestDrippie is Drippie { constructor(address owner) Drippie(owner) { } - function dripStatus(string memory name) external view returns (Drippie.DripStatus) { - return drips[name].status; - } - function dripStateLast(string memory name) external view returns (uint256) { return drips[name].last; } @@ -131,12 +127,12 @@ contract Drippie_Test is Test { vm.prank(drippie.owner()); drippie.create(dripName, cfg); - Drippie.DripStatus status = drippie.dripStatus(dripName); + Drippie.DripStatus status = drippie.getDripStatus(dripName); Drippie.DripConfig memory config = drippie.dripConfig(dripName); assertEq(uint256(status), uint256(Drippie.DripStatus.PAUSED)); - assertEq(config.interval, cfg.interval); + assertEq(drippie.getDripInterval(dripName), cfg.interval); assertEq(config.reentrant, cfg.reentrant); assertEq(address(config.dripcheck), address(cfg.dripcheck)); assertEq(config.checkparams, cfg.checkparams); @@ -186,7 +182,7 @@ contract Drippie_Test is Test { address owner = drippie.owner(); { - Drippie.DripStatus status = drippie.dripStatus(dripName); + Drippie.DripStatus status = drippie.getDripStatus(dripName); assertEq(uint256(status), uint256(Drippie.DripStatus.PAUSED)); } @@ -198,7 +194,7 @@ contract Drippie_Test is Test { drippie.status(dripName, Drippie.DripStatus.ACTIVE); { - Drippie.DripStatus status = drippie.dripStatus(dripName); + Drippie.DripStatus status = drippie.getDripStatus(dripName); assertEq(uint256(status), uint256(Drippie.DripStatus.ACTIVE)); } @@ -210,7 +206,7 @@ contract Drippie_Test is Test { drippie.status(dripName, 
Drippie.DripStatus.PAUSED); { - Drippie.DripStatus status = drippie.dripStatus(dripName); + Drippie.DripStatus status = drippie.getDripStatus(dripName); assertEq(uint256(status), uint256(Drippie.DripStatus.PAUSED)); } } @@ -252,7 +248,7 @@ contract Drippie_Test is Test { drippie.status(dripName, Drippie.DripStatus.ARCHIVED); - Drippie.DripStatus status = drippie.dripStatus(dripName); + Drippie.DripStatus status = drippie.getDripStatus(dripName); assertEq(uint256(status), uint256(Drippie.DripStatus.ARCHIVED)); } @@ -463,7 +459,7 @@ contract Drippie_Test is Test { function test_not_active_reverts() external { _createDefaultDrip(dripName); - Drippie.DripStatus status = drippie.dripStatus(dripName); + Drippie.DripStatus status = drippie.getDripStatus(dripName); assertEq(uint256(status), uint256(Drippie.DripStatus.PAUSED)); vm.prank(drippie.owner()); diff --git a/packages/contracts-bedrock/test/periphery/faucet/Faucet.t.sol b/packages/contracts-bedrock/test/periphery/faucet/Faucet.t.sol index a0ef75f28d55..b5f156cf1a85 100644 --- a/packages/contracts-bedrock/test/periphery/faucet/Faucet.t.sol +++ b/packages/contracts-bedrock/test/periphery/faucet/Faucet.t.sol @@ -103,6 +103,24 @@ contract FaucetTest is Faucet_Initializer { assertEq(faucet.ADMIN(), faucetContractAdmin); } + function test_configure_whenAdmin_succeeds() external { + vm.startPrank(faucetContractAdmin); + faucet.configure(optimistNftFam, Faucet.ModuleConfig("OptimistNftModule", true, 1 days, 1 ether)); + + (string memory name, bool enabled, uint256 ttl, uint256 amount) = faucet.modules(optimistNftFam); + assertEq(name, "OptimistNftModule"); + assertEq(enabled, true); + assertEq(ttl, 1 days); + assertEq(amount, 1 ether); + + assertTrue(faucet.isModuleEnabled(optimistNftFam)); + } + + function test_configure_whenNotAdmin_reverts() external { + vm.expectRevert("Faucet: function can only be called by admin"); + faucet.configure(optimistNftFam, Faucet.ModuleConfig("OptimistNftModule", true, 1 days, 1 ether)); + } + function test_authAdmin_drip_succeeds() external { _enableFaucetAuthModules(); bytes32 nonce = faucetHelper.consumeNonce(); diff --git a/packages/contracts-bedrock/test/periphery/op-nft/AttestationStation.t.sol b/packages/contracts-bedrock/test/periphery/op-nft/AttestationStation.t.sol deleted file mode 100644 index 4c9b72254d34..000000000000 --- a/packages/contracts-bedrock/test/periphery/op-nft/AttestationStation.t.sol +++ /dev/null @@ -1,115 +0,0 @@ -//SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -/* Testing utilities */ -import { Test } from "forge-std/Test.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; - -contract AttestationStation_Initializer is Test { - address alice_attestor = address(128); - address bob = address(256); - address sally = address(512); - - function setUp() public { - // Give alice and bob some ETH - vm.deal(alice_attestor, 1 ether); - - vm.label(alice_attestor, "alice_attestor"); - vm.label(bob, "bob"); - vm.label(sally, "sally"); - } -} - -contract AttestationStationTest is AttestationStation_Initializer { - event AttestationCreated(address indexed creator, address indexed about, bytes32 indexed key, bytes val); - - function test_attest_individual_succeeds() external { - AttestationStation attestationStation = new AttestationStation(); - - vm.expectEmit(true, true, true, true); - emit AttestationCreated(alice_attestor, bob, bytes32("foo"), bytes("bar")); - - vm.prank(alice_attestor); - attestationStation.attest({ _about: bob, _key: bytes32("foo"), 
_val: bytes("bar") }); - } - - function test_attest_single_succeeds() external { - AttestationStation attestationStation = new AttestationStation(); - - AttestationStation.AttestationData[] memory attestationDataArr = new AttestationStation.AttestationData[](1); - - // alice is going to attest about bob - AttestationStation.AttestationData memory attestationData = AttestationStation.AttestationData({ - about: bob, - key: bytes32("test-key:string"), - val: bytes("test-value") - }); - - // assert the attestation starts empty - assertEq(attestationStation.attestations(alice_attestor, attestationData.about, attestationData.key), ""); - - // make attestation - vm.prank(alice_attestor); - attestationDataArr[0] = attestationData; - attestationStation.attest(attestationDataArr); - - // assert the attestation is there - assertEq( - attestationStation.attestations(alice_attestor, attestationData.about, attestationData.key), - attestationData.val - ); - - bytes memory new_val = bytes("new updated value"); - // make a new attestations to same about and key - attestationData = - AttestationStation.AttestationData({ about: attestationData.about, key: attestationData.key, val: new_val }); - - vm.prank(alice_attestor); - attestationDataArr[0] = attestationData; - attestationStation.attest(attestationDataArr); - - // assert the attestation is updated - assertEq( - attestationStation.attestations(alice_attestor, attestationData.about, attestationData.key), - attestationData.val - ); - } - - function test_attest_bulk_succeeds() external { - AttestationStation attestationStation = new AttestationStation(); - - vm.prank(alice_attestor); - - AttestationStation.AttestationData[] memory attestationData = new AttestationStation.AttestationData[](3); - attestationData[0] = AttestationStation.AttestationData({ - about: bob, - key: bytes32("test-key:string"), - val: bytes("test-value") - }); - - attestationData[1] = - AttestationStation.AttestationData({ about: bob, key: bytes32("test-key2"), val: bytes("test-value2") }); - - attestationData[2] = AttestationStation.AttestationData({ - about: sally, - key: bytes32("test-key:string"), - val: bytes("test-value3") - }); - - attestationStation.attest(attestationData); - - // assert the attestations are there - assertEq( - attestationStation.attestations(alice_attestor, attestationData[0].about, attestationData[0].key), - attestationData[0].val - ); - assertEq( - attestationStation.attestations(alice_attestor, attestationData[1].about, attestationData[1].key), - attestationData[1].val - ); - assertEq( - attestationStation.attestations(alice_attestor, attestationData[2].about, attestationData[2].key), - attestationData[2].val - ); - } -} diff --git a/packages/contracts-bedrock/test/periphery/op-nft/Optimist.t.sol b/packages/contracts-bedrock/test/periphery/op-nft/Optimist.t.sol deleted file mode 100644 index 2eb2f07e860f..000000000000 --- a/packages/contracts-bedrock/test/periphery/op-nft/Optimist.t.sol +++ /dev/null @@ -1,543 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity >=0.6.2 <0.9.0; - -// Testing utilities -import { Test } from "forge-std/Test.sol"; -import { IMulticall3 } from "forge-std/interfaces/IMulticall3.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; -import { Optimist } from "src/periphery/op-nft/Optimist.sol"; -import { OptimistAllowlist } from "src/periphery/op-nft/OptimistAllowlist.sol"; -import { OptimistInviter } from "src/periphery/op-nft/OptimistInviter.sol"; -import { OptimistInviterHelper } from 
"test/mocks/OptimistInviterHelper.sol"; -import { IERC721 } from "@openzeppelin/contracts/token/ERC721/IERC721.sol"; - -library Multicall { - bytes internal constant code = - hex"6080604052600436106100f35760003560e01c80634d2301cc1161008a578063a8b0574e11610059578063a8b0574e1461025a578063bce38bd714610275578063c3077fa914610288578063ee82ac5e1461029b57600080fd5b80634d2301cc146101ec57806372425d9d1461022157806382ad56cb1461023457806386d516e81461024757600080fd5b80633408e470116100c65780633408e47014610191578063399542e9146101a45780633e64a696146101c657806342cbb15c146101d957600080fd5b80630f28c97d146100f8578063174dea711461011a578063252dba421461013a57806327e86d6e1461015b575b600080fd5b34801561010457600080fd5b50425b6040519081526020015b60405180910390f35b61012d610128366004610a85565b6102ba565b6040516101119190610bbe565b61014d610148366004610a85565b6104ef565b604051610111929190610bd8565b34801561016757600080fd5b50437fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0140610107565b34801561019d57600080fd5b5046610107565b6101b76101b2366004610c60565b610690565b60405161011193929190610cba565b3480156101d257600080fd5b5048610107565b3480156101e557600080fd5b5043610107565b3480156101f857600080fd5b50610107610207366004610ce2565b73ffffffffffffffffffffffffffffffffffffffff163190565b34801561022d57600080fd5b5044610107565b61012d610242366004610a85565b6106ab565b34801561025357600080fd5b5045610107565b34801561026657600080fd5b50604051418152602001610111565b61012d610283366004610c60565b61085a565b6101b7610296366004610a85565b610a1a565b3480156102a757600080fd5b506101076102b6366004610d18565b4090565b60606000828067ffffffffffffffff8111156102d8576102d8610d31565b60405190808252806020026020018201604052801561031e57816020015b6040805180820190915260008152606060208201528152602001906001900390816102f65790505b5092503660005b8281101561047757600085828151811061034157610341610d60565b6020026020010151905087878381811061035d5761035d610d60565b905060200281019061036f9190610d8f565b6040810135958601959093506103886020850185610ce2565b73ffffffffffffffffffffffffffffffffffffffff16816103ac6060870187610dcd565b6040516103ba929190610e32565b60006040518083038185875af1925050503d80600081146103f7576040519150601f19603f3d011682016040523d82523d6000602084013e6103fc565b606091505b50602080850191909152901515808452908501351761046d577f08c379a000000000000000000000000000000000000000000000000000000000600052602060045260176024527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060445260846000fd5b5050600101610325565b508234146104e6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f4d756c746963616c6c333a2076616c7565206d69736d6174636800000000000060448201526064015b60405180910390fd5b50505092915050565b436060828067ffffffffffffffff81111561050c5761050c610d31565b60405190808252806020026020018201604052801561053f57816020015b606081526020019060019003908161052a5790505b5091503660005b8281101561068657600087878381811061056257610562610d60565b90506020028101906105749190610e42565b92506105836020840184610ce2565b73ffffffffffffffffffffffffffffffffffffffff166105a66020850185610dcd565b6040516105b4929190610e32565b6000604051808303816000865af19150503d80600081146105f1576040519150601f19603f3d011682016040523d82523d6000602084013e6105f6565b606091505b5086848151811061060957610609610d60565b602090810291909101015290508061067d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060448201526064016104dd565b50600101610546565b5050509250929050565b43804060606106
a086868661085a565b905093509350939050565b6060818067ffffffffffffffff8111156106c7576106c7610d31565b60405190808252806020026020018201604052801561070d57816020015b6040805180820190915260008152606060208201528152602001906001900390816106e55790505b5091503660005b828110156104e657600084828151811061073057610730610d60565b6020026020010151905086868381811061074c5761074c610d60565b905060200281019061075e9190610e76565b925061076d6020840184610ce2565b73ffffffffffffffffffffffffffffffffffffffff166107906040850185610dcd565b60405161079e929190610e32565b6000604051808303816000865af19150503d80600081146107db576040519150601f19603f3d011682016040523d82523d6000602084013e6107e0565b606091505b506020808401919091529015158083529084013517610851577f08c379a000000000000000000000000000000000000000000000000000000000600052602060045260176024527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060445260646000fd5b50600101610714565b6060818067ffffffffffffffff81111561087657610876610d31565b6040519080825280602002602001820160405280156108bc57816020015b6040805180820190915260008152606060208201528152602001906001900390816108945790505b5091503660005b82811015610a105760008482815181106108df576108df610d60565b602002602001015190508686838181106108fb576108fb610d60565b905060200281019061090d9190610e42565b925061091c6020840184610ce2565b73ffffffffffffffffffffffffffffffffffffffff1661093f6020850185610dcd565b60405161094d929190610e32565b6000604051808303816000865af19150503d806000811461098a576040519150601f19603f3d011682016040523d82523d6000602084013e61098f565b606091505b506020830152151581528715610a07578051610a07576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060448201526064016104dd565b506001016108c3565b5050509392505050565b6000806060610a2b60018686610690565b919790965090945092505050565b60008083601f840112610a4b57600080fd5b50813567ffffffffffffffff811115610a6357600080fd5b6020830191508360208260051b8501011115610a7e57600080fd5b9250929050565b60008060208385031215610a9857600080fd5b823567ffffffffffffffff811115610aaf57600080fd5b610abb85828601610a39565b90969095509350505050565b6000815180845260005b81811015610aed57602081850181015186830182015201610ad1565b81811115610aff576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b600082825180855260208086019550808260051b84010181860160005b84811015610bb1578583037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001895281518051151584528401516040858501819052610b9d81860183610ac7565b9a86019a9450505090830190600101610b4f565b5090979650505050505050565b602081526000610bd16020830184610b32565b9392505050565b600060408201848352602060408185015281855180845260608601915060608160051b870101935082870160005b82811015610c52577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0888703018452610c40868351610ac7565b95509284019290840190600101610c06565b509398975050505050505050565b600080600060408486031215610c7557600080fd5b83358015158114610c8557600080fd5b9250602084013567ffffffffffffffff811115610ca157600080fd5b610cad86828701610a39565b9497909650939450505050565b838152826020820152606060408201526000610cd96060830184610b32565b95945050505050565b600060208284031215610cf457600080fd5b813573ffffffffffffffffffffffffffffffffffffffff81168114610bd157600080fd5b600060208284031215610d2a57600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045
260246000fd5b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff81833603018112610dc357600080fd5b9190910192915050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610e0257600080fd5b83018035915067ffffffffffffffff821115610e1d57600080fd5b602001915036819003821315610a7e57600080fd5b8183823760009101908152919050565b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc1833603018112610dc357600080fd5b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa1833603018112610dc357600080fdfea2646970667358221220bb2b5c71a328032f97c676ae39a1ec2148d3e5d6f73d95e9b17910152d61f16264736f6c634300080c0033"; - address internal constant addr = 0xcA11bde05977b3631167028862bE2a173976CA11; -} - -contract Optimist_Initializer is Test { - event Transfer(address indexed from, address indexed to, uint256 indexed tokenId); - event Initialized(uint8); - event AttestationCreated(address indexed creator, address indexed about, bytes32 indexed key, bytes val); - - string constant name = "Optimist name"; - string constant symbol = "OPTIMISTSYMBOL"; - string constant base_uri = "https://storageapi.fleek.co/6442819a1b05-bucket/optimist-nft/attributes"; - AttestationStation attestationStation; - Optimist optimist; - OptimistAllowlist optimistAllowlist; - OptimistInviter optimistInviter; - - // Helps with EIP-712 signature generation - OptimistInviterHelper optimistInviterHelper; - - // To test multicall for claiming and minting in one call - IMulticall3 multicall3; - - address internal carol_baseURIAttestor; - address internal alice_allowlistAttestor; - address internal eve_inviteGranter; - address internal ted_coinbaseAttestor; - address internal bob; - address internal sally; - - /// @notice BaseURI attestor sets the baseURI of the Optimist NFT. - function _attestBaseURI(string memory _baseUri) internal { - bytes32 baseURIAttestationKey = optimist.BASE_URI_ATTESTATION_KEY(); - AttestationStation.AttestationData[] memory attestationData = new AttestationStation.AttestationData[](1); - attestationData[0] = - AttestationStation.AttestationData(address(optimist), baseURIAttestationKey, bytes(_baseUri)); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated(carol_baseURIAttestor, address(optimist), baseURIAttestationKey, bytes(_baseUri)); - vm.prank(carol_baseURIAttestor); - attestationStation.attest(attestationData); - } - - /// @notice Allowlist attestor creates an attestation for an address. - function _attestAllowlist(address _about) internal { - bytes32 attestationKey = optimistAllowlist.OPTIMIST_CAN_MINT_ATTESTATION_KEY(); - AttestationStation.AttestationData[] memory attestationData = new AttestationStation.AttestationData[](1); - // we are using true but it can be any non empty value - attestationData[0] = - AttestationStation.AttestationData({ about: _about, key: attestationKey, val: bytes("true") }); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated(alice_allowlistAttestor, _about, attestationKey, bytes("true")); - - vm.prank(alice_allowlistAttestor); - attestationStation.attest(attestationData); - - assertTrue(optimist.isOnAllowList(_about)); - } - - /// @notice Coinbase Quest attestor creates an attestation for an address. 
- function _attestCoinbaseQuest(address _about) internal { - bytes32 attestationKey = optimistAllowlist.COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY(); - AttestationStation.AttestationData[] memory attestationData = new AttestationStation.AttestationData[](1); - // we are using true but it can be any non empty value - attestationData[0] = - AttestationStation.AttestationData({ about: _about, key: attestationKey, val: bytes("true") }); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated(ted_coinbaseAttestor, _about, attestationKey, bytes("true")); - - vm.prank(ted_coinbaseAttestor); - attestationStation.attest(attestationData); - - assertTrue(optimist.isOnAllowList(_about)); - } - - /// @notice Issues invite, then claims it using the claimer's address. - function _inviteAndClaim(address _about) internal { - uint256 inviterPrivateKey = 0xbeefbeef; - address inviter = vm.addr(inviterPrivateKey); - - address[] memory addresses = new address[](1); - addresses[0] = inviter; - - vm.prank(eve_inviteGranter); - - // grant invites to Inviter; - optimistInviter.setInviteCounts(addresses, 3); - - // issue a new invite - OptimistInviter.ClaimableInvite memory claimableInvite = - optimistInviterHelper.getClaimableInviteWithNewNonce(inviter); - - // EIP-712 sign with Inviter's private key - - (uint8 v, bytes32 r, bytes32 s) = vm.sign(inviterPrivateKey, optimistInviterHelper.getDigest(claimableInvite)); - bytes memory signature = abi.encodePacked(r, s, v); - - bytes32 hashedCommit = keccak256(abi.encode(_about, signature)); - - // commit the invite - vm.prank(_about); - optimistInviter.commitInvite(hashedCommit); - - // wait minimum commitment period - vm.warp(optimistInviter.MIN_COMMITMENT_PERIOD() + block.timestamp); - - // reveal and claim the invite - optimistInviter.claimInvite(_about, claimableInvite, signature); - - assertTrue(optimist.isOnAllowList(_about)); - } - - /// @notice Mocks the allowlistAttestor to always return true for a given address. - function _mockAllowlistTrueFor(address _claimer) internal { - vm.mockCall( - address(optimistAllowlist), abi.encodeCall(OptimistAllowlist.isAllowedToMint, (_claimer)), abi.encode(true) - ); - - assertTrue(optimist.isOnAllowList(_claimer)); - } - - /// @notice Returns address as uint256. 
- function _getTokenId(address _owner) internal pure returns (uint256) { - return uint256(uint160(address(_owner))); - } - - function setUp() public { - carol_baseURIAttestor = makeAddr("carol_baseURIAttestor"); - alice_allowlistAttestor = makeAddr("alice_allowlistAttestor"); - eve_inviteGranter = makeAddr("eve_inviteGranter"); - ted_coinbaseAttestor = makeAddr("ted_coinbaseAttestor"); - bob = makeAddr("bob"); - sally = makeAddr("sally"); - _initializeContracts(); - } - - function _initializeContracts() internal { - attestationStation = new AttestationStation(); - vm.expectEmit(true, true, false, false); - emit Initialized(1); - - optimistInviter = - new OptimistInviter({ _inviteGranter: eve_inviteGranter, _attestationStation: attestationStation }); - - optimistInviter.initialize("OptimistInviter"); - - // Initialize the helper which helps sign EIP-712 signatures - optimistInviterHelper = new OptimistInviterHelper(optimistInviter, "OptimistInviter"); - - optimistAllowlist = new OptimistAllowlist({ - _attestationStation: attestationStation, - _allowlistAttestor: alice_allowlistAttestor, - _coinbaseQuestAttestor: ted_coinbaseAttestor, - _optimistInviter: address(optimistInviter) - }); - - optimist = new Optimist({ - _name: name, - _symbol: symbol, - _baseURIAttestor: carol_baseURIAttestor, - _attestationStation: attestationStation, - _optimistAllowlist: optimistAllowlist - }); - - multicall3 = IMulticall3(Multicall.addr); - vm.etch(Multicall.addr, Multicall.code); - } -} - -contract OptimistTest is Optimist_Initializer { - /// @notice Check that constructor and initializer parameters are correctly set. - function test_initialize_succeeds() external view { - // expect name to be set - assertEq(optimist.name(), name); - // expect symbol to be set - assertEq(optimist.symbol(), symbol); - // expect attestationStation to be set - assertEq(address(optimist.ATTESTATION_STATION()), address(attestationStation)); - assertEq(optimist.BASE_URI_ATTESTOR(), carol_baseURIAttestor); - } - - /// @notice Bob should be able to mint an NFT if he is allowlisted - /// by the allowlistAttestor and has a balance of 0. - function test_mint_afterAllowlistAttestation_succeeds() external { - // bob should start with 0 balance - assertEq(optimist.balanceOf(bob), 0); - - // allowlist bob - _attestAllowlist(bob); - - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - - // Check that the OptimistAllowlist is checked - bytes memory data = abi.encodeCall(OptimistAllowlist.isAllowedToMint, (bob)); - vm.expectCall(address(optimistAllowlist), data); - - // mint an NFT and expect mint transfer event to be emitted - vm.expectEmit(true, true, true, true); - emit Transfer(address(0), bob, _getTokenId(bob)); - vm.prank(bob); - optimist.mint(bob); - - // expect the NFT to be owned by bob - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - } - - /// @notice Bob should be able to mint an NFT if he claimed an invite through OptimistInviter - /// and has a balance of 0. 
- function test_mint_afterInviteClaimed_succeeds() external { - // bob should start with 0 balance - assertEq(optimist.balanceOf(bob), 0); - - // bob claims an invite - _inviteAndClaim(bob); - - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - - // Check that the OptimistAllowlist is checked - bytes memory data = abi.encodeCall(OptimistAllowlist.isAllowedToMint, (bob)); - vm.expectCall(address(optimistAllowlist), data); - - // mint an NFT and expect mint transfer event to be emitted - vm.expectEmit(true, true, true, true); - emit Transfer(address(0), bob, _getTokenId(bob)); - vm.prank(bob); - optimist.mint(bob); - - // expect the NFT to be owned by bob - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - } - - /// @notice Bob should be able to mint an NFT if he has an attestation from Coinbase Quest - /// attestor and has a balance of 0. - function test_mint_afterCoinbaseQuestAttestation_succeeds() external { - // bob should start with 0 balance - assertEq(optimist.balanceOf(bob), 0); - - // bob receives attestation from Coinbase Quest attestor - _attestCoinbaseQuest(bob); - - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - - // Check that the OptimistAllowlist is checked - bytes memory data = abi.encodeCall(OptimistAllowlist.isAllowedToMint, (bob)); - vm.expectCall(address(optimistAllowlist), data); - - // mint an NFT and expect mint transfer event to be emitted - vm.expectEmit(true, true, true, true); - emit Transfer(address(0), bob, _getTokenId(bob)); - vm.prank(bob); - optimist.mint(bob); - - // expect the NFT to be owned by bob - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - } - - /// @notice Multiple valid attestations should allow Bob to mint. - function test_mint_afterMultipleAttestations_succeeds() external { - // bob should start with 0 balance - assertEq(optimist.balanceOf(bob), 0); - - // bob receives attestation from Coinbase Quest attestor - _attestCoinbaseQuest(bob); - - // allowlist bob - _attestAllowlist(bob); - - // bob claims an invite - _inviteAndClaim(bob); - - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - - // Check that the OptimistAllowlist is checked - bytes memory data = abi.encodeCall(OptimistAllowlist.isAllowedToMint, (bob)); - vm.expectCall(address(optimistAllowlist), data); - - // mint an NFT and expect mint transfer event to be emitted - vm.expectEmit(true, true, true, true); - emit Transfer(address(0), bob, _getTokenId(bob)); - vm.prank(bob); - optimist.mint(bob); - - // expect the NFT to be owned by bob - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - } - - /// @notice Sally should be able to mint a token on behalf of bob. - function test_mint_secondaryMinter_succeeds() external { - _mockAllowlistTrueFor(bob); - - vm.expectEmit(true, true, true, true); - emit Transfer(address(0), bob, _getTokenId(bob)); - - // mint as sally instead of bob - vm.prank(sally); - optimist.mint(bob); - - // expect the NFT to be owned by bob - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - } - - /// @notice Bob should not be able to mint an NFT if he is not allowlisted. - function test_mint_forNonAllowlistedClaimer_reverts() external { - vm.prank(bob); - vm.expectRevert("Optimist: address is not on allowList"); - optimist.mint(bob); - } - - /// @notice Bob's tx should revert if he already minted. 
- function test_mint_forAlreadyMintedClaimer_reverts() external { - _attestAllowlist(bob); - - // mint initial nft with bob - vm.prank(bob); - optimist.mint(bob); - // expect the NFT to be owned by bob - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - - // attempt to mint again - vm.expectRevert("ERC721: token already minted"); - optimist.mint(bob); - } - - /// @notice The baseURI should be set by attestation station by the baseURIAttestor. - function test_baseURI_returnsCorrectBaseURI_succeeds() external { - _attestBaseURI(base_uri); - - bytes memory data = abi.encodeCall( - attestationStation.attestations, - (carol_baseURIAttestor, address(optimist), optimist.BASE_URI_ATTESTATION_KEY()) - ); - vm.expectCall(address(attestationStation), data); - vm.prank(carol_baseURIAttestor); - - // assert baseURI is set - assertEq(optimist.baseURI(), base_uri); - } - - /// @notice tokenURI should return the token uri for a minted token. - function test_tokenURI_returnsCorrectTokenURI_succeeds() external { - // we are using true but it can be any non empty value - _attestBaseURI(base_uri); - - // mint an NFT - _mockAllowlistTrueFor(bob); - vm.prank(bob); - optimist.mint(bob); - - // assert tokenURI is set - assertEq(optimist.baseURI(), base_uri); - assertEq( - optimist.tokenURI(_getTokenId(bob)), - "https://storageapi.fleek.co/6442819a1b05-bucket/optimist-nft/attributes/0x1d96f2f6bef1202e4ce1ff6dad0c2cb002861d3e.json" - ); - } - - /// @notice Should return the token id of the owner. - function test_tokenIdOfAddress_returnsOwnerID_succeeds() external { - uint256 willTokenId = 1024; - address will = address(1024); - - _mockAllowlistTrueFor(will); - - optimist.mint(will); - - assertEq(optimist.tokenIdOfAddress(will), willTokenId); - } - - /// @notice transferFrom should revert since Optimist is a SBT. - function test_transferFrom_soulbound_reverts() external { - _mockAllowlistTrueFor(bob); - - // mint as bob - vm.prank(bob); - optimist.mint(bob); - - // attempt to transfer to sally - vm.expectRevert(bytes("Optimist: soul bound token")); - vm.prank(bob); - optimist.transferFrom(bob, sally, _getTokenId(bob)); - - // attempt to transfer to sally - vm.expectRevert(bytes("Optimist: soul bound token")); - vm.prank(bob); - optimist.safeTransferFrom(bob, sally, _getTokenId(bob)); - // attempt to transfer to sally - vm.expectRevert(bytes("Optimist: soul bound token")); - vm.prank(bob); - optimist.safeTransferFrom(bob, sally, _getTokenId(bob), bytes("0x")); - } - - /// @notice approve should revert since Optimist is a SBT. - function test_approve_soulbound_reverts() external { - _mockAllowlistTrueFor(bob); - - // mint as bob - vm.prank(bob); - optimist.mint(bob); - - // attempt to approve sally - vm.prank(bob); - vm.expectRevert("Optimist: soul bound token"); - optimist.approve(address(attestationStation), _getTokenId(bob)); - - assertEq(optimist.getApproved(_getTokenId(bob)), address(0)); - } - - /// @notice setApprovalForAll should revert since Optimist is a SBT. 
- function test_setApprovalForAll_soulbound_reverts() external { - _mockAllowlistTrueFor(bob); - - // mint as bob - vm.prank(bob); - optimist.mint(bob); - vm.prank(alice_allowlistAttestor); - vm.expectRevert(bytes("Optimist: soul bound token")); - optimist.setApprovalForAll(alice_allowlistAttestor, true); - - // expect approval amount to stil be 0 - assertEq(optimist.getApproved(_getTokenId(bob)), address(0)); - // isApprovedForAll should return false - assertEq(optimist.isApprovedForAll(alice_allowlistAttestor, alice_allowlistAttestor), false); - } - - /// @notice Only owner should be able to burn token. - function test_burn_byOwner_succeeds() external { - _mockAllowlistTrueFor(bob); - - // mint as bob - vm.prank(bob); - optimist.mint(bob); - - // burn as bob - vm.prank(bob); - optimist.burn(_getTokenId(bob)); - - // expect bob to have no balance now - assertEq(optimist.balanceOf(bob), 0); - } - - /// @notice Non-owner attempting to burn token should revert. - function test_burn_byNonOwner_reverts() external { - _mockAllowlistTrueFor(bob); - - // mint as bob - vm.prank(bob); - optimist.mint(bob); - - vm.expectRevert("ERC721: caller is not token owner nor approved"); - // burn as Sally - vm.prank(sally); - optimist.burn(_getTokenId(bob)); - - // expect bob to have still have the token - assertEq(optimist.balanceOf(bob), 1); - } - - /// @notice Should support ERC-721 interface. - function test_supportsInterface_returnsCorrectInterfaceForERC721_succeeds() external view { - bytes4 iface721 = type(IERC721).interfaceId; - // check that it supports ERC-721 interface - assertEq(optimist.supportsInterface(iface721), true); - } - - /// @notice Checking that multi-call using the invite & claim flow works correctly, since the - /// frontend will be making multicalls to improve UX. 
The OptimistInviter.claimInvite - /// and Optimist.mint will be batched - function test_multicall_batchingClaimAndMint_succeeds() external { - uint256 inviterPrivateKey = 0xbeefbeef; - address inviter = vm.addr(inviterPrivateKey); - - address[] memory addresses = new address[](1); - addresses[0] = inviter; - - vm.prank(eve_inviteGranter); - - // grant invites to Inviter; - optimistInviter.setInviteCounts(addresses, 3); - - // issue a new invite - OptimistInviter.ClaimableInvite memory claimableInvite = - optimistInviterHelper.getClaimableInviteWithNewNonce(inviter); - - // EIP-712 sign with Inviter's private key - - (uint8 v, bytes32 r, bytes32 s) = vm.sign(inviterPrivateKey, optimistInviterHelper.getDigest(claimableInvite)); - bytes memory signature = abi.encodePacked(r, s, v); - - bytes32 hashedCommit = keccak256(abi.encode(bob, signature)); - - // commit the invite - vm.prank(bob); - optimistInviter.commitInvite(hashedCommit); - - // wait minimum commitment period - vm.warp(optimistInviter.MIN_COMMITMENT_PERIOD() + block.timestamp); - - IMulticall3.Call3[] memory calls = new IMulticall3.Call3[](2); - - // First call is to claim the invite, receiving the attestation - calls[0] = IMulticall3.Call3({ - target: address(optimistInviter), - callData: abi.encodeCall(OptimistInviter.claimInvite, (bob, claimableInvite, signature)), - allowFailure: false - }); - - // Second call is to mint the Optimist NFT - calls[1] = IMulticall3.Call3({ - target: address(optimist), - callData: abi.encodeCall(Optimist.mint, (bob)), - allowFailure: false - }); - - multicall3.aggregate3(calls); - - assertTrue(optimist.isOnAllowList(bob)); - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - } -} diff --git a/packages/contracts-bedrock/test/periphery/op-nft/OptimistAllowlist.t.sol b/packages/contracts-bedrock/test/periphery/op-nft/OptimistAllowlist.t.sol deleted file mode 100644 index c0c2aef2bce0..000000000000 --- a/packages/contracts-bedrock/test/periphery/op-nft/OptimistAllowlist.t.sol +++ /dev/null @@ -1,227 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -// Testing utilities -import { Test } from "forge-std/Test.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; -import { OptimistAllowlist } from "src/periphery/op-nft/OptimistAllowlist.sol"; -import { OptimistInviter } from "src/periphery/op-nft/OptimistInviter.sol"; -import { OptimistInviterHelper } from "test/mocks/OptimistInviterHelper.sol"; -import { OptimistConstants } from "src/periphery/op-nft/libraries/OptimistConstants.sol"; - -contract OptimistAllowlist_Initializer is Test { - event AttestationCreated(address indexed creator, address indexed about, bytes32 indexed key, bytes val); - - address internal alice_allowlistAttestor; - address internal sally_coinbaseQuestAttestor; - address internal ted; - - uint256 internal bobPrivateKey; - address internal bob; - - AttestationStation attestationStation; - OptimistAllowlist optimistAllowlist; - OptimistInviter optimistInviter; - - // Helps with EIP-712 signature generation - OptimistInviterHelper optimistInviterHelper; - - function setUp() public { - alice_allowlistAttestor = makeAddr("alice_allowlistAttestor"); - sally_coinbaseQuestAttestor = makeAddr("sally_coinbaseQuestAttestor"); - ted = makeAddr("ted"); - - bobPrivateKey = 0xB0B0B0B0; - bob = vm.addr(bobPrivateKey); - vm.label(bob, "bob"); - - // Give alice and bob and sally some ETH - vm.deal(alice_allowlistAttestor, 1 ether); - 
vm.deal(sally_coinbaseQuestAttestor, 1 ether); - vm.deal(bob, 1 ether); - vm.deal(ted, 1 ether); - - _initializeContracts(); - } - - function attestAllowlist(address _about) internal { - AttestationStation.AttestationData[] memory attestationData = new AttestationStation.AttestationData[](1); - // we are using true but it can be any non empty value - attestationData[0] = AttestationStation.AttestationData({ - about: _about, - key: optimistAllowlist.OPTIMIST_CAN_MINT_ATTESTATION_KEY(), - val: bytes("true") - }); - vm.prank(alice_allowlistAttestor); - attestationStation.attest(attestationData); - } - - function attestCoinbaseQuest(address _about) internal { - AttestationStation.AttestationData[] memory attestationData = new AttestationStation.AttestationData[](1); - // we are using true but it can be any non empty value - attestationData[0] = AttestationStation.AttestationData({ - about: _about, - key: optimistAllowlist.COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY(), - val: bytes("true") - }); - vm.prank(sally_coinbaseQuestAttestor); - attestationStation.attest(attestationData); - } - - function inviteAndClaim(address claimer) internal { - address[] memory addresses = new address[](1); - addresses[0] = bob; - - vm.prank(alice_allowlistAttestor); - - // grant invites to Bob; - optimistInviter.setInviteCounts(addresses, 3); - - // issue a new invite - OptimistInviter.ClaimableInvite memory claimableInvite = - optimistInviterHelper.getClaimableInviteWithNewNonce(bob); - - // EIP-712 sign with Bob's private key - bytes memory signature = _getSignature(bobPrivateKey, optimistInviterHelper.getDigest(claimableInvite)); - - bytes32 hashedCommit = keccak256(abi.encode(claimer, signature)); - - // commit the invite - vm.prank(claimer); - optimistInviter.commitInvite(hashedCommit); - - // wait minimum commitment period - vm.warp(optimistInviter.MIN_COMMITMENT_PERIOD() + block.timestamp); - - // reveal and claim the invite - optimistInviter.claimInvite(claimer, claimableInvite, signature); - } - - /// @notice Get signature as a bytes blob, since SignatureChecker takes arbitrary signature blobs. - function _getSignature(uint256 _signingPrivateKey, bytes32 _digest) internal pure returns (bytes memory) { - (uint8 v, bytes32 r, bytes32 s) = vm.sign(_signingPrivateKey, _digest); - - bytes memory signature = abi.encodePacked(r, s, v); - return signature; - } - - function _initializeContracts() internal { - attestationStation = new AttestationStation(); - - optimistInviter = new OptimistInviter(alice_allowlistAttestor, attestationStation); - optimistInviter.initialize("OptimistInviter"); - - optimistAllowlist = new OptimistAllowlist( - attestationStation, alice_allowlistAttestor, sally_coinbaseQuestAttestor, address(optimistInviter) - ); - - optimistInviterHelper = new OptimistInviterHelper(optimistInviter, "OptimistInviter"); - } -} - -contract OptimistAllowlistTest is OptimistAllowlist_Initializer { - function test_constructor_succeeds() external view { - // expect attestationStation to be set - assertEq(address(optimistAllowlist.ATTESTATION_STATION()), address(attestationStation)); - assertEq(optimistAllowlist.ALLOWLIST_ATTESTOR(), alice_allowlistAttestor); - assertEq(optimistAllowlist.COINBASE_QUEST_ATTESTOR(), sally_coinbaseQuestAttestor); - assertEq(address(optimistAllowlist.OPTIMIST_INVITER()), address(optimistInviter)); - } - - /// @notice Base case, a account without any relevant attestations should not be able to mint. 
- function test_isAllowedToMint_withoutAnyAttestations_fails() external view { - assertFalse(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice After receiving a valid allowlist attestation, the account should be able to mint. - function test_isAllowedToMint_fromAllowlistAttestor_succeeds() external { - attestAllowlist(bob); - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice After receiving a valid attestation from the Coinbase Quest attestor, - /// the account should be able to mint. - function test_isAllowedToMint_fromCoinbaseQuestAttestor_succeeds() external { - attestCoinbaseQuest(bob); - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Account that received an attestation from the OptimistInviter contract by going - /// through the claim invite flow should be able to mint. - function test_isAllowedToMint_fromInvite_succeeds() external { - inviteAndClaim(bob); - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Attestation from the wrong allowlist attestor should not allow minting. - function test_isAllowedToMint_fromWrongAllowlistAttestor_fails() external { - // Ted is not the allowlist attestor - vm.prank(ted); - attestationStation.attest(bob, optimistAllowlist.OPTIMIST_CAN_MINT_ATTESTATION_KEY(), bytes("true")); - assertFalse(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Coinbase quest attestation from wrong attestor should not allow minting. - function test_isAllowedToMint_fromWrongCoinbaseQuestAttestor_fails() external { - // Ted is not the coinbase quest attestor - vm.prank(ted); - attestationStation.attest(bob, optimistAllowlist.COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY(), bytes("true")); - assertFalse(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Claiming an invite on the non-official OptimistInviter contract should not allow - /// minting. - function test_isAllowedToMint_fromWrongOptimistInviter_fails() external { - vm.prank(ted); - attestationStation.attest(bob, OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY, bytes("true")); - assertFalse(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Having multiple signals, even if one is invalid, should still allow minting. - function test_isAllowedToMint_withMultipleAttestations_succeeds() external { - attestAllowlist(bob); - attestCoinbaseQuest(bob); - inviteAndClaim(bob); - - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - - // A invalid attestation, as Ted is not allowlist attestor - vm.prank(ted); - attestationStation.attest(bob, optimistAllowlist.OPTIMIST_CAN_MINT_ATTESTATION_KEY(), bytes("true")); - - // Since Bob has at least one valid attestation, he should be allowed to mint - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Having falsy attestation value should not allow minting. - function test_isAllowedToMint_fromAllowlistAttestorWithFalsyValue_fails() external { - // First sends correct attestation - attestAllowlist(bob); - - bytes32 key = optimistAllowlist.OPTIMIST_CAN_MINT_ATTESTATION_KEY(); - vm.expectEmit(true, true, true, false); - emit AttestationCreated(alice_allowlistAttestor, bob, key, bytes("dsafsds")); - - // Invalidates existing attestation - vm.prank(alice_allowlistAttestor); - attestationStation.attest(bob, key, bytes("")); - - assertFalse(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Having falsy attestation value from Coinbase attestor should not allow minting. 
- function test_isAllowedToMint_fromCoinbaseQuestAttestorWithFalsyValue_fails() external { - // First sends correct attestation - attestAllowlist(bob); - - bytes32 key = optimistAllowlist.OPTIMIST_CAN_MINT_ATTESTATION_KEY(); - vm.expectEmit(true, true, true, true); - emit AttestationCreated(alice_allowlistAttestor, bob, key, bytes("")); - - // Invalidates existing attestation - vm.prank(alice_allowlistAttestor); - attestationStation.attest(bob, key, bytes("")); - - assertFalse(optimistAllowlist.isAllowedToMint(bob)); - } -} diff --git a/packages/contracts-bedrock/test/periphery/op-nft/OptimistInviter.t.sol b/packages/contracts-bedrock/test/periphery/op-nft/OptimistInviter.t.sol deleted file mode 100644 index 58e71a13a8d7..000000000000 --- a/packages/contracts-bedrock/test/periphery/op-nft/OptimistInviter.t.sol +++ /dev/null @@ -1,529 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -// Testing utilities -import { Test } from "forge-std/Test.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; -import { OptimistInviter } from "src/periphery/op-nft/OptimistInviter.sol"; -import { Optimist } from "src/periphery/op-nft/Optimist.sol"; -import { TestERC1271Wallet } from "test/mocks/TestERC1271Wallet.sol"; -import { OptimistInviterHelper } from "test/mocks/OptimistInviterHelper.sol"; -import { OptimistConstants } from "src/periphery/op-nft/libraries/OptimistConstants.sol"; - -contract OptimistInviter_Initializer is Test { - event InviteClaimed(address indexed issuer, address indexed claimer); - event Initialized(uint8 version); - event Transfer(address indexed from, address indexed to, uint256 indexed tokenId); - event AttestationCreated(address indexed creator, address indexed about, bytes32 indexed key, bytes val); - - bytes32 EIP712_DOMAIN_TYPEHASH; - - address internal alice_inviteGranter; - address internal sally; - address internal ted; - address internal eve; - - address internal bob; - uint256 internal bobPrivateKey; - address internal carol; - uint256 internal carolPrivateKey; - - TestERC1271Wallet carolERC1271Wallet; - - AttestationStation attestationStation; - OptimistInviter optimistInviter; - - OptimistInviterHelper optimistInviterHelper; - - function setUp() public { - alice_inviteGranter = makeAddr("alice_inviteGranter"); - sally = makeAddr("sally"); - ted = makeAddr("ted"); - eve = makeAddr("eve"); - - bobPrivateKey = 0xB0B0B0B0; - bob = vm.addr(bobPrivateKey); - - carolPrivateKey = 0xC0C0C0C0; - carol = vm.addr(carolPrivateKey); - - carolERC1271Wallet = new TestERC1271Wallet(carol); - - // Give alice and bob and sally some ETH - vm.deal(alice_inviteGranter, 1 ether); - vm.deal(bob, 1 ether); - vm.deal(sally, 1 ether); - vm.deal(ted, 1 ether); - vm.deal(eve, 1 ether); - - EIP712_DOMAIN_TYPEHASH = - keccak256("EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)"); - - _initializeContracts(); - } - - /// @notice Instantiates an AttestationStation, and an OptimistInviter. 
- function _initializeContracts() internal { - attestationStation = new AttestationStation(); - - optimistInviter = new OptimistInviter(alice_inviteGranter, attestationStation); - - vm.expectEmit(true, true, true, true, address(optimistInviter)); - emit Initialized(1); - optimistInviter.initialize("OptimistInviter"); - - optimistInviterHelper = new OptimistInviterHelper(optimistInviter, "OptimistInviter"); - } - - function _passMinCommitmentPeriod() internal { - vm.warp(optimistInviter.MIN_COMMITMENT_PERIOD() + block.timestamp); - } - - /// @notice Returns a user's current invite count, as stored in the AttestationStation. - function _getInviteCount(address _issuer) internal view returns (uint256) { - return optimistInviter.inviteCounts(_issuer); - } - - /// @notice Returns true if claimer has the proper attestation from OptimistInviter to mint. - function _hasMintAttestation(address _claimer) internal view returns (bool) { - bytes memory attestation = attestationStation.attestations( - address(optimistInviter), _claimer, OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY - ); - return attestation.length > 0; - } - - /// @notice Get signature as a bytes blob, since SignatureChecker takes arbitrary signature blobs. - function _getSignature(uint256 _signingPrivateKey, bytes32 _digest) internal pure returns (bytes memory) { - (uint8 v, bytes32 r, bytes32 s) = vm.sign(_signingPrivateKey, _digest); - - bytes memory signature = abi.encodePacked(r, s, v); - return signature; - } - - /// @notice Signs a claimable invite with the given private key and returns the signature using - /// correct EIP712 domain separator. - function _issueInviteAs(uint256 _privateKey) - internal - returns (OptimistInviter.ClaimableInvite memory, bytes memory) - { - return _issueInviteWithEIP712Domain( - _privateKey, - bytes("OptimistInviter"), - bytes(optimistInviter.EIP712_VERSION()), - block.chainid, - address(optimistInviter) - ); - } - - /// @notice Signs a claimable invite with the given private key and returns the signature using - /// the given EIP712 domain separator. This assumes that the issuer's address is the - /// corresponding public key to _issuerPrivateKey. - function _issueInviteWithEIP712Domain( - uint256 _issuerPrivateKey, - bytes memory _eip712Name, - bytes memory _eip712Version, - uint256 _eip712Chainid, - address _eip712VerifyingContract - ) - internal - returns (OptimistInviter.ClaimableInvite memory, bytes memory) - { - address issuer = vm.addr(_issuerPrivateKey); - OptimistInviter.ClaimableInvite memory claimableInvite = - optimistInviterHelper.getClaimableInviteWithNewNonce(issuer); - return ( - claimableInvite, - _getSignature( - _issuerPrivateKey, - optimistInviterHelper.getDigestWithEIP712Domain( - claimableInvite, _eip712Name, _eip712Version, _eip712Chainid, _eip712VerifyingContract - ) - ) - ); - } - - /// @notice Commits a signature and claimer address to the OptimistInviter contract. - function _commitInviteAs(address _as, bytes memory _signature) internal { - vm.prank(_as); - bytes32 hashedSignature = keccak256(abi.encode(_as, _signature)); - optimistInviter.commitInvite(hashedSignature); - - // Check that the commitment was stored correctly - assertEq(optimistInviter.commitmentTimestamps(hashedSignature), block.timestamp); - } - - /// @notice Signs a claimable invite with the given private key. The claimer commits then claims - /// the invite. Checks that all expected events are emitted and that state is updated - /// correctly. 
Returns the signature and invite for use in tests. - function _issueThenClaimShouldSucceed( - uint256 _issuerPrivateKey, - address _claimer - ) - internal - returns (OptimistInviter.ClaimableInvite memory, bytes memory) - { - address issuer = vm.addr(_issuerPrivateKey); - uint256 prevInviteCount = _getInviteCount(issuer); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = - _issueInviteAs(_issuerPrivateKey); - - _commitInviteAs(_claimer, signature); - - // The hash(claimer ++ signature) should be committed - assertEq(optimistInviter.commitmentTimestamps(keccak256(abi.encode(_claimer, signature))), block.timestamp); - - _passMinCommitmentPeriod(); - - // OptimistInviter should issue a new attestation allowing claimer to mint - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), - _claimer, - OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY, - abi.encode(issuer) - ); - - // Should emit an event indicating that the invite was claimed - vm.expectEmit(true, false, false, false, address(optimistInviter)); - emit InviteClaimed(issuer, _claimer); - - vm.prank(_claimer); - optimistInviter.claimInvite(_claimer, claimableInvite, signature); - - // The nonce that issuer used should be marked as used - assertTrue(optimistInviter.usedNonces(issuer, claimableInvite.nonce)); - - // Issuer should have one less invite - assertEq(prevInviteCount - 1, _getInviteCount(issuer)); - - // Claimer should have the mint attestation from the OptimistInviter contract - assertTrue(_hasMintAttestation(_claimer)); - - return (claimableInvite, signature); - } - - /// @notice Issues 3 invites to the given address. Checks that all expected events are emitted - /// and that state is updated correctly. - function _grantInvitesTo(address _to) internal { - address[] memory addresses = new address[](1); - addresses[0] = _to; - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), _to, optimistInviter.CAN_INVITE_ATTESTATION_KEY(), bytes("true") - ); - - vm.prank(alice_inviteGranter); - optimistInviter.setInviteCounts(addresses, 3); - - assertEq(_getInviteCount(_to), 3); - } -} - -contract OptimistInviterTest is OptimistInviter_Initializer { - function test_initialize_succeeds() external view { - // expect attestationStation to be set - assertEq(address(optimistInviter.ATTESTATION_STATION()), address(attestationStation)); - assertEq(optimistInviter.INVITE_GRANTER(), alice_inviteGranter); - } - - /// @notice Alice the admin should be able to give Bob, Sally, and Carol 3 invites, and the - /// OptimistInviter contract should increment invite counts on inviteCounts and issue - /// 'optimist.can-invite' attestations. 
- function test_grantInvites_adminAddingInvites_succeeds() external { - address[] memory addresses = new address[](3); - addresses[0] = bob; - addresses[1] = sally; - addresses[2] = address(carolERC1271Wallet); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), bob, optimistInviter.CAN_INVITE_ATTESTATION_KEY(), bytes("true") - ); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), sally, optimistInviter.CAN_INVITE_ATTESTATION_KEY(), bytes("true") - ); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), - address(carolERC1271Wallet), - optimistInviter.CAN_INVITE_ATTESTATION_KEY(), - bytes("true") - ); - - vm.prank(alice_inviteGranter); - optimistInviter.setInviteCounts(addresses, 3); - - assertEq(_getInviteCount(bob), 3); - assertEq(_getInviteCount(sally), 3); - assertEq(_getInviteCount(address(carolERC1271Wallet)), 3); - } - - /// @notice Bob, who is not the invite granter, should not be able to issue invites. - function test_grantInvites_nonAdminAddingInvites_reverts() external { - address[] memory addresses = new address[](2); - addresses[0] = bob; - addresses[1] = sally; - - vm.expectRevert("OptimistInviter: only invite granter can grant invites"); - vm.prank(bob); - optimistInviter.setInviteCounts(addresses, 3); - } - - /// @notice Sally should be able to commit an invite given by by Bob. - function test_commitInvite_committingForYourself_succeeds() external { - _grantInvitesTo(bob); - (, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - vm.prank(sally); - bytes32 hashedSignature = keccak256(abi.encode(sally, signature)); - optimistInviter.commitInvite(hashedSignature); - - assertEq(optimistInviter.commitmentTimestamps(hashedSignature), block.timestamp); - } - - /// @notice Sally should be able to Bob's for a different claimer, Eve. - function test_commitInvite_committingForSomeoneElse_succeeds() external { - _grantInvitesTo(bob); - (, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - vm.prank(sally); - bytes32 hashedSignature = keccak256(abi.encode(eve, signature)); - optimistInviter.commitInvite(hashedSignature); - - assertEq(optimistInviter.commitmentTimestamps(hashedSignature), block.timestamp); - } - - /// @notice Attempting to commit the same hash twice should revert. This prevents griefing. - function test_commitInvite_committingSameHashTwice_reverts() external { - _grantInvitesTo(bob); - (, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - vm.prank(sally); - bytes32 hashedSignature = keccak256(abi.encode(eve, signature)); - optimistInviter.commitInvite(hashedSignature); - - assertEq(optimistInviter.commitmentTimestamps(hashedSignature), block.timestamp); - - vm.expectRevert("OptimistInviter: commitment already made"); - optimistInviter.commitInvite(hashedSignature); - } - - /// @notice Bob issues signature, and Sally claims the invite. Bob's invite count should be - /// decremented, and Sally should be able to mint. - function test_claimInvite_succeeds() external { - _grantInvitesTo(bob); - _issueThenClaimShouldSucceed(bobPrivateKey, sally); - } - - /// @notice Bob issues signature, and Ted commits the invite for Sally. Eve claims for Sally. 
- function test_claimInvite_claimForSomeoneElse_succeeds() external { - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - vm.prank(ted); - optimistInviter.commitInvite(keccak256(abi.encode(sally, signature))); - _passMinCommitmentPeriod(); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), - sally, - OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY, - abi.encode(bob) - ); - - // Should emit an event indicating that the invite was claimed - vm.expectEmit(true, true, true, true, address(optimistInviter)); - emit InviteClaimed(bob, sally); - - vm.prank(eve); - optimistInviter.claimInvite(sally, claimableInvite, signature); - - assertEq(_getInviteCount(bob), 2); - assertTrue(_hasMintAttestation(sally)); - assertFalse(_hasMintAttestation(eve)); - } - - function test_claimInvite_claimBeforeMinCommitmentPeriod_reverts() external { - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - _commitInviteAs(sally, signature); - - // Some time passes, but not enough to meet the minimum commitment period - vm.warp(block.timestamp + 10); - - vm.expectRevert("OptimistInviter: minimum commitment period has not elapsed yet"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - } - - /// @notice Signature issued for previous versions of the contract should fail. - function test_claimInvite_usingSignatureIssuedForDifferentVersion_reverts() external { - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteWithEIP712Domain( - bobPrivateKey, "OptimismInviter", "0.9.1", block.chainid, address(optimistInviter) - ); - - _commitInviteAs(sally, signature); - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: invalid signature"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - } - - /// @notice Replay attack for signature issued for contract on different chain (ie. mainnet) - /// should fail. - function test_claimInvite_usingSignatureIssuedForDifferentChain_reverts() external { - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteWithEIP712Domain( - bobPrivateKey, "OptimismInviter", bytes(optimistInviter.EIP712_VERSION()), 1, address(optimistInviter) - ); - - _commitInviteAs(sally, signature); - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: invalid signature"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - } - - /// @notice Replay attack for signature issued for instantiation of the OptimistInviter contract - /// on a different address should fail. 
- function test_claimInvite_usingSignatureIssuedForDifferentContract_reverts() external { - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteWithEIP712Domain( - bobPrivateKey, "OptimismInviter", bytes(optimistInviter.EIP712_VERSION()), block.chainid, address(0xBEEF) - ); - - _commitInviteAs(sally, signature); - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: invalid signature"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - } - - /// @notice Attempting to claim again using the same signature again should fail. - function test_claimInvite_replayingUsedNonce_reverts() external { - _grantInvitesTo(bob); - - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = - _issueThenClaimShouldSucceed(bobPrivateKey, sally); - - // Sally tries to claim the invite using the same signature - vm.expectRevert("OptimistInviter: nonce has already been used"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - - // Carol tries to claim the invite using the same signature - _commitInviteAs(carol, signature); - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: nonce has already been used"); - vm.prank(carol); - optimistInviter.claimInvite(carol, claimableInvite, signature); - } - - /// @notice Issuing signatures through a contract that implements ERC1271 should succeed (ie. - /// Gnosis Safe or other smart contract wallets). Carol is using a ERC1271 contract - /// wallet that is simply backed by her private key. - function test_claimInvite_usingERC1271Wallet_succeeds() external { - _grantInvitesTo(address(carolERC1271Wallet)); - - OptimistInviter.ClaimableInvite memory claimableInvite = - optimistInviterHelper.getClaimableInviteWithNewNonce(address(carolERC1271Wallet)); - - bytes memory signature = _getSignature(carolPrivateKey, optimistInviterHelper.getDigest(claimableInvite)); - - // Sally tries to claim the invite - _commitInviteAs(sally, signature); - _passMinCommitmentPeriod(); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), - sally, - OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY, - abi.encode(address(carolERC1271Wallet)) - ); - - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - assertEq(_getInviteCount(address(carolERC1271Wallet)), 2); - } - - /// @notice Claimer must commit the signature before claiming the invite. Sally attempts to - /// claim the Bob's invite without committing the signature first. - function test_claimInvite_withoutCommittingHash_reverts() external { - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - vm.expectRevert("OptimistInviter: claimer and signature have not been committed yet"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - } - - /// @notice Using a signature that doesn't correspond to the claimable invite should fail. 
- function test_claimInvite_withIncorrectSignature_reverts() external { - _grantInvitesTo(carol); - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory bobClaimableInvite, bytes memory bobSignature) = - _issueInviteAs(bobPrivateKey); - (, bytes memory carolSignature) = _issueInviteAs(carolPrivateKey); - - _commitInviteAs(sally, bobSignature); - _commitInviteAs(sally, carolSignature); - - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: invalid signature"); - vm.prank(sally); - optimistInviter.claimInvite(sally, bobClaimableInvite, carolSignature); - } - - /// @notice Attempting to use a signature from a issuer who never was granted invites should - /// fail. - function test_claimInvite_whenIssuerNeverReceivedInvites_reverts() external { - // Bob was never granted any invites, but issues an invite for Eve - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - _commitInviteAs(sally, signature); - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: issuer has no invites"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - } - - /// @notice Attempting to use a signature from a issuer who has no more invites should fail. - /// Bob has 3 invites, but issues 4 invites for Sally, Carol, Ted, and Eve. Only the - /// first 3 invites should be claimable. The last claimer, Eve, should not be able to - /// claim the invite. - function test_claimInvite_whenIssuerHasNoInvitesLeft_reverts() external { - _grantInvitesTo(bob); - - _issueThenClaimShouldSucceed(bobPrivateKey, sally); - _issueThenClaimShouldSucceed(bobPrivateKey, carol); - _issueThenClaimShouldSucceed(bobPrivateKey, ted); - - assertEq(_getInviteCount(bob), 0); - - (OptimistInviter.ClaimableInvite memory claimableInvite4, bytes memory signature4) = - _issueInviteAs(bobPrivateKey); - - _commitInviteAs(eve, signature4); - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: issuer has no invites"); - vm.prank(eve); - optimistInviter.claimInvite(eve, claimableInvite4, signature4); - - assertEq(_getInviteCount(bob), 0); - } -} diff --git a/packages/contracts-bedrock/test/safe/DeputyGuardianModule.t.sol b/packages/contracts-bedrock/test/safe/DeputyGuardianModule.t.sol index 8dd1ba970abd..21fad7bb547f 100644 --- a/packages/contracts-bedrock/test/safe/DeputyGuardianModule.t.sol +++ b/packages/contracts-bedrock/test/safe/DeputyGuardianModule.t.sol @@ -8,15 +8,15 @@ import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; import "test/safe-tools/SafeTestTools.sol"; // Contracts -import { IDeputyGuardianModule } from "src/safe/interfaces/IDeputyGuardianModule.sol"; +import { IDeputyGuardianModule } from "interfaces/safe/IDeputyGuardianModule.sol"; // Libraries import "src/dispute/lib/Types.sol"; // Interfaces -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract DeputyGuardianModule_TestInit is CommonTest, SafeTestTools { @@ -257,7 +257,13 @@ contract DeputyGuardianModule_setRespectedGameType_Test is 
DeputyGuardianModule_ contract DeputyGuardianModule_setRespectedGameType_TestFail is DeputyGuardianModule_TestInit { /// @dev Tests that `setRespectedGameType` when called by a non deputy guardian. function testFuzz_setRespectedGameType_notDeputyGuardian_reverts(GameType _gameType) external { - vm.assume(GameType.unwrap(optimismPortal2.respectedGameType()) != GameType.unwrap(_gameType)); + // Change the game type if it's the same to avoid test rejections. + if (GameType.unwrap(optimismPortal2.respectedGameType()) == GameType.unwrap(_gameType)) { + unchecked { + _gameType = GameType.wrap(GameType.unwrap(_gameType) + 1); + } + } + vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector)); deputyGuardianModule.setRespectedGameType(optimismPortal2, _gameType); assertNotEq(GameType.unwrap(optimismPortal2.respectedGameType()), GameType.unwrap(_gameType)); @@ -288,8 +294,8 @@ contract DeputyGuardianModule_NoPortalCollisions_Test is DeputyGuardianModule_Te excludes[0] = "src/dispute/lib/*"; excludes[1] = "src/L1/OptimismPortal2.sol"; excludes[2] = "src/L1/OptimismPortalInterop.sol"; - excludes[3] = "src/L1/interfaces/IOptimismPortal2.sol"; - excludes[4] = "src/L1/interfaces/IOptimismPortalInterop.sol"; + excludes[3] = "interfaces/L1/IOptimismPortal2.sol"; + excludes[4] = "interfaces/L1/IOptimismPortalInterop.sol"; Abi[] memory abis = ForgeArtifacts.getContractFunctionAbis("src/{L1,dispute,universal}", excludes); for (uint256 i; i < abis.length; i++) { for (uint256 j; j < abis[i].entries.length; j++) { diff --git a/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol b/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol index a7b67b415b22..90a0ecd9f1fc 100644 --- a/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol +++ b/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol @@ -228,7 +228,6 @@ contract LivenessGuard_FuzzOwnerManagement_Test is StdCheats, StdUtils, Liveness mapping(address => uint256) privateKeys; /// @dev Tests that the guard correctly manages the lastLive mapping when owners are added, removed, or swapped - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_ownerManagement_works( uint256 initialOwners, uint256 threshold, diff --git a/packages/contracts-bedrock/test/setup/DeployVariations.t.sol b/packages/contracts-bedrock/test/setup/DeployVariations.t.sol index 31f687f0fdb8..94628067e119 100644 --- a/packages/contracts-bedrock/test/setup/DeployVariations.t.sol +++ b/packages/contracts-bedrock/test/setup/DeployVariations.t.sol @@ -22,7 +22,6 @@ contract DeployVariations_Test is CommonTest { } } - /// forge-config: ciheavy.fuzz.runs = 512 /// @dev It should be possible to enable Fault Proofs with any mix of CGT and Alt-DA. function testFuzz_enableFaultProofs_succeeds(bool _enableCGT, bool _enableAltDa) public virtual { enableAddOns(_enableCGT, _enableAltDa); @@ -30,7 +29,6 @@ contract DeployVariations_Test is CommonTest { super.setUp(); } - /// forge-config: ciheavy.fuzz.runs = 512 /// @dev It should be possible to enable Fault Proofs and Interop with any mix of CGT and Alt-DA. 
function test_enableInteropAndFaultProofs_succeeds(bool _enableCGT, bool _enableAltDa) public virtual { enableAddOns(_enableCGT, _enableAltDa); diff --git a/packages/contracts-bedrock/test/setup/Events.sol b/packages/contracts-bedrock/test/setup/Events.sol index 966b236c30c8..7056f0cbdd6b 100644 --- a/packages/contracts-bedrock/test/setup/Events.sol +++ b/packages/contracts-bedrock/test/setup/Events.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; import "src/dispute/lib/Types.sol"; import { Types } from "src/libraries/Types.sol"; diff --git a/packages/contracts-bedrock/test/setup/Setup.sol b/packages/contracts-bedrock/test/setup/Setup.sol index ef2b654b2410..613dd067647c 100644 --- a/packages/contracts-bedrock/test/setup/Setup.sol +++ b/packages/contracts-bedrock/test/setup/Setup.sol @@ -17,38 +17,38 @@ import { Preinstalls } from "src/libraries/Preinstalls.sol"; import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; // Interfaces -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; -import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IDataAvailabilityChallenge } from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; -import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; -import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IOptimismMintableERC721Factory } from "src/universal/interfaces/IOptimismMintableERC721Factory.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IL2CrossDomainMessenger } from "src/L2/interfaces/IL2CrossDomainMessenger.sol"; -import { IL2StandardBridgeInterop } from "src/L2/interfaces/IL2StandardBridgeInterop.sol"; -import { IL2ToL1MessagePasser } from "src/L2/interfaces/IL2ToL1MessagePasser.sol"; -import { IL2ERC721Bridge } from "src/L2/interfaces/IL2ERC721Bridge.sol"; -import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { IOptimismSuperchainERC20Factory } from "src/L2/interfaces/IOptimismSuperchainERC20Factory.sol"; -import { IBaseFeeVault } from "src/L2/interfaces/IBaseFeeVault.sol"; -import { ISequencerFeeVault } from "src/L2/interfaces/ISequencerFeeVault.sol"; -import { IL1FeeVault } from "src/L2/interfaces/IL1FeeVault.sol"; -import { IGasPriceOracle } from "src/L2/interfaces/IGasPriceOracle.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; -import { ISuperchainWETH } from "src/L2/interfaces/ISuperchainWETH.sol"; -import { IETHLiquidity } from "src/L2/interfaces/IETHLiquidity.sol"; -import { IWETH98 } from "src/universal/interfaces/IWETH98.sol"; -import { IGovernanceToken } from 
"src/governance/interfaces/IGovernanceToken.sol"; -import { ILegacyMessagePasser } from "src/legacy/interfaces/ILegacyMessagePasser.sol"; -import { ISuperchainTokenBridge } from "src/L2/interfaces/ISuperchainTokenBridge.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; +import { IL2OutputOracle } from "interfaces/L1/IL2OutputOracle.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IDataAvailabilityChallenge } from "interfaces/L1/IDataAvailabilityChallenge.sol"; +import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IOptimismMintableERC721Factory } from "interfaces/universal/IOptimismMintableERC721Factory.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IL2CrossDomainMessenger } from "interfaces/L2/IL2CrossDomainMessenger.sol"; +import { IL2StandardBridgeInterop } from "interfaces/L2/IL2StandardBridgeInterop.sol"; +import { IL2ToL1MessagePasser } from "interfaces/L2/IL2ToL1MessagePasser.sol"; +import { IL2ERC721Bridge } from "interfaces/L2/IL2ERC721Bridge.sol"; +import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IOptimismSuperchainERC20Factory } from "interfaces/L2/IOptimismSuperchainERC20Factory.sol"; +import { IBaseFeeVault } from "interfaces/L2/IBaseFeeVault.sol"; +import { ISequencerFeeVault } from "interfaces/L2/ISequencerFeeVault.sol"; +import { IL1FeeVault } from "interfaces/L2/IL1FeeVault.sol"; +import { IGasPriceOracle } from "interfaces/L2/IGasPriceOracle.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; +import { ISuperchainWETH } from "interfaces/L2/ISuperchainWETH.sol"; +import { IETHLiquidity } from "interfaces/L2/IETHLiquidity.sol"; +import { IWETH98 } from "interfaces/universal/IWETH98.sol"; +import { IGovernanceToken } from "interfaces/governance/IGovernanceToken.sol"; +import { ILegacyMessagePasser } from "interfaces/legacy/ILegacyMessagePasser.sol"; +import { ISuperchainTokenBridge } from "interfaces/L2/ISuperchainTokenBridge.sol"; /// @title Setup /// @dev This contact is responsible for setting up the contracts in state. 
It currently diff --git a/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol b/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol index fd0a9bfc7847..c27b493344b3 100644 --- a/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol +++ b/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol @@ -9,11 +9,11 @@ import { CommonTest } from "test/setup/CommonTest.sol"; // Libraries import { Types } from "src/libraries/Types.sol"; import { SafeCall } from "src/libraries/SafeCall.sol"; -import { IL1BlockInterop } from "src/L2/interfaces/IL1BlockInterop.sol"; +import { IL1BlockInterop } from "interfaces/L2/IL1BlockInterop.sol"; import { Encoding } from "src/libraries/Encoding.sol"; // Interfaces -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; // Free function for setting the prevBaseFee param in the OptimismPortal. diff --git a/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol index 50398e4a8920..12cc2a8c53b2 100644 --- a/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol @@ -10,7 +10,7 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { Hashing } from "src/libraries/Hashing.sol"; import { Encoding } from "src/libraries/Encoding.sol"; -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; // CrossDomainMessenger_Test is for testing functionality which is common to both the L1 and L2 // CrossDomainMessenger contracts. For simplicity, we use the L1 Messenger as the test contract. 
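The DeputyGuardianModule fuzz hunk above replaces `vm.assume` with an in-place mutation of the fuzzed `GameType`, so a colliding input is nudged to a neighbouring value instead of the run being rejected. A minimal, self-contained sketch of that pattern, using an illustrative helper name and a stand-in `GameType` declaration (the tests themselves pull the real type in via `src/dispute/lib/Types.sol`):

// SPDX-License-Identifier: MIT
// Sketch only; not a file in this diff. Mirrors the "mutate instead of assume"
// pattern from testFuzz_setRespectedGameType_notDeputyGuardian_reverts.
pragma solidity 0.8.15;

type GameType is uint32; // stand-in for the repository's user-defined value type

library FuzzInputs {
    /// @notice Returns a value guaranteed to differ from `_current`, wrapping on
    ///         overflow rather than discarding the fuzz run.
    function differentFrom(GameType _current, GameType _fuzzed) internal pure returns (GameType) {
        if (GameType.unwrap(_current) == GameType.unwrap(_fuzzed)) {
            unchecked {
                _fuzzed = GameType.wrap(GameType.unwrap(_fuzzed) + 1);
            }
        }
        return _fuzzed;
    }
}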
diff --git a/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol b/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol
index d56a97b19db3..4925c801885d 100644
--- a/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol
+++ b/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol
@@ -2,8 +2,8 @@
 pragma solidity 0.8.15;
 import { CommonTest } from "test/setup/CommonTest.sol";
-import { IOptimismMintableERC20 } from "src/universal/interfaces/IOptimismMintableERC20.sol";
-import { ILegacyMintableERC20 } from "src/universal/interfaces/ILegacyMintableERC20.sol";
+import { IOptimismMintableERC20 } from "interfaces/universal/IOptimismMintableERC20.sol";
+import { ILegacyMintableERC20 } from "interfaces/universal/ILegacyMintableERC20.sol";
 import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol";
 contract OptimismMintableERC20_Test is CommonTest {
diff --git a/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol b/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol
index 74df9e729e83..867c11b3884e 100644
--- a/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol
+++ b/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol
@@ -11,8 +11,8 @@ import { OptimismMintableERC20 } from "src/universal/OptimismMintableERC20.sol";
 import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol";
 // Interfaces
-import { IProxy } from "src/universal/interfaces/IProxy.sol";
-import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol";
+import { IProxy } from "interfaces/universal/IProxy.sol";
+import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol";
 contract OptimismMintableTokenFactory_Test is CommonTest {
     event StandardL2TokenCreated(address indexed remoteToken, address indexed localToken);
diff --git a/packages/contracts-bedrock/test/universal/OptimismMintableERC721.t.sol b/packages/contracts-bedrock/test/universal/OptimismMintableERC721.t.sol
index d22e0957a7df..dfe2234e0b64 100644
--- a/packages/contracts-bedrock/test/universal/OptimismMintableERC721.t.sol
+++ b/packages/contracts-bedrock/test/universal/OptimismMintableERC721.t.sol
@@ -30,6 +30,7 @@ contract OptimismMintableERC721_Test is CommonTest {
         vm.label(address(L2NFT), "L2ERC721Token");
     }
+    /// @notice Tests that the constructor works as expected.
     function test_constructor_succeeds() external view {
         assertEq(L2NFT.name(), "L2NFT");
         assertEq(L2NFT.symbol(), "L2T");
@@ -41,6 +42,24 @@ contract OptimismMintableERC721_Test is CommonTest {
         assertEq(L2NFT.REMOTE_CHAIN_ID(), 1);
     }
+    /// @notice Tests that the bridge cannot be address(0) at construction time.
+    function test_constructor_bridgeAsAddress0_reverts() external {
+        vm.expectRevert("OptimismMintableERC721: bridge cannot be address(0)");
+        L2NFT = new OptimismMintableERC721(address(0), 1, address(L1NFT), "L2NFT", "L2T");
+    }
+
+    /// @notice Tests that the remote chain ID cannot be zero at construction time.
+    function test_constructor_remoteChainId0_reverts() external {
+        vm.expectRevert("OptimismMintableERC721: remote chain id cannot be zero");
+        L2NFT = new OptimismMintableERC721(address(l2ERC721Bridge), 0, address(L1NFT), "L2NFT", "L2T");
+    }
+
+    /// @notice Tests that the remote token cannot be address(0) at construction time.
+    function test_constructor_remoteTokenAsAddress0_reverts() external {
+        vm.expectRevert("OptimismMintableERC721: remote token cannot be address(0)");
+        L2NFT = new OptimismMintableERC721(address(l2ERC721Bridge), 1, address(0), "L2NFT", "L2T");
+    }
+
     /// @notice Ensure that the contract supports the expected interfaces.
     function test_supportsInterfaces_succeeds() external view {
         // Checks if the contract supports the IOptimismMintableERC721 interface.
diff --git a/packages/contracts-bedrock/test/universal/Proxy.t.sol b/packages/contracts-bedrock/test/universal/Proxy.t.sol
index 8c6aa7ae5137..437fabfe6714 100644
--- a/packages/contracts-bedrock/test/universal/Proxy.t.sol
+++ b/packages/contracts-bedrock/test/universal/Proxy.t.sol
@@ -3,7 +3,7 @@ pragma solidity 0.8.15;
 import { Test } from "forge-std/Test.sol";
 import { Bytes32AddressLib } from "@rari-capital/solmate/src/utils/Bytes32AddressLib.sol";
-import { IProxy } from "src/universal/interfaces/IProxy.sol";
+import { IProxy } from "interfaces/universal/IProxy.sol";
 import { DeployUtils } from "scripts/libraries/DeployUtils.sol";
 contract SimpleStorage {
diff --git a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol
index 04e416cbd3a9..b1b6fa92a209 100644
--- a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol
+++ b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol
@@ -6,11 +6,11 @@ import { Test } from "forge-std/Test.sol";
 import { SimpleStorage } from "test/universal/Proxy.t.sol";
 // Interfaces
-import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol";
-import { IL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol";
-import { IResolvedDelegateProxy } from "src/legacy/interfaces/IResolvedDelegateProxy.sol";
-import { IProxy } from "src/universal/interfaces/IProxy.sol";
-import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol";
+import { IAddressManager } from "interfaces/legacy/IAddressManager.sol";
+import { IL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol";
+import { IResolvedDelegateProxy } from "interfaces/legacy/IResolvedDelegateProxy.sol";
+import { IProxy } from "interfaces/universal/IProxy.sol";
+import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol";
 import { DeployUtils } from "scripts/libraries/DeployUtils.sol";
diff --git a/packages/contracts-bedrock/test/universal/SafeSend.t.sol b/packages/contracts-bedrock/test/universal/SafeSend.t.sol
new file mode 100644
index 000000000000..9b2f930fd134
--- /dev/null
+++ b/packages/contracts-bedrock/test/universal/SafeSend.t.sol
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: MIT
+pragma solidity 0.8.15;
+
+import { SafeSend } from "src/universal/SafeSend.sol";
+import { CommonTest } from "test/setup/CommonTest.sol";
+
+contract SafeSendTest is CommonTest {
+    /// @notice Tests that sending to an EOA succeeds.
+    function test_send_toEOA_succeeds() public {
+        assertNotEq(alice, address(0));
+        assertNotEq(bob, address(0));
+        assertEq(bob.code.length, 0);
+
+        vm.deal(alice, 100 ether);
+
+        uint256 aliceBalanceBefore = alice.balance;
+        uint256 bobBalanceBefore = bob.balance;
+
+        vm.prank(alice);
+        SafeSend safeSend = new SafeSend{ value: 100 ether }(payable(bob));
+
+        assertEq(address(safeSend).code.length, 0);
+        assertEq(address(safeSend).balance, 0);
+        assertEq(alice.balance, aliceBalanceBefore - 100 ether);
+        assertEq(bob.balance, bobBalanceBefore + 100 ether);
+    }
+
+    /// @notice Tests that sending to a contract succeeds without executing the
+    ///         contract's code.
+    function test_send_toContract_succeeds() public {
+        // etch reverting code into bob
+        vm.etch(bob, hex"fe");
+        vm.deal(alice, 100 ether);
+
+        uint256 aliceBalanceBefore = alice.balance;
+        uint256 bobBalanceBefore = bob.balance;
+
+        vm.prank(alice);
+        SafeSend safeSend = new SafeSend{ value: 100 ether }(payable(bob));
+
+        assertEq(address(safeSend).code.length, 0);
+        assertEq(address(safeSend).balance, 0);
+        assertEq(alice.balance, aliceBalanceBefore - 100 ether);
+        assertEq(bob.balance, bobBalanceBefore + 100 ether);
+    }
+}
diff --git a/packages/contracts-bedrock/test/universal/Specs.t.sol b/packages/contracts-bedrock/test/universal/Specs.t.sol
index 5150b4d725dd..d8c48849875d 100644
--- a/packages/contracts-bedrock/test/universal/Specs.t.sol
+++ b/packages/contracts-bedrock/test/universal/Specs.t.sol
@@ -12,12 +12,13 @@ import { ForgeArtifacts, Abi, AbiEntry } from "scripts/libraries/ForgeArtifacts.
 import { OPContractsManager } from "src/L1/OPContractsManager.sol";
 // Interfaces
-import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol";
-import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol";
-import { IOptimismPortalInterop } from "src/L1/interfaces/IOptimismPortalInterop.sol";
-import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol";
-import { IDataAvailabilityChallenge } from "src/L1/interfaces/IDataAvailabilityChallenge.sol";
-import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol";
+import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol";
+import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol";
+import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol";
+import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol";
+import { ISystemConfigInterop } from "interfaces/L1/ISystemConfigInterop.sol";
+import { IDataAvailabilityChallenge } from "interfaces/L1/IDataAvailabilityChallenge.sol";
+import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol";
 /// @title Specification_Test
 /// @dev Specifies common security properties of entrypoints to L1 contracts, including authorization and
@@ -106,14 +107,6 @@ contract Specification_Test is CommonTest {
         _addSpec({ _name: "DataAvailabilityChallenge", _sel: IDataAvailabilityChallenge.resolve.selector });
         _addSpec({ _name: "DataAvailabilityChallenge", _sel: IDataAvailabilityChallenge.unlockBond.selector });
-        // DelayedVetoable
-        _addSpec({ _name: "DelayedVetoable", _sel: _getSel("delay()") });
-        _addSpec({ _name: "DelayedVetoable", _sel: _getSel("initiator()") });
-        _addSpec({ _name: "DelayedVetoable", _sel: _getSel("queuedAt(bytes32)") });
-        _addSpec({ _name: "DelayedVetoable", _sel: _getSel("target()") });
-        _addSpec({ _name: "DelayedVetoable", _sel: _getSel("version()") });
-        _addSpec({ _name: "DelayedVetoable", _sel: _getSel("vetoer()") });
-
         // L1CrossDomainMessenger
         _addSpec({ _name: "L1CrossDomainMessenger", _sel: _getSel("MESSAGE_VERSION()") });
         _addSpec({ _name: "L1CrossDomainMessenger", _sel: _getSel("MIN_GAS_CALLDATA_OVERHEAD()") });
@@ -487,36 +480,37 @@ contract Specification_Test is CommonTest {
         _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("gasLimit()") });
         _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("eip1559Denominator()") });
         _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("eip1559Elasticity()") });
+        _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfigInterop.initialize.selector });
         _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfig.initialize.selector });
-        _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfig.minimumGasLimit.selector });
+        _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfigInterop.minimumGasLimit.selector });
         _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("overhead()") });
         _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("owner()") });
         _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("renounceOwnership()"), _auth: Role.SYSTEMCONFIGOWNER });
-        _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfig.resourceConfig.selector });
+        _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfigInterop.resourceConfig.selector });
         _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("scalar()") });
         _addSpec({
             _name: "SystemConfigInterop",
-            _sel: ISystemConfig.setBatcherHash.selector,
+            _sel: ISystemConfigInterop.setBatcherHash.selector,
             _auth: Role.SYSTEMCONFIGOWNER
         });
         _addSpec({
             _name: "SystemConfigInterop",
-            _sel: ISystemConfig.setGasConfig.selector,
+            _sel: ISystemConfigInterop.setGasConfig.selector,
             _auth: Role.SYSTEMCONFIGOWNER
         });
         _addSpec({
             _name: "SystemConfigInterop",
-            _sel: ISystemConfig.setGasLimit.selector,
+            _sel: ISystemConfigInterop.setGasLimit.selector,
             _auth: Role.SYSTEMCONFIGOWNER
         });
         _addSpec({
             _name: "SystemConfigInterop",
-            _sel: ISystemConfig.setEIP1559Params.selector,
+            _sel: ISystemConfigInterop.setEIP1559Params.selector,
             _auth: Role.SYSTEMCONFIGOWNER
         });
         _addSpec({
             _name: "SystemConfigInterop",
-            _sel: ISystemConfig.setUnsafeBlockSigner.selector,
+            _sel: ISystemConfigInterop.setUnsafeBlockSigner.selector,
             _auth: Role.SYSTEMCONFIGOWNER
         });
         _addSpec({
@@ -524,7 +518,7 @@ contract Specification_Test is CommonTest {
             _sel: _getSel("transferOwnership(address)"),
             _auth: Role.SYSTEMCONFIGOWNER
         });
-        _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfig.unsafeBlockSigner.selector });
+        _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfigInterop.unsafeBlockSigner.selector });
         _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("version()") });
         _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("l1CrossDomainMessenger()") });
         _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("l1ERC721Bridge()") });
@@ -560,12 +554,6 @@ contract Specification_Test is CommonTest {
             _auth: Role.DEPENDENCYMANAGER
         });
         _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("dependencyManager()") });
-        _addSpec({
-            _name: "SystemConfigInterop",
-            _sel: _getSel(
-                "initialize(address,uint32,uint32,bytes32,uint64,address,(uint32,uint8,uint8,uint32,uint32,uint128),address,(address,address,address,address,address,address,address),address)"
-            )
-        });
         // ProxyAdmin
         _addSpec({ _name: "ProxyAdmin", _sel: _getSel("addressManager()") });
@@ -849,27 +837,25 @@ contract Specification_Test is CommonTest {
         _addSpec({ _name: "OPContractsManager", _sel: _getSel("version()") });
         _addSpec({ _name: "OPContractsManager", _sel: _getSel("superchainConfig()") });
         _addSpec({ _name: "OPContractsManager", _sel: _getSel("protocolVersions()") });
-        _addSpec({ _name: "OPContractsManager", _sel: _getSel("latestRelease()") });
-        _addSpec({ _name: "OPContractsManager", _sel: _getSel("implementations(string,string)") });
+        _addSpec({ _name: "OPContractsManager", _sel: _getSel("l1ContractsRelease()") });
         _addSpec({ _name: "OPContractsManager", _sel: _getSel("systemConfigs(uint256)") });
         _addSpec({ _name: "OPContractsManager", _sel: _getSel("OUTPUT_VERSION()") });
-        _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.initialize.selector });
         _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.deploy.selector });
         _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.blueprints.selector });
         _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.chainIdToBatchInboxAddress.selector });
+        _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.implementations.selector });
         // OPContractsManagerInterop
         _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("version()") });
         _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("superchainConfig()") });
         _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("protocolVersions()") });
-        _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("latestRelease()") });
-        _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("implementations(string,string)") });
+        _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("l1ContractsRelease()") });
         _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("systemConfigs(uint256)") });
         _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("OUTPUT_VERSION()") });
-        _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.initialize.selector });
         _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.deploy.selector });
         _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.blueprints.selector });
         _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.chainIdToBatchInboxAddress.selector });
+        _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.implementations.selector });
         // DeputyGuardianModule
         _addSpec({
@@ -954,12 +940,12 @@ contract Specification_Test is CommonTest {
     /// @notice Ensures that there's an auth spec for every L1 contract function.
     function test_contractAuth_works() public {
         string[] memory pathExcludes = new string[](6);
-        pathExcludes[0] = "src/dispute/interfaces/*";
+        pathExcludes[0] = "interfaces/dispute/*";
         pathExcludes[1] = "src/dispute/lib/*";
         pathExcludes[2] = "src/safe/SafeSigners.sol";
-        pathExcludes[3] = "src/L1/interfaces/*";
-        pathExcludes[4] = "src/governance/interfaces/*";
-        pathExcludes[5] = "src/safe/interfaces/*";
+        pathExcludes[3] = "interfaces/L1/*";
+        pathExcludes[4] = "interfaces/governance/*";
+        pathExcludes[5] = "interfaces/safe/*";
         Abi[] memory abis = ForgeArtifacts.getContractFunctionAbis(
             "src/{L1,dispute,governance,safe,universal/ProxyAdmin.sol,universal/WETH98.sol}", pathExcludes
         );
diff --git a/packages/contracts-bedrock/test/vendor/Initializable.t.sol b/packages/contracts-bedrock/test/vendor/Initializable.t.sol
index eb43ae187599..1f9fd946c51d 100644
--- a/packages/contracts-bedrock/test/vendor/Initializable.t.sol
+++ b/packages/contracts-bedrock/test/vendor/Initializable.t.sol
@@ -14,11 +14,11 @@ import { Constants } from "src/libraries/Constants.sol";
 import { GameType } from "src/dispute/lib/Types.sol";
 // Interfaces
-import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol";
-import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol";
-import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol";
-import { ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol";
-import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol";
+import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol";
+import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol";
+import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol";
+import { ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol";
+import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol";
 /// @title Initializer_Test
 /// @dev Ensures that the `initialize()` function on contracts cannot be called more than
diff --git a/packages/contracts-bedrock/test/vendor/InitializableOZv5.t.sol b/packages/contracts-bedrock/test/vendor/InitializableOZv5.t.sol
index 4b663d697ba6..51c2fce26678 100644
--- a/packages/contracts-bedrock/test/vendor/InitializableOZv5.t.sol
+++ b/packages/contracts-bedrock/test/vendor/InitializableOZv5.t.sol
@@ -2,7 +2,7 @@
 pragma solidity 0.8.25;
 import { Test } from "forge-std/Test.sol";
-import { IOptimismSuperchainERC20 } from "src/L2/interfaces/IOptimismSuperchainERC20.sol";
+import { IOptimismSuperchainERC20 } from "interfaces/L2/IOptimismSuperchainERC20.sol";
 import { Initializable } from "@openzeppelin/contracts-v5/proxy/utils/Initializable.sol";
 import { DeployUtils } from "scripts/libraries/DeployUtils.sol";
 /// @title InitializerOZv5_Test
diff --git a/versions.json b/versions.json
deleted file mode 100644
index 5a2ae52b57f0..000000000000
--- a/versions.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-  "go": "1.22.6",
-  "abigen": "v1.10.25",
-  "foundry": "143abd6a768eeb52a5785240b763d72a56987b4a",
-  "geth": "v1.14.7",
-  "geth_release": "1.14.7-aa55f5ea",
-  "eth2_testnet_genesis": "v0.10.0",
-  "nvm": "v20.9.0",
-  "slither": "0.10.2",
-  "kontrol": "1.0.53",
-  "just": "1.34.0",
-  "binary_signer": "1.0.4",
-  "semgrep": "1.90.0"
-}
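The new SafeSend.t.sol suite added above asserts two things: the full value reaches `bob` even when his account holds reverting code, and afterwards no code or balance remains at the helper's address. A minimal sketch of a contract with that behaviour, assuming a selfdestruct-in-constructor implementation (the actual `src/universal/SafeSend.sol` is not shown in this diff):

// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;

/// @notice Illustrative sketch only, not the repository's implementation: forwards
///         the entire msg.value to `_recipient` without executing any code at the
///         recipient, by self-destructing inside the constructor.
contract SafeSendSketch {
    constructor(address payable _recipient) payable {
        selfdestruct(_recipient);
    }
}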