diff --git a/.github/assets/cdk-logo-name.png b/.github/assets/cdk-logo-name.png new file mode 100644 index 00000000..4b205936 Binary files /dev/null and b/.github/assets/cdk-logo-name.png differ diff --git a/.github/assets/cdk-logo.svg b/.github/assets/cdk-logo.svg new file mode 100644 index 00000000..cba03359 --- /dev/null +++ b/.github/assets/cdk-logo.svg @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 8a5d2d90..f6205e61 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -1,35 +1,17 @@ -# For most projects, this workflow file will not need changing; you simply need -# to commit it to your repository. -# -# You may wish to alter this file to override the set of languages analyzed, -# or to provide custom queries or build logic. -# -# ******** NOTE ******** -# We have attempted to detect the languages in your repository. Please check -# the `language` matrix defined below to confirm you have the correct set of -# supported CodeQL languages. -# name: "CodeQL" on: push: - branches: [ $default-branch, $protected-branches ] + branches: + - develop + - main pull_request: - # The branches below must be a subset of the branches above - branches: [ $default-branch ] - schedule: - - cron: $cron-weekly jobs: analyze: name: Analyze - # Runner size impacts CodeQL analysis time. To learn more, please see: - # - https://gh.io/recommended-hardware-resources-for-running-codeql - # - https://gh.io/supported-runners-and-hardware-resources - # - https://gh.io/using-larger-runners - # Consider using larger runners for possible analysis time improvements. - runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} - timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} + runs-on: ${{ matrix.language == 'swift' && 'macos-latest' || 'ubuntu-latest' }} + timeout-minutes: ${{ matrix.language == 'swift' && 120 || 360 }} permissions: actions: read contents: read @@ -38,45 +20,25 @@ jobs: strategy: fail-fast: false matrix: - language: [ $detected-codeql-languages ] - # CodeQL supports [ $supported-codeql-languages ] - # Use only 'java-kotlin' to analyze code written in Java, Kotlin or both - # Use only 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both - # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + language: + - go steps: - - name: Checkout repository - uses: actions/checkout@v3 - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v2 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - - # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs - # queries: security-extended,security-and-quality - - - # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v2 - - # ℹī¸ Command-line programs to run using the OS shell. 
- # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun - - # If the Autobuild fails above, remove it and uncomment the following three lines. - # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. - - # - run: | - # echo "Run, Build Application using script" - # ./location_of_script_within_repo/buildscript.sh - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 - with: - category: "/language:${{matrix.language}}" + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + + # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v2 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 + with: + category: "/language:${{ matrix.language }}" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 00000000..bdad36b2 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,24 @@ +name: Lint +on: + push: + branches: + - main + - develop + - update-external-dependencies + - 'release/**' + pull_request: +jobs: + lint: + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v3 + with: + go-version: 1.21.x + - name: Checkout code + uses: actions/checkout@v3 + - name: golangci-lint + uses: golangci/golangci-lint-action@v6 + with: + version: v1.61 + args: --timeout=30m diff --git a/.github/workflows/security-build.yml b/.github/workflows/security-build.yml deleted file mode 100644 index a4def677..00000000 --- a/.github/workflows/security-build.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Security Build -on: - push: - branches: - - main # or the name of your main branch - workflow_dispatch: {} - pull_request: - types: [opened, synchronize, reopened] - -jobs: - sonarcloud: - name: SonarCloud - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis - - name: SonarCloud Scan - uses: SonarSource/sonarcloud-github-action@master - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Needed to get PR information, if any - SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index fad10c79..b20bb982 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -20,7 +20,7 @@ jobs: uses: actions/checkout@v4 - name: Install Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: go-version: ${{ matrix.go-version }} env: @@ -57,8 +57,12 @@ jobs: repository: 0xPolygon/kurtosis-cdk path: "kurtosis-cdk" + - name: Setup Bats and bats libs + uses: bats-core/bats-action@2.0.0 + - name: Test run: make test-e2e-${{ matrix.e2e-group }} working-directory: test env: KURTOSIS_FOLDER: ${{ github.workspace }}/kurtosis-cdk + BATS_LIB_PATH: /usr/lib/ diff --git a/.github/workflows/test-resequence.yml b/.github/workflows/test-resequence.yml index 9175b8a3..6f970150 100644 --- a/.github/workflows/test-resequence.yml +++ b/.github/workflows/test-resequence.yml @@ -97,7 +97,7 @@ jobs: - name: Upload logs if: always() - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 
with: name: logs_${{ github.run_id }} path: ./kurtosis-cdk/ci_logs diff --git a/.github/workflows/test-unit.yml b/.github/workflows/test-unit.yml new file mode 100644 index 00000000..66cfc010 --- /dev/null +++ b/.github/workflows/test-unit.yml @@ -0,0 +1,40 @@ +name: Test Unit and SonarCloud analysis + +on: + push: + branches: + - main + - develop + - 'release/**' + pull_request: + workflow_dispatch: {} + +jobs: + test-unit: + strategy: + fail-fast: false + matrix: + go-version: [1.22.4] + goarch: ["amd64"] + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis + + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + env: + GOARCH: ${{ matrix.goarch }} + + - name: Test + run: make test-unit + + - name: Analyze with SonarCloud + uses: sonarsource/sonarcloud-github-action@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/test-unittest.yml b/.github/workflows/test-unittest.yml deleted file mode 100644 index 156a0144..00000000 --- a/.github/workflows/test-unittest.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: Test Unittest -on: - push: - branches: - - '**' - workflow_dispatch: {} - - -jobs: - test-unittest: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Install Go - uses: actions/setup-go@v3 - - - name: Launch unittest - run: make test \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml index a0a1caef..98197d74 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,73 +3,78 @@ run: timeout: 3m tests: true - # default is true. 
Enables skipping of directories:
-  # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
   skip-dirs-use-default: true
-  skip-dirs:
-    - tests
 
 service:
-  golangci-lint-version: 1.59.1
-
+  golangci-lint-version: 1.61.0
+
 linters:
   disable-all: true
   enable:
     - whitespace # Tool for detection of leading and trailing whitespace
     # - wsl # Forces you to use empty lines
     - wastedassign # Finds wasted assignment statements
-    - unconvert # Unnecessary type conversions
+    - unconvert # Unnecessary type conversions
     - tparallel # Detects inappropriate usage of t.Parallel() method in your Go test codes
     - thelper # Detects golang test helpers without t.Helper() call and checks the consistency of test helpers
-    # - stylecheck # Stylecheck is a replacement for golint
-    # - prealloc # Finds slice declarations that could potentially be pre-allocated
+    - stylecheck # Stylecheck is a replacement for golint
+    - prealloc # Finds slice declarations that could potentially be pre-allocated
     - predeclared # Finds code that shadows one of Go's predeclared identifiers
-    # - nolintlint # Ill-formed or insufficient nolint directives
+    - nolintlint # Ill-formed or insufficient nolint directives
     # - nlreturn # Checks for a new line before return and branch statements to increase code clarity
     - misspell # Misspelled English words in comments
     - makezero # Finds slice declarations with non-zero initial length
-    # - lll # Long lines
-    - importas # Enforces consistent import aliases
+    - lll # Long lines
+    - importas # Enforces consistent import aliases
     - gosec # Security problems
     - gofmt # Whether the code was gofmt-ed
     - goimports # Unused imports
     - goconst # Repeated strings that could be replaced by a constant
-    # - forcetypeassert # Finds forced type assertions
+    - forcetypeassert # Finds forced type assertions
     - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())
-    # - dupl # Code clone detection
-    - errname # Checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error
-    # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13
-    # - gocritic
-    - errcheck # Errcheck is a go lint rule for checking for unchecked errors in go programs.
These unchecked errors can be critical bugs in some cases - # - godox # Godox is a linter for TODOs and FIXMEs left in the code + - dupl # Code clone detection + - errname # Checks that sentinel errors are prefixed with Err and error types are suffixed with Error + - errorlint # Error wrapping introduced in Go 1.13 + - gocritic # gocritic is a Go source code linter that maintains checks that are not in other linters + - errcheck # Errcheck is a go lint rule for checking for unchecked errors + # - godox # Linter for TODOs and FIXMEs left in the code + - gci # Gci checks the consistency of the code with the Go code style guide + - mnd # mnd is a linter for magic numbers + # - revive + - unparam # Unused function parameters linters-settings: gofmt: simplify: true - goconst: - min-len: 3 - min-occurrences: 3 gocritic: enabled-checks: - ruleguard - settings: - ruleguard: - rules: "./gorules/rules.go" + revive: + rules: + - name: exported + arguments: + - disableStutteringCheck + goconst: + min-len: 3 + min-occurrences: 3 + gosec: + excludes: + - G115 # Potential integer overflow when converting between integer types issues: - new-from-rev: origin/develop # report only new issues with reference to develop branch whole-files: true exclude-rules: - - path: _test\.go + - path: '(_test\.go|^test/.*)' linters: - gosec - unparam - lll - - path: gen_sc_data\.go + - path: 'etherman/contracts/contracts_(banana|elderberry)\.go' linters: - - wsl - - lll - - stylecheck + - dupl + exclude-dirs: + - tests + - aggregator/db/migrations include: - EXC0012 # Exported (.+) should have comment( \(or a comment on this block\))? or be unexported - EXC0013 # Package comment should be of the form "(.+)... diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..ca9b0551 --- /dev/null +++ b/LICENSE @@ -0,0 +1,619 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. 
Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS diff --git a/Makefile b/Makefile index 256509bc..c653ec1e 100644 --- a/Makefile +++ b/Makefile @@ -20,15 +20,6 @@ LDFLAGS += -X 'github.com/0xPolygon/cdk.GitRev=$(GITREV)' LDFLAGS += -X 'github.com/0xPolygon/cdk.GitBranch=$(GITBRANCH)' LDFLAGS += -X 'github.com/0xPolygon/cdk.BuildDate=$(DATE)' -# Variables -VENV = .venv -VENV_PYTHON = $(VENV)/bin/python -SYSTEM_PYTHON = $(or $(shell which python3), $(shell which python)) -PYTHON = $(or $(wildcard $(VENV_PYTHON)), "install_first_venv") -GENERATE_SCHEMA_DOC = $(VENV)/bin/generate-schema-doc -GENERATE_DOC_PATH = "docs/config-file/" -GENERATE_DOC_TEMPLATES_PATH = "docs/config-file/templates/" - # Check dependencies # Check for Go .PHONY: check-go @@ -80,28 +71,17 @@ build-docker-nc: ## Builds a docker image with the cdk binary - but without buil stop: ## Stops all services docker-compose down -.PHONY: test -test: - trap '$(STOP)' EXIT; MallocNanoZone=0 go test -count=1 -short -race -p 1 -covermode=atomic -coverprofile=../coverage.out -coverpkg ./... -timeout 200s ./... +.PHONY: test-unit +test-unit: + trap '$(STOP)' EXIT; MallocNanoZone=0 go test -count=1 -short -race -p 1 -covermode=atomic -coverprofile=coverage.out -coverpkg ./... -timeout 200s ./... .PHONY: test-seq_sender test-seq_sender: trap '$(STOP)' EXIT; MallocNanoZone=0 go test -count=1 -short -race -p 1 -covermode=atomic -coverprofile=../coverage.out -timeout 200s ./sequencesender/... - - -.PHONY: install-linter -install-linter: ## Installs the linter - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$(go env GOPATH)/bin v1.54.2 .PHONY: lint lint: ## Runs the linter - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/golangci-lint run - -$(VENV_PYTHON): - rm -rf $(VENV) - $(SYSTEM_PYTHON) -m venv $(VENV) - -venv: $(VENV_PYTHON) + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/golangci-lint run --timeout 5m .PHONY: generate-code-from-proto generate-code-from-proto: ## Generates code from proto files diff --git a/README.md b/README.md index 9d0427d5..23a4e4e8 100644 --- a/README.md +++ b/README.md @@ -1,70 +1,59 @@ - -![Build][Build-badge] -[![Coverage][Coverage-badge]][Sonar-url] -[![Vulnerabilities][Vulnerability-badge]][Sonar-url] -# 0xPolygon GitHub Template -This repo serves as a template for new repositories in the 0xPolygon organization. +
+<div align="center">
+<picture>
+<img alt="Logo" src=".github/assets/cdk-logo-name.png">
+</picture>
+</div>

-# Action Items
-The following actions are required once you create a new repository from this repo:
-- Update this README. Complete the sections to reflect your project
-- Update `CODEOWNERS` file
-- Update `sonar-project.properties` by adding your Sonar project key provided by the Security Team
-- Update badges to reflect your project (badges are located at the bottom of this README)
-- Ensure you are using Issue and PR Templates
+## Polygon CDK

-## About The Project
+**Polygon CDK** (Chain Development Kit) is a modular framework that developers can use to build and deploy Zero-Knowledge-Proof-enabled Rollups and Validiums.
+The CDK allows you to build ZK-powered Rollups that verify execution using Polygon's zkEVM prover; thanks to its modular architecture, they are fully customizable.

-### Built With
+
+[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=0xPolygon_cdk&metric=alert_status&token=aa6d76993fc213c4153bf65e0d62e4d08207ea7e)](https://sonarcloud.io/summary/new_code?id=0xPolygon_cdk)

-[![Node.js 18][Language-badge]][Language-url]
+
+ +
## Getting Started

-### Local Development
+## Pre-requisites
+
+Setup Kurtosis following these instructions: https://github.com/0xPolygon/kurtosis-cdk?tab=readme-ov-file#getting-started
+
+### Local Testing
+
+- You can run locally against the kurtosis-cdk environment using: [docs/local_debug.md](docs/local_debug.md)

-### Database
+## Contributing

-### Installation
+Contributions are very welcome; contribution guidelines are not yet available (WIP).

-## Usage
+## Support

-Use this space to show useful examples of how a project can be used. Additional screenshots, code examples, and demos work well in this space. This is incredibly useful for support personnel when responding to issues related to your application.
+Feel free to [open an issue](https://github.com/0xPolygon/cdk/issues/new) if you have a feature request or bug report.
-## Contributing
-This is the place to document your delivery workflow. For example:
-
-1. Clone the project
-2. Create a feature branch beginning with the ticket number (`git checkout -b INC-7689/update-readme`)
-3. Commit your changes (`git commit -m 'Update README.me with default template`)
-4. Push to the branch (`git push origin INC-7689/update-readme`)
-5. Open a Pull Request
-6. After review and approval, changes are deployed immediately
-
-## Contact
-
-![Email][Email-badge]
-![Slack][Slack-badge]
-[![Production URL][Production-badge]][Production-url]
-[![Staging URL][Staging-badge]][Staging-url]
-
-
-
-[Build-badge]: https://github.com/0xPolygon/learn-api/actions/workflows/main.yml/badge.svg
-[Coverage-badge]: https://sonarqube.polygon.technology/api/project_badges/measure?project=TODO
-[Vulnerability-badge]: https://sonarqube.polygon.technology/api/project_badges/measure?project=TODO
-[Sonar-url]: https://sonarqube.polygon.technology/dashboard?id=TODO
-[Language-badge]: https://img.shields.io/badge/Nodejs-18.0-informational
-[Language-url]: https://nodejs.org/en
-[Email-badge]: https://img.shields.io/badge/Email-team_email_address-informational?logo=gmail
-[Slack-badge]: https://img.shields.io/badge/Slack-team_ChannelName-informational?logo=slack
-[Production-badge]: https://img.shields.io/badge/Production_URL-polygon.technology-informational
-[Production-url]: https://link.to/prod
-[Staging-badge]: https://img.shields.io/badge/Staging_URL-staging.polygon.technology-informational
-[Staging-url]: https://link.to/staging
+## License
+
+Polygon Chain Development Kit
+Copyright (c) 2024 PT Services DMCC
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published
+by the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
diff --git a/aggoracle/chaingersender/evm.go b/aggoracle/chaingersender/evm.go index 859f4b8b..ee02771e 100644 --- a/aggoracle/chaingersender/evm.go +++ b/aggoracle/chaingersender/evm.go @@ -25,12 +25,22 @@ type EthClienter interface { type EthTxManager interface { Remove(ctx context.Context, id common.Hash) error - ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus) ([]ethtxmanager.MonitoredTxResult, error) + ResultsByStatus(ctx context.Context, + statuses []ethtxmanager.MonitoredTxStatus, + ) ([]ethtxmanager.MonitoredTxResult, error) Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) - Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) + Add(ctx context.Context, + to *common.Address, + forcedNonce *uint64, + value *big.Int, + data []byte, + gasOffset uint64, + sidecar *types.BlobTxSidecar, + ) (common.Hash, error) } type EVMChainGERSender struct { + logger *log.Logger gerContract *pessimisticglobalexitroot.Pessimisticglobalexitroot gerAddr common.Address sender common.Address @@ -51,6 +61,7 @@ type EVMConfig struct { } func NewEVMChainGERSender( + logger *log.Logger, l2GlobalExitRoot, sender common.Address, l2Client EthClienter, ethTxMan EthTxManager, @@ -61,7 +72,9 @@ func NewEVMChainGERSender( if err != nil { return nil, err } + return &EVMChainGERSender{ + logger: logger, gerContract: gerContract, gerAddr: l2GlobalExitRoot, sender: sender, @@ -77,6 +90,7 @@ func (c *EVMChainGERSender) IsGERAlreadyInjected(ger common.Hash) (bool, error) if err != nil { return false, fmt.Errorf("error calling gerContract.GlobalExitRootMap: %w", err) } + return timestamp.Cmp(big.NewInt(0)) != 0, nil } @@ -95,10 +109,10 @@ func (c *EVMChainGERSender) UpdateGERWaitUntilMined(ctx context.Context, ger com } for { time.Sleep(c.waitPeriodMonitorTx) - log.Debugf("waiting for tx %s to be mined", id.Hex()) + c.logger.Debugf("waiting for tx %s to be mined", id.Hex()) res, err := c.ethTxMan.Result(ctx, id) if err != nil { - log.Error("error calling ethTxMan.Result: ", err) + c.logger.Error("error calling ethTxMan.Result: ", err) } switch res.Status { case ethtxmanager.MonitoredTxStatusCreated, @@ -111,7 +125,7 @@ func (c *EVMChainGERSender) UpdateGERWaitUntilMined(ctx context.Context, ger com ethtxmanager.MonitoredTxStatusFinalized: return nil default: - log.Error("unexpected tx status: ", res.Status) + c.logger.Error("unexpected tx status: ", res.Status) } } } diff --git a/aggoracle/config.go b/aggoracle/config.go index e6097707..8559ddb6 100644 --- a/aggoracle/config.go +++ b/aggoracle/config.go @@ -19,7 +19,7 @@ type Config struct { TargetChainType TargetChainType `mapstructure:"TargetChainType"` URLRPCL1 string `mapstructure:"URLRPCL1"` // BlockFinality indicates the status of the blocks that will be queried in order to sync - BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` + BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll WaitPeriodNextGER types.Duration `mapstructure:"WaitPeriodNextGER"` EVMSender chaingersender.EVMConfig `mapstructure:"EVMSender"` } diff --git a/aggoracle/e2e_test.go b/aggoracle/e2e_test.go index 39150c00..25a8a96d 100644 --- a/aggoracle/e2e_test.go +++ b/aggoracle/e2e_test.go @@ -27,6 +27,8 
@@ func runTest( l1Client *simulated.Backend, authL1 *bind.TransactOpts, ) { + t.Helper() + for i := 0; i < 10; i++ { _, err := gerL1Contract.UpdateExitRoot(authL1, common.HexToHash(strconv.Itoa(i))) require.NoError(t, err) diff --git a/aggoracle/oracle.go b/aggoracle/oracle.go index f22ee1f0..1ba94d7a 100644 --- a/aggoracle/oracle.go +++ b/aggoracle/oracle.go @@ -2,6 +2,7 @@ package aggoracle import ( "context" + "errors" "math/big" "time" @@ -22,6 +23,7 @@ type ChainSender interface { } type AggOracle struct { + logger *log.Logger ticker *time.Ticker l1Client ethereum.ChainReader l1Info L1InfoTreer @@ -30,6 +32,7 @@ type AggOracle struct { } func New( + logger *log.Logger, chainSender ChainSender, l1Client ethereum.ChainReader, l1InfoTreeSyncer L1InfoTreer, @@ -41,7 +44,9 @@ func New( if err != nil { return nil, err } + return &AggOracle{ + logger: logger, ticker: ticker, l1Client: l1Client, l1Info: l1InfoTreeSyncer, @@ -61,29 +66,33 @@ func (a *AggOracle) Start(ctx context.Context) { case <-a.ticker.C: blockNumToFetch, gerToInject, err = a.getLastFinalisedGER(ctx, blockNumToFetch) if err != nil { - if err == l1infotreesync.ErrBlockNotProcessed { - log.Debugf("syncer is not ready for the block %d", blockNumToFetch) - } else if err == l1infotreesync.ErrNotFound { + switch { + case errors.Is(err, l1infotreesync.ErrBlockNotProcessed): + a.logger.Debugf("syncer is not ready for the block %d", blockNumToFetch) + + case errors.Is(err, l1infotreesync.ErrNotFound): blockNumToFetch = 0 - log.Debugf("syncer has not found any GER until block %d", blockNumToFetch) - } else { - log.Error("error calling getLastFinalisedGER: ", err) + a.logger.Debugf("syncer has not found any GER until block %d", blockNumToFetch) + + default: + a.logger.Error("error calling getLastFinalisedGER: ", err) } + continue } if alreadyInjected, err := a.chainSender.IsGERAlreadyInjected(gerToInject); err != nil { - log.Error("error calling isGERAlreadyInjected: ", err) + a.logger.Error("error calling isGERAlreadyInjected: ", err) continue } else if alreadyInjected { - log.Debugf("GER %s already injected", gerToInject.Hex()) + a.logger.Debugf("GER %s already injected", gerToInject.Hex()) continue } - log.Infof("injecting new GER: %s", gerToInject.Hex()) + a.logger.Infof("injecting new GER: %s", gerToInject.Hex()) if err := a.chainSender.UpdateGERWaitUntilMined(ctx, gerToInject); err != nil { - log.Errorf("error calling updateGERWaitUntilMined, when trying to inject GER %s: %v", gerToInject.Hex(), err) + a.logger.Errorf("error calling updateGERWaitUntilMined, when trying to inject GER %s: %v", gerToInject.Hex(), err) continue } - log.Infof("GER %s injected", gerToInject.Hex()) + a.logger.Infof("GER %s injected", gerToInject.Hex()) case <-ctx.Done(): return } @@ -106,5 +115,6 @@ func (a *AggOracle) getLastFinalisedGER(ctx context.Context, blockNumToFetch uin if err != nil { return blockNumToFetch, common.Hash{}, err } + return 0, info.GlobalExitRoot, nil } diff --git a/aggregator/agglayer_tx.go b/aggregator/agglayer_tx.go index b0cd09c9..30a483ae 100644 --- a/aggregator/agglayer_tx.go +++ b/aggregator/agglayer_tx.go @@ -41,6 +41,7 @@ func (t *Tx) Sign(privateKey *ecdsa.PrivateKey) (*SignedTx, error) { if err != nil { return nil, err } + return &SignedTx{ Tx: *t, Signature: sig, @@ -59,5 +60,6 @@ func (s *SignedTx) Signer() (common.Address, error) { if err != nil { return common.Address{}, err } + return crypto.PubkeyToAddress(*pubKey), nil } diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 2b90826e..a97d72f9 
100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -32,6 +32,7 @@ import ( "github.com/0xPolygonHermez/zkevm-synchronizer-l1/state/entities" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" "github.com/ethereum/go-ethereum/common" + "go.uber.org/zap/zapcore" "google.golang.org/grpc" grpchealth "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/peer" @@ -60,7 +61,8 @@ type finalProofMsg struct { type Aggregator struct { prover.UnimplementedAggregatorServiceServer - cfg Config + cfg Config + logger *log.Logger state stateInterface etherman etherman @@ -100,15 +102,20 @@ type Aggregator struct { func New( ctx context.Context, cfg Config, + logger *log.Logger, stateInterface stateInterface, etherman etherman) (*Aggregator, error) { var profitabilityChecker aggregatorTxProfitabilityChecker switch cfg.TxProfitabilityCheckerType { case ProfitabilityBase: - profitabilityChecker = NewTxProfitabilityCheckerBase(stateInterface, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration, cfg.TxProfitabilityMinReward.Int) + profitabilityChecker = NewTxProfitabilityCheckerBase( + stateInterface, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration, cfg.TxProfitabilityMinReward.Int, + ) case ProfitabilityAcceptAll: - profitabilityChecker = NewTxProfitabilityCheckerAcceptAll(stateInterface, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration) + profitabilityChecker = NewTxProfitabilityCheckerAcceptAll( + stateInterface, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration, + ) } // Create ethtxmanager client @@ -119,7 +126,7 @@ func New( } ethTxManager, err := ethtxmanager.New(cfg.EthTxManager) if err != nil { - log.Fatalf("error creating ethtxmanager client: %v", err) + logger.Fatalf("error creating ethtxmanager client: %v", err) } var streamClient *datastreamer.StreamClient @@ -132,14 +139,12 @@ func New( Outputs: cfg.Log.Outputs, } - log.Init(cfg.Log) - - log.Info("Creating data stream client....") + logger.Info("Creating data stream client....") streamClient, err = datastreamer.NewClientWithLogsConfig(cfg.StreamClient.Server, dataStreamType, streamLogConfig) if err != nil { - log.Fatalf("failed to create stream client, error: %v", err) + logger.Fatalf("failed to create stream client, error: %v", err) } - log.Info("Data stream client created.") + logger.Info("Data stream client created.") } // Synchonizer logs @@ -153,10 +158,10 @@ func New( // Create L1 synchronizer client cfg.Synchronizer.Etherman.L1URL = cfg.EthTxManager.Etherman.URL - log.Debugf("Creating synchronizer client with config: %+v", cfg.Synchronizer) + logger.Debugf("Creating synchronizer client with config: %+v", cfg.Synchronizer) l1Syncr, err := synchronizer.NewSynchronizer(ctx, cfg.Synchronizer) if err != nil { - log.Fatalf("failed to create synchronizer client, error: %v", err) + logger.Fatalf("failed to create synchronizer client, error: %v", err) } var ( @@ -176,6 +181,7 @@ func New( a := &Aggregator{ ctx: ctx, cfg: cfg, + logger: logger, state: stateInterface, etherman: etherman, ethTxManager: ethTxManager, @@ -223,21 +229,26 @@ func (a *Aggregator) retrieveWitness() { for !success { var err error // Get Witness - dbBatch.Witness, err = getWitness(dbBatch.Batch.BatchNumber, a.cfg.WitnessURL, a.cfg.UseFullWitness) + dbBatch.Witness, err = a.getWitness(dbBatch.Batch.BatchNumber, a.cfg.WitnessURL, a.cfg.UseFullWitness) if err != nil { - if err == errBusy { - log.Debugf("Witness server is busy, retrying get witness for batch %d in %v", dbBatch.Batch.BatchNumber, 
a.cfg.RetryTime.Duration) + if errors.Is(err, errBusy) { + a.logger.Debugf( + "Witness server is busy, retrying get witness for batch %d in %v", + dbBatch.Batch.BatchNumber, a.cfg.RetryTime.Duration, + ) } else { - log.Errorf("Failed to get witness for batch %d, err: %v", dbBatch.Batch.BatchNumber, err) + a.logger.Errorf("Failed to get witness for batch %d, err: %v", dbBatch.Batch.BatchNumber, err) } time.Sleep(a.cfg.RetryTime.Duration) + continue inner } err = a.state.AddBatch(a.ctx, &dbBatch, nil) if err != nil { - log.Errorf("Error adding batch: %v", err) + a.logger.Errorf("Error adding batch: %v", err) time.Sleep(a.cfg.RetryTime.Duration) + continue inner } success = true @@ -248,29 +259,32 @@ func (a *Aggregator) retrieveWitness() { } func (a *Aggregator) handleReorg(reorgData synchronizer.ReorgExecutionResult) { - log.Warnf("Reorg detected, reorgData: %+v", reorgData) + a.logger.Warnf("Reorg detected, reorgData: %+v", reorgData) // Get new latest verified batch number lastVBatchNumber, err := a.l1Syncr.GetLastestVirtualBatchNumber(a.ctx) if err != nil { - log.Errorf("Error getting last virtual batch number: %v", err) + a.logger.Errorf("Error getting last virtual batch number: %v", err) } else { err = a.state.DeleteBatchesNewerThanBatchNumber(a.ctx, lastVBatchNumber, nil) if err != nil { - log.Errorf("Error deleting batches newer than batch number %d: %v", lastVBatchNumber, err) + a.logger.Errorf("Error deleting batches newer than batch number %d: %v", lastVBatchNumber, err) } } // Halt the aggregator a.halted.Store(true) for { - log.Errorf("Halting the aggregator due to a L1 reorg. Reorged data has been deleted so it is safe to manually restart the aggregator.") - time.Sleep(10 * time.Second) // nolint:gomnd + a.logger.Warnf( + "Halting the aggregator due to an L1 reorg. 
" + + "Reorged data has been deleted, so it is safe to manually restart the aggregator.", + ) + time.Sleep(10 * time.Second) //nolint:mnd } } func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBatchesData) { - log.Warnf("Rollback batches event, rollbackBatchesData: %+v", rollbackData) + a.logger.Warnf("Rollback batches event, rollbackBatchesData: %+v", rollbackData) a.streamClientMutex.Lock() defer a.streamClientMutex.Unlock() @@ -286,9 +300,9 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat // Stop Reading the data stream err = a.streamClient.ExecCommandStop() if err != nil { - log.Errorf("failed to stop data stream: %v.", err) + a.logger.Errorf("failed to stop data stream: %v.", err) } else { - log.Info("Data stream client stopped") + a.logger.Info("Data stream client stopped") } } @@ -297,22 +311,25 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat if err == nil { lastVerifiedBatchNumber, err = a.etherman.GetLatestVerifiedBatchNum() if err != nil { - log.Errorf("Error getting latest verified batch number: %v", err) + a.logger.Errorf("Error getting latest verified batch number: %v", err) } } // Check lastVerifiedBatchNumber makes sense if err == nil && lastVerifiedBatchNumber > rollbackData.LastBatchNumber { - err = fmt.Errorf("last verified batch number %d is greater than the last batch number %d in the rollback data", lastVerifiedBatchNumber, rollbackData.LastBatchNumber) + err = fmt.Errorf( + "last verified batch number %d is greater than the last batch number %d in the rollback data", + lastVerifiedBatchNumber, rollbackData.LastBatchNumber, + ) } // Delete invalidated batches if err == nil { err = a.state.DeleteBatchesNewerThanBatchNumber(a.ctx, rollbackData.LastBatchNumber, nil) if err != nil { - log.Errorf("Error deleting batches newer than batch number %d: %v", rollbackData.LastBatchNumber, err) + a.logger.Errorf("Error deleting batches newer than batch number %d: %v", rollbackData.LastBatchNumber, err) } else { - log.Infof("Deleted batches newer than batch number %d", rollbackData.LastBatchNumber) + a.logger.Infof("Deleted batches newer than batch number %d", rollbackData.LastBatchNumber) } } @@ -320,9 +337,9 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat if err == nil { err = a.state.DeleteBatchesOlderThanBatchNumber(a.ctx, rollbackData.LastBatchNumber, nil) if err != nil { - log.Errorf("Error deleting batches older than batch number %d: %v", rollbackData.LastBatchNumber, err) + a.logger.Errorf("Error deleting batches older than batch number %d: %v", rollbackData.LastBatchNumber, err) } else { - log.Infof("Deleted batches older than batch number %d", rollbackData.LastBatchNumber) + a.logger.Infof("Deleted batches older than batch number %d", rollbackData.LastBatchNumber) } } @@ -330,9 +347,9 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat if err == nil { err = a.state.DeleteUngeneratedProofs(a.ctx, nil) if err != nil { - log.Errorf("Error deleting ungenerated proofs: %v", err) + a.logger.Errorf("Error deleting ungenerated proofs: %v", err) } else { - log.Info("Deleted ungenerated proofs") + a.logger.Info("Deleted ungenerated proofs") } } @@ -340,9 +357,9 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat if err == nil { err = a.state.DeleteGeneratedProofs(a.ctx, rollbackData.LastBatchNumber+1, maxDBBigIntValue, nil) if err != nil { - log.Errorf("Error deleting generated proofs: 
%v", err) + a.logger.Errorf("Error deleting generated proofs: %v", err) } else { - log.Infof("Deleted generated proofs for batches newer than %d", rollbackData.LastBatchNumber) + a.logger.Infof("Deleted generated proofs for batches newer than %d", rollbackData.LastBatchNumber) } } @@ -350,7 +367,7 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat // Reset current batch data previously read from the data stream a.resetCurrentBatchData() a.currentStreamBatch = state.Batch{} - log.Info("Current batch data reset") + a.logger.Info("Current batch data reset") var marshalledBookMark []byte // Reset the data stream reading point @@ -360,40 +377,43 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat } marshalledBookMark, err = proto.Marshal(bookMark) + //nolint:gocritic if err != nil { - log.Error("failed to marshal bookmark: %v", err) + a.logger.Error("failed to marshal bookmark: %v", err) } else { // Restart the stream client if needed if dsClientWasRunning { a.streamClient.SetProcessEntryFunc(a.handleReceivedDataStream) err = a.streamClient.Start() if err != nil { - log.Errorf("failed to start stream client, error: %v", err) + a.logger.Errorf("failed to start stream client, error: %v", err) } else { // Resume data stream reading err = a.streamClient.ExecCommandStartBookmark(marshalledBookMark) if err != nil { - log.Errorf("failed to connect to data stream: %v", err) + a.logger.Errorf("failed to connect to data stream: %v", err) } - log.Info("Data stream client resumed") + a.logger.Info("Data stream client resumed") } } } } if err == nil { - log.Info("Handling rollback batches event finished successfully") + a.logger.Info("Handling rollback batches event finished successfully") } else { // Halt the aggregator a.halted.Store(true) for { - log.Errorf("Halting the aggregator due to an error handling rollback batches event: %v", err) - time.Sleep(10 * time.Second) // nolint:gomnd + a.logger.Errorf("Halting the aggregator due to an error handling rollback batches event: %v", err) + time.Sleep(10 * time.Second) //nolint:mnd } } } -func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, client *datastreamer.StreamClient, server *datastreamer.StreamServer) error { +func (a *Aggregator) handleReceivedDataStream( + entry *datastreamer.FileEntry, client *datastreamer.StreamClient, server *datastreamer.StreamServer, +) error { forcedBlockhashL1 := common.Hash{} if !a.halted.Load() { @@ -405,7 +425,8 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli batch := &datastream.BatchStart{} err := proto.Unmarshal(entry.Data, batch) if err != nil { - log.Errorf("Error unmarshalling batch: %v", err) + a.logger.Errorf("Error unmarshalling batch: %v", err) + return err } @@ -417,7 +438,8 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli batch := &datastream.BatchEnd{} err := proto.Unmarshal(entry.Data, batch) if err != nil { - log.Errorf("Error unmarshalling batch: %v", err) + a.logger.Errorf("Error unmarshalling batch: %v", err) + return err } @@ -436,62 +458,71 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli // Get batchl2Data from L1 virtualBatch, err := a.l1Syncr.GetVirtualBatchByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber) if err != nil && !errors.Is(err, entities.ErrNotFound) { - log.Errorf("Error getting virtual batch: %v", err) + a.logger.Errorf("Error getting virtual batch: %v", err) + return err } for errors.Is(err, 
entities.ErrNotFound) { - log.Debug("Waiting for virtual batch to be available") + a.logger.Debug("Waiting for virtual batch to be available") time.Sleep(a.cfg.RetryTime.Duration) virtualBatch, err = a.l1Syncr.GetVirtualBatchByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber) if err != nil && !errors.Is(err, entities.ErrNotFound) { - log.Errorf("Error getting virtual batch: %v", err) + a.logger.Errorf("Error getting virtual batch: %v", err) + return err } } // Encode batch - if a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INVALID && a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INJECTED { + if a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INVALID && + a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INJECTED { batchl2Data, err = state.EncodeBatchV2(&a.currentStreamBatchRaw) if err != nil { - log.Errorf("Error encoding batch: %v", err) + a.logger.Errorf("Error encoding batch: %v", err) + return err } } // If the batch is marked as Invalid in the DS we enforce retrieve the data from L1 - if a.cfg.UseL1BatchData || a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INVALID || a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INJECTED { + if a.cfg.UseL1BatchData || + a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INVALID || + a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INJECTED { a.currentStreamBatch.BatchL2Data = virtualBatch.BatchL2Data } else { a.currentStreamBatch.BatchL2Data = batchl2Data } // Compare BatchL2Data from L1 and DataStream - if common.Bytes2Hex(batchl2Data) != common.Bytes2Hex(virtualBatch.BatchL2Data) && a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INJECTED { - log.Warnf("BatchL2Data from L1 and data stream are different for batch %d", a.currentStreamBatch.BatchNumber) + if common.Bytes2Hex(batchl2Data) != common.Bytes2Hex(virtualBatch.BatchL2Data) && + a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INJECTED { + a.logger.Warnf("BatchL2Data from L1 and data stream are different for batch %d", a.currentStreamBatch.BatchNumber) if a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INVALID { - log.Warnf("Batch is marked as invalid in data stream") + a.logger.Warnf("Batch is marked as invalid in data stream") } else { - log.Warnf("DataStream BatchL2Data:%v", common.Bytes2Hex(batchl2Data)) + a.logger.Warnf("DataStream BatchL2Data:%v", common.Bytes2Hex(batchl2Data)) } - log.Warnf("L1 BatchL2Data:%v", common.Bytes2Hex(virtualBatch.BatchL2Data)) + a.logger.Warnf("L1 BatchL2Data:%v", common.Bytes2Hex(virtualBatch.BatchL2Data)) } // Get L1InfoRoot sequence, err := a.l1Syncr.GetSequenceByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber) if err != nil { - log.Errorf("Error getting sequence: %v", err) + a.logger.Errorf("Error getting sequence: %v", err) + return err } for sequence == nil { - log.Debug("Waiting for sequence to be available") + a.logger.Debug("Waiting for sequence to be available") time.Sleep(a.cfg.RetryTime.Duration) sequence, err = a.l1Syncr.GetSequenceByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber) if err != nil { - log.Errorf("Error getting sequence: %v", err) + a.logger.Errorf("Error getting sequence: %v", err) + return err } } @@ -502,7 +533,8 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli // Calculate Acc Input Hash oldDBBatch, err := a.state.GetBatch(a.ctx, a.currentStreamBatch.BatchNumber-1, nil) if err != nil { - log.Errorf("Error getting batch %d: %v", 
a.currentStreamBatch.BatchNumber-1, err) + a.logger.Errorf("Error getting batch %d: %v", a.currentStreamBatch.BatchNumber-1, err) + return err } @@ -510,7 +542,8 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli if a.currentStreamBatch.BatchNumber == 1 { l1Block, err := a.l1Syncr.GetL1BlockByNumber(a.ctx, virtualBatch.BlockNumber) if err != nil { - log.Errorf("Error getting L1 block: %v", err) + a.logger.Errorf("Error getting L1 block: %v", err) + return err } @@ -519,6 +552,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli } accInputHash := cdkcommon.CalculateAccInputHash( + a.logger, oldDBBatch.Batch.AccInputHash, a.currentStreamBatch.BatchL2Data, a.currentStreamBatch.L1InfoRoot, @@ -538,7 +572,8 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli wDBBatch, err := a.state.GetBatch(a.ctx, a.currentStreamBatch.BatchNumber, nil) if err != nil { if !errors.Is(err, state.ErrNotFound) { - log.Errorf("Error getting batch %d: %v", a.currentStreamBatch.BatchNumber, err) + a.logger.Errorf("Error getting batch %d: %v", a.currentStreamBatch.BatchNumber, err) + return err } } @@ -550,7 +585,8 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli // Store batch in the DB err = a.state.AddBatch(a.ctx, &dbBatch, nil) if err != nil { - log.Errorf("Error adding batch: %v", err) + a.logger.Errorf("Error adding batch: %v", err) + return err } @@ -572,7 +608,8 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli l2Block := &datastream.L2Block{} err := proto.Unmarshal(entry.Data, l2Block) if err != nil { - log.Errorf("Error unmarshalling L2Block: %v", err) + a.logger.Errorf("Error unmarshalling L2Block: %v", err) + return err } @@ -592,13 +629,15 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli l2Tx := &datastream.Transaction{} err := proto.Unmarshal(entry.Data, l2Tx) if err != nil { - log.Errorf("Error unmarshalling L2Tx: %v", err) + a.logger.Errorf("Error unmarshalling L2Tx: %v", err) + return err } // New Tx raw tx, err := state.DecodeTx(common.Bytes2Hex(l2Tx.Encoded)) if err != nil { - log.Errorf("Error decoding tx: %v", err) + a.logger.Errorf("Error decoding tx: %v", err) + return err } @@ -611,6 +650,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli } } } + return nil } @@ -619,7 +659,8 @@ func (a *Aggregator) Start() error { // Initial L1 Sync blocking err := a.l1Syncr.Sync(true) if err != nil { - log.Fatalf("Failed to synchronize from L1: %v", err) + a.logger.Fatalf("Failed to synchronize from L1: %v", err) + return err } @@ -627,7 +668,7 @@ func (a *Aggregator) Start() error { go func() { err := a.l1Syncr.Sync(false) if err != nil { - log.Fatalf("Failed to synchronize from L1: %v", err) + a.logger.Fatalf("Failed to synchronize from L1: %v", err) } }() @@ -635,7 +676,7 @@ func (a *Aggregator) Start() error { address := fmt.Sprintf("%s:%d", a.cfg.Host, a.cfg.Port) lis, err := net.Listen("tcp", address) if err != nil { - log.Fatalf("Failed to listen: %v", err) + a.logger.Fatalf("Failed to listen: %v", err) } a.srv = grpc.NewServer() @@ -667,11 +708,18 @@ func (a *Aggregator) Start() error { return err } - log.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber) - log.Infof("Starting AccInputHash:%v", accInputHash.String()) + a.logger.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber) + a.logger.Infof("Starting AccInputHash:%v", accInputHash.String()) 
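The two a.logger.Infof calls above follow the pattern this diff applies throughout: package-level log calls are replaced by an injected *log.Logger, so each component logs through its own instance and can attach scoped fields. A minimal sketch of that wiring, assuming the repo's log wrapper exposes the WithFields/Infof methods already used in this diff; the Worker type, NewWorker constructor, and import path are illustrative, not part of this change:

	package aggregator

	import (
		"github.com/0xPolygon/cdk/log" // assumed import path for this repo's log wrapper
	)

	// Worker is a hypothetical component, shown only to illustrate the
	// injected-logger convention used across this diff.
	type Worker struct {
		logger *log.Logger
	}

	// NewWorker receives its logger as a dependency instead of relying on
	// package-level logging state, mirroring how New(ctx, cfg, logger, ...)
	// is wired elsewhere in this diff.
	func NewWorker(logger *log.Logger) *Worker {
		return &Worker{logger: logger}
	}

	func (w *Worker) Process(batchNumber uint64) {
		// Derive a scoped logger for this unit of work, as the diff does
		// with tmpLogger := a.logger.WithFields(...).
		tmpLogger := w.logger.WithFields("batch", batchNumber)
		tmpLogger.Infof("processing batch %d", batchNumber)
	}

A component built this way can be handed the same logger instance as the Aggregator, keeping log fields such as "module" consistent across subsystems.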
// Store Acc Input Hash of the latest verified batch - dummyDBBatch := state.DBBatch{Batch: state.Batch{BatchNumber: lastVerifiedBatchNumber, AccInputHash: *accInputHash}, Datastream: []byte{0}, Witness: []byte{0}} + dummyDBBatch := state.DBBatch{ + Batch: state.Batch{ + BatchNumber: lastVerifiedBatchNumber, + AccInputHash: *accInputHash, + }, + Datastream: []byte{0}, + Witness: []byte{0}, + } err = a.state.AddBatch(a.ctx, &dummyDBBatch, nil) if err != nil { return err @@ -694,7 +742,7 @@ func (a *Aggregator) Start() error { err = a.streamClient.Start() if err != nil { - log.Fatalf("failed to start stream client, error: %v", err) + a.logger.Fatalf("failed to start stream client, error: %v", err) } bookMark := &datastream.BookMark{ @@ -704,25 +752,26 @@ func (a *Aggregator) Start() error { marshalledBookMark, err := proto.Marshal(bookMark) if err != nil { - log.Fatalf("failed to marshal bookmark: %v", err) + a.logger.Fatalf("failed to marshal bookmark: %v", err) } err = a.streamClient.ExecCommandStartBookmark(marshalledBookMark) if err != nil { - log.Fatalf("failed to connect to data stream: %v", err) + a.logger.Fatalf("failed to connect to data stream: %v", err) } // A this point everything is ready, so start serving go func() { - log.Infof("Server listening on port %d", a.cfg.Port) + a.logger.Infof("Server listening on port %d", a.cfg.Port) if err := a.srv.Serve(lis); err != nil { a.exit() - log.Fatalf("Failed to serve: %v", err) + a.logger.Fatalf("Failed to serve: %v", err) } }() } <-a.ctx.Done() + return a.ctx.Err() } @@ -741,22 +790,24 @@ func (a *Aggregator) Channel(stream prover.AggregatorService_ChannelServer) erro if ok { proverAddr = p.Addr } - prover, err := prover.New(stream, proverAddr, a.cfg.ProofStatePollingInterval) + proverLogger := log.WithFields("module", cdkcommon.PROVER) + prover, err := prover.New(proverLogger, stream, proverAddr, a.cfg.ProofStatePollingInterval) if err != nil { return err } - log := log.WithFields( + tmpLogger := proverLogger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), "proverAddr", prover.Addr(), ) - log.Info("Establishing stream connection with prover") + tmpLogger.Info("Establishing stream connection with prover") // Check if prover supports the required Fork ID if !prover.SupportsForkID(a.cfg.ForkId) { err := errors.New("prover does not support required fork ID") - log.Warn(FirstToUpper(err.Error())) + tmpLogger.Warn(FirstToUpper(err.Error())) + return err } @@ -773,30 +824,32 @@ func (a *Aggregator) Channel(stream prover.AggregatorService_ChannelServer) erro if !a.halted.Load() { isIdle, err := prover.IsIdle() if err != nil { - log.Errorf("Failed to check if prover is idle: %v", err) + tmpLogger.Errorf("Failed to check if prover is idle: %v", err) time.Sleep(a.cfg.RetryTime.Duration) + continue } if !isIdle { - log.Debug("Prover is not idle") + tmpLogger.Debug("Prover is not idle") time.Sleep(a.cfg.RetryTime.Duration) + continue } _, err = a.tryBuildFinalProof(ctx, prover, nil) if err != nil { - log.Errorf("Error checking proofs to verify: %v", err) + tmpLogger.Errorf("Error checking proofs to verify: %v", err) } proofGenerated, err := a.tryAggregateProofs(ctx, prover) if err != nil { - log.Errorf("Error trying to aggregate proofs: %v", err) + tmpLogger.Errorf("Error trying to aggregate proofs: %v", err) } if !proofGenerated { proofGenerated, err = a.tryGenerateBatchProof(ctx, prover) if err != nil { - log.Errorf("Error trying to generate proof: %v", err) + tmpLogger.Errorf("Error trying to generate proof: %v", err) } } if 
!proofGenerated { @@ -822,15 +875,18 @@ func (a *Aggregator) sendFinalProof() { ctx := a.ctx proof := msg.recursiveProof - log.WithFields("proofId", proof.ProofID, "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal)) - log.Info("Verifying final proof with ethereum smart contract") + tmpLogger := a.logger.WithFields( + "proofId", proof.ProofID, + "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal)) + tmpLogger.Info("Verifying final proof with ethereum smart contract") a.startProofVerification() finalDBBatch, err := a.state.GetBatch(ctx, proof.BatchNumberFinal, nil) if err != nil { - log.Errorf("Failed to retrieve batch with number [%d]: %v", proof.BatchNumberFinal, err) + tmpLogger.Errorf("Failed to retrieve batch with number [%d]: %v", proof.BatchNumberFinal, err) a.endProofVerification() + continue } @@ -875,28 +931,28 @@ func (a *Aggregator) settleWithAggLayer( } signedTx, err := tx.Sign(a.sequencerPrivateKey) if err != nil { - log.Errorf("failed to sign tx: %v", err) + a.logger.Errorf("failed to sign tx: %v", err) a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) return false } - log.Debug("final proof: %+v", tx) - log.Debug("final proof signedTx: ", signedTx.Tx.ZKP.Proof.Hex()) + a.logger.Debug("final proof: %+v", tx) + a.logger.Debug("final proof signedTx: ", signedTx.Tx.ZKP.Proof.Hex()) txHash, err := a.aggLayerClient.SendTx(*signedTx) if err != nil { - log.Errorf("failed to send tx to the agglayer: %v", err) + a.logger.Errorf("failed to send tx to the agglayer: %v", err) a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) return false } - log.Infof("tx %s sent to agglayer, waiting to be mined", txHash.Hex()) - log.Debugf("Timeout set to %f seconds", a.cfg.AggLayerTxTimeout.Duration.Seconds()) + a.logger.Infof("tx %s sent to agglayer, waiting to be mined", txHash.Hex()) + a.logger.Debugf("Timeout set to %f seconds", a.cfg.AggLayerTxTimeout.Duration.Seconds()) waitCtx, cancelFunc := context.WithDeadline(ctx, time.Now().Add(a.cfg.AggLayerTxTimeout.Duration)) defer cancelFunc() if err := a.aggLayerClient.WaitTxToBeMined(txHash, waitCtx); err != nil { - log.Errorf("agglayer didn't mine the tx: %v", err) + a.logger.Errorf("agglayer didn't mine the tx: %v", err) a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) return false @@ -912,19 +968,23 @@ func (a *Aggregator) settleDirect( inputs ethmanTypes.FinalProofInputs) bool { // add batch verification to be monitored sender := common.HexToAddress(a.cfg.SenderAddress) - to, data, err := a.etherman.BuildTrustedVerifyBatchesTxData(proof.BatchNumber-1, proof.BatchNumberFinal, &inputs, sender) + to, data, err := a.etherman.BuildTrustedVerifyBatchesTxData( + proof.BatchNumber-1, proof.BatchNumberFinal, &inputs, sender, + ) if err != nil { - log.Errorf("Error estimating batch verification to add to eth tx manager: %v", err) + a.logger.Errorf("Error estimating batch verification to add to eth tx manager: %v", err) a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) + return false } monitoredTxID, err := a.ethTxManager.Add(ctx, to, nil, big.NewInt(0), data, a.cfg.GasOffset, nil) if err != nil { - log.Errorf("Error Adding TX to ethTxManager: %v", err) + a.logger.Errorf("Error Adding TX to ethTxManager: %v", err) mTxLogger := ethtxmanager.CreateLogger(monitoredTxID, sender, to) mTxLogger.Errorf("Error to add batch verification tx to eth tx manager: %v", err) a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) + return false } @@ -937,18 +997,22 @@ func (a *Aggregator) 
settleDirect( } func (a *Aggregator) handleFailureToAddVerifyBatchToBeMonitored(ctx context.Context, proof *state.Proof) { - log := log.WithFields("proofId", proof.ProofID, "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal)) + tmpLogger := a.logger.WithFields( + "proofId", proof.ProofID, + "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal), + ) proof.GeneratingSince = nil err := a.state.UpdateGeneratedProof(ctx, proof, nil) if err != nil { - log.Errorf("Failed updating proof state (false): %v", err) + tmpLogger.Errorf("Failed updating proof state (false): %v", err) } a.endProofVerification() } // buildFinalProof builds and return the final proof for an aggregated/batch proof. -func (a *Aggregator) buildFinalProof(ctx context.Context, prover proverInterface, proof *state.Proof) (*prover.FinalProof, error) { - log := log.WithFields( +func (a *Aggregator) buildFinalProof( + ctx context.Context, prover proverInterface, proof *state.Proof) (*prover.FinalProof, error) { + tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), "proverAddr", prover.Addr(), @@ -962,8 +1026,8 @@ func (a *Aggregator) buildFinalProof(ctx context.Context, prover proverInterface } proof.ProofID = finalProofID - log.Infof("Final proof ID for batches [%d-%d]: %s", proof.BatchNumber, proof.BatchNumberFinal, *proof.ProofID) - log = log.WithFields("finalProofId", finalProofID) + tmpLogger.Infof("Final proof ID for batches [%d-%d]: %s", proof.BatchNumber, proof.BatchNumberFinal, *proof.ProofID) + tmpLogger = tmpLogger.WithFields("finalProofId", finalProofID) finalProof, err := prover.WaitFinalProof(ctx, *proof.ProofID) if err != nil { @@ -971,36 +1035,21 @@ func (a *Aggregator) buildFinalProof(ctx context.Context, prover proverInterface } // mock prover sanity check - if string(finalProof.Public.NewStateRoot) == mockedStateRoot && string(finalProof.Public.NewLocalExitRoot) == mockedLocalExitRoot { + if string(finalProof.Public.NewStateRoot) == mockedStateRoot && + string(finalProof.Public.NewLocalExitRoot) == mockedLocalExitRoot { // This local exit root and state root come from the mock // prover, use the one captured by the executor instead finalDBBatch, err := a.state.GetBatch(ctx, proof.BatchNumberFinal, nil) if err != nil { return nil, fmt.Errorf("failed to retrieve batch with number [%d]", proof.BatchNumberFinal) } - log.Warnf("NewLocalExitRoot and NewStateRoot look like a mock values, using values from executor instead: LER: %v, SR: %v", + tmpLogger.Warnf( + "NewLocalExitRoot and NewStateRoot look like mock values, using values from the executor instead: LER: %v, SR: %v", finalDBBatch.Batch.LocalExitRoot.TerminalString(), finalDBBatch.Batch.StateRoot.TerminalString()) finalProof.Public.NewStateRoot = finalDBBatch.Batch.StateRoot.Bytes() finalProof.Public.NewLocalExitRoot = finalDBBatch.Batch.LocalExitRoot.Bytes() } - // Sanity Check: state root from the proof must match the one from the final batch - if a.cfg.FinalProofSanityCheckEnabled { - finalDBBatch, err := a.state.GetBatch(ctx, proof.BatchNumberFinal, nil) - if err != nil { - return nil, fmt.Errorf("failed to retrieve batch with number [%d]", proof.BatchNumberFinal) - } - - if common.BytesToHash(finalProof.Public.NewStateRoot).String() != finalDBBatch.Batch.StateRoot.String() { - for { - log.Errorf("State root from the final proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]", proof.BatchNumberFinal, common.BytesToHash(finalProof.Public.NewStateRoot).String(), 
finalDBBatch.Batch.StateRoot.String()) - time.Sleep(a.cfg.RetryTime.Duration) - } - } else { - log.Infof("State root sanity check from the final proof for batch %d passed", proof.BatchNumberFinal) - } - } - return finalProof, nil } @@ -1012,19 +1061,18 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf proverName := prover.Name() proverID := prover.ID() - log := log.WithFields( + tmpLogger := a.logger.WithFields( "prover", proverName, "proverId", proverID, "proverAddr", prover.Addr(), ) - log.Debug("tryBuildFinalProof start") + tmpLogger.Debug("tryBuildFinalProof start") - var err error if !a.canVerifyProof() { - log.Debug("Time to verify proof not reached or proof verification in progress") + tmpLogger.Debug("Time to verify proof not reached or proof verification in progress") return false, nil } - log.Debug("Send final proof time reached") + tmpLogger.Debug("Send final proof time reached") lastVerifiedBatchNumber, err := a.etherman.GetLatestVerifiedBatchNum() if err != nil { @@ -1034,11 +1082,10 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf if proof == nil { // we don't have a proof generating at the moment, check if we // have a proof ready to verify - - proof, err = a.getAndLockProofReadyToVerify(ctx, prover, lastVerifiedBatchNumber) + proof, err = a.getAndLockProofReadyToVerify(ctx, lastVerifiedBatchNumber) if errors.Is(err, state.ErrNotFound) { // nothing to verify, swallow the error - log.Debug("No proof ready to verify") + tmpLogger.Debug("No proof ready to verify") return false, nil } if err != nil { @@ -1051,7 +1098,7 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf proof.GeneratingSince = nil err2 := a.state.UpdateGeneratedProof(a.ctx, proof, nil) if err2 != nil { - log.Errorf("Failed to unlock proof: %v", err2) + tmpLogger.Errorf("Failed to unlock proof: %v", err2) } } }() @@ -1067,7 +1114,7 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf } } - log = log.WithFields( + tmpLogger = tmpLogger.WithFields( "proofId", *proof.ProofID, "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal), ) @@ -1076,7 +1123,7 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf finalProof, err := a.buildFinalProof(ctx, prover, proof) if err != nil { err = fmt.Errorf("failed to build final proof, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } @@ -1093,27 +1140,35 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf case a.finalProof <- msg: } - log.Debug("tryBuildFinalProof end") + tmpLogger.Debug("tryBuildFinalProof end") return true, nil } -func (a *Aggregator) validateEligibleFinalProof(ctx context.Context, proof *state.Proof, lastVerifiedBatchNum uint64) (bool, error) { +func (a *Aggregator) validateEligibleFinalProof( + ctx context.Context, proof *state.Proof, lastVerifiedBatchNum uint64, +) (bool, error) { batchNumberToVerify := lastVerifiedBatchNum + 1 if proof.BatchNumber != batchNumberToVerify { - if proof.BatchNumber < batchNumberToVerify && proof.BatchNumberFinal >= batchNumberToVerify { + //nolint:gocritic + if proof.BatchNumber < batchNumberToVerify && + proof.BatchNumberFinal >= batchNumberToVerify { // We have a proof that contains some batches below the last batch verified, anyway can be eligible as final proof - log.Warnf("Proof %d-%d contains some batches lower than last batch verified %d. 
Check anyway if it is eligible", proof.BatchNumber, proof.BatchNumberFinal, lastVerifiedBatchNum) + a.logger.Warnf("Proof %d-%d contains some batches lower than last batch verified %d. Check anyway if it is eligible", + proof.BatchNumber, proof.BatchNumberFinal, lastVerifiedBatchNum) } else if proof.BatchNumberFinal < batchNumberToVerify { // We have a proof that contains batches below that the last batch verified, we need to delete this proof - log.Warnf("Proof %d-%d lower than next batch to verify %d. Deleting it", proof.BatchNumber, proof.BatchNumberFinal, batchNumberToVerify) + a.logger.Warnf("Proof %d-%d lower than next batch to verify %d. Deleting it", + proof.BatchNumber, proof.BatchNumberFinal, batchNumberToVerify) err := a.state.DeleteGeneratedProofs(ctx, proof.BatchNumber, proof.BatchNumberFinal, nil) if err != nil { return false, fmt.Errorf("failed to delete discarded proof, err: %w", err) } + return false, nil } else { - log.Debugf("Proof batch number %d is not the following to last verfied batch number %d", proof.BatchNumber, lastVerifiedBatchNum) + a.logger.Debugf("Proof batch number %d does not follow the last verified batch number %d", + proof.BatchNumber, lastVerifiedBatchNum) return false, nil } } @@ -1123,13 +1178,17 @@ func (a *Aggregator) validateEligibleFinalProof(ctx context.Context, proof *stat return false, fmt.Errorf("failed to check if proof contains complete sequences, %w", err) } if !bComplete { - log.Infof("Recursive proof %d-%d not eligible to be verified: not containing complete sequences", proof.BatchNumber, proof.BatchNumberFinal) + a.logger.Infof("Recursive proof %d-%d not eligible to be verified: not containing complete sequences", + proof.BatchNumber, proof.BatchNumberFinal) return false, nil } + return true, nil } -func (a *Aggregator) getAndLockProofReadyToVerify(ctx context.Context, prover proverInterface, lastVerifiedBatchNum uint64) (*state.Proof, error) { +func (a *Aggregator) getAndLockProofReadyToVerify( + ctx context.Context, lastVerifiedBatchNum uint64, +) (*state.Proof, error) { a.stateDBMutex.Lock() defer a.stateDBMutex.Unlock() @@ -1154,7 +1213,7 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state. // Release proofs from generating state in a single transaction dbTx, err := a.state.BeginStateTransaction(ctx) if err != nil { - log.Warnf("Failed to begin transaction to release proof aggregation state, err: %v", err) + a.logger.Warnf("Failed to begin transaction to release proof aggregation state, err: %v", err) return err } @@ -1168,9 +1227,10 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state. if err != nil { if err := dbTx.Rollback(ctx); err != nil { err := fmt.Errorf("failed to rollback proof aggregation state: %w", err) - log.Error(FirstToUpper(err.Error())) + a.logger.Error(FirstToUpper(err.Error())) return err } + return fmt.Errorf("failed to release proof aggregation state: %w", err) } @@ -1182,8 +1242,9 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state. 
return nil } -func (a *Aggregator) getAndLockProofsToAggregate(ctx context.Context, prover proverInterface) (*state.Proof, *state.Proof, error) { - log := log.WithFields( +func (a *Aggregator) getAndLockProofsToAggregate( + ctx context.Context, prover proverInterface) (*state.Proof, *state.Proof, error) { + tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), "proverAddr", prover.Addr(), @@ -1200,7 +1261,7 @@ func (a *Aggregator) getAndLockProofsToAggregate(ctx context.Context, prover pro // Set proofs in generating state in a single transaction dbTx, err := a.state.BeginStateTransaction(ctx) if err != nil { - log.Errorf("Failed to begin transaction to set proof aggregation state, err: %v", err) + tmpLogger.Errorf("Failed to begin transaction to set proof aggregation state, err: %v", err) return nil, nil, err } @@ -1215,9 +1276,10 @@ func (a *Aggregator) getAndLockProofsToAggregate(ctx context.Context, prover pro if err != nil { if err := dbTx.Rollback(ctx); err != nil { err := fmt.Errorf("failed to rollback proof aggregation state %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return nil, nil, err } + return nil, nil, fmt.Errorf("failed to set proof aggregation state %w", err) } @@ -1233,17 +1295,17 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf proverName := prover.Name() proverID := prover.ID() - log := log.WithFields( + tmpLogger := a.logger.WithFields( "prover", proverName, "proverId", proverID, "proverAddr", prover.Addr(), ) - log.Debug("tryAggregateProofs start") + tmpLogger.Debug("tryAggregateProofs start") proof1, proof2, err0 := a.getAndLockProofsToAggregate(ctx, prover) if errors.Is(err0, state.ErrNotFound) { // nothing to aggregate, swallow the error - log.Debug("Nothing to aggregate") + tmpLogger.Debug("Nothing to aggregate") return false, nil } if err0 != nil { @@ -1259,16 +1321,17 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf if err != nil { err2 := a.unlockProofsToAggregate(a.ctx, proof1, proof2) if err2 != nil { - log.Errorf("Failed to release aggregated proofs, err: %v", err2) + tmpLogger.Errorf("Failed to release aggregated proofs, err: %v", err2) } } - log.Debug("tryAggregateProofs end") + tmpLogger.Debug("tryAggregateProofs end") }() - log.Infof("Aggregating proofs: %d-%d and %d-%d", proof1.BatchNumber, proof1.BatchNumberFinal, proof2.BatchNumber, proof2.BatchNumberFinal) + tmpLogger.Infof("Aggregating proofs: %d-%d and %d-%d", + proof1.BatchNumber, proof1.BatchNumberFinal, proof2.BatchNumber, proof2.BatchNumberFinal) batches := fmt.Sprintf("%d-%d", proof1.BatchNumber, proof2.BatchNumberFinal) - log = log.WithFields("batches", batches) + tmpLogger = tmpLogger.WithFields("batches", batches) inputProver := map[string]interface{}{ "recursive_proof_1": proof1.Proof, @@ -1277,7 +1340,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf b, err := json.Marshal(inputProver) if err != nil { err = fmt.Errorf("failed to serialize input prover, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } @@ -1292,23 +1355,23 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf aggrProofID, err = prover.AggregatedProof(proof1.Proof, proof2.Proof) if err != nil { err = fmt.Errorf("failed to get aggregated proof id, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return 
false, err } proof.ProofID = aggrProofID - log.Infof("Proof ID for aggregated proof: %v", *proof.ProofID) - log = log.WithFields("proofId", *proof.ProofID) + tmpLogger.Infof("Proof ID for aggregated proof: %v", *proof.ProofID) + tmpLogger = tmpLogger.WithFields("proofId", *proof.ProofID) recursiveProof, _, err := prover.WaitRecursiveProof(ctx, *proof.ProofID) if err != nil { err = fmt.Errorf("failed to get aggregated proof from prover, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } - log.Info("Aggregated proof generated") + tmpLogger.Info("Aggregated proof generated") proof.Proof = recursiveProof @@ -1317,7 +1380,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf dbTx, err := a.state.BeginStateTransaction(ctx) if err != nil { err = fmt.Errorf("failed to begin transaction to update proof aggregation state, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } @@ -1325,11 +1388,11 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf if err != nil { if err := dbTx.Rollback(ctx); err != nil { err := fmt.Errorf("failed to rollback proof aggregation state, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } err = fmt.Errorf("failed to delete previously aggregated proofs, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } @@ -1340,18 +1403,18 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf if err != nil { if err := dbTx.Rollback(ctx); err != nil { err := fmt.Errorf("failed to rollback proof aggregation state, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } err = fmt.Errorf("failed to store the recursive proof, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } err = dbTx.Commit(ctx) if err != nil { err = fmt.Errorf("failed to store the recursive proof, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } @@ -1363,7 +1426,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf finalProofBuilt, finalProofErr := a.tryBuildFinalProof(ctx, prover, proof) if finalProofErr != nil { // just log the error and continue to handle the aggregated proof - log.Errorf("Failed trying to check if recursive proof can be verified: %v", finalProofErr) + tmpLogger.Errorf("Failed trying to check if recursive proof can be verified: %v", finalProofErr) } // NOTE(pg): prover is done, use a.ctx from now on @@ -1375,7 +1438,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf err := a.state.UpdateGeneratedProof(a.ctx, proof, nil) if err != nil { err = fmt.Errorf("failed to store batch proof result, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } } @@ -1392,11 +1455,13 @@ func (a *Aggregator) getVerifiedBatchAccInputHash(ctx context.Context, batchNumb return &accInputHash, nil } -func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverInterface) (*state.Batch, []byte, *state.Proof, error) { +func (a *Aggregator) getAndLockBatchToProve( + ctx context.Context, prover proverInterface, +) (*state.Batch, []byte, *state.Proof, error) { proverID := 
prover.ID() proverName := prover.Name() - log := log.WithFields( + tmpLogger := a.logger.WithFields( "prover", proverName, "proverId", proverID, "proverAddr", prover.Addr(), @@ -1419,7 +1484,8 @@ func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverIn batchNumberToVerify++ proofExists, err = a.state.CheckProofExistsForBatch(ctx, batchNumberToVerify, nil) if err != nil { - log.Infof("Error checking proof exists for batch %d", batchNumberToVerify) + tmpLogger.Infof("Error checking proof exists for batch %d", batchNumberToVerify) + return nil, nil, nil, err } } @@ -1432,7 +1498,8 @@ func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverIn // Not found, so it it not possible to verify the batch yet if sequence == nil || errors.Is(err, entities.ErrNotFound) { - log.Infof("No sequence found for batch %d", batchNumberToVerify) + tmpLogger.Infof("No sequence found for batch %d", batchNumberToVerify) + return nil, nil, nil, state.ErrNotFound } @@ -1445,38 +1512,43 @@ func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverIn dbBatch, err := a.state.GetBatch(ctx, batchNumberToVerify, nil) if err != nil { if errors.Is(err, state.ErrNotFound) { - log.Infof("Batch (%d) is not yet in DB", batchNumberToVerify) + tmpLogger.Infof("Batch (%d) is not yet in DB", batchNumberToVerify) } + return nil, nil, nil, err } // Check if the witness is already in the DB if len(dbBatch.Witness) == 0 { - log.Infof("Witness for batch %d is not yet in DB", batchNumberToVerify) + tmpLogger.Infof("Witness for batch %d is not yet in DB", batchNumberToVerify) + return nil, nil, nil, state.ErrNotFound } err = a.state.AddSequence(ctx, stateSequence, nil) if err != nil { - log.Infof("Error storing sequence for batch %d", batchNumberToVerify) + tmpLogger.Infof("Error storing sequence for batch %d", batchNumberToVerify) + return nil, nil, nil, err } // All the data required to generate a proof is ready - log.Infof("Found virtual batch %d pending to generate proof", dbBatch.Batch.BatchNumber) - log = log.WithFields("batch", dbBatch.Batch.BatchNumber) + tmpLogger.Infof("Found virtual batch %d pending to generate proof", dbBatch.Batch.BatchNumber) + tmpLogger = tmpLogger.WithFields("batch", dbBatch.Batch.BatchNumber) - log.Info("Checking profitability to aggregate batch") + tmpLogger.Info("Checking profitability to aggregate batch") // pass pol collateral as zero here, bcs in smart contract fee for aggregator is not defined yet isProfitable, err := a.profitabilityChecker.IsProfitable(ctx, big.NewInt(0)) if err != nil { - log.Errorf("Failed to check aggregator profitability, err: %v", err) + tmpLogger.Errorf("Failed to check aggregator profitability, err: %v", err) + return nil, nil, nil, err } if !isProfitable { - log.Infof("Batch is not profitable, pol collateral %d", big.NewInt(0)) + tmpLogger.Infof("Batch is not profitable, pol collateral %d", big.NewInt(0)) + return nil, nil, nil, err } @@ -1492,7 +1564,8 @@ func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverIn // Avoid other prover to process the same batch err = a.state.AddGeneratedProof(ctx, proof, nil) if err != nil { - log.Errorf("Failed to add batch proof, err: %v", err) + tmpLogger.Errorf("Failed to add batch proof, err: %v", err) + return nil, nil, nil, err } @@ -1500,24 +1573,24 @@ func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverIn } func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInterface) (bool, error) { - log := 
log.WithFields( + tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), "proverAddr", prover.Addr(), ) - log.Debug("tryGenerateBatchProof start") + tmpLogger.Debug("tryGenerateBatchProof start") batchToProve, witness, proof, err0 := a.getAndLockBatchToProve(ctx, prover) if errors.Is(err0, state.ErrNotFound) || errors.Is(err0, entities.ErrNotFound) { // nothing to proof, swallow the error - log.Debug("Nothing to generate proof") + tmpLogger.Debug("Nothing to generate proof") return false, nil } if err0 != nil { return false, err0 } - log = log.WithFields("batch", batchToProve.BatchNumber) + tmpLogger = tmpLogger.WithFields("batch", batchToProve.BatchNumber) var ( genProofID *string @@ -1526,54 +1599,56 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt defer func() { if err != nil { - log.Debug("Deleting proof in progress") + tmpLogger.Debug("Deleting proof in progress") err2 := a.state.DeleteGeneratedProofs(a.ctx, proof.BatchNumber, proof.BatchNumberFinal, nil) if err2 != nil { - log.Errorf("Failed to delete proof in progress, err: %v", err2) + tmpLogger.Errorf("Failed to delete proof in progress, err: %v", err2) } } - log.Debug("tryGenerateBatchProof end") + tmpLogger.Debug("tryGenerateBatchProof end") }() - log.Infof("Sending zki + batch to the prover, batchNumber [%d]", batchToProve.BatchNumber) + tmpLogger.Infof("Sending zki + batch to the prover, batchNumber [%d]", batchToProve.BatchNumber) inputProver, err := a.buildInputProver(ctx, batchToProve, witness) if err != nil { err = fmt.Errorf("failed to build input prover, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } - log.Infof("Sending a batch to the prover. OldAccInputHash [%#x], L1InfoRoot [%#x]", + tmpLogger.Infof("Sending a batch to the prover. 
OldAccInputHash [%#x], L1InfoRoot [%#x]", inputProver.PublicInputs.OldAccInputHash, inputProver.PublicInputs.L1InfoRoot) genProofID, err = prover.BatchProof(inputProver) if err != nil { err = fmt.Errorf("failed to get batch proof id, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } proof.ProofID = genProofID - log = log.WithFields("proofId", *proof.ProofID) + tmpLogger = tmpLogger.WithFields("proofId", *proof.ProofID) resGetProof, stateRoot, err := prover.WaitRecursiveProof(ctx, *proof.ProofID) if err != nil { err = fmt.Errorf("failed to get proof from prover, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } - log.Info("Batch proof generated") + tmpLogger.Info("Batch proof generated") // Sanity Check: state root from the proof must match the one from the batch if a.cfg.BatchProofSanityCheckEnabled && (stateRoot != common.Hash{}) && (stateRoot != batchToProve.StateRoot) { for { - log.Errorf("State root from the proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]", batchToProve.BatchNumber, stateRoot.String(), batchToProve.StateRoot.String()) + tmpLogger.Errorf("State root from the proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]", + batchToProve.BatchNumber, stateRoot.String(), batchToProve.StateRoot.String(), + ) time.Sleep(a.cfg.RetryTime.Duration) } } else { - log.Infof("State root sanity check for batch %d passed", batchToProve.BatchNumber) + tmpLogger.Infof("State root sanity check for batch %d passed", batchToProve.BatchNumber) } proof.Proof = resGetProof @@ -1584,7 +1659,7 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt finalProofBuilt, finalProofErr := a.tryBuildFinalProof(ctx, prover, proof) if finalProofErr != nil { // just log the error and continue to handle the generated proof - log.Errorf("Error trying to build final proof: %v", finalProofErr) + tmpLogger.Errorf("Error trying to build final proof: %v", finalProofErr) } // NOTE(pg): prover is done, use a.ctx from now on @@ -1596,7 +1671,7 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt err := a.state.UpdateGeneratedProof(a.ctx, proof, nil) if err != nil { err = fmt.Errorf("failed to store batch proof result, %w", err) - log.Error(FirstToUpper(err.Error())) + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } } @@ -1609,10 +1684,12 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt func (a *Aggregator) canVerifyProof() bool { a.timeSendFinalProofMutex.RLock() defer a.timeSendFinalProofMutex.RUnlock() + return a.timeSendFinalProof.Before(time.Now()) && !a.verifyingProof } -// startProofVerification sets to true the verifyingProof variable to indicate that there is a proof verification in progress +// startProofVerification sets the verifyingProof variable to true +// to indicate that there is a proof verification in progress. 
func (a *Aggregator) startProofVerification() { a.timeSendFinalProofMutex.Lock() defer a.timeSendFinalProofMutex.Unlock() @@ -1633,7 +1710,9 @@ func (a *Aggregator) resetVerifyProofTime() { a.timeSendFinalProof = time.Now().Add(a.cfg.VerifyProofInterval.Duration) } -func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state.Batch, witness []byte) (*prover.StatelessInputProver, error) { +func (a *Aggregator) buildInputProver( + ctx context.Context, batchToVerify *state.Batch, witness []byte, +) (*prover.StatelessInputProver, error) { isForcedBatch := false batchRawData := &state.BatchRawV2{} var err error @@ -1643,7 +1722,7 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state. } else { batchRawData, err = state.DecodeBatchV2(batchToVerify.BatchL2Data) if err != nil { - log.Errorf("Failed to decode batch data, err: %v", err) + a.logger.Errorf("Failed to decode batch data, err: %v", err) return nil, err } } @@ -1651,8 +1730,9 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state. l1InfoTreeData := map[uint32]*prover.L1Data{} forcedBlockhashL1 := common.Hash{} l1InfoRoot := batchToVerify.L1InfoRoot.Bytes() + //nolint:gocritic if !isForcedBatch { - tree, err := l1infotree.NewL1InfoTree(32, [][32]byte{}) // nolint:gomnd + tree, err := l1infotree.NewL1InfoTree(a.logger, 32, [][32]byte{}) //nolint:mnd if err != nil { return nil, err } @@ -1664,7 +1744,10 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state. aLeaves := make([][32]byte, len(leaves)) for i, leaf := range leaves { - aLeaves[i] = l1infotree.HashLeafData(leaf.GlobalExitRoot, leaf.PreviousBlockHash, uint64(leaf.Timestamp.Unix())) + aLeaves[i] = l1infotree.HashLeafData( + leaf.GlobalExitRoot, + leaf.PreviousBlockHash, + uint64(leaf.Timestamp.Unix())) } for _, l2blockRaw := range batchRawData.Blocks { @@ -1672,25 +1755,29 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state. if !contained && l2blockRaw.IndexL1InfoTree != 0 { leaves, err := a.l1Syncr.GetL1InfoTreeLeaves(ctx, []uint32{l2blockRaw.IndexL1InfoTree}) if err != nil { - log.Errorf("Error getting l1InfoTreeLeaf: %v", err) + a.logger.Errorf("Error getting l1InfoTreeLeaf: %v", err) return nil, err } l1InfoTreeLeaf := leaves[l2blockRaw.IndexL1InfoTree] // Calculate smt proof - log.Infof("Calling tree.ComputeMerkleProof") + a.logger.Infof("Calling tree.ComputeMerkleProof") smtProof, calculatedL1InfoRoot, err := tree.ComputeMerkleProof(l2blockRaw.IndexL1InfoTree, aLeaves) if err != nil { - log.Errorf("Error computing merkle proof: %v", err) + a.logger.Errorf("Error computing merkle proof: %v", err) return nil, err } if batchToVerify.L1InfoRoot != calculatedL1InfoRoot { - return nil, fmt.Errorf("error: l1InfoRoot mismatch. L1InfoRoot: %s, calculatedL1InfoRoot: %s. l1InfoTreeIndex: %d", batchToVerify.L1InfoRoot.String(), calculatedL1InfoRoot.String(), l2blockRaw.IndexL1InfoTree) + return nil, fmt.Errorf( + "error: l1InfoRoot mismatch. L1InfoRoot: %s, calculatedL1InfoRoot: %s. l1InfoTreeIndex: %d", + batchToVerify.L1InfoRoot.String(), calculatedL1InfoRoot.String(), l2blockRaw.IndexL1InfoTree, + ) } protoProof := make([][]byte, len(smtProof)) + for i, proof := range smtProof { tmpProof := proof protoProof[i] = tmpProof[:] @@ -1709,12 +1796,12 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state. 
if batchToVerify.BatchNumber == 1 { virtualBatch, err := a.l1Syncr.GetVirtualBatchByBatchNumber(ctx, batchToVerify.BatchNumber) if err != nil { - log.Errorf("Error getting virtual batch: %v", err) + a.logger.Errorf("Error getting virtual batch: %v", err) return nil, err } l1Block, err := a.l1Syncr.GetL1BlockByNumber(ctx, virtualBatch.BlockNumber) if err != nil { - log.Errorf("Error getting l1 block: %v", err) + a.logger.Errorf("Error getting l1 block: %v", err) return nil, err } @@ -1751,23 +1838,25 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state. }, } - printInputProver(inputProver) + printInputProver(a.logger, inputProver) return inputProver, nil } -func getWitness(batchNumber uint64, URL string, fullWitness bool) ([]byte, error) { - var witness string - var response rpc.Response - var err error +func (a *Aggregator) getWitness(batchNumber uint64, url string, fullWitness bool) ([]byte, error) { + var ( + witness string + response rpc.Response + err error + ) witnessType := "trimmed" if fullWitness { witnessType = "full" } - log.Infof("Requesting witness for batch %d of type %s", batchNumber, witnessType) + a.logger.Infof("Requesting witness for batch %d of type %s", batchNumber, witnessType) - response, err = rpc.JSONRPCCall(URL, "zkevm_getBatchWitness", batchNumber, witnessType) + response, err = rpc.JSONRPCCall(url, "zkevm_getBatchWitness", batchNumber, witnessType) if err != nil { return nil, err } @@ -1777,6 +1866,7 @@ func getWitness(batchNumber uint64, URL string, fullWitness bool) ([]byte, error if response.Error.Message == "busy" { return nil, errBusy } + return nil, fmt.Errorf("error from witness for batch %d: %v", batchNumber, response.Error) } @@ -1794,17 +1884,21 @@ func getWitness(batchNumber uint64, URL string, fullWitness bool) ([]byte, error return bytes, nil } -func printInputProver(inputProver *prover.StatelessInputProver) { - log.Debugf("Witness length: %v", len(inputProver.PublicInputs.Witness)) - log.Debugf("BatchL2Data length: %v", len(inputProver.PublicInputs.BatchL2Data)) - // log.Debugf("Full DataStream: %v", common.Bytes2Hex(inputProver.PublicInputs.DataStream)) - log.Debugf("OldAccInputHash: %v", common.BytesToHash(inputProver.PublicInputs.OldAccInputHash)) - log.Debugf("L1InfoRoot: %v", common.BytesToHash(inputProver.PublicInputs.L1InfoRoot)) - log.Debugf("TimestampLimit: %v", inputProver.PublicInputs.TimestampLimit) - log.Debugf("SequencerAddr: %v", inputProver.PublicInputs.SequencerAddr) - log.Debugf("AggregatorAddr: %v", inputProver.PublicInputs.AggregatorAddr) - log.Debugf("L1InfoTreeData: %+v", inputProver.PublicInputs.L1InfoTreeData) - log.Debugf("ForcedBlockhashL1: %v", common.BytesToHash(inputProver.PublicInputs.ForcedBlockhashL1)) +func printInputProver(logger *log.Logger, inputProver *prover.StatelessInputProver) { + if !logger.IsEnabledLogLevel(zapcore.DebugLevel) { + return + } + + logger.Debugf("Witness length: %v", len(inputProver.PublicInputs.Witness)) + logger.Debugf("BatchL2Data length: %v", len(inputProver.PublicInputs.BatchL2Data)) + // logger.Debugf("Full DataStream: %v", common.Bytes2Hex(inputProver.PublicInputs.DataStream)) + logger.Debugf("OldAccInputHash: %v", common.BytesToHash(inputProver.PublicInputs.OldAccInputHash)) + logger.Debugf("L1InfoRoot: %v", common.BytesToHash(inputProver.PublicInputs.L1InfoRoot)) + logger.Debugf("TimestampLimit: %v", inputProver.PublicInputs.TimestampLimit) + logger.Debugf("SequencerAddr: %v", inputProver.PublicInputs.SequencerAddr) + logger.Debugf("AggregatorAddr: %v", 
inputProver.PublicInputs.AggregatorAddr) + logger.Debugf("L1InfoTreeData: %+v", inputProver.PublicInputs.L1InfoTreeData) + logger.Debugf("ForcedBlockhashL1: %v", common.BytesToHash(inputProver.PublicInputs.ForcedBlockhashL1)) } // healthChecker will provide an implementation of the HealthCheck interface. @@ -1820,8 +1914,11 @@ func newHealthChecker() *healthChecker { // Check returns the current status of the server for unary gRPC health requests, // for now if the server is up and able to respond we will always return SERVING. -func (hc *healthChecker) Check(ctx context.Context, req *grpchealth.HealthCheckRequest) (*grpchealth.HealthCheckResponse, error) { +func (hc *healthChecker) Check( + ctx context.Context, req *grpchealth.HealthCheckRequest, +) (*grpchealth.HealthCheckResponse, error) { log.Info("Serving the Check request for health check") + return &grpchealth.HealthCheckResponse{ Status: grpchealth.HealthCheckResponse_SERVING, }, nil @@ -1831,6 +1928,7 @@ func (hc *healthChecker) Check(ctx context.Context, req *grpchealth.HealthCheckR // for now if the server is up and able to respond we will always return SERVING. func (hc *healthChecker) Watch(req *grpchealth.HealthCheckRequest, server grpchealth.Health_WatchServer) error { log.Info("Serving the Watch request for health check") + return server.Send(&grpchealth.HealthCheckResponse{ Status: grpchealth.HealthCheckResponse_SERVING, }) @@ -1896,12 +1994,12 @@ func (a *Aggregator) cleanupLockedProofs() { case <-time.After(a.timeCleanupLockedProofs.Duration): n, err := a.state.CleanupLockedProofs(a.ctx, a.cfg.GeneratingProofCleanupThreshold, nil) if err != nil { - log.Errorf("Failed to cleanup locked proofs: %v", err) + a.logger.Errorf("Failed to cleanup locked proofs: %v", err) } if n == 1 { - log.Warn("Found a stale proof and removed from cache") + a.logger.Warn("Found a stale proof and removed from cache") } else if n > 1 { - log.Warnf("Found %d stale proofs and removed from cache", n) + a.logger.Warnf("Found %d stale proofs and removed from cache", n) } } } @@ -1912,5 +2010,6 @@ func (a *Aggregator) cleanupLockedProofs() { func FirstToUpper(s string) string { runes := []rune(s) runes[0] = unicode.ToUpper(runes[0]) + return string(runes) } diff --git a/aggregator/config.go b/aggregator/config.go index 38280187..4550c637 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -74,20 +74,19 @@ type Config struct { // this parameter is used for the base tx profitability checker TxProfitabilityMinReward TokenAmountWithDecimals `mapstructure:"TxProfitabilityMinReward"` - // IntervalAfterWhichBatchConsolidateAnyway this is interval for the main sequencer, that will check if there is no transactions + // IntervalAfterWhichBatchConsolidateAnyway is the interval duration for the main sequencer to check + // if there are no transactions. If there are no transactions in this interval, the sequencer will + // consolidate the batch anyway. 
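+	// A hypothetical TOML entry for this field (the key mirrors the mapstructure
+	// tag below; the "60s" value is purely illustrative, not a documented default):
+	//
+	//	IntervalAfterWhichBatchConsolidateAnyway = "60s"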
IntervalAfterWhichBatchConsolidateAnyway types.Duration `mapstructure:"IntervalAfterWhichBatchConsolidateAnyway"` // BatchProofSanityCheckEnabled is a flag to enable the sanity check of the batch proof BatchProofSanityCheckEnabled bool `mapstructure:"BatchProofSanityCheckEnabled"` - // FinalProofSanityCheckEnabled is a flag to enable the sanity check of the final proof - FinalProofSanityCheckEnabled bool `mapstructure:"FinalProofSanityCheckEnabled"` - // ChainID is the L2 ChainID provided by the Network Config ChainID uint64 // ForkID is the L2 ForkID provided by the Network Config - ForkId uint64 `mapstructure:"ForkId"` + ForkId uint64 `mapstructure:"ForkId"` //nolint:stylecheck // SenderAddress defines which private key the eth tx manager needs to use // to sign the L1 txs @@ -137,7 +136,8 @@ type Config struct { // Synchronizer config Synchronizer syncronizerConfig.Config `mapstructure:"Synchronizer"` - // SettlementBackend configuration defines how a final ZKP should be settled. Directly to L1 or over the Beethoven service. + // SettlementBackend configuration defines how a final ZKP should be settled. + // It can be settled directly to L1 or over Agglayer. SettlementBackend SettlementBackend `mapstructure:"SettlementBackend" jsonschema:"enum=agglayer,enum=l1"` // SequencerPrivateKey Private key of the trusted sequencer @@ -178,5 +178,6 @@ func newKeyFromKeystore(cfg types.KeystoreFileConfig) (*ecdsa.PrivateKey, error) if err != nil { return nil, err } + return key.PrivateKey, nil } diff --git a/aggregator/db/db.go b/aggregator/db/db.go index b9112f53..ecfffc11 100644 --- a/aggregator/db/db.go +++ b/aggregator/db/db.go @@ -9,20 +9,21 @@ import ( ) // NewSQLDB creates a new SQL DB -func NewSQLDB(cfg Config) (*pgxpool.Pool, error) { - config, err := pgxpool.ParseConfig(fmt.Sprintf("postgres://%s:%s@%s:%s/%s?pool_max_conns=%d", cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name, cfg.MaxConns)) +func NewSQLDB(logger *log.Logger, cfg Config) (*pgxpool.Pool, error) { + config, err := pgxpool.ParseConfig(fmt.Sprintf("postgres://%s:%s@%s:%s/%s?pool_max_conns=%d", + cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name, cfg.MaxConns)) if err != nil { - log.Errorf("Unable to parse DB config: %v\n", err) + logger.Errorf("Unable to parse DB config: %v\n", err) return nil, err } if cfg.EnableLog { - config.ConnConfig.Logger = logger{} + config.ConnConfig.Logger = dbLoggerImpl{} } conn, err := pgxpool.ConnectConfig(context.Background(), config) if err != nil { - log.Errorf("Unable to connect to database: %v\n", err) + logger.Errorf("Unable to connect to database: %v\n", err) return nil, err } diff --git a/aggregator/db/logger.go b/aggregator/db/logger.go index 3b425b13..e60a7b01 100644 --- a/aggregator/db/logger.go +++ b/aggregator/db/logger.go @@ -8,9 +8,9 @@ import ( "github.com/jackc/pgx/v4" ) -type logger struct{} +type dbLoggerImpl struct{} -func (l logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) { +func (l dbLoggerImpl) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) { m := fmt.Sprintf("%s %v", msg, data) switch level { @@ -21,7 +21,7 @@ func (l logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data ma case pgx.LogLevelError: log.Error(m) default: - m = fmt.Sprintf("%s %s %v", level.String(), msg, data) + m = fmt.Sprintf("[%s] %s %v", level.String(), msg, data) log.Debug(m) } } diff --git a/aggregator/db/migrations.go b/aggregator/db/migrations.go index 8aeda2e9..20e8c29a 100644 ---
a/aggregator/db/migrations.go +++ b/aggregator/db/migrations.go @@ -30,6 +30,7 @@ func init() { // RunMigrationsUp runs migrate-up for the given config. func RunMigrationsUp(cfg Config, name string) error { log.Info("running migrations up") + return runMigrations(cfg, name, migrate.Up) } @@ -41,6 +42,7 @@ func CheckMigrations(cfg Config, name string) error { // RunMigrationsDown runs migrate-down for the given config. func RunMigrationsDown(cfg Config, name string) error { log.Info("running migrations down") + return runMigrations(cfg, name, migrate.Down) } @@ -48,7 +50,10 @@ func RunMigrationsDown(cfg Config, name string) error { // the database updated with the latest changes in either direction, // up or down. func runMigrations(cfg Config, name string, direction migrate.MigrationDirection) error { - c, err := pgx.ParseConfig(fmt.Sprintf("postgres://%s:%s@%s:%s/%s", cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name)) + c, err := pgx.ParseConfig(fmt.Sprintf( + "postgres://%s:%s@%s:%s/%s", + cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name, + )) if err != nil { return err } @@ -64,17 +69,22 @@ func runMigrations(cfg Config, name string, direction migrate.MigrationDirection FileSystem: embedMigration, Root: "migrations", } + nMigrations, err := migrate.Exec(db, "postgres", migrations, direction) if err != nil { return err } log.Info("successfully ran ", nMigrations, " migrations") + return nil } func checkMigrations(cfg Config, name string) error { - c, err := pgx.ParseConfig(fmt.Sprintf("postgres://%s:%s@%s:%s/%s", cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name)) + c, err := pgx.ParseConfig(fmt.Sprintf( + "postgres://%s:%s@%s:%s/%s", + cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name, + )) if err != nil { return err } @@ -87,9 +97,11 @@ func checkMigrations(cfg Config, name string) error { } migrationSource := &migrate.EmbedFileSystemMigrationSource{FileSystem: embedMigration} + migrations, err := migrationSource.FindMigrations() if err != nil { log.Errorf("error getting migrations from source: %v", err) + return err } @@ -105,12 +117,17 @@ func checkMigrations(cfg Config, name string) error { err = db.QueryRow(query).Scan(&actual) if err != nil { log.Error("error getting migrations count: ", err) + return err } if expected == actual { log.Infof("Found %d migrations as expected", actual) } else { - return fmt.Errorf("error the component needs to run %d migrations before starting. DB only contains %d migrations", expected, actual) + return fmt.Errorf( + "error the component needs to run %d migrations before starting. 
DB only contains %d migrations", + expected, actual, + ) } + return nil } diff --git a/aggregator/db/migrations_test.go b/aggregator/db/migrations_test.go index b688f341..0a118c69 100644 --- a/aggregator/db/migrations_test.go +++ b/aggregator/db/migrations_test.go @@ -11,7 +11,6 @@ func Test_checkMigrations(t *testing.T) { embedMigration := embedMigrations[AggregatorMigrationName] migrationSource := &migrate.EmbedFileSystemMigrationSource{ FileSystem: embedMigration, - Root: "migrations", } _, err := migrationSource.FileSystem.ReadFile("migrations/0001.sql") diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go index 6b5ba63a..b231de35 100644 --- a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -30,7 +30,9 @@ type proverInterface interface { type etherman interface { GetRollupId() uint32 GetLatestVerifiedBatchNum() (uint64, error) - BuildTrustedVerifyBatchesTxData(lastVerifiedBatch, newVerifiedBatch uint64, inputs *ethmanTypes.FinalProofInputs, beneficiary common.Address) (to *common.Address, data []byte, err error) + BuildTrustedVerifyBatchesTxData( + lastVerifiedBatch, newVerifiedBatch uint64, inputs *ethmanTypes.FinalProofInputs, beneficiary common.Address, + ) (to *common.Address, data []byte, err error) GetLatestBlockHeader(ctx context.Context) (*types.Header, error) GetBatchAccInputHash(ctx context.Context, batchNumber uint64) (common.Hash, error) } diff --git a/aggregator/profitabilitychecker.go b/aggregator/profitabilitychecker.go index 27e3f705..f05799eb 100644 --- a/aggregator/profitabilitychecker.go +++ b/aggregator/profitabilitychecker.go @@ -24,7 +24,9 @@ type TxProfitabilityCheckerBase struct { } // NewTxProfitabilityCheckerBase initializes the base tx profitability checker -func NewTxProfitabilityCheckerBase(state stateInterface, interval time.Duration, minReward *big.Int) *TxProfitabilityCheckerBase { +func NewTxProfitabilityCheckerBase( + state stateInterface, interval time.Duration, minReward *big.Int, +) *TxProfitabilityCheckerBase { return &TxProfitabilityCheckerBase{ State: state, IntervalAfterWhichBatchSentAnyway: interval, @@ -34,7 +36,7 @@ func NewTxProfitabilityCheckerBase(state stateInterface, interval time.Duration, // IsProfitable checks pol collateral with min reward func (pc *TxProfitabilityCheckerBase) IsProfitable(ctx context.Context, polCollateral *big.Int) (bool, error) { - //if pc.IntervalAfterWhichBatchSentAnyway != 0 { + // if pc.IntervalAfterWhichBatchSentAnyway != 0 { // ok, err := isConsolidatedBatchAppeared(ctx, pc.State, pc.IntervalAfterWhichBatchSentAnyway) // if err != nil { // return false, err @@ -42,8 +44,7 @@ func (pc *TxProfitabilityCheckerBase) IsProfitable(ctx context.Context, polColla // if ok { // return true, nil // } - //} - + // } return polCollateral.Cmp(pc.MinReward) >= 0, nil } @@ -63,7 +64,7 @@ func NewTxProfitabilityCheckerAcceptAll(state stateInterface, interval time.Dura // IsProfitable validates the batch anyway and doesn't check anything func (pc *TxProfitabilityCheckerAcceptAll) IsProfitable(ctx context.Context, polCollateral *big.Int) (bool, error) { - //if pc.IntervalAfterWhichBatchSentAnyway != 0 { + // if pc.IntervalAfterWhichBatchSentAnyway != 0 { // ok, err := isConsolidatedBatchAppeared(ctx, pc.State, pc.IntervalAfterWhichBatchSentAnyway) // if err != nil { // return false, err @@ -71,13 +72,13 @@ func (pc *TxProfitabilityCheckerAcceptAll) IsProfitable(ctx context.Context, pol // if ok { // return true, nil // } - //} - + // } return true, nil } // TODO: currently it's impossible to check when the batch got consolidated,
because it's not saved -//func isConsolidatedBatchAppeared(ctx context.Context, state stateInterface, intervalAfterWhichBatchConsolidatedAnyway time.Duration) (bool, error) { +// func isConsolidatedBatchAppeared(ctx context.Context, state stateInterface, +// intervalAfterWhichBatchConsolidatedAnyway time.Duration) (bool, error) { // batch, err := state.GetLastVerifiedBatch(ctx, nil) // if err != nil { // return false, fmt.Errorf("failed to get last verified batch, err: %v", err) @@ -88,4 +89,4 @@ func (pc *TxProfitabilityCheckerAcceptAll) IsProfitable(ctx context.Context, pol // } // // return false, err -//} +// } diff --git a/aggregator/prover/prover.go b/aggregator/prover/prover.go index fd9b1e7d..8cb13b1d 100644 --- a/aggregator/prover/prover.go +++ b/aggregator/prover/prover.go @@ -23,17 +23,18 @@ const ( ) var ( - ErrBadProverResponse = errors.New("Prover returned wrong type for response") //nolint:revive - ErrProverInternalError = errors.New("Prover returned INTERNAL_ERROR response") //nolint:revive - ErrProverCompletedError = errors.New("Prover returned COMPLETED_ERROR response") //nolint:revive - ErrBadRequest = errors.New("Prover returned ERROR for a bad request") //nolint:revive - ErrUnspecified = errors.New("Prover returned an UNSPECIFIED response") //nolint:revive - ErrUnknown = errors.New("Prover returned an unknown response") //nolint:revive - ErrProofCanceled = errors.New("Proof has been canceled") //nolint:revive + ErrBadProverResponse = errors.New("prover returned wrong type for response") //nolint:revive + ErrProverInternalError = errors.New("prover returned INTERNAL_ERROR response") //nolint:revive + ErrProverCompletedError = errors.New("prover returned COMPLETED_ERROR response") //nolint:revive + ErrBadRequest = errors.New("prover returned ERROR for a bad request") //nolint:revive + ErrUnspecified = errors.New("prover returned an UNSPECIFIED response") //nolint:revive + ErrUnknown = errors.New("prover returned an unknown response") //nolint:revive + ErrProofCanceled = errors.New("proof has been canceled") //nolint:revive ) // Prover is an abstraction of the gRPC prover client. type Prover struct { + logger *log.Logger name string id string address net.Addr @@ -42,18 +43,22 @@ type Prover struct { } // New returns a new Prover instance.
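+// A minimal, assumed call-site sketch (illustrative only: stream and addr would
+// come from the aggregator's gRPC Channel handler, and pollingInterval from the
+// service configuration):
+//
+//	p, err := prover.New(logger, stream, conn.RemoteAddr(), pollingInterval)
+//	if err != nil {
+//		return err
+//	}
+//	if !p.SupportsForkID(forkID) {
+//		return fmt.Errorf("prover %s does not support fork ID %d", p.ID(), forkID)
+//	}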
-func New(stream AggregatorService_ChannelServer, addr net.Addr, proofStatePollingInterval types.Duration) (*Prover, error) { +func New(logger *log.Logger, stream AggregatorService_ChannelServer, + addr net.Addr, proofStatePollingInterval types.Duration) (*Prover, error) { p := &Prover{ + logger: logger, stream: stream, address: addr, proofStatePollingInterval: proofStatePollingInterval, } + status, err := p.Status() if err != nil { - return nil, fmt.Errorf("Failed to retrieve prover id %w", err) + return nil, fmt.Errorf("failed to retrieve prover id: %w", err) } p.name = status.ProverName p.id = status.ProverId + return p, nil } @@ -68,6 +73,7 @@ func (p *Prover) Addr() string { if p.address == nil { return "" } + return p.address.String() } @@ -85,6 +91,7 @@ func (p *Prover) Status() (*GetStatusResponse, error) { if msg, ok := res.Response.(*ProverMessage_GetStatusResponse); ok { return msg.GetStatusResponse, nil } + return nil, fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &ProverMessage_GetStatusResponse{}, res.Response) } @@ -94,6 +101,7 @@ func (p *Prover) IsIdle() (bool, error) { if err != nil { return false, err } + return status.Status == GetStatusResponse_STATUS_IDLE, nil } @@ -101,11 +109,11 @@ func (p *Prover) IsIdle() (bool, error) { func (p *Prover) SupportsForkID(forkID uint64) bool { status, err := p.Status() if err != nil { - log.Warnf("Error asking status for prover ID %s: %v", p.ID(), err) + p.logger.Warnf("Error asking status for prover ID %s: %v", p.ID(), err) return false } - log.Debugf("Prover %s supports fork ID %d", p.ID(), status.ForkId) + p.logger.Debugf("Prover %s supports fork ID %d", p.ID(), status.ForkId) return status.ForkId == forkID } @@ -126,19 +134,34 @@ func (p *Prover) BatchProof(input *StatelessInputProver) (*string, error) { if msg, ok := res.Response.(*ProverMessage_GenBatchProofResponse); ok { switch msg.GenBatchProofResponse.Result { case Result_RESULT_UNSPECIFIED: - return nil, fmt.Errorf("failed to generate proof %s, %w, input %v", msg.GenBatchProofResponse.String(), ErrUnspecified, input) + return nil, fmt.Errorf( + "failed to generate proof %s, %w, input %v", + msg.GenBatchProofResponse.String(), ErrUnspecified, input, + ) case Result_RESULT_OK: return &msg.GenBatchProofResponse.Id, nil case Result_RESULT_ERROR: - return nil, fmt.Errorf("failed to generate proof %s, %w, input %v", msg.GenBatchProofResponse.String(), ErrBadRequest, input) + return nil, fmt.Errorf( + "failed to generate proof %s, %w, input %v", + msg.GenBatchProofResponse.String(), ErrBadRequest, input, + ) case Result_RESULT_INTERNAL_ERROR: - return nil, fmt.Errorf("failed to generate proof %s, %w, input %v", msg.GenBatchProofResponse.String(), ErrProverInternalError, input) + return nil, fmt.Errorf( + "failed to generate proof %s, %w, input %v", + msg.GenBatchProofResponse.String(), ErrProverInternalError, input, + ) default: - return nil, fmt.Errorf("failed to generate proof %s, %w,input %v", msg.GenBatchProofResponse.String(), ErrUnknown, input) + return nil, fmt.Errorf( + "failed to generate proof %s, %w, input %v", + msg.GenBatchProofResponse.String(), ErrUnknown, input, + ) } } - return nil, fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &ProverMessage_GenBatchProofResponse{}, res.Response) + return nil, fmt.Errorf( + "%w, wanted %T, got %T", + ErrBadProverResponse, &ProverMessage_GenBatchProofResponse{}, res.Response, + ) } // AggregatedProof instructs the prover to generate an aggregated proof from @@ -176,7 +199,10 @@ func (p *Prover)
AggregatedProof(inputProof1, inputProof2 string) (*string, erro } } - return nil, fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &ProverMessage_GenAggregatedProofResponse{}, res.Response) + return nil, fmt.Errorf( + "%w, wanted %T, got %T", + ErrBadProverResponse, &ProverMessage_GenAggregatedProofResponse{}, res.Response, + ) } // FinalProof instructs the prover to generate a final proof for the given @@ -213,7 +239,11 @@ func (p *Prover) FinalProof(inputProof string, aggregatorAddr string) (*string, msg.GenFinalProofResponse.String(), ErrUnknown, inputProof) } } - return nil, fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &ProverMessage_GenFinalProofResponse{}, res.Response) + + return nil, fmt.Errorf( + "%w, wanted %T, got %T", + ErrBadProverResponse, &ProverMessage_GenFinalProofResponse{}, res.Response, + ) } // CancelProofRequest asks the prover to stop the generation of the proof @@ -246,6 +276,7 @@ func (p *Prover) CancelProofRequest(proofID string) error { proofID, ErrUnknown, msg.CancelResponse.String()) } } + return fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &ProverMessage_CancelResponse{}, res.Response) } @@ -257,15 +288,21 @@ func (p *Prover) WaitRecursiveProof(ctx context.Context, proofID string) (string return "", common.Hash{}, err } - resProof := res.Proof.(*GetProofResponse_RecursiveProof) + resProof, ok := res.Proof.(*GetProofResponse_RecursiveProof) + if !ok { + return "", common.Hash{}, fmt.Errorf( + "%w, wanted %T, got %T", + ErrBadProverResponse, &GetProofResponse_RecursiveProof{}, res.Proof, + ) + } - sr, err := GetStateRootFromProof(resProof.RecursiveProof) + sr, err := GetStateRootFromProof(p.logger, resProof.RecursiveProof) if err != nil && sr != (common.Hash{}) { - log.Errorf("Error getting state root from proof: %v", err) + p.logger.Errorf("Error getting state root from proof: %v", err) } if sr == (common.Hash{}) { - log.Info("Recursive proof does not contain state root. Possibly mock prover is in use.") + p.logger.Info("Recursive proof does not contain state root. Possibly mock prover is in use.") } return resProof.RecursiveProof, sr, nil @@ -278,7 +315,11 @@ func (p *Prover) WaitFinalProof(ctx context.Context, proofID string) (*FinalProo if err != nil { return nil, err } - resProof := res.Proof.(*GetProofResponse_FinalProof) + resProof, ok := res.Proof.(*GetProofResponse_FinalProof) + if !ok { + return nil, fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &GetProofResponse_FinalProof{}, res.Proof) + } + return resProof.FinalProof, nil } @@ -307,6 +348,7 @@ func (p *Prover) waitProof(ctx context.Context, proofID string) (*GetProofRespon switch msg.GetProofResponse.Result { case GetProofResponse_RESULT_PENDING: time.Sleep(p.proofStatePollingInterval.Duration) + continue case GetProofResponse_RESULT_UNSPECIFIED: return nil, fmt.Errorf("failed to get proof ID: %s, %w, prover response: %s", @@ -330,7 +372,11 @@ func (p *Prover) waitProof(ctx context.Context, proofID string) (*GetProofRespon proofID, ErrUnknown, msg.GetProofResponse.String()) } } - return nil, fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &ProverMessage_GetProofResponse{}, res.Response) + + return nil, fmt.Errorf( + "%w, wanted %T, got %T", + ErrBadProverResponse, &ProverMessage_GetProofResponse{}, res.Response, + ) } } } @@ -345,13 +391,14 @@ func (p *Prover) call(req *AggregatorMessage) (*ProverMessage, error) { if err != nil { return nil, err } + return res, nil } // GetStateRootFromProof returns the state root from the proof. 
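+// The proof JSON is expected to carry a "publics" array of decimal strings;
+// the eight entries between stateRootStartIndex and stateRootFinalIndex are
+// 32-bit limbs that fea2scalar (below) recombines into the 256-bit state root,
+// with limb i shifted left by 32*i bits:
+//
+//	sr = v[0] + v[1]<<32 + v[2]<<64 + ... + v[7]<<224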
-func GetStateRootFromProof(proof string) (common.Hash, error) { +func GetStateRootFromProof(logger *log.Logger, proof string) (common.Hash, error) { // Log received proof - log.Debugf("Received proof to get SR from: %s", proof) + logger.Debugf("Received proof to get SR from: %s", proof) type Publics struct { Publics []string `mapstructure:"publics"` @@ -365,16 +412,18 @@ func GetStateRootFromProof(proof string) (common.Hash, error) { var publics Publics err := json.Unmarshal([]byte(proof), &publics) if err != nil { - log.Errorf("Error unmarshalling proof: %v", err) + logger.Errorf("Error unmarshalling proof: %v", err) return common.Hash{}, err } - var v [8]uint64 - var j = 0 + var ( + v [8]uint64 + j = 0 + ) for i := stateRootStartIndex; i < stateRootFinalIndex; i++ { u64, err := strconv.ParseInt(publics.Publics[i], 10, 64) if err != nil { - log.Fatal(err) + logger.Fatal(err) } v[j] = uint64(u64) j++ @@ -394,12 +443,13 @@ func fea2scalar(v []uint64) *big.Int { return big.NewInt(0) } res := new(big.Int).SetUint64(v[0]) - res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[1]), 32)) //nolint:gomnd - res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[2]), 64)) //nolint:gomnd - res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[3]), 96)) //nolint:gomnd - res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[4]), 128)) //nolint:gomnd - res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[5]), 160)) //nolint:gomnd - res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[6]), 192)) //nolint:gomnd - res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[7]), 224)) //nolint:gomnd + res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[1]), 32)) //nolint:mnd + res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[2]), 64)) //nolint:mnd + res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[3]), 96)) //nolint:mnd + res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[4]), 128)) //nolint:mnd + res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[5]), 160)) //nolint:mnd + res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[6]), 192)) //nolint:mnd + res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[7]), 224)) //nolint:mnd + return res } diff --git a/aggregator/prover/prover_test.go b/aggregator/prover/prover_test.go index c98a1572..737d5592 100644 --- a/aggregator/prover/prover_test.go +++ b/aggregator/prover/prover_test.go @@ -2,11 +2,11 @@ package prover_test import ( "fmt" - "log" "os" "testing" "github.com/0xPolygon/cdk/aggregator/prover" + "github.com/0xPolygon/cdk/log" "github.com/stretchr/testify/require" ) @@ -28,9 +28,7 @@ func TestCalculateStateRoots(t *testing.T) { // Read all files in the directory files, err := os.ReadDir(dir) - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) for _, file := range files { if file.IsDir() { @@ -39,21 +37,15 @@ func TestCalculateStateRoots(t *testing.T) { // Read the file data, err := os.ReadFile(fmt.Sprintf("%s/%s", dir, file.Name())) - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) // Get the state root from the batch proof - fileStateRoot, err := prover.GetStateRootFromProof(string(data)) - if err != nil { - log.Fatal(err) - } + fileStateRoot, err := prover.GetStateRootFromProof(log.GetDefaultLogger(), string(data)) + require.NoError(t, err) // Get the expected state root expectedStateRoot, ok := expectedStateRoots[file.Name()] - if !ok { - log.Fatal("Expected state root not found") - } + require.True(t, ok, "Expected state root not found") // Compare the state roots require.Equal(t, 
expectedStateRoot, fileStateRoot.String(), "State roots do not match") diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index 220c33fd..e79fba2e 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -6,6 +6,7 @@ import ( "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/sync" + tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" ) @@ -15,6 +16,7 @@ const ( downloadBufferSize = 1000 ) +// BridgeSync manages the state of the exit tree for the bridge contract by processing Ethereum blockchain events. type BridgeSync struct { processor *processor driver *sync.EVMDriver @@ -97,14 +99,16 @@ func newBridgeSync( maxRetryAttemptsAfterError int, syncFullClaims bool, ) (*BridgeSync, error) { - processor, err := newProcessor(ctx, dbPath, l1OrL2ID) + processor, err := newProcessor(dbPath, l1OrL2ID) if err != nil { return nil, err } + lastProcessedBlock, err := processor.GetLastProcessedBlock(ctx) if err != nil { return nil, err } + if lastProcessedBlock < initialBlock { err = processor.ProcessBlock(ctx, sync.Block{ Num: initialBlock, @@ -140,6 +144,7 @@ func newBridgeSync( if err != nil { return nil, err } + return &BridgeSync{ processor: processor, driver: driver, @@ -155,14 +160,29 @@ func (s *BridgeSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) return s.processor.GetLastProcessedBlock(ctx) } -func (s *BridgeSync) GetBridgeIndexByRoot(ctx context.Context, root common.Hash) (uint32, error) { - return s.processor.exitTree.GetIndexByRoot(ctx, root) +func (s *BridgeSync) GetBridgeRootByHash(ctx context.Context, root common.Hash) (tree.Root, error) { + return s.processor.exitTree.GetRootByHash(ctx, root) +} + +func (s *BridgeSync) GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]Claim, error) { + return s.processor.GetClaims(ctx, fromBlock, toBlock) } -func (s *BridgeSync) GetClaimsAndBridges(ctx context.Context, fromBlock, toBlock uint64) ([]Event, error) { - return s.processor.GetClaimsAndBridges(ctx, fromBlock, toBlock) +func (s *BridgeSync) GetBridges(ctx context.Context, fromBlock, toBlock uint64) ([]Bridge, error) { + return s.processor.GetBridges(ctx, fromBlock, toBlock) } -func (s *BridgeSync) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) ([32]common.Hash, error) { +// GetProof retrieves the Merkle proof for the given deposit count and exit root. 
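+// The result is the fixed-depth (32-level) sibling path for the leaf at
+// depositCount. A plausible use (not shown here) is passing it as the
+// proofLocalExitRoot argument of the bridge contract's claimAsset or
+// claimMessage calls.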
+func (s *BridgeSync) GetProof( + ctx context.Context, depositCount uint32, localExitRoot common.Hash, +) ([32]common.Hash, error) { return s.processor.exitTree.GetProof(ctx, depositCount, localExitRoot) } + +func (p *processor) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) { + root, err := p.exitTree.GetRootByHash(ctx, ler) + if err != nil { + return 0, err + } + return root.BlockNum, nil +} diff --git a/bridgesync/claimcalldata_test.go b/bridgesync/claimcalldata_test.go index 20c1b7c5..1319835b 100644 --- a/bridgesync/claimcalldata_test.go +++ b/bridgesync/claimcalldata_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/test/contracts/claimmock" "github.com/0xPolygon/cdk/test/contracts/claimmockcaller" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -27,6 +28,7 @@ type testCase struct { func TestClaimCalldata(t *testing.T) { testCases := []testCase{} // Setup Docker L1 + log.Debug("starting docker") ctx := context.Background() msg, err := exec.Command("bash", "-l", "-c", "docker compose up -d").CombinedOutput() require.NoError(t, err, string(msg)) @@ -35,6 +37,7 @@ func TestClaimCalldata(t *testing.T) { msg, err = exec.Command("bash", "-l", "-c", "docker compose down").CombinedOutput() require.NoError(t, err, string(msg)) }() + log.Debug("docker started") client, err := ethclient.Dial("http://localhost:8545") require.NoError(t, err) privateKey, err := crypto.HexToECDSA("ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80") @@ -57,7 +60,6 @@ func TestClaimCalldata(t *testing.T) { proofRollup[4] = common.HexToHash("a1fa") proofRollupH[4] = common.HexToHash("a1fa") expectedClaim := Claim{ - GlobalIndex: big.NewInt(420), OriginNetwork: 69, OriginAddress: common.HexToAddress("ffaaffaa"), DestinationAddress: common.HexToAddress("123456789"), @@ -69,9 +71,26 @@ func TestClaimCalldata(t *testing.T) { DestinationNetwork: 0, Metadata: []byte{}, } + expectedClaim2 := Claim{ + OriginNetwork: 87, + OriginAddress: common.HexToAddress("eebbeebb"), + DestinationAddress: common.HexToAddress("2233445566"), + Amount: big.NewInt(4), + MainnetExitRoot: common.HexToHash("5ca1e"), + RollupExitRoot: common.HexToHash("dead"), + ProofLocalExitRoot: proofLocalH, + ProofRollupExitRoot: proofRollupH, + DestinationNetwork: 0, + Metadata: []byte{}, + } auth.GasLimit = 999999 // for some reason gas estimation fails :( + abi, err := claimmock.ClaimmockMetaData.GetAbi() + require.NoError(t, err) + // direct call claim asset + expectedClaim.GlobalIndex = big.NewInt(421) + expectedClaim.IsMessage = false tx, err := bridgeContract.ClaimAsset( auth, proofLocal, @@ -89,7 +108,6 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err := client.TransactionReceipt(ctx, tx.Hash()) - expectedClaim.IsMessage = false testCases = append(testCases, testCase{ description: "direct call to claim asset", bridgeAddr: bridgeAddr, @@ -98,6 +116,8 @@ func TestClaimCalldata(t *testing.T) { }) // indirect call claim asset + expectedClaim.IsMessage = false + expectedClaim.GlobalIndex = big.NewInt(422) tx, err = claimCaller.ClaimAsset( auth, proofLocal, @@ -111,11 +131,11 @@ func TestClaimCalldata(t *testing.T) { expectedClaim.DestinationAddress, expectedClaim.Amount, nil, + false, ) require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) - expectedClaim.IsMessage = false testCases = append(testCases, testCase{ description: "indirect call to claim asset", 
bridgeAddr: bridgeAddr, @@ -123,7 +143,42 @@ func TestClaimCalldata(t *testing.T) { expectedClaim: expectedClaim, }) + // indirect call claim asset bytes + expectedClaim.GlobalIndex = big.NewInt(423) + expectedClaim.IsMessage = false + expectedClaimBytes, err := abi.Pack( + "claimAsset", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.ClaimBytes( + auth, + expectedClaimBytes, + false, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "indirect call to claim asset bytes", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + // direct call claim message + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(424) tx, err = bridgeContract.ClaimMessage( auth, proofLocal, @@ -141,7 +196,6 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) - expectedClaim.IsMessage = true testCases = append(testCases, testCase{ description: "direct call to claim message", bridgeAddr: bridgeAddr, @@ -150,6 +204,8 @@ func TestClaimCalldata(t *testing.T) { }) // indirect call claim message + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(425) tx, err = claimCaller.ClaimMessage( auth, proofLocal, @@ -163,11 +219,11 @@ func TestClaimCalldata(t *testing.T) { expectedClaim.DestinationAddress, expectedClaim.Amount, nil, + false, ) require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) - expectedClaim.IsMessage = true testCases = append(testCases, testCase{ description: "indirect call to claim message", bridgeAddr: bridgeAddr, @@ -175,7 +231,717 @@ func TestClaimCalldata(t *testing.T) { expectedClaim: expectedClaim, }) + // indirect call claim message bytes + expectedClaim.GlobalIndex = big.NewInt(426) + expectedClaim.IsMessage = true + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.ClaimBytes( + auth, + expectedClaimBytes, + false, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "indirect call to claim message bytes", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + + // indirect call claim message bytes + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim.IsMessage = true + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + 
tx, err = claimCaller.ClaimBytes( + auth, + expectedClaimBytes, + true, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + log.Infof("%+v", r.Logs) + + reverted := [2]bool{false, false} + + // 2 indirect call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err := abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.Claim2Bytes( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "2 indirect call claim message 2 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + + // 2 indirect call claim message (diff global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(428) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(429) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.Claim2Bytes( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect call claim message 1 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "2 indirect call claim message 2 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + + reverted = [2]bool{false, true} + + // 2 indirect call claim message 
(same global index) (1 ok, 1 reverted) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(430) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(430) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.Claim2Bytes( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect call claim message (same globalIndex) (1 ok, 1 reverted)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + + // 2 indirect call claim message (diff global index) (1 ok, 1 reverted) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(431) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(432) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.Claim2Bytes( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect call claim message (diff globalIndex) (1 ok, 1 reverted)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + + reverted = [2]bool{true, false} + + // 2 indirect call claim message (same global index) (1 reverted, 1 ok) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(433) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(433) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + 
expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.Claim2Bytes( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect call claim message (same globalIndex) (reverted,ok)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim2, + }) + + // 2 indirect call claim message (diff global index) (1 reverted, 1 ok) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(434) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(435) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.Claim2Bytes( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect call claim message (diff globalIndex) (reverted,ok)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim2, + }) + + reverted = [2]bool{false, false} + + // 2 indirect call claim asset (same global index) + expectedClaim.IsMessage = false + expectedClaim.GlobalIndex = big.NewInt(436) + expectedClaim2.IsMessage = false + expectedClaim2.GlobalIndex = big.NewInt(436) + expectedClaimBytes, err = abi.Pack( + "claimAsset", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimAsset", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.Claim2Bytes( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = 
append(testCases, testCase{ + description: "2 indirect call claim asset 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "2 indirect call claim asset 2 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + + // 2 indirect call claim asset (diff global index) + expectedClaim.IsMessage = false + expectedClaim.GlobalIndex = big.NewInt(437) + expectedClaim2.IsMessage = false + expectedClaim2.GlobalIndex = big.NewInt(438) + expectedClaimBytes, err = abi.Pack( + "claimAsset", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimAsset", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.Claim2Bytes( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect call claim asset 1 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "2 indirect call claim asset 2 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + + reverted = [2]bool{false, true} + + // 2 indirect call claim asset (same global index) (1 ok, 1 reverted) + expectedClaim.IsMessage = false + expectedClaim.GlobalIndex = big.NewInt(439) + expectedClaim2.IsMessage = false + expectedClaim2.GlobalIndex = big.NewInt(439) + expectedClaimBytes, err = abi.Pack( + "claimAsset", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimAsset", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.Claim2Bytes( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect call claim asset (same globalIndex) (1 ok, 1 reverted)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + + // 2 indirect call claim message (diff global index) (1 ok, 1 reverted) + 
expectedClaim.IsMessage = false + expectedClaim.GlobalIndex = big.NewInt(440) + expectedClaim2.IsMessage = false + expectedClaim2.GlobalIndex = big.NewInt(441) + expectedClaimBytes, err = abi.Pack( + "claimAsset", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimAsset", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.Claim2Bytes( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect call claim asset (diff globalIndex) (1 ok, 1 reverted)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + + reverted = [2]bool{true, false} + + // 2 indirect call claim asset (same global index) (1 reverted, 1 ok) + expectedClaim.IsMessage = false + expectedClaim.GlobalIndex = big.NewInt(442) + expectedClaim2.IsMessage = false + expectedClaim2.GlobalIndex = big.NewInt(442) + expectedClaimBytes, err = abi.Pack( + "claimAsset", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimAsset", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.Claim2Bytes( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect call claim asset (same globalIndex) (reverted,ok)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim2, + }) + + // 2 indirect call claim asset (diff global index) (1 reverted, 1 ok) + expectedClaim.IsMessage = false + expectedClaim.GlobalIndex = big.NewInt(443) + expectedClaim2.IsMessage = false + expectedClaim2.GlobalIndex = big.NewInt(444) + expectedClaimBytes, err = abi.Pack( + "claimAsset", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimAsset", + proofLocal, 
+ proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimCaller.Claim2Bytes( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect call claim asset (diff globalIndex) (reverted,ok)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim2, + }) + for _, tc := range testCases { + log.Info(tc.description) t.Run(tc.description, func(t *testing.T) { claimEvent, err := bridgeContract.ParseClaimEvent(tc.log) require.NoError(t, err) diff --git a/bridgesync/config.go b/bridgesync/config.go index 9aa849e2..66eb00ed 100644 --- a/bridgesync/config.go +++ b/bridgesync/config.go @@ -9,7 +9,7 @@ type Config struct { // DBPath is the path of the DB DBPath string `mapstructure:"DBPath"` // BlockFinality indicates the status of the blocks that will be queried in order to sync - BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` + BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll // InitialBlockNum is the first block that will be queried when starting the synchronization from scratch. // It should be a number equal to or below the creation of the bridge contract InitialBlockNum uint64 `mapstructure:"InitialBlockNum"` diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index 2763fcfe..b34267ce 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -10,7 +10,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2" rpcTypes "github.com/0xPolygon/cdk-rpc/types" "github.com/0xPolygon/cdk/sync" - "github.com/0xPolygon/cdk/tree" + tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -22,13 +22,16 @@ import ( ) var ( - bridgeEventSignature = crypto.Keccak256Hash([]byte("BridgeEvent(uint8,uint32,address,uint32,address,uint256,bytes,uint32)")) + bridgeEventSignature = crypto.Keccak256Hash([]byte( + "BridgeEvent(uint8,uint32,address,uint32,address,uint256,bytes,uint32)", + )) claimEventSignature = crypto.Keccak256Hash([]byte("ClaimEvent(uint256,uint32,address,address,uint256)")) claimEventSignaturePreEtrog = crypto.Keccak256Hash([]byte("ClaimEvent(uint32,uint32,address,address,uint256)")) methodIDClaimAsset = common.Hex2Bytes("ccaa2d11") methodIDClaimMessage = common.Hex2Bytes("f5efcd79") ) +// EthClienter defines the methods required to interact with an Ethereum client.
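+// Both a live *ethclient.Client and the simulated backend wrapped by
+// helpers.TestClient in the e2e tests are expected to satisfy this interface.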
type EthClienter interface { ethereum.LogFilterer ethereum.BlockNumberReader @@ -52,11 +55,13 @@ func buildAppender(client EthClienter, bridge common.Address, syncFullClaims boo bridge, err := bridgeContractV2.ParseBridgeEvent(l) if err != nil { return fmt.Errorf( - "error parsing log %+v using d.bridgeContractV2.ParseBridgeEvent: %v", + "error parsing log %+v using d.bridgeContractV2.ParseBridgeEvent: %w", l, err, ) } b.Events = append(b.Events, Event{Bridge: &Bridge{ + BlockNum: b.Num, + BlockPos: uint64(l.Index), LeafType: bridge.LeafType, OriginNetwork: bridge.OriginNetwork, OriginAddress: bridge.OriginAddress, @@ -66,6 +71,7 @@ func buildAppender(client EthClienter, bridge common.Address, syncFullClaims boo Metadata: bridge.Metadata, DepositCount: bridge.DepositCount, }}) + return nil } @@ -73,11 +79,13 @@ func buildAppender(client EthClienter, bridge common.Address, syncFullClaims boo claimEvent, err := bridgeContractV2.ParseClaimEvent(l) if err != nil { return fmt.Errorf( - "error parsing log %+v using d.bridgeContractV2.ParseClaimEvent: %v", + "error parsing log %+v using d.bridgeContractV2.ParseClaimEvent: %w", l, err, ) } claim := &Claim{ + BlockNum: b.Num, + BlockPos: uint64(l.Index), GlobalIndex: claimEvent.GlobalIndex, OriginNetwork: claimEvent.OriginNetwork, OriginAddress: claimEvent.OriginAddress, @@ -85,7 +93,9 @@ func buildAppender(client EthClienter, bridge common.Address, syncFullClaims boo Amount: claimEvent.Amount, } if syncFullClaims { - setClaimCalldata(client, bridge, l.TxHash, claim) + if err := setClaimCalldata(client, bridge, l.TxHash, claim); err != nil { + return err + } } b.Events = append(b.Events, Event{Claim: claim}) return nil @@ -95,11 +105,13 @@ func buildAppender(client EthClienter, bridge common.Address, syncFullClaims boo claimEvent, err := bridgeContractV1.ParseClaimEvent(l) if err != nil { return fmt.Errorf( - "error parsing log %+v using d.bridgeContractV1.ParseClaimEvent: %v", + "error parsing log %+v using d.bridgeContractV1.ParseClaimEvent: %w", l, err, ) } claim := &Claim{ + BlockNum: b.Num, + BlockPos: uint64(l.Index), GlobalIndex: big.NewInt(int64(claimEvent.Index)), OriginNetwork: claimEvent.OriginNetwork, OriginAddress: claimEvent.OriginAddress, @@ -107,7 +119,9 @@ func buildAppender(client EthClienter, bridge common.Address, syncFullClaims boo Amount: claimEvent.Amount, } if syncFullClaims { - setClaimCalldata(client, bridge, l.TxHash, claim) + if err := setClaimCalldata(client, bridge, l.TxHash, claim); err != nil { + return err + } } b.Events = append(b.Events, Event{Claim: claim}) return nil @@ -144,7 +158,13 @@ func setClaimCalldata(client EthClienter, bridge common.Address, txHash common.H if callStack.Len() == 0 { break } - currentCall := callStack.Pop().(call) + + currentCallInterface := callStack.Pop() + currentCall, ok := currentCallInterface.(call) + if !ok { + return fmt.Errorf("unexpected type for 'currentCall'. 
Expected 'call', got '%T'", currentCallInterface) + } + if currentCall.To == bridge { found, err := setClaimIfFoundOnInput( currentCall.Input, @@ -169,9 +189,9 @@ func setClaimIfFoundOnInput(input []byte, claim *Claim) (bool, error) { if err != nil { return false, err } - methodId := input[:4] + methodID := input[:4] // Recover Method from signature and ABI - method, err := smcAbi.MethodById(methodId) + method, err := smcAbi.MethodById(methodID) if err != nil { return false, err } @@ -180,13 +200,13 @@ func setClaimIfFoundOnInput(input []byte, claim *Claim) (bool, error) { return false, err } // Ignore other methods - if bytes.Equal(methodId, methodIDClaimAsset) || bytes.Equal(methodId, methodIDClaimMessage) { + if bytes.Equal(methodID, methodIDClaimAsset) || bytes.Equal(methodID, methodIDClaimMessage) { found, err := decodeClaimCallDataAndSetIfFound(data, claim) if err != nil { return false, err } if found { - if bytes.Equal(methodId, methodIDClaimMessage) { + if bytes.Equal(methodID, methodIDClaimMessage) { claim.IsMessage = true } return true, nil @@ -228,25 +248,52 @@ func decodeClaimCallDataAndSetIfFound(data []interface{}, claim *Claim) (bool, e 10: metadata, ) */ - actualGlobalIndex := data[2].(*big.Int) + actualGlobalIndex, ok := data[2].(*big.Int) + if !ok { + return false, fmt.Errorf("unexpected type for actualGlobalIndex, expected *big.Int got '%T'", data[2]) + } if actualGlobalIndex.Cmp(claim.GlobalIndex) != 0 { // not the claim we're looking for return false, nil } else { proofLER := [tree.DefaultHeight]common.Hash{} - proofLERBytes := data[0].([32][32]byte) + proofLERBytes, ok := data[0].([32][32]byte) + if !ok { + return false, fmt.Errorf("unexpected type for proofLERBytes, expected [32][32]byte got '%T'", data[0]) + } + proofRER := [tree.DefaultHeight]common.Hash{} - proofRERBytes := data[1].([32][32]byte) + proofRERBytes, ok := data[1].([32][32]byte) + if !ok { + return false, fmt.Errorf("unexpected type for proofRERBytes, expected [32][32]byte got '%T'", data[1]) + } + for i := 0; i < int(tree.DefaultHeight); i++ { proofLER[i] = proofLERBytes[i] proofRER[i] = proofRERBytes[i] } claim.ProofLocalExitRoot = proofLER claim.ProofRollupExitRoot = proofRER - claim.MainnetExitRoot = data[3].([32]byte) - claim.RollupExitRoot = data[4].([32]byte) - claim.DestinationNetwork = data[7].(uint32) - claim.Metadata = data[10].([]byte) + + claim.MainnetExitRoot, ok = data[3].([32]byte) + if !ok { + return false, fmt.Errorf("unexpected type for 'MainnetExitRoot'. Expected '[32]byte', got '%T'", data[3]) + } + + claim.RollupExitRoot, ok = data[4].([32]byte) + if !ok { + return false, fmt.Errorf("unexpected type for 'RollupExitRoot'. Expected '[32]byte', got '%T'", data[4]) + } + + claim.DestinationNetwork, ok = data[7].(uint32) + if !ok { + return false, fmt.Errorf("unexpected type for 'DestinationNetwork'. Expected 'uint32', got '%T'", data[7]) + } + claim.Metadata, ok = data[10].([]byte) + if !ok { + return false, fmt.Errorf("unexpected type for 'claim Metadata'. 
Expected '[]byte', got '%T'", data[10]) + } + return true, nil } } diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go index 6eff5548..a19afb8d 100644 --- a/bridgesync/e2e_test.go +++ b/bridgesync/e2e_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/big" + "path" "testing" "time" @@ -26,26 +27,28 @@ func newSimulatedClient(t *testing.T, auth *bind.TransactOpts) ( bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, ) { t.Helper() + var err error - balance, _ := big.NewInt(0).SetString("10000000000000000000000000", 10) //nolint:gomnd + balance, _ := big.NewInt(0).SetString("10000000000000000000000000", 10) address := auth.From genesisAlloc := map[common.Address]types.Account{ address: { Balance: balance, }, } - blockGasLimit := uint64(999999999999999999) //nolint:gomnd + blockGasLimit := uint64(999999999999999999) client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) bridgeAddr, _, bridgeContract, err = polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(auth, client.Client()) require.NoError(t, err) client.Commit() + return } func TestBridgeEventE2E(t *testing.T) { ctx := context.Background() - dbPathSyncer := t.TempDir() + dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") dbPathReorg := t.TempDir() privateKey, err := crypto.GenerateKey() require.NoError(t, err) @@ -54,17 +57,21 @@ func TestBridgeEventE2E(t *testing.T) { client, bridgeAddr, bridgeSc := newSimulatedClient(t, auth) rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg}) require.NoError(t, err) - go rd.Start(ctx) + + go rd.Start(ctx) //nolint:errcheck testClient := helpers.TestClient{ClientRenamed: client.Client()} syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, bridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0) require.NoError(t, err) + go syncer.Start(ctx) // Send bridge txs expectedBridges := []bridgesync.Bridge{} + for i := 0; i < 100; i++ { bridge := bridgesync.Bridge{ + BlockNum: uint64(2 + i), Amount: big.NewInt(0), DepositCount: uint32(i), DestinationNetwork: 3, @@ -89,16 +96,20 @@ func TestBridgeEventE2E(t *testing.T) { // Wait for syncer to catch up syncerUpToDate := false + var errMsg string lb, err := client.Client().BlockNumber(ctx) require.NoError(t, err) + for i := 0; i < 10; i++ { lpb, err := syncer.GetLastProcessedBlock(ctx) require.NoError(t, err) if lpb == lb { syncerUpToDate = true + break } + time.Sleep(time.Millisecond * 100) errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb) } @@ -107,14 +118,8 @@ func TestBridgeEventE2E(t *testing.T) { // Get bridges lastBlock, err := client.Client().BlockNumber(ctx) require.NoError(t, err) - events, err := syncer.GetClaimsAndBridges(ctx, 0, lastBlock) + actualBridges, err := syncer.GetBridges(ctx, 0, lastBlock) require.NoError(t, err) - actualBridges := []bridgesync.Bridge{} - for _, event := range events { - if event.Bridge != nil { - actualBridges = append(actualBridges, *event.Bridge) - } - } // Assert bridges require.Equal(t, expectedBridges, actualBridges) diff --git a/bridgesync/migrations/bridgesync0001.sql b/bridgesync/migrations/bridgesync0001.sql new file mode 100644 index 00000000..de90910c --- /dev/null +++ b/bridgesync/migrations/bridgesync0001.sql @@ -0,0 +1,42 @@ +-- +migrate Down +DROP TABLE IF EXISTS block; +DROP TABLE IF EXISTS claim; +DROP TABLE IF EXISTS bridge; + +-- +migrate Up +CREATE TABLE block ( + num BIGINT PRIMARY KEY +); + +CREATE TABLE bridge ( + block_num INTEGER 
NOT NULL REFERENCES block(num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + leaf_type INTEGER NOT NULL, + origin_network INTEGER NOT NULL, + origin_address VARCHAR NOT NULL, + destination_network INTEGER NOT NULL, + destination_address VARCHAR NOT NULL, + amount DECIMAL(78, 0) NOT NULL, + metadata BLOB, + deposit_count INTEGER NOT NULL, + PRIMARY KEY (block_num, block_pos) +); + +CREATE TABLE claim ( + block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + global_index DECIMAL(78, 0) NOT NULL, + origin_network INTEGER NOT NULL, + origin_address VARCHAR NOT NULL, + destination_address VARCHAR NOT NULL, + amount DECIMAL(78, 0) NOT NULL, + proof_local_exit_root VARCHAR, + proof_rollup_exit_root VARCHAR, + mainnet_exit_root VARCHAR, + rollup_exit_root VARCHAR, + global_exit_root VARCHAR, + destination_network INTEGER NOT NULL, + metadata BLOB, + is_message BOOLEAN, + PRIMARY KEY (block_num, block_pos) +); \ No newline at end of file diff --git a/bridgesync/migrations/bridgesync0001_test.go b/bridgesync/migrations/bridgesync0001_test.go new file mode 100644 index 00000000..d117e0e2 --- /dev/null +++ b/bridgesync/migrations/bridgesync0001_test.go @@ -0,0 +1,61 @@ +package migrations + +import ( + "context" + "path" + "testing" + + "github.com/0xPolygon/cdk/db" + "github.com/stretchr/testify/require" +) + +func Test001(t *testing.T) { + dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + + err := RunMigrations(dbPath) + require.NoError(t, err) + db, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + + ctx := context.Background() + tx, err := db.BeginTx(ctx, nil) + require.NoError(t, err) + + _, err = tx.Exec(` + INSERT INTO block (num) VALUES (1); + + INSERT INTO bridge ( + block_num, + block_pos, + leaf_type, + origin_network, + origin_address, + destination_network, + destination_address, + amount, + metadata, + deposit_count + ) VALUES (1, 0, 0, 0, '0x0000', 0, '0x0000', 0, NULL, 0); + + INSERT INTO claim ( + block_num, + block_pos, + global_index, + origin_network, + origin_address, + destination_address, + amount, + proof_local_exit_root, + proof_rollup_exit_root, + mainnet_exit_root, + rollup_exit_root, + global_exit_root, + destination_network, + metadata, + is_message + ) VALUES (1, 0, 0, 0, '0x0000', '0x0000', 0, '0x000,0x000', '0x000,0x000', '0x000', '0x000', '0x0', 0, NULL, FALSE); + `) + require.NoError(t, err) + err = tx.Commit() + require.NoError(t, err) +} diff --git a/bridgesync/migrations/migrations.go b/bridgesync/migrations/migrations.go new file mode 100644 index 00000000..c500ee38 --- /dev/null +++ b/bridgesync/migrations/migrations.go @@ -0,0 +1,23 @@ +package migrations + +import ( + _ "embed" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/db/types" + treeMigrations "github.com/0xPolygon/cdk/tree/migrations" +) + +//go:embed bridgesync0001.sql +var mig001 string + +func RunMigrations(dbPath string) error { + migrations := []types.Migration{ + { + ID: "bridgesync0001", + SQL: mig001, + }, + } + migrations = append(migrations, treeMigrations.Migrations...) 
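	// Composing the slices here means a single RunMigrations call provisions
	// both the bridge tables (block/bridge/claim above) and the tree
	// package's tables in the same SQLite file, which newProcessor relies on
	// below when it opens the DB and builds the exit tree on top of it.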
+ return db.RunMigrations(dbPath, migrations) +} diff --git a/bridgesync/processor.go b/bridgesync/processor.go index bd96732f..47b26595 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -2,42 +2,42 @@ package bridgesync import ( "context" + "database/sql" "encoding/binary" - "encoding/json" "errors" + "fmt" "math/big" - dbCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/bridgesync/migrations" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" "github.com/0xPolygon/cdk/tree" + "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" "github.com/iden3/go-iden3-crypto/keccak256" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" -) - -const ( - eventsTableSufix = "-events" - lastBlockTableSufix = "-lastBlock" + "github.com/russross/meddler" + _ "modernc.org/sqlite" ) var ( + // ErrBlockNotProcessed indicates that the given block(s) have not been processed yet. ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet") ErrNotFound = errors.New("not found") - lastBlockKey = []byte("lb") ) // Bridge is the representation of a bridge event type Bridge struct { - LeafType uint8 - OriginNetwork uint32 - OriginAddress common.Address - DestinationNetwork uint32 - DestinationAddress common.Address - Amount *big.Int - Metadata []byte - DepositCount uint32 + BlockNum uint64 `meddler:"block_num"` + BlockPos uint64 `meddler:"block_pos"` + LeafType uint8 `meddler:"leaf_type"` + OriginNetwork uint32 `meddler:"origin_network"` + OriginAddress common.Address `meddler:"origin_address"` + DestinationNetwork uint32 `meddler:"destination_network"` + DestinationAddress common.Address `meddler:"destination_address"` + Amount *big.Int `meddler:"amount,bigint"` + Metadata []byte `meddler:"metadata"` + DepositCount uint32 `meddler:"deposit_count"` } // Hash returns the hash of the bridge event as expected by the exit tree @@ -56,6 +56,7 @@ func (b *Bridge) Hash() common.Hash { if b.Amount == nil { b.Amount = big.NewInt(0) } + return common.BytesToHash(keccak256.Hash( []byte{b.LeafType}, origNet, @@ -69,243 +70,232 @@ func (b *Bridge) Hash() common.Hash { // Claim representation of a claim event type Claim struct { - // From claim event - GlobalIndex *big.Int - OriginNetwork uint32 - OriginAddress common.Address - DestinationAddress common.Address - Amount *big.Int - // From call data - ProofLocalExitRoot [tree.DefaultHeight]common.Hash - ProofRollupExitRoot [tree.DefaultHeight]common.Hash - MainnetExitRoot common.Hash - RollupExitRoot common.Hash - DestinationNetwork uint32 - Metadata []byte - // Meta - IsMessage bool + BlockNum uint64 `meddler:"block_num"` + BlockPos uint64 `meddler:"block_pos"` + GlobalIndex *big.Int `meddler:"global_index,bigint"` + OriginNetwork uint32 `meddler:"origin_network"` + OriginAddress common.Address `meddler:"origin_address"` + DestinationAddress common.Address `meddler:"destination_address"` + Amount *big.Int `meddler:"amount,bigint"` + ProofLocalExitRoot types.Proof `meddler:"proof_local_exit_root,merkleproof"` + ProofRollupExitRoot types.Proof `meddler:"proof_rollup_exit_root,merkleproof"` + MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` + RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` + GlobalExitRoot common.Hash `meddler:"global_exit_root,hash"` + DestinationNetwork uint32 `meddler:"destination_network"` + Metadata []byte `meddler:"metadata"` + IsMessage bool `meddler:"is_message"` } // Event 
combination of bridge and claim events type Event struct { + Pos uint64 Bridge *Bridge Claim *Claim } type processor struct { - db kv.RwDB - eventsTable string - lastBlockTable string - exitTree *tree.AppendOnlyTree - log *log.Logger + db *sql.DB + exitTree *tree.AppendOnlyTree + log *log.Logger } -func newProcessor(ctx context.Context, dbPath, dbPrefix string) (*processor, error) { - eventsTable := dbPrefix + eventsTableSufix - lastBlockTable := dbPrefix + lastBlockTableSufix - logger := log.WithFields("bridge-syncer", dbPrefix) - tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg { - cfg := kv.TableCfg{ - eventsTable: {}, - lastBlockTable: {}, - } - tree.AddTables(cfg, dbPrefix) - return cfg - } - db, err := mdbx.NewMDBX(nil). - Path(dbPath). - WithTableCfg(tableCfgFunc). - Open() +func newProcessor(dbPath, loggerPrefix string) (*processor, error) { + err := migrations.RunMigrations(dbPath) if err != nil { return nil, err } - exitTree, err := tree.NewAppendOnlyTree(ctx, db, dbPrefix) + db, err := db.NewSQLiteDB(dbPath) if err != nil { return nil, err } + logger := log.WithFields("bridge-syncer", loggerPrefix) + exitTree := tree.NewAppendOnlyTree(db, "") return &processor{ - db: db, - eventsTable: eventsTable, - lastBlockTable: lastBlockTable, - exitTree: exitTree, - log: logger, + db: db, + exitTree: exitTree, + log: logger, }, nil } -// GetClaimsAndBridges returns the claims and bridges occurred between fromBlock, toBlock both included. -// If toBlock has not been porcessed yet, ErrBlockNotProcessed will be returned -func (p *processor) GetClaimsAndBridges( +func (p *processor) GetBridges( ctx context.Context, fromBlock, toBlock uint64, -) ([]Event, error) { - events := []Event{} - - tx, err := p.db.BeginRo(ctx) +) ([]Bridge, error) { + tx, err := p.db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true}) if err != nil { return nil, err } - defer tx.Rollback() - lpb, err := p.getLastProcessedBlockWithTx(tx) + defer func() { + if err := tx.Rollback(); err != nil { + log.Warnf("error rolling back tx: %v", err) + } + }() + rows, err := p.queryBlockRange(tx, fromBlock, toBlock, "bridge") if err != nil { return nil, err } - if lpb < toBlock { - return nil, ErrBlockNotProcessed + bridgePtrs := []*Bridge{} + if err = meddler.ScanAll(rows, &bridgePtrs); err != nil { + return nil, err + } + bridgesIface := db.SlicePtrsToSlice(bridgePtrs) + bridges, ok := bridgesIface.([]Bridge) + if !ok { + return nil, errors.New("failed to convert from []*Bridge to []Bridge") } - c, err := tx.Cursor(p.eventsTable) + return bridges, nil +} + +func (p *processor) GetClaims( + ctx context.Context, fromBlock, toBlock uint64, +) ([]Claim, error) { + tx, err := p.db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true}) if err != nil { return nil, err } - defer c.Close() - - for k, v, err := c.Seek(dbCommon.Uint64ToBytes(fromBlock)); k != nil; k, v, err = c.Next() { - if err != nil { - return nil, err + defer func() { + if err := tx.Rollback(); err != nil { + log.Warnf("error rolling back tx: %v", err) } - if dbCommon.BytesToUint64(k) > toBlock { - break - } - blockEvents := []Event{} - err := json.Unmarshal(v, &blockEvents) - if err != nil { - return nil, err + }() + rows, err := p.queryBlockRange(tx, fromBlock, toBlock, "claim") + if err != nil { + return nil, err + } + claimPtrs := []*Claim{} + if err = meddler.ScanAll(rows, &claimPtrs); err != nil { + return nil, err + } + claimsIface := db.SlicePtrsToSlice(claimPtrs) + claims, ok := claimsIface.([]Claim) + if !ok { + return nil, errors.New("failed to convert from 
[]*Claim to []Claim") + } + return claims, nil +} + +func (p *processor) queryBlockRange(tx db.Querier, fromBlock, toBlock uint64, table string) (*sql.Rows, error) { + if err := p.isBlockProcessed(tx, toBlock); err != nil { + return nil, err + } + rows, err := tx.Query(fmt.Sprintf(` + SELECT * FROM %s + WHERE block_num >= $1 AND block_num <= $2; + `, table), fromBlock, toBlock) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, ErrNotFound } - events = append(events, blockEvents...) + return nil, err } + return rows, nil +} - return events, nil +func (p *processor) isBlockProcessed(tx db.Querier, blockNum uint64) error { + lpb, err := p.getLastProcessedBlockWithTx(tx) + if err != nil { + return err + } + if lpb < blockNum { + return ErrBlockNotProcessed + } + return nil } // GetLastProcessedBlock returns the last processed block by the processor, including blocks // that don't have events func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer tx.Rollback() - return p.getLastProcessedBlockWithTx(tx) + return p.getLastProcessedBlockWithTx(p.db) } -func (p *processor) getLastProcessedBlockWithTx(tx kv.Tx) (uint64, error) { - if blockNumBytes, err := tx.GetOne(p.lastBlockTable, lastBlockKey); err != nil { - return 0, err - } else if blockNumBytes == nil { +func (p *processor) getLastProcessedBlockWithTx(tx db.Querier) (uint64, error) { + var lastProcessedBlock uint64 + row := tx.QueryRow("SELECT num FROM BLOCK ORDER BY num DESC LIMIT 1;") + err := row.Scan(&lastProcessedBlock) + if errors.Is(err, sql.ErrNoRows) { return 0, nil - } else { - return dbCommon.BytesToUint64(blockNumBytes), nil } + return lastProcessedBlock, err } // Reorg triggers a purge and reset process on the processor to leaf it on a state // as if the last block processed was firstReorgedBlock-1 func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { - tx, err := p.db.BeginRw(ctx) + tx, err := db.NewTx(ctx, p.db) if err != nil { return err } - defer tx.Rollback() - c, err := tx.Cursor(p.eventsTable) - if err != nil { - return err - } - defer c.Close() - firstKey := dbCommon.Uint64ToBytes(firstReorgedBlock) - firstDepositCountReorged := int64(-1) - for k, v, err := c.Seek(firstKey); k != nil; k, _, err = c.Next() { + defer func() { if err != nil { - tx.Rollback() - return err - } - if err := tx.Delete(p.eventsTable, k); err != nil { - tx.Rollback() - return err - } - if firstDepositCountReorged == -1 { - events := []Event{} - if err := json.Unmarshal(v, &events); err != nil { - tx.Rollback() - return err - } - for _, event := range events { - if event.Bridge != nil { - firstDepositCountReorged = int64(event.Bridge.DepositCount) - break - } + if errRllbck := tx.Rollback(); errRllbck != nil { + log.Errorf("error while rolling back tx %v", errRllbck) } } - } - if err := p.updateLastProcessedBlock(tx, firstReorgedBlock-1); err != nil { - tx.Rollback() + }() + + _, err = tx.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock) + if err != nil { return err } - exitTreeRollback := func() {} - if firstDepositCountReorged != -1 { - if exitTreeRollback, err = p.exitTree.Reorg(tx, uint32(firstDepositCountReorged)); err != nil { - tx.Rollback() - exitTreeRollback() - return err - } + + if err = p.exitTree.Reorg(tx, firstReorgedBlock); err != nil { + return err } if err := tx.Commit(); err != nil { - exitTreeRollback() return err } + return nil } // ProcessBlock process the events of the 
block to build the exit tree // and updates the last processed block (can be called without events for that purpose) func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { - tx, err := p.db.BeginRw(ctx) + tx, err := db.NewTx(ctx, p.db) if err != nil { return err } - leaves := []tree.Leaf{} - if len(block.Events) > 0 { - events := []Event{} - for _, e := range block.Events { - event := e.(Event) - events = append(events, event) - if event.Bridge != nil { - leaves = append(leaves, tree.Leaf{ - Index: event.Bridge.DepositCount, - Hash: event.Bridge.Hash(), - }) - } - } - value, err := json.Marshal(events) + defer func() { if err != nil { - tx.Rollback() - return err - } - if err := tx.Put(p.eventsTable, dbCommon.Uint64ToBytes(block.Num), value); err != nil { - tx.Rollback() - return err + if errRllbck := tx.Rollback(); errRllbck != nil { + log.Errorf("error while rolling back tx %v", errRllbck) + } } - } + }() - if err := p.updateLastProcessedBlock(tx, block.Num); err != nil { - tx.Rollback() + if _, err := tx.Exec(`INSERT INTO block (num) VALUES ($1)`, block.Num); err != nil { return err } - - exitTreeRollback, err := p.exitTree.AddLeaves(tx, leaves) - if err != nil { - tx.Rollback() - exitTreeRollback() - return err + for _, e := range block.Events { + event, ok := e.(Event) + if !ok { + return errors.New("failed to convert sync.Block.Event to Event") + } + if event.Bridge != nil { + if err = p.exitTree.AddLeaf(tx, block.Num, event.Pos, types.Leaf{ + Index: event.Bridge.DepositCount, + Hash: event.Bridge.Hash(), + }); err != nil { + return err + } + if err = meddler.Insert(tx, "bridge", event.Bridge); err != nil { + return err + } + } + if event.Claim != nil { + if err = meddler.Insert(tx, "claim", event.Claim); err != nil { + return err + } + } } + if err := tx.Commit(); err != nil { - exitTreeRollback() return err } + p.log.Debugf("processed %d events until block %d", len(block.Events), block.Num) - return nil -} -func (p *processor) updateLastProcessedBlock(tx kv.RwTx, blockNum uint64) error { - blockNumBytes := dbCommon.Uint64ToBytes(blockNum) - return tx.Put(p.lastBlockTable, lastBlockKey, blockNumBytes) + return nil } func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint32, localExitRootIndex uint32) *big.Int { @@ -323,5 +313,6 @@ func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint32, localExitRootInde } leri := big.NewInt(0).SetUint64(uint64(localExitRootIndex)).FillBytes(buf[:]) globalIndexBytes = append(globalIndexBytes, leri...) 
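	// At this point globalIndexBytes holds the big-endian packing of the
	// uLxLy global index layout (the mainnet-flag/rollup-index branch sits in
	// the part of the function this hunk elides): bit 64 = mainnetFlag,
	// bits 63..32 = rollupIndex, bits 31..0 = localExitRootIndex. For
	// example, mainnetFlag=true, rollupIndex=0, localExitRootIndex=5 packs
	// to 2^64 + 5 = 18446744073709551621.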
+ return big.NewInt(0).SetBytes(globalIndexBytes) } diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 90fe74be..2ff03c76 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -6,9 +6,12 @@ import ( "fmt" "math/big" "os" + "path" "slices" "testing" + migrationsBridge "github.com/0xPolygon/cdk/bridgesync/migrations" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" "github.com/0xPolygon/cdk/tree/testvectors" "github.com/ethereum/go-ethereum/common" @@ -16,8 +19,11 @@ import ( ) func TestProceessor(t *testing.T) { - path := t.TempDir() - p, err := newProcessor(context.Background(), path, "foo") + path := path.Join(t.TempDir(), "file::memory:?cache=shared") + log.Debugf("sqlite path: %s", path) + err := migrationsBridge.RunMigrations(path) + require.NoError(t, err) + p, err := newProcessor(path, "foo") require.NoError(t, err) actions := []processAction{ // processed: ~ @@ -40,15 +46,24 @@ func TestProceessor(t *testing.T) { firstReorgedBlock: 1, expectedErr: nil, }, - &getClaimsAndBridgesAction{ + &getClaims{ p: p, description: "on an empty processor", ctx: context.Background(), fromBlock: 0, toBlock: 2, - expectedEvents: nil, + expectedClaims: nil, expectedErr: ErrBlockNotProcessed, }, + &getBridges{ + p: p, + description: "on an empty processor", + ctx: context.Background(), + fromBlock: 0, + toBlock: 2, + expectedBridges: nil, + expectedErr: ErrBlockNotProcessed, + }, &processBlockAction{ p: p, description: "block1", @@ -63,24 +78,42 @@ func TestProceessor(t *testing.T) { expectedLastProcessedBlock: 1, expectedErr: nil, }, - &getClaimsAndBridgesAction{ + &getClaims{ p: p, description: "after block1: range 0, 2", ctx: context.Background(), fromBlock: 0, toBlock: 2, - expectedEvents: nil, + expectedClaims: nil, expectedErr: ErrBlockNotProcessed, }, - &getClaimsAndBridgesAction{ + &getBridges{ + p: p, + description: "after block1: range 0, 2", + ctx: context.Background(), + fromBlock: 0, + toBlock: 2, + expectedBridges: nil, + expectedErr: ErrBlockNotProcessed, + }, + &getClaims{ p: p, description: "after block1: range 1, 1", ctx: context.Background(), fromBlock: 1, toBlock: 1, - expectedEvents: eventsToBridgeEvents(block1.Events), + expectedClaims: eventsToClaims(block1.Events), expectedErr: nil, }, + &getBridges{ + p: p, + description: "after block1: range 1, 1", + ctx: context.Background(), + fromBlock: 1, + toBlock: 1, + expectedBridges: eventsToBridges(block1.Events), + expectedErr: nil, + }, &reorgAction{ p: p, description: "after block1", @@ -88,15 +121,24 @@ func TestProceessor(t *testing.T) { expectedErr: nil, }, // processed: ~ - &getClaimsAndBridgesAction{ + &getClaims{ p: p, description: "after block1 reorged", ctx: context.Background(), fromBlock: 0, toBlock: 2, - expectedEvents: nil, + expectedClaims: nil, expectedErr: ErrBlockNotProcessed, }, + &getBridges{ + p: p, + description: "after block1 reorged", + ctx: context.Background(), + fromBlock: 0, + toBlock: 2, + expectedBridges: nil, + expectedErr: ErrBlockNotProcessed, + }, &processBlockAction{ p: p, description: "block1 (after it's reorged)", @@ -118,24 +160,45 @@ func TestProceessor(t *testing.T) { expectedLastProcessedBlock: 3, expectedErr: nil, }, - &getClaimsAndBridgesAction{ + &getClaims{ p: p, description: "after block3: range 2, 2", ctx: context.Background(), fromBlock: 2, toBlock: 2, - expectedEvents: []Event{}, + expectedClaims: []Claim{}, expectedErr: nil, }, - &getClaimsAndBridgesAction{ + &getClaims{ p: p, description: "after block3: range 1, 
3", ctx: context.Background(), fromBlock: 1, toBlock: 3, - expectedEvents: append( - eventsToBridgeEvents(block1.Events), - eventsToBridgeEvents(block3.Events)..., + expectedClaims: append( + eventsToClaims(block1.Events), + eventsToClaims(block3.Events)..., + ), + expectedErr: nil, + }, + &getBridges{ + p: p, + description: "after block3: range 2, 2", + ctx: context.Background(), + fromBlock: 2, + toBlock: 2, + expectedBridges: []Bridge{}, + expectedErr: nil, + }, + &getBridges{ + p: p, + description: "after block3: range 1, 3", + ctx: context.Background(), + fromBlock: 1, + toBlock: 3, + expectedBridges: append( + eventsToBridges(block1.Events), + eventsToBridges(block3.Events)..., ), expectedErr: nil, }, @@ -150,7 +213,7 @@ func TestProceessor(t *testing.T) { p: p, description: "after block3 reorged", ctx: context.Background(), - expectedLastProcessedBlock: 2, + expectedLastProcessedBlock: 1, expectedErr: nil, }, &reorgAction{ @@ -194,48 +257,49 @@ func TestProceessor(t *testing.T) { expectedLastProcessedBlock: 5, expectedErr: nil, }, - &getClaimsAndBridgesAction{ + &getClaims{ p: p, description: "after block5: range 1, 3", ctx: context.Background(), fromBlock: 1, toBlock: 3, - expectedEvents: append( - eventsToBridgeEvents(block1.Events), - eventsToBridgeEvents(block3.Events)..., + expectedClaims: append( + eventsToClaims(block1.Events), + eventsToClaims(block3.Events)..., ), expectedErr: nil, }, - &getClaimsAndBridgesAction{ + &getClaims{ p: p, description: "after block5: range 4, 5", ctx: context.Background(), fromBlock: 4, toBlock: 5, - expectedEvents: append( - eventsToBridgeEvents(block4.Events), - eventsToBridgeEvents(block5.Events)..., + expectedClaims: append( + eventsToClaims(block4.Events), + eventsToClaims(block5.Events)..., ), expectedErr: nil, }, - &getClaimsAndBridgesAction{ + &getClaims{ p: p, description: "after block5: range 0, 5", ctx: context.Background(), fromBlock: 0, toBlock: 5, - expectedEvents: slices.Concat( - eventsToBridgeEvents(block1.Events), - eventsToBridgeEvents(block3.Events), - eventsToBridgeEvents(block4.Events), - eventsToBridgeEvents(block5.Events), + expectedClaims: slices.Concat( + eventsToClaims(block1.Events), + eventsToClaims(block3.Events), + eventsToClaims(block4.Events), + eventsToClaims(block5.Events), ), expectedErr: nil, }, } for _, a := range actions { - t.Run(fmt.Sprintf("%s: %s", a.method(), a.desc()), a.execute) + log.Debugf("%s: %s", a.method(), a.desc()) + a.execute(t) } } @@ -248,6 +312,8 @@ var ( Num: 1, Events: []interface{}{ Event{Bridge: &Bridge{ + BlockNum: 1, + BlockPos: 0, LeafType: 1, OriginNetwork: 1, OriginAddress: common.HexToAddress("01"), @@ -258,6 +324,8 @@ var ( DepositCount: 0, }}, Event{Claim: &Claim{ + BlockNum: 1, + BlockPos: 1, GlobalIndex: big.NewInt(1), OriginNetwork: 1, OriginAddress: common.HexToAddress("01"), @@ -270,6 +338,8 @@ var ( Num: 3, Events: []interface{}{ Event{Bridge: &Bridge{ + BlockNum: 3, + BlockPos: 0, LeafType: 2, OriginNetwork: 2, OriginAddress: common.HexToAddress("02"), @@ -280,12 +350,14 @@ var ( DepositCount: 1, }}, Event{Bridge: &Bridge{ + BlockNum: 3, + BlockPos: 1, LeafType: 3, OriginNetwork: 3, OriginAddress: common.HexToAddress("03"), DestinationNetwork: 3, DestinationAddress: common.HexToAddress("03"), - Amount: nil, + Amount: big.NewInt(0), Metadata: common.Hex2Bytes("03"), DepositCount: 2, }}, @@ -299,6 +371,8 @@ var ( Num: 5, Events: []interface{}{ Event{Claim: &Claim{ + BlockNum: 4, + BlockPos: 0, GlobalIndex: big.NewInt(4), OriginNetwork: 4, OriginAddress: 
common.HexToAddress("04"), @@ -306,6 +380,8 @@ var ( Amount: big.NewInt(4), }}, Event{Claim: &Claim{ + BlockNum: 4, + BlockPos: 1, GlobalIndex: big.NewInt(5), OriginNetwork: 5, OriginAddress: common.HexToAddress("05"), @@ -324,29 +400,57 @@ type processAction interface { execute(t *testing.T) } -// GetClaimsAndBridges +// GetClaims -type getClaimsAndBridgesAction struct { +type getClaims struct { p *processor description string ctx context.Context fromBlock uint64 toBlock uint64 - expectedEvents []Event + expectedClaims []Claim expectedErr error } -func (a *getClaimsAndBridgesAction) method() string { - return "GetClaimsAndBridges" +func (a *getClaims) method() string { + return "GetClaims" } -func (a *getClaimsAndBridgesAction) desc() string { +func (a *getClaims) desc() string { return a.description } -func (a *getClaimsAndBridgesAction) execute(t *testing.T) { - actualEvents, actualErr := a.p.GetClaimsAndBridges(a.ctx, a.fromBlock, a.toBlock) - require.Equal(t, a.expectedEvents, actualEvents) +func (a *getClaims) execute(t *testing.T) { + t.Helper() + actualEvents, actualErr := a.p.GetClaims(a.ctx, a.fromBlock, a.toBlock) + require.Equal(t, a.expectedErr, actualErr) + require.Equal(t, a.expectedClaims, actualEvents) +} + +// GetBridges + +type getBridges struct { + p *processor + description string + ctx context.Context + fromBlock uint64 + toBlock uint64 + expectedBridges []Bridge + expectedErr error +} + +func (a *getBridges) method() string { + return "GetBridges" +} + +func (a *getBridges) desc() string { + return a.description +} + +func (a *getBridges) execute(t *testing.T) { + t.Helper() + actualEvents, actualErr := a.p.GetBridges(a.ctx, a.fromBlock, a.toBlock) + require.Equal(t, a.expectedBridges, actualEvents) require.Equal(t, a.expectedErr, actualErr) } @@ -369,6 +473,8 @@ func (a *getLastProcessedBlockAction) desc() string { } func (a *getLastProcessedBlockAction) execute(t *testing.T) { + t.Helper() + actualLastProcessedBlock, actualErr := a.p.GetLastProcessedBlock(a.ctx) require.Equal(t, a.expectedLastProcessedBlock, actualLastProcessedBlock) require.Equal(t, a.expectedErr, actualErr) @@ -392,6 +498,8 @@ func (a *reorgAction) desc() string { } func (a *reorgAction) execute(t *testing.T) { + t.Helper() + actualErr := a.p.Reorg(context.Background(), a.firstReorgedBlock) require.Equal(t, a.expectedErr, actualErr) } @@ -414,16 +522,38 @@ func (a *processBlockAction) desc() string { } func (a *processBlockAction) execute(t *testing.T) { + t.Helper() + actualErr := a.p.ProcessBlock(context.Background(), a.block) require.Equal(t, a.expectedErr, actualErr) } -func eventsToBridgeEvents(events []interface{}) []Event { - bridgeEvents := []Event{} +func eventsToBridges(events []interface{}) []Bridge { + bridges := []Bridge{} + for _, event := range events { + e, ok := event.(Event) + if !ok { + panic("should be ok") + } + if e.Bridge != nil { + bridges = append(bridges, *e.Bridge) + } + } + return bridges +} + +func eventsToClaims(events []interface{}) []Claim { + claims := []Claim{} for _, event := range events { - bridgeEvents = append(bridgeEvents, event.(Event)) + e, ok := event.(Event) + if !ok { + panic("should be ok") + } + if e.Claim != nil { + claims = append(claims, *e.Claim) + } } - return bridgeEvents + return claims } func TestHashBridge(t *testing.T) { diff --git a/claimsponsor/claimsponsor.go b/claimsponsor/claimsponsor.go index e0d8e7b8..fbcdca73 100644 --- a/claimsponsor/claimsponsor.go +++ b/claimsponsor/claimsponsor.go @@ -64,6 +64,7 @@ type ClaimSender interface { } 
type ClaimSponsor struct { + logger *log.Logger db kv.RwDB sender ClaimSender rh *sync.RetryHandler @@ -72,6 +73,7 @@ type ClaimSponsor struct { } func newClaimSponsor( + logger *log.Logger, dbPath string, sender ClaimSender, retryAfterErrorPeriod time.Duration, @@ -84,6 +86,7 @@ func newClaimSponsor( claimTable: {}, queueTable: {}, } + return cfg } db, err := mdbx.NewMDBX(nil). @@ -97,7 +100,9 @@ func newClaimSponsor( MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError, RetryAfterErrorPeriod: retryAfterErrorPeriod, } + return &ClaimSponsor{ + logger: logger, db: db, sender: sender, rh: rh, @@ -119,27 +124,28 @@ func (c *ClaimSponsor) Start(ctx context.Context) { tx, err2 := c.db.BeginRw(ctx) if err2 != nil { err = err2 - log.Errorf("error calling BeginRw: %v", err) + c.logger.Errorf("error calling BeginRw: %v", err) continue } queueIndex, globalIndex, err2 := getFirstQueueIndex(tx) if err2 != nil { err = err2 tx.Rollback() - if err == ErrNotFound { - log.Debugf("queue is empty") + if errors.Is(err, ErrNotFound) { + c.logger.Debugf("queue is empty") err = nil time.Sleep(c.waitOnEmptyQueue) + continue } - log.Errorf("error calling getFirstQueueIndex: %v", err) + c.logger.Errorf("error calling getFirstQueueIndex: %v", err) continue } claim, err2 := getClaim(tx, globalIndex) if err2 != nil { err = err2 tx.Rollback() - log.Errorf("error calling getClaim with globalIndex %s: %v", globalIndex.String(), err) + c.logger.Errorf("error calling getClaim with globalIndex %s: %v", globalIndex.String(), err) continue } if claim.TxID == "" { @@ -147,7 +153,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) { if err2 != nil { err = err2 tx.Rollback() - log.Errorf("error calling sendClaim with globalIndex %s: %v", globalIndex.String(), err) + c.logger.Errorf("error calling sendClaim with globalIndex %s: %v", globalIndex.String(), err) continue } claim.TxID = txID @@ -156,29 +162,29 @@ func (c *ClaimSponsor) Start(ctx context.Context) { if err2 != nil { err = err2 tx.Rollback() - log.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) + c.logger.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) continue } } err2 = tx.Commit() if err2 != nil { err = err2 - log.Errorf("error calling tx.Commit after putting claim: %v", err) + c.logger.Errorf("error calling tx.Commit after putting claim: %v", err) continue } - log.Infof("waiting for tx %s with global index %s to succeed or fail", claim.TxID, globalIndex.String()) + c.logger.Infof("waiting for tx %s with global index %s to succeed or fail", claim.TxID, globalIndex.String()) status, err2 := c.waitTxToBeSuccessOrFail(ctx, claim.TxID) if err2 != nil { err = err2 - log.Errorf("error calling waitTxToBeSuccessOrFail for tx %s: %v", claim.TxID, err) + c.logger.Errorf("error calling waitTxToBeSuccessOrFail for tx %s: %v", claim.TxID, err) continue } - log.Infof("tx %s with global index %s concluded with status: %s", claim.TxID, globalIndex.String(), status) + c.logger.Infof("tx %s with global index %s concluded with status: %s", claim.TxID, globalIndex.String(), status) tx, err2 = c.db.BeginRw(ctx) if err2 != nil { err = err2 - log.Errorf("error calling BeginRw: %v", err) + c.logger.Errorf("error calling BeginRw: %v", err) continue } claim.Status = status @@ -186,20 +192,20 @@ func (c *ClaimSponsor) Start(ctx context.Context) { if err2 != nil { err = err2 tx.Rollback() - log.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) + c.logger.Errorf("error calling putClaim 
with globalIndex %s: %v", globalIndex.String(), err) continue } err2 = tx.Delete(queueTable, dbCommon.Uint64ToBytes(queueIndex)) if err2 != nil { err = err2 tx.Rollback() - log.Errorf("error calling delete on the queue table with index %d: %v", queueIndex, err) + c.logger.Errorf("error calling delete on the queue table with index %d: %v", queueIndex, err) continue } err2 = tx.Commit() if err2 != nil { err = err2 - log.Errorf("error calling tx.Commit after putting claim: %v", err) + c.logger.Errorf("error calling tx.Commit after putting claim: %v", err) continue } @@ -236,12 +242,14 @@ func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error } _, err = getClaim(tx, claim.GlobalIndex) - if err != ErrNotFound { + if !errors.Is(err, ErrNotFound) { if err != nil { tx.Rollback() + return err } else { tx.Rollback() + return errors.New("claim already added") } } @@ -249,22 +257,29 @@ func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error err = putClaim(tx, claim) if err != nil { tx.Rollback() + return err } var queuePosition uint64 lastQueuePosition, _, err := getLastQueueIndex(tx) - if err == ErrNotFound { + switch { + case errors.Is(err, ErrNotFound): queuePosition = 0 - } else if err != nil { + + case err != nil: tx.Rollback() + return err - } else { + + default: queuePosition = lastQueuePosition + 1 } + err = tx.Put(queueTable, dbCommon.Uint64ToBytes(queuePosition), claim.Key()) if err != nil { tx.Rollback() + return err } @@ -276,6 +291,7 @@ func putClaim(tx kv.RwTx, claim *Claim) error { if err != nil { return err } + return tx.Put(claimTable, claim.Key(), value) } @@ -306,6 +322,7 @@ func getLastQueueIndex(tx kv.Tx) (uint64, *big.Int, error) { if err != nil { return 0, nil, err } + return getIndex(iter) } @@ -318,6 +335,7 @@ func getFirstQueueIndex(tx kv.Tx) (uint64, *big.Int, error) { if err != nil { return 0, nil, err } + return getIndex(iter) } @@ -330,6 +348,7 @@ func getIndex(iter iter.KV) (uint64, *big.Int, error) { return 0, nil, ErrNotFound } globalIndex := new(big.Int).SetBytes(v) + return dbCommon.BytesToUint64(k), globalIndex, nil } @@ -339,6 +358,7 @@ func (c *ClaimSponsor) GetClaim(ctx context.Context, globalIndex *big.Int) (*Cla return nil, err } defer tx.Rollback() + return getClaim(tx, globalIndex) } @@ -352,5 +372,6 @@ func getClaim(tx kv.Tx, globalIndex *big.Int) (*Claim, error) { } claim := &Claim{} err = json.Unmarshal(claimBytes, claim) + return claim, err } diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go index 796a09ba..8a037a58 100644 --- a/claimsponsor/e2e_test.go +++ b/claimsponsor/e2e_test.go @@ -5,12 +5,14 @@ import ( "errors" "fmt" "math/big" + "path" "testing" "time" "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/test/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -21,7 +23,7 @@ func TestE2EL1toEVML2(t *testing.T) { // start other needed components ctx := context.Background() env := helpers.SetupAggoracleWithEVMChain(t) - dbPathBridgeSyncL1 := t.TempDir() + dbPathBridgeSyncL1 := path.Join(t.TempDir(), "file::memory:?cache=shared") testClient := helpers.TestClient{ClientRenamed: env.L1Client.Client()} bridgeSyncL1, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, env.BridgeL1Addr, 10, etherman.LatestBlock, env.ReorgDetector, testClient, 0, time.Millisecond*10, 0, 0) require.NoError(t, err) @@ -30,6 +32,7 @@ func 
TestE2EL1toEVML2(t *testing.T) { // start claim sponsor dbPathClaimSponsor := t.TempDir() claimer, err := claimsponsor.NewEVMClaimSponsor( + log.GetDefaultLogger(), dbPathClaimSponsor, env.L2Client.Client(), env.BridgeL2Addr, @@ -92,6 +95,7 @@ func TestE2EL1toEVML2(t *testing.T) { require.NoError(t, errors.New("claim failed")) } else if claim.Status == claimsponsor.SuccessClaimStatus { succeed = true + break } time.Sleep(100 * time.Millisecond) diff --git a/claimsponsor/evmclaimsponsor.go b/claimsponsor/evmclaimsponsor.go index e7b94b20..540f3203 100644 --- a/claimsponsor/evmclaimsponsor.go +++ b/claimsponsor/evmclaimsponsor.go @@ -8,6 +8,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2" configTypes "github.com/0xPolygon/cdk/config/types" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" @@ -21,7 +22,8 @@ const ( LeafTypeAsset uint8 = 0 // LeafTypeMessage represents a bridge message LeafTypeMessage uint8 = 1 - gasTooHighErrTemplate = "Claim tx estimated to consume more gas than the maximum allowed by the service. Estimated %d, maximum allowed: %d" + gasTooHighErrTemplate = "Claim tx estimated to consume more gas than the maximum allowed by the service. " + + "Estimated %d, maximum allowed: %d" ) type EthClienter interface { @@ -31,9 +33,11 @@ type EthClienter interface { type EthTxManager interface { Remove(ctx context.Context, id common.Hash) error - ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus) ([]ethtxmanager.MonitoredTxResult, error) + ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus, + ) ([]ethtxmanager.MonitoredTxResult, error) Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) - Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) + Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, + gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) } type EVMClaimSponsor struct { @@ -76,6 +80,7 @@ type EVMClaimSponsorConfig struct { } func NewEVMClaimSponsor( + logger *log.Logger, dbPath string, l2Client EthClienter, bridge common.Address, @@ -106,6 +111,7 @@ func NewEVMClaimSponsor( ethTxManager: ethTxManager, } baseSponsor, err := newClaimSponsor( + logger, dbPath, evmSponsor, retryAfterErrorPeriod, @@ -117,6 +123,7 @@ func NewEVMClaimSponsor( return nil, err } evmSponsor.ClaimSponsor = baseSponsor + return baseSponsor, nil } @@ -136,6 +143,7 @@ func (c *EVMClaimSponsor) checkClaim(ctx context.Context, claim *Claim) error { if gas > c.maxGas { return fmt.Errorf(gasTooHighErrTemplate, gas, c.maxGas) } + return nil } @@ -148,6 +156,7 @@ func (c *EVMClaimSponsor) sendClaim(ctx context.Context, claim *Claim) (string, if err != nil { return "", err } + return id.Hex(), nil } diff --git a/cmd/main.go b/cmd/main.go index 4686902f..300851e7 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -4,6 +4,7 @@ import ( "os" zkevm "github.com/0xPolygon/cdk" + "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/config" "github.com/0xPolygon/cdk/log" "github.com/urfave/cli/v2" @@ -12,19 +13,8 @@ import ( const appName = "cdk" const ( - // SEQUENCE_SENDER name to identify the sequence-sender component - SEQUENCE_SENDER = "sequence-sender" - // AGGREGATOR name to identify the 
aggregator component - AGGREGATOR = "aggregator" - // AGGORACLE name to identify the aggoracle component - AGGORACLE = "aggoracle" - // RPC name to identify the rpc component - RPC = "rpc" -) - -const ( - // NETWORK_CONFIGFILE name to identify the netowk_custom (genesis) config-file - NETWORK_CONFIGFILE = "custom_network" + // NETWORK_CONFIGFILE name to identify the network_custom (genesis) config-file + NETWORK_CONFIGFILE = "custom_network" //nolint:stylecheck ) var ( @@ -51,7 +41,7 @@ var ( Aliases: []string{"co"}, Usage: "List of components to run", Required: false, - Value: cli.NewStringSlice(SEQUENCE_SENDER, AGGREGATOR, AGGORACLE, RPC), + Value: cli.NewStringSlice(common.SEQUENCE_SENDER, common.AGGREGATOR, common.AGGORACLE, common.RPC), } ) diff --git a/cmd/run.go b/cmd/run.go index b960b376..773c5e24 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -18,6 +18,7 @@ import ( "github.com/0xPolygon/cdk/aggregator/db" "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" + cdkcommon "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/config" "github.com/0xPolygon/cdk/dataavailability" "github.com/0xPolygon/cdk/dataavailability/datacommittee" @@ -62,35 +63,53 @@ func start(cliCtx *cli.Context) error { components := cliCtx.StringSlice(config.FlagComponents) l1Client := runL1ClientIfNeeded(components, c.Etherman.URL) l2Client := runL2ClientIfNeeded(components, c.AggOracle.EVMSender.URLRPCL2) - reorgDetectorL1 := runReorgDetectorL1IfNeeded(cliCtx.Context, components, l1Client, &c.ReorgDetectorL1) - reorgDetectorL2 := runReorgDetectorL2IfNeeded(cliCtx.Context, components, l2Client, &c.ReorgDetectorL2) + reorgDetectorL1, errChanL1 := runReorgDetectorL1IfNeeded(cliCtx.Context, components, l1Client, &c.ReorgDetectorL1) + go func() { + if err := <-errChanL1; err != nil { + log.Fatal("Error from ReorgDetectorL1: ", err) + } + }() + + reorgDetectorL2, errChanL2 := runReorgDetectorL2IfNeeded(cliCtx.Context, components, l2Client, &c.ReorgDetectorL2) + go func() { + if err := <-errChanL2; err != nil { + log.Fatal("Error from ReorgDetectorL2: ", err) + } + }() + l1InfoTreeSync := runL1InfoTreeSyncerIfNeeded(cliCtx.Context, components, *c, l1Client, reorgDetectorL1) claimSponsor := runClaimSponsorIfNeeded(cliCtx.Context, components, l2Client, c.ClaimSponsor) l1BridgeSync := runBridgeSyncL1IfNeeded(cliCtx.Context, components, c.BridgeL1Sync, reorgDetectorL1, l1Client) l2BridgeSync := runBridgeSyncL2IfNeeded(cliCtx.Context, components, c.BridgeL2Sync, reorgDetectorL2, l2Client) - l1Bridge2InfoIndexSync := runL1Bridge2InfoIndexSyncIfNeeded(cliCtx.Context, components, c.L1Bridge2InfoIndexSync, l1BridgeSync, l1InfoTreeSync, l1Client) - lastGERSync := runLastGERSyncIfNeeded(cliCtx.Context, components, c.LastGERSync, reorgDetectorL2, l2Client, l1InfoTreeSync) + l1Bridge2InfoIndexSync := runL1Bridge2InfoIndexSyncIfNeeded( + cliCtx.Context, components, c.L1Bridge2InfoIndexSync, + l1BridgeSync, l1InfoTreeSync, l1Client, + ) + lastGERSync := runLastGERSyncIfNeeded( + cliCtx.Context, components, c.LastGERSync, reorgDetectorL2, l2Client, l1InfoTreeSync, + ) for _, component := range components { switch component { - case SEQUENCE_SENDER: + case cdkcommon.SEQUENCE_SENDER: c.SequenceSender.Log = c.Log seqSender := createSequenceSender(*c, l1Client, l1InfoTreeSync) // start sequence sender in a goroutine, checking for errors go seqSender.Start(cliCtx.Context) - case AGGREGATOR: + case cdkcommon.AGGREGATOR: aggregator := createAggregator(cliCtx.Context, *c, !cliCtx.Bool(config.FlagMigrations)) 
// start aggregator in a goroutine, checking for errors go func() { if err := aggregator.Start(); err != nil { + aggregator.Stop() log.Fatal(err) } }() - case AGGORACLE: + case cdkcommon.AGGORACLE: aggOracle := createAggoracle(*c, l1Client, l2Client, l1InfoTreeSync) go aggOracle.Start(cliCtx.Context) - case RPC: + case cdkcommon.RPC: server := createRPC( c.RPC, c.Common.NetworkID, @@ -115,41 +134,46 @@ func start(cliCtx *cli.Context) error { } func createAggregator(ctx context.Context, c config.Config, runMigrations bool) *aggregator.Aggregator { + logger := log.WithFields("module", cdkcommon.AGGREGATOR) // Migrations if runMigrations { - log.Infof("Running DB migrations host: %s:%s db:%s user:%s", c.Aggregator.DB.Host, c.Aggregator.DB.Port, c.Aggregator.DB.Name, c.Aggregator.DB.User) + logger.Infof( + "Running DB migrations host: %s:%s db:%s user:%s", + c.Aggregator.DB.Host, c.Aggregator.DB.Port, c.Aggregator.DB.Name, c.Aggregator.DB.User, + ) runAggregatorMigrations(c.Aggregator.DB) } // DB - stateSqlDB, err := db.NewSQLDB(c.Aggregator.DB) + stateSQLDB, err := db.NewSQLDB(logger, c.Aggregator.DB) if err != nil { - log.Fatal(err) + logger.Fatal(err) } etherman, err := newEtherman(c) if err != nil { - log.Fatal(err) + logger.Fatal(err) } // READ CHAIN ID FROM POE SC l2ChainID, err := etherman.GetL2ChainID() if err != nil { - log.Fatal(err) + logger.Fatal(err) } - st := newState(&c, l2ChainID, stateSqlDB) + st := newState(&c, l2ChainID, stateSQLDB) c.Aggregator.ChainID = l2ChainID // Populate Network config - c.Aggregator.Synchronizer.Etherman.Contracts.GlobalExitRootManagerAddr = c.NetworkConfig.L1Config.GlobalExitRootManagerAddr + c.Aggregator.Synchronizer.Etherman.Contracts.GlobalExitRootManagerAddr = + c.NetworkConfig.L1Config.GlobalExitRootManagerAddr c.Aggregator.Synchronizer.Etherman.Contracts.RollupManagerAddr = c.NetworkConfig.L1Config.RollupManagerAddr c.Aggregator.Synchronizer.Etherman.Contracts.ZkEVMAddr = c.NetworkConfig.L1Config.ZkEVMAddr - aggregator, err := aggregator.New(ctx, c.Aggregator, st, etherman) + aggregator, err := aggregator.New(ctx, c.Aggregator, logger, st, etherman) if err != nil { - log.Fatal(err) + logger.Fatal(err) } return aggregator @@ -160,6 +184,7 @@ func createSequenceSender( l1Client *ethclient.Client, l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, ) *sequencesender.SequenceSender { + logger := log.WithFields("module", cdkcommon.SEQUENCE_SENDER) ethman, err := etherman.NewClient(ethermanconfig.Config{ EthermanConfig: ethtxman.Config{ URL: cfg.SequenceSender.EthTxManager.Etherman.URL, @@ -173,26 +198,27 @@ func createSequenceSender( }, }, cfg.NetworkConfig.L1Config, cfg.Common) if err != nil { - log.Fatalf("Failed to create etherman. Err: %w, ", err) + logger.Fatalf("Failed to create etherman. Err: %w, ", err) } auth, _, err := ethman.LoadAuthFromKeyStore(cfg.SequenceSender.PrivateKey.Path, cfg.SequenceSender.PrivateKey.Password) if err != nil { - log.Fatal(err) + logger.Fatal(err) } cfg.SequenceSender.SenderAddress = auth.From blockFialityType := etherman.BlockNumberFinality(cfg.SequenceSender.BlockFinality) + blockFinality, err := blockFialityType.ToBlockNum() if err != nil { - log.Fatalf("Failed to create block finality. Err: %w, ", err) + logger.Fatalf("Failed to create block finality. 
Err: %w, ", err) } - txBuilder, err := newTxBuilder(cfg, ethman, l1Client, l1InfoTreeSync, blockFinality) + txBuilder, err := newTxBuilder(cfg, logger, ethman, l1Client, l1InfoTreeSync, blockFinality) if err != nil { - log.Fatal(err) + logger.Fatal(err) } - seqSender, err := sequencesender.New(cfg.SequenceSender, ethman, txBuilder) + seqSender, err := sequencesender.New(cfg.SequenceSender, logger, ethman, txBuilder) if err != nil { - log.Fatal(err) + logger.Fatal(err) } return seqSender @@ -200,6 +226,7 @@ func createSequenceSender( func newTxBuilder( cfg config.Config, + logger *log.Logger, ethman *etherman.Client, l1Client *ethclient.Client, l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, @@ -219,6 +246,7 @@ func newTxBuilder( case contracts.VersionBanana: if cfg.Common.IsValidiumMode { txBuilder = txbuilder.NewTxBuilderBananaValidium( + logger, ethman.Contracts.Banana.Rollup, ethman.Contracts.Banana.GlobalExitRoot, da, @@ -230,6 +258,7 @@ func newTxBuilder( ) } else { txBuilder = txbuilder.NewTxBuilderBananaZKEVM( + logger, ethman.Contracts.Banana.Rollup, ethman.Contracts.Banana.GlobalExitRoot, *auth, @@ -241,9 +270,13 @@ func newTxBuilder( } case contracts.VersionElderberry: if cfg.Common.IsValidiumMode { - txBuilder = txbuilder.NewTxBuilderElderberryValidium(ethman.Contracts.Elderberry.Rollup, da, *auth, cfg.SequenceSender.MaxBatchesForL1) + txBuilder = txbuilder.NewTxBuilderElderberryValidium( + logger, ethman.Contracts.Elderberry.Rollup, da, *auth, cfg.SequenceSender.MaxBatchesForL1, + ) } else { - txBuilder = txbuilder.NewTxBuilderElderberryZKEVM(ethman.Contracts.Elderberry.Rollup, *auth, cfg.SequenceSender.MaxTxSizeForL1) + txBuilder = txbuilder.NewTxBuilderElderberryZKEVM( + logger, ethman.Contracts.Elderberry.Rollup, *auth, cfg.SequenceSender.MaxTxSizeForL1, + ) } default: err = fmt.Errorf("unknown contract version: %s", cfg.Common.ContractVersions) @@ -252,7 +285,13 @@ func newTxBuilder( return txBuilder, err } -func createAggoracle(cfg config.Config, l1Client, l2Client *ethclient.Client, syncer *l1infotreesync.L1InfoTreeSync) *aggoracle.AggOracle { +func createAggoracle( + cfg config.Config, + l1Client, + l2Client *ethclient.Client, + syncer *l1infotreesync.L1InfoTreeSync, +) *aggoracle.AggOracle { + logger := log.WithFields("module", cdkcommon.AGGORACLE) var sender aggoracle.ChainSender switch cfg.AggOracle.TargetChainType { case aggoracle.EVMChain: @@ -267,6 +306,7 @@ func createAggoracle(cfg config.Config, l1Client, l2Client *ethclient.Client, sy } go ethTxManager.Start() sender, err = chaingersender.NewEVMChainGERSender( + logger, cfg.AggOracle.EVMSender.GlobalExitRootL2Addr, cfg.AggOracle.EVMSender.SenderAddr, l2Client, @@ -284,6 +324,7 @@ func createAggoracle(cfg config.Config, l1Client, l2Client *ethclient.Client, sy ) } aggOracle, err := aggoracle.New( + logger, sender, l1Client, syncer, @@ -291,7 +332,7 @@ func createAggoracle(cfg config.Config, l1Client, l2Client *ethclient.Client, sy cfg.AggOracle.WaitPeriodNextGER.Duration, ) if err != nil { - log.Fatal(err) + logger.Fatal(err) } return aggOracle @@ -301,14 +342,15 @@ func newDataAvailability(c config.Config, etherman *etherman.Client) (*dataavail if !c.Common.IsValidiumMode { return nil, nil } - translator := translator.NewTranslatorImpl() - log.Infof("Translator rules: %v", c.Common.Translator) + logger := log.WithFields("module", "da-committee") + translator := translator.NewTranslatorImpl(logger) + logger.Infof("Translator rules: %v", c.Common.Translator) translator.AddConfigRules(c.Common.Translator) // Backend 
specific config daProtocolName, err := etherman.GetDAProtocolName() if err != nil { - return nil, fmt.Errorf("error getting data availability protocol name: %v", err) + return nil, fmt.Errorf("error getting data availability protocol name: %w", err) } var daBackend dataavailability.DABackender switch daProtocolName { @@ -323,10 +365,11 @@ func newDataAvailability(c config.Config, etherman *etherman.Client) (*dataavail } dacAddr, err := etherman.GetDAProtocolAddr() if err != nil { - return nil, fmt.Errorf("error getting trusted sequencer URI. Error: %v", err) + return nil, fmt.Errorf("error getting trusted sequencer URI. Error: %w", err) } daBackend, err = datacommittee.New( + logger, c.SequenceSender.EthTxManager.Etherman.URL, dacAddr, pk, @@ -401,9 +444,10 @@ func newState(c *config.Config, l2ChainID uint64, sqlDB *pgxpool.Pool) *state.St ChainID: l2ChainID, } - stateDb := pgstatestorage.NewPostgresStorage(stateCfg, sqlDB) + stateDB := pgstatestorage.NewPostgresStorage(stateCfg, sqlDB) + + st := state.NewState(stateCfg, stateDB) - st := state.NewState(stateCfg, stateDb) return st } @@ -415,6 +459,7 @@ func newReorgDetector( if err != nil { log.Fatal(err) } + return rd } @@ -426,6 +471,7 @@ func isNeeded(casesWhereNeeded, actualCases []string) bool { } } } + return false } @@ -436,7 +482,7 @@ func runL1InfoTreeSyncerIfNeeded( l1Client *ethclient.Client, reorgDetector *reorgdetector.ReorgDetector, ) *l1infotreesync.L1InfoTreeSync { - if !isNeeded([]string{AGGORACLE, RPC, SEQUENCE_SENDER}, components) { + if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.SEQUENCE_SENDER}, components) { return nil } l1InfoTreeSync, err := l1infotreesync.New( @@ -457,11 +503,15 @@ func runL1InfoTreeSyncerIfNeeded( log.Fatal(err) } go l1InfoTreeSync.Start(ctx) + return l1InfoTreeSync } func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client { - if !isNeeded([]string{SEQUENCE_SENDER, AGGREGATOR, AGGORACLE, RPC}, components) { + if !isNeeded([]string{ + cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, + cdkcommon.AGGORACLE, cdkcommon.RPC, + }, components) { return nil } log.Debugf("dialing L1 client at: %s", urlRPCL1) @@ -469,11 +519,12 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client if err != nil { log.Fatal(err) } + return l1CLient } func runL2ClientIfNeeded(components []string, urlRPCL2 string) *ethclient.Client { - if !isNeeded([]string{AGGORACLE, RPC}, components) { + if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC}, components) { return nil } log.Debugf("dialing L2 client at: %s", urlRPCL2) @@ -481,25 +532,55 @@ func runL2ClientIfNeeded(components []string, urlRPCL2 string) *ethclient.Client if err != nil { log.Fatal(err) } + return l2CLient } -func runReorgDetectorL1IfNeeded(ctx context.Context, components []string, l1Client *ethclient.Client, cfg *reorgdetector.Config) *reorgdetector.ReorgDetector { - if !isNeeded([]string{SEQUENCE_SENDER, AGGREGATOR, AGGORACLE, RPC}, components) { - return nil +func runReorgDetectorL1IfNeeded( + ctx context.Context, + components []string, + l1Client *ethclient.Client, + cfg *reorgdetector.Config, +) (*reorgdetector.ReorgDetector, chan error) { + if !isNeeded([]string{ + cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, + cdkcommon.AGGORACLE, cdkcommon.RPC}, + components) { + return nil, nil } rd := newReorgDetector(cfg, l1Client) - go rd.Start(ctx) - return rd + + errChan := make(chan error) + go func() { + if err := rd.Start(ctx); err != nil { + errChan <- err + } + close(errChan) + 
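		// Closing errChan here also covers the clean-exit case: if Start
		// returns nil, nothing is sent, and the close makes the watcher
		// goroutine in start() receive a nil error from <-errChanL1, so it
		// returns without calling log.Fatal.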
}() + + return rd, errChan } -func runReorgDetectorL2IfNeeded(ctx context.Context, components []string, l2Client *ethclient.Client, cfg *reorgdetector.Config) *reorgdetector.ReorgDetector { - if !isNeeded([]string{AGGORACLE, RPC}, components) { - return nil +func runReorgDetectorL2IfNeeded( + ctx context.Context, + components []string, + l2Client *ethclient.Client, + cfg *reorgdetector.Config, +) (*reorgdetector.ReorgDetector, chan error) { + if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC}, components) { + return nil, nil } rd := newReorgDetector(cfg, l2Client) - go rd.Start(ctx) - return rd + + errChan := make(chan error) + go func() { + if err := rd.Start(ctx); err != nil { + errChan <- err + } + close(errChan) + }() + + return rd, errChan } func runClaimSponsorIfNeeded( @@ -508,17 +589,20 @@ func runClaimSponsorIfNeeded( l2Client *ethclient.Client, cfg claimsponsor.EVMClaimSponsorConfig, ) *claimsponsor.ClaimSponsor { - if !isNeeded([]string{RPC}, components) || !cfg.Enabled { + if !isNeeded([]string{cdkcommon.RPC}, components) || !cfg.Enabled { return nil } + + logger := log.WithFields("module", cdkcommon.CLAIM_SPONSOR) // In the future there may be support for backends other than EVM, and this will require different config. // But today only EVM is supported ethTxManagerL2, err := ethtxmanager.New(cfg.EthTxManager) if err != nil { - log.Fatal(err) + logger.Fatal(err) } go ethTxManagerL2.Start() cs, err := claimsponsor.NewEVMClaimSponsor( + logger, cfg.DBPath, l2Client, cfg.BridgeAddrL2, @@ -532,9 +616,10 @@ func runClaimSponsorIfNeeded( cfg.WaitTxToBeMinedPeriod.Duration, ) if err != nil { - log.Fatalf("error creating claim sponsor: %s", err) + logger.Fatalf("error creating claim sponsor: %s", err) } go cs.Start(ctx) + return cs } @@ -546,7 +631,7 @@ func runL1Bridge2InfoIndexSyncIfNeeded( l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, l1Client *ethclient.Client, ) *l1bridge2infoindexsync.L1Bridge2InfoIndexSync { - if !isNeeded([]string{RPC}, components) { + if !isNeeded([]string{cdkcommon.RPC}, components) { return nil } l1Bridge2InfoIndexSync, err := l1bridge2infoindexsync.New( @@ -562,6 +647,7 @@ func runL1Bridge2InfoIndexSyncIfNeeded( log.Fatalf("error creating l1Bridge2InfoIndexSync: %s", err) } go l1Bridge2InfoIndexSync.Start(ctx) + return l1Bridge2InfoIndexSync } @@ -573,7 +659,7 @@ func runLastGERSyncIfNeeded( l2Client *ethclient.Client, l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, ) *lastgersync.LastGERSync { - if !isNeeded([]string{RPC}, components) { + if !isNeeded([]string{cdkcommon.RPC}, components) { return nil } lastGERSync, err := lastgersync.New( @@ -593,6 +679,7 @@ func runLastGERSyncIfNeeded( log.Fatalf("error creating lastGERSync: %s", err) } go lastGERSync.Start(ctx) + return lastGERSync } @@ -603,7 +690,7 @@ func runBridgeSyncL1IfNeeded( reorgDetectorL1 *reorgdetector.ReorgDetector, l1Client *ethclient.Client, ) *bridgesync.BridgeSync { - if !isNeeded([]string{RPC}, components) { + if !isNeeded([]string{cdkcommon.RPC}, components) { return nil } bridgeSyncL1, err := bridgesync.NewL1( @@ -623,6 +710,7 @@ func runBridgeSyncL1IfNeeded( log.Fatalf("error creating bridgeSyncL1: %s", err) } go bridgeSyncL1.Start(ctx) + return bridgeSyncL1 } @@ -634,7 +722,7 @@ func runBridgeSyncL2IfNeeded( l2Client *ethclient.Client, ) *bridgesync.BridgeSync { // TODO: will be needed by AGGSENDER - if !isNeeded([]string{RPC}, components) { + if !isNeeded([]string{cdkcommon.RPC}, components) { return nil } bridgeSyncL2, err := bridgesync.NewL2( @@ -654,6 +742,7 @@ func
runBridgeSyncL2IfNeeded( log.Fatalf("error creating bridgeSyncL2: %s", err) } go bridgeSyncL2.Start(ctx) + return bridgeSyncL2 } @@ -667,10 +756,12 @@ func createRPC( bridgeL1 *bridgesync.BridgeSync, bridgeL2 *bridgesync.BridgeSync, ) *jRPC.Server { - return jRPC.NewServer(cfg, []jRPC.Service{ + logger := log.WithFields("module", cdkcommon.RPC) + services := []jRPC.Service{ { Name: rpc.BRIDGE, Service: rpc.NewBridgeEndpoints( + logger, cfg.WriteTimeout.Duration, cfg.ReadTimeout.Duration, cdkNetworkID, @@ -682,5 +773,7 @@ func createRPC( bridgeL2, ), }, - }) + } + + return jRPC.NewServer(cfg, services, jRPC.WithLogger(logger.GetSugaredLogger())) } diff --git a/cmd/version.go b/cmd/version.go index 51766c02..acba8f9f 100644 --- a/cmd/version.go +++ b/cmd/version.go @@ -9,5 +9,6 @@ import ( func versionCmd(*cli.Context) error { zkevm.PrintVersion(os.Stdout) + return nil } diff --git a/common/common.go b/common/common.go index 259b2a8d..cd5b5d70 100644 --- a/common/common.go +++ b/common/common.go @@ -11,7 +11,9 @@ import ( // Uint64ToBytes converts a uint64 to a byte slice func Uint64ToBytes(num uint64) []byte { - bytes := make([]byte, 8) + const uint64ByteSize = 8 + + bytes := make([]byte, uint64ByteSize) binary.BigEndian.PutUint64(bytes, num) return bytes @@ -22,10 +24,13 @@ func BytesToUint64(bytes []byte) uint64 { return binary.BigEndian.Uint64(bytes) } -// Uint32To2Bytes converts a uint32 to a byte slice +// Uint32ToBytes converts a uint32 to a byte slice in big-endian order func Uint32ToBytes(num uint32) []byte { - key := make([]byte, 4) + const uint32ByteSize = 4 + + key := make([]byte, uint32ByteSize) binary.BigEndian.PutUint32(key, num) + return key } @@ -34,7 +39,9 @@ func BytesToUint32(bytes []byte) uint32 { return binary.BigEndian.Uint32(bytes) } +// CalculateAccInputHash computes the hash of accumulated input data for a given batch. func CalculateAccInputHash( + logger *log.Logger, oldAccInputHash common.Hash, batchData []byte, l1InfoRoot common.Hash, @@ -53,27 +60,31 @@ func CalculateAccInputHash( for len(v1) < 32 { v1 = append([]byte{0}, v1...) } + for len(v3) < 32 { v3 = append([]byte{0}, v3...) } + for len(v4) < 8 { v4 = append([]byte{0}, v4...) } + for len(v5) < 20 { v5 = append([]byte{0}, v5...) } + for len(v6) < 32 { v6 = append([]byte{0}, v6...) 
} v2 = keccak256.Hash(v2) - log.Debugf("OldAccInputHash: %v", oldAccInputHash) - log.Debugf("BatchHashData: %v", common.Bytes2Hex(v2)) - log.Debugf("L1InfoRoot: %v", l1InfoRoot) - log.Debugf("TimeStampLimit: %v", timestampLimit) - log.Debugf("Sequencer Address: %v", sequencerAddr) - log.Debugf("Forced BlockHashL1: %v", forcedBlockhashL1) + logger.Debugf("OldAccInputHash: %v", oldAccInputHash) + logger.Debugf("BatchHashData: %v", common.Bytes2Hex(v2)) + logger.Debugf("L1InfoRoot: %v", l1InfoRoot) + logger.Debugf("TimeStampLimit: %v", timestampLimit) + logger.Debugf("Sequencer Address: %v", sequencerAddr) + logger.Debugf("Forced BlockHashL1: %v", forcedBlockhashL1) return common.BytesToHash(keccak256.Hash(v1, v2, v3, v4, v5, v6)) } diff --git a/common/components.go b/common/components.go new file mode 100644 index 00000000..0c2df8d7 --- /dev/null +++ b/common/components.go @@ -0,0 +1,16 @@ +package common + +const ( + // SEQUENCE_SENDER name to identify the sequence-sender component + SEQUENCE_SENDER = "sequence-sender" //nolint:stylecheck + // AGGREGATOR name to identify the aggregator component + AGGREGATOR = "aggregator" + // AGGORACLE name to identify the aggoracle component + AGGORACLE = "aggoracle" + // RPC name to identify the rpc component + RPC = "rpc" + // CLAIM_SPONSOR name to identify the claim sponsor component + CLAIM_SPONSOR = "claim-sponsor" //nolint:stylecheck + // PROVER name to identify the prover component + PROVER = "prover" +) diff --git a/common/config.go b/common/config.go index 62670c6f..fab4d0fd 100644 --- a/common/config.go +++ b/common/config.go @@ -2,6 +2,7 @@ package common import "github.com/0xPolygon/cdk/translator" +// Config holds the configuration for the CDK. type Config struct { // IsValidiumMode has the value true if the sequence sender is running in validium mode. IsValidiumMode bool `mapstructure:"IsValidiumMode"` diff --git a/config/config.go b/config/config.go index 76abbf20..cb899df8 100644 --- a/config/config.go +++ b/config/config.go @@ -2,6 +2,7 @@ package config import ( "bytes" + "errors" "fmt" "path/filepath" "strings" @@ -40,7 +41,8 @@ const ( FlagComponents = "components" // FlagHTTPAPI is the flag for http.api. FlagHTTPAPI = "http.api" - // FlagKeyStorePath is the path of the key store file containing the private key of the account going to sing and approve the tokens + // FlagKeyStorePath is the path of the key store file containing the private key + // of the account going to sign and approve the tokens.
FlagKeyStorePath = "key-store-path" // FlagPassword is the password needed to decrypt the key store FlagPassword = "password" @@ -118,10 +120,12 @@ func Default() (*Config, error) { if err != nil { return nil, err } + err = viper.Unmarshal(&cfg, viper.DecodeHook(mapstructure.TextUnmarshallerHookFunc())) if err != nil { return nil, err } + return &cfg, nil } @@ -131,6 +135,7 @@ func Load(ctx *cli.Context) (*Config, error) { if err != nil { return nil, err } + configFilePath := ctx.String(FlagCfg) if configFilePath != "" { dirName, fileName := filepath.Split(configFilePath) @@ -142,24 +147,32 @@ func Load(ctx *cli.Context) (*Config, error) { viper.SetConfigName(fileNameWithoutExtension) viper.SetConfigType(fileExtension) } + viper.AutomaticEnv() replacer := strings.NewReplacer(".", "_") viper.SetEnvKeyReplacer(replacer) viper.SetEnvPrefix("CDK") + err = viper.ReadInConfig() if err != nil { - _, ok := err.(viper.ConfigFileNotFoundError) - if ok { - log.Infof("config file not found") + var configNotFoundError viper.ConfigFileNotFoundError + if errors.As(err, &configNotFoundError) { + log.Error("config file not found") } else { - log.Infof("error reading config file: ", err) + log.Errorf("error reading config file: %v", err) + return nil, err } } decodeHooks := []viper.DecoderConfigOption{ // this allows arrays to be decoded from env var separated by ",", example: MY_VAR="value1,value2,value3" - viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(mapstructure.TextUnmarshallerHookFunc(), mapstructure.StringToSliceHookFunc(","))), + viper.DecodeHook( + mapstructure.ComposeDecodeHookFunc( + mapstructure.TextUnmarshallerHookFunc(), + mapstructure.StringToSliceHookFunc(","), + ), + ), } err = viper.Unmarshal(&cfg, decodeHooks...) diff --git a/config/default.go b/config/default.go index b6956a3d..d9ff2158 100644 --- a/config/default.go +++ b/config/default.go @@ -61,7 +61,6 @@ SenderAddress = "" CleanupLockedProofsInterval = "2m" GeneratingProofCleanupThreshold = "10m" BatchProofSanityCheckEnabled = true -FinalProofSanityCheckEnabled = true ForkId = 9 GasOffset = 0 WitnessURL = "localhost:8123" diff --git a/config/network.go b/config/network.go index 96359233..fc3f75ce 100644 --- a/config/network.go +++ b/config/network.go @@ -68,6 +68,7 @@ type genesisAccountFromJSON struct { func (cfg *Config) loadNetworkConfig(ctx *cli.Context) { cfgPath := ctx.String(FlagCustomNetwork) + networkJSON, err := LoadGenesisFileAsString(cfgPath) if err != nil { panic(err.Error()) } config, err := LoadGenesisFromJSONString(networkJSON) if err != nil { - panic(fmt.Errorf("failed to load genesis configuration from file. Error: %v", err)) + panic(fmt.Errorf("failed to load genesis configuration from file. Error: %w", err)) } cfg.NetworkConfig = config } @@ -83,10 +84,11 @@ func (cfg *Config) loadNetworkConfig(ctx *cli.Context) { // LoadGenesisFileAsString loads the genesis file as a string func LoadGenesisFileAsString(cfgPath string) (string, error) { if cfgPath != "" { - f, err := os.Open(cfgPath) //nolint:gosec + f, err := os.Open(cfgPath) if err != nil { return "", err } + defer func() { err := f.Close() if err != nil { @@ -98,6 +100,7 @@ func LoadGenesisFileAsString(cfgPath string) (string, error) { if err != nil { return "", err } + return string(b), nil } else { return "", errors.New("custom network file not provided. 
Please use the custom-network-file flag") @@ -133,6 +136,7 @@ func LoadGenesisFromJSONString(jsonStr string) (NetworkConfig, error) { } cfg.Genesis.Actions = append(cfg.Genesis.Actions, action) } + if account.Nonce != "" && account.Nonce != "0" { action := &state.GenesisAction{ Address: account.Address, @@ -141,6 +145,7 @@ func LoadGenesisFromJSONString(jsonStr string) (NetworkConfig, error) { } cfg.Genesis.Actions = append(cfg.Genesis.Actions, action) } + if account.Bytecode != "" { action := &state.GenesisAction{ Address: account.Address, @@ -149,6 +154,7 @@ func LoadGenesisFromJSONString(jsonStr string) (NetworkConfig, error) { } cfg.Genesis.Actions = append(cfg.Genesis.Actions, action) } + if len(account.Storage) > 0 { for storageKey, storageValue := range account.Storage { action := &state.GenesisAction{ diff --git a/config/types/duration.go b/config/types/duration.go index 7612291f..d6855d10 100644 --- a/config/types/duration.go +++ b/config/types/duration.go @@ -18,6 +18,7 @@ func (d *Duration) UnmarshalText(data []byte) error { return err } d.Duration = duration + return nil } diff --git a/config/types/duration_test.go b/config/types/duration_test.go index 71e06a04..c11bd083 100644 --- a/config/types/duration_test.go +++ b/config/types/duration_test.go @@ -43,7 +43,7 @@ func TestDurationUnmarshal(t *testing.T) { err = json.Unmarshal(input, &d) if testCase.expectedResult != nil { - require.Equal(t, (*testCase.expectedResult).Nanoseconds(), d.Nanoseconds()) + require.Equal(t, testCase.expectedResult.Nanoseconds(), d.Nanoseconds()) } if err != nil { diff --git a/dataavailability/dataavailability.go b/dataavailability/dataavailability.go index 2e8ec124..fc4a482e 100644 --- a/dataavailability/dataavailability.go +++ b/dataavailability/dataavailability.go @@ -20,10 +20,14 @@ func New(backend DABackender) (*DataAvailability, error) { return da, da.backend.Init() } -func (d *DataAvailability) PostSequenceBanana(ctx context.Context, sequenceBanana etherman.SequenceBanana) ([]byte, error) { +// PostSequenceBanana sends sequence data to the backend and returns a response. +func (d *DataAvailability) PostSequenceBanana( + ctx context.Context, sequenceBanana etherman.SequenceBanana, +) ([]byte, error) { return d.backend.PostSequenceBanana(ctx, sequenceBanana) } +// PostSequenceElderberry sends batch data to the backend and returns a response. 
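The DataAvailability type above is a deliberately thin facade: New wires in a DABackender and immediately runs its Init, and the Post* methods delegate straight to the backend. A minimal usage sketch, assuming backend is any DABackender implementation (postBatches and its arguments are illustrative, not part of this PR):

package example // illustrative sketch, not part of this PR

import (
	"context"

	"github.com/0xPolygon/cdk/dataavailability"
)

// postBatches shows the intended call flow: dataavailability.New also runs
// backend.Init(), and the returned bytes are the dataAvailabilityMessage
// that the contract expects.
func postBatches(ctx context.Context, backend dataavailability.DABackender, batchesData [][]byte) ([]byte, error) {
	da, err := dataavailability.New(backend)
	if err != nil {
		return nil, err
	}
	return da.PostSequenceElderberry(ctx, batchesData)
}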
func (d *DataAvailability) PostSequenceElderberry(ctx context.Context, batchesData [][]byte) ([]byte, error) { return d.backend.PostSequenceElderberry(ctx, batchesData) } diff --git a/dataavailability/datacommittee/datacommittee.go b/dataavailability/datacommittee/datacommittee.go index 22abd589..01b96a13 100644 --- a/dataavailability/datacommittee/datacommittee.go +++ b/dataavailability/datacommittee/datacommittee.go @@ -2,10 +2,10 @@ package datacommittee import ( "crypto/ecdsa" + "crypto/rand" "errors" "fmt" "math/big" - "math/rand" "sort" "strings" @@ -42,6 +42,7 @@ type DataCommittee struct { // Backend implements the DAC integration type Backend struct { + logger *log.Logger dataCommitteeContract *polygondatacommittee.Polygondatacommittee privKey *ecdsa.PrivateKey dataCommitteeClientFactory client.Factory @@ -54,6 +55,7 @@ type Backend struct { // New creates an instance of Backend func New( + logger *log.Logger, l1RPCURL string, dataCommitteeAddr common.Address, privKey *ecdsa.PrivateKey, @@ -62,7 +64,7 @@ func New( ) (*Backend, error) { ethClient, err := ethclient.Dial(l1RPCURL) if err != nil { - log.Errorf("error connecting to %s: %+v", l1RPCURL, err) + logger.Errorf("error connecting to %s: %+v", l1RPCURL, err) return nil, err } @@ -72,6 +74,7 @@ func New( } return &Backend{ + logger: logger, dataCommitteeContract: dataCommittee, privKey: privKey, dataCommitteeClientFactory: dataCommitteeClientFactory, @@ -90,17 +93,22 @@ func (d *Backend) Init() error { if committee != nil { d.committeeMembers = committee.Members if len(committee.Members) > 0 { - selectedCommitteeMember = rand.Intn(len(committee.Members)) //nolint:gosec + nBig, err := rand.Int(rand.Reader, big.NewInt(int64(len(committee.Members)))) + if err != nil { + return err + } + selectedCommitteeMember = int(nBig.Int64()) } } d.selectedCommitteeMember = selectedCommitteeMember + return nil } // GetSequence gets backend data one hash at a time. This should be optimized on the DAC side to get them all at once. 
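Note the switch from math/rand to crypto/rand in Init above: crypto/rand.Int draws a uniformly distributed *big.Int in [0, max) from the system CSPRNG, with no modulo bias. A standalone sketch of the same selection pattern (pickIndex is a hypothetical helper, not part of this PR):

package example

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// pickIndex returns a uniformly random index in [0, n), the pattern Init
// uses to choose the initial committee member to query.
func pickIndex(n int) (int, error) {
	if n <= 0 {
		return 0, fmt.Errorf("pickIndex: n must be positive, got %d", n)
	}
	nBig, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		return 0, err
	}
	return int(nBig.Int64()), nil
}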
func (d *Backend) GetSequence(_ context.Context, hashes []common.Hash, _ []byte) ([][]byte, error) { // TODO: optimize this on the DAC side by implementing a multi batch retrieve api) - var batchData [][]byte + batchData := make([][]byte, 0, len(hashes)) for _, h := range hashes { data, err := d.GetBatchL2Data(h) if err != nil { @@ -108,6 +116,7 @@ func (d *Backend) GetSequence(_ context.Context, hashes []common.Hash, _ []byte) } batchData = append(batchData, data) } + return batchData, nil } @@ -117,11 +126,11 @@ func (d *Backend) GetBatchL2Data(hash common.Hash) ([]byte, error) { found := false for !found && intialMember != -1 { member := d.committeeMembers[d.selectedCommitteeMember] - log.Infof("trying to get data from %s at %s", member.Addr.Hex(), member.URL) + d.logger.Infof("trying to get data from %s at %s", member.Addr.Hex(), member.URL) c := d.dataCommitteeClientFactory.New(member.URL) data, err := c.GetOffChainData(d.ctx, hash) if err != nil { - log.Warnf( + d.logger.Warnf( "error getting data from DAC node %s at %s: %s", member.Addr.Hex(), member.URL, err, ) @@ -129,6 +138,7 @@ func (d *Backend) GetBatchL2Data(hash common.Hash) ([]byte, error) { if d.selectedCommitteeMember == intialMember { break } + continue } actualTransactionsHash := crypto.Keccak256Hash(data) @@ -136,7 +146,7 @@ func (d *Backend) GetBatchL2Data(hash common.Hash) ([]byte, error) { unexpectedHash := fmt.Errorf( unexpectedHashTemplate, hash, actualTransactionsHash, ) - log.Warnf( + d.logger.Warnf( "error getting data from DAC node %s at %s: %s", member.Addr.Hex(), member.URL, unexpectedHash, ) @@ -144,13 +154,16 @@ func (d *Backend) GetBatchL2Data(hash common.Hash) ([]byte, error) { if d.selectedCommitteeMember == intialMember { break } + continue } + return data, nil } if err := d.Init(); err != nil { - return nil, fmt.Errorf("error loading data committee: %s", err) + return nil, fmt.Errorf("error loading data committee: %w", err) } + return nil, fmt.Errorf("couldn't get the data from any committee member") } @@ -160,6 +173,7 @@ type signatureMsg struct { err error } +// PostSequenceElderberry submits batches and collects signatures from committee members. func (d *Backend) PostSequenceElderberry(ctx context.Context, batchesData [][]byte) ([]byte, error) { // Get current committee committee, err := d.getCurrentDataCommittee() @@ -185,12 +199,13 @@ func (d *Backend) PostSequenceElderberry(ctx context.Context, batchesData [][]by Sequence: sequence, Signature: signedSequence, } - go requestSignatureFromMember(signatureCtx, &signedSequenceElderberry, + go d.requestSignatureFromMember(signatureCtx, &signedSequenceElderberry, func(c client.Client) ([]byte, error) { return c.SignSequence(ctx, signedSequenceElderberry) }, member, ch) } - return collectSignatures(committee, ch, cancelSignatureCollection) + return d.collectSignatures(committee, ch, cancelSignatureCollection) } +// PostSequenceBanana submits a sequence to the data committee and collects the signed response from them. 
func (d *Backend) PostSequenceBanana(ctx context.Context, sequence etherman.SequenceBanana) ([]byte, error) { // Get current committee committee, err := d.getCurrentDataCommittee() @@ -236,16 +251,17 @@ func (d *Backend) PostSequenceBanana(ctx context.Context, sequence etherman.Sequ Sequence: sequenceBanana, Signature: signature, } - go requestSignatureFromMember(signatureCtx, + go d.requestSignatureFromMember(signatureCtx, &signedSequenceBanana, func(c client.Client) ([]byte, error) { return c.SignSequenceBanana(ctx, signedSequenceBanana) }, member, ch) } - return collectSignatures(committee, ch, cancelSignatureCollection) + return d.collectSignatures(committee, ch, cancelSignatureCollection) } -func collectSignatures(committee *DataCommittee, ch chan signatureMsg, cancelSignatureCollection context.CancelFunc) ([]byte, error) { +func (d *Backend) collectSignatures( + committee *DataCommittee, ch chan signatureMsg, cancelSignatureCollection context.CancelFunc) ([]byte, error) { // Collect signatures // Stop requesting as soon as we have N valid signatures var ( @@ -256,14 +272,15 @@ func collectSignatures(committee *DataCommittee, ch chan signatureMsg, cancelSig for collectedSignatures < committee.RequiredSignatures { msg := <-ch if msg.err != nil { - log.Errorf("error when trying to get signature from %s: %s", msg.addr, msg.err) + d.logger.Errorf("error when trying to get signature from %s: %s", msg.addr, msg.err) failedToCollect++ if len(committee.Members)-int(failedToCollect) < int(committee.RequiredSignatures) { cancelSignatureCollection() + return nil, errors.New("too many members failed to send their signature") } } else { - log.Infof("received signature from %s", msg.addr) + d.logger.Infof("received signature from %s", msg.addr) collectedSignatures++ } msgs = append(msgs, msg) @@ -271,7 +288,7 @@ func collectSignatures(committee *DataCommittee, ch chan signatureMsg, cancelSig cancelSignatureCollection() - return buildSignaturesAndAddrs(msgs, committee.Members), nil + return d.buildSignaturesAndAddrs(msgs, committee.Members), nil } type funcSignType func(c client.Client) ([]byte, error) @@ -279,7 +296,7 @@ type funcSignType func(c client.Client) ([]byte, error) // funcSetSignatureType: it is not possible to define a SetSignature function because // the types daTypes.SequenceBanana and daTypes.Sequence belong to different packages. // So a future refactor could define a common interface for both -func requestSignatureFromMember(ctx context.Context, signedSequence daTypes.SignedSequenceInterface, +func (d *Backend) requestSignatureFromMember(ctx context.Context, signedSequence daTypes.SignedSequenceInterface, funcSign funcSignType, member DataCommitteeMember, ch chan signatureMsg) { select { @@ -290,8 +307,8 @@ func requestSignatureFromMember(ctx context.Context, signedSequence daTypes.Sign // request c := client.New(member.URL) - log.Infof("sending request to sign the sequence to %s at %s", member.Addr.Hex(), member.URL) - //funcSign must call something like that c.SignSequenceBanana(ctx, signedSequence) + d.logger.Infof("sending request to sign the sequence to %s at %s", member.Addr.Hex(), member.URL) + // funcSign must call something like c.SignSequenceBanana(ctx, signedSequence) signature, err := funcSign(c) if err != nil { @@ -299,6 +316,7 @@ func requestSignatureFromMember(ctx context.Context, signedSequence daTypes.Sign addr: member.Addr, err: err, } + return } // verify returned signature @@ -309,6 +327,7 @@ func requestSignatureFromMember(ctx context.Context, signedSequence
daTypes.Sign addr: member.Addr, err: err, } + return } if signer != member.Addr { @@ -316,6 +335,7 @@ func requestSignatureFromMember(ctx context.Context, signedSequence daTypes.Sign addr: member.Addr, err: fmt.Errorf("invalid signer. Expected %s, actual %s", member.Addr.Hex(), signer.Hex()), } + return } ch <- signatureMsg{ @@ -324,21 +344,21 @@ func requestSignatureFromMember(ctx context.Context, signedSequence daTypes.Sign } } -func buildSignaturesAndAddrs(sigs signatureMsgs, members []DataCommitteeMember) []byte { +func (d *Backend) buildSignaturesAndAddrs(sigs signatureMsgs, members []DataCommitteeMember) []byte { const ( sigLen = 65 ) res := make([]byte, 0, len(sigs)*sigLen+len(members)*common.AddressLength) sort.Sort(sigs) for _, msg := range sigs { - log.Debugf("adding signature %s from %s", common.Bytes2Hex(msg.signature), msg.addr.Hex()) + d.logger.Debugf("adding signature %s from %s", common.Bytes2Hex(msg.signature), msg.addr.Hex()) res = append(res, msg.signature...) } for _, member := range members { - log.Debugf("adding addr %s", common.Bytes2Hex(member.Addr.Bytes())) + d.logger.Debugf("adding addr %s", common.Bytes2Hex(member.Addr.Bytes())) res = append(res, member.Addr.Bytes()...) } - log.Debugf("full res %s", common.Bytes2Hex(res)) + d.logger.Debugf("full res %s", common.Bytes2Hex(res)) return res } @@ -394,5 +414,6 @@ func (d *Backend) getCurrentDataCommitteeMembers() ([]DataCommitteeMember, error URL: member.Url, }) } + return members, nil } diff --git a/dataavailability/datacommittee/datacommittee_test.go b/dataavailability/datacommittee/datacommittee_test.go index 17376a13..fcacef3c 100644 --- a/dataavailability/datacommittee/datacommittee_test.go +++ b/dataavailability/datacommittee/datacommittee_test.go @@ -2,7 +2,6 @@ package datacommittee import ( "errors" - "fmt" "math/big" "testing" @@ -84,6 +83,7 @@ func newTestingEnv(t *testing.T) ( if err != nil { log.Fatal(err) } + return dac, ethBackend, auth, da } @@ -101,14 +101,14 @@ func newSimulatedDacman(t *testing.T, auth *bind.TransactOpts) ( return &Backend{}, nil, nil, nil } // 10000000 ETH in wei - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd + balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) address := auth.From genesisAlloc := map[common.Address]types.Account{ address: { Balance: balance, }, } - blockGasLimit := uint64(999999999999999999) //nolint:gomnd + blockGasLimit := uint64(999999999999999999) client := simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) // DAC Setup @@ -137,6 +137,7 @@ func newSimulatedDacman(t *testing.T, auth *bind.TransactOpts) ( c := &Backend{ dataCommitteeContract: da, } + return c, client, da, nil } @@ -162,7 +163,8 @@ func deployDACProxy(auth *bind.TransactOpts, client bind.ContractBackend, dacImp if err != nil { return common.Address{}, err } - fmt.Println("DAC proxy deployed at", proxyAddr) + log.Debugf("DAC proxy deployed at %s", proxyAddr) + return proxyAddr, nil } @@ -176,5 +178,6 @@ func deployProxy(auth *bind.TransactOpts, implementationAddr, initializeParams, ) + return addr, err } diff --git a/dataavailability/interfaces.go b/dataavailability/interfaces.go index b3630871..f79b3c76 100644 --- a/dataavailability/interfaces.go +++ b/dataavailability/interfaces.go @@ -21,12 +21,14 @@ type SequenceSender interface { SequenceSenderBanana } +// SequenceSenderElderberry defines methods for sending sequence data to the data availability backend.
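Two invariants of the committee code above are worth spelling out: collectSignatures aborts once len(Members)-failedToCollect drops below RequiredSignatures, so a 7-member committee requiring 5 signatures tolerates at most 2 failures; and buildSignaturesAndAddrs emits a flat layout of the sorted signatures (65 bytes each) followed by one 20-byte address per member, signer or not. A sketch of the resulting message size (expectedDAMessageLen is illustrative only, not part of this PR):

package example

// expectedDAMessageLen mirrors the layout built by buildSignaturesAndAddrs:
// requiredSignatures ECDSA signatures of 65 bytes (r || s || v) followed by
// a 20-byte address (common.AddressLength) for every committee member.
// For 5 required signatures and 7 members: 5*65 + 7*20 = 465 bytes.
func expectedDAMessageLen(requiredSignatures, numMembers int) int {
	const (
		sigLen  = 65
		addrLen = 20
	)
	return requiredSignatures*sigLen + numMembers*addrLen
}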
type SequenceSenderElderberry interface { // PostSequence sends the sequence data to the data availability backend, and returns the dataAvailabilityMessage // as expected by the contract PostSequenceElderberry(ctx context.Context, batchesData [][]byte) ([]byte, error) } +// SequenceSenderBanana defines methods for sending sequence data to the data availability backend. type SequenceSenderBanana interface { // PostSequence sends the sequence data to the data availability backend, and returns the dataAvailabilityMessage // as expected by the contract diff --git a/db/interface.go b/db/interface.go new file mode 100644 index 00000000..03f81aba --- /dev/null +++ b/db/interface.go @@ -0,0 +1,17 @@ +package db + +import ( + "context" + "database/sql" +) + +type Querier interface { + Exec(query string, args ...interface{}) (sql.Result, error) + Query(query string, args ...interface{}) (*sql.Rows, error) + QueryRow(query string, args ...interface{}) *sql.Row +} + +type DBer interface { + Querier + BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) +} diff --git a/db/meddler.go b/db/meddler.go new file mode 100644 index 00000000..90071916 --- /dev/null +++ b/db/meddler.go @@ -0,0 +1,178 @@ +package db + +import ( + "errors" + "fmt" + "math/big" + "reflect" + "strings" + + tree "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + sqlite "github.com/mattn/go-sqlite3" + "github.com/russross/meddler" +) + +// initMeddler registers tags to be used to read/write from SQL DBs using meddler +func initMeddler() { + meddler.Default = meddler.SQLite + meddler.Register("bigint", BigIntMeddler{}) + meddler.Register("merkleproof", MerkleProofMeddler{}) + meddler.Register("hash", HashMeddler{}) +} + +func SQLiteErr(err error) (*sqlite.Error, bool) { + sqliteErr := &sqlite.Error{} + if ok := errors.As(err, sqliteErr); ok { + return sqliteErr, true + } + if driverErr, ok := meddler.DriverErr(err); ok { + return sqliteErr, errors.As(driverErr, sqliteErr) + } + return sqliteErr, false +} + +// SliceToSlicePtrs converts any []Foo to []*Foo +func SliceToSlicePtrs(slice interface{}) interface{} { + v := reflect.ValueOf(slice) + vLen := v.Len() + typ := v.Type().Elem() + res := reflect.MakeSlice(reflect.SliceOf(reflect.PtrTo(typ)), vLen, vLen) + for i := 0; i < vLen; i++ { + res.Index(i).Set(v.Index(i).Addr()) + } + return res.Interface() +} + +// SlicePtrsToSlice converts any []*Foo to []Foo +func SlicePtrsToSlice(slice interface{}) interface{} { + v := reflect.ValueOf(slice) + vLen := v.Len() + typ := v.Type().Elem().Elem() + res := reflect.MakeSlice(reflect.SliceOf(typ), vLen, vLen) + for i := 0; i < vLen; i++ { + res.Index(i).Set(v.Index(i).Elem()) + } + return res.Interface() +} + +// BigIntMeddler encodes or decodes a big.Int field to or from its decimal string representation +type BigIntMeddler struct{} + +// PreRead is called before a Scan operation for fields that have the BigIntMeddler +func (b BigIntMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { + // give a pointer to a string to grab the raw data + return new(string), nil +} + +// PostRead is called after a Scan operation for fields that have the BigIntMeddler +func (b BigIntMeddler) PostRead(fieldPtr, scanTarget interface{}) error { + ptr, ok := scanTarget.(*string) + if !ok { + return errors.New("scanTarget is not *string") + } + if ptr == nil { + return fmt.Errorf("BigIntMeddler.PostRead: nil pointer") + } + field, ok := fieldPtr.(**big.Int) + if !ok { + return errors.New("fieldPtr is not **big.Int") + } +
decimal := 10 + *field, ok = new(big.Int).SetString(*ptr, decimal) + if !ok { + return fmt.Errorf("big.Int.SetString failed on \"%v\"", *ptr) + } + return nil +} + +// PreWrite is called before an Insert or Update operation for fields that have the BigIntMeddler +func (b BigIntMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { + field, ok := fieldPtr.(*big.Int) + if !ok { + return nil, errors.New("fieldPtr is not *big.Int") + } + + return field.String(), nil +} + +// MerkleProofMeddler encodes or decodes a tree.Proof field to or from a comma-separated list of hex-encoded hashes +type MerkleProofMeddler struct{} + +// PreRead is called before a Scan operation for fields that have the MerkleProofMeddler +func (b MerkleProofMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { + // give a pointer to a string to grab the raw data + return new(string), nil +} + +// PostRead is called after a Scan operation for fields that have the MerkleProofMeddler +func (b MerkleProofMeddler) PostRead(fieldPtr, scanTarget interface{}) error { + ptr, ok := scanTarget.(*string) + if !ok { + return errors.New("scanTarget is not *string") + } + if ptr == nil { + return errors.New("MerkleProofMeddler.PostRead: nil pointer") + } + field, ok := fieldPtr.(*tree.Proof) + if !ok { + return errors.New("fieldPtr is not *tree.Proof") + } + strHashes := strings.Split(*ptr, ",") + if len(strHashes) != int(tree.DefaultHeight) { + return fmt.Errorf("unexpected len of hashes: expected %d actual %d", tree.DefaultHeight, len(strHashes)) + } + for i, strHash := range strHashes { + field[i] = common.HexToHash(strHash) + } + return nil +} + +// PreWrite is called before an Insert or Update operation for fields that have the MerkleProofMeddler +func (b MerkleProofMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { + field, ok := fieldPtr.(tree.Proof) + if !ok { + return nil, errors.New("fieldPtr is not tree.Proof") + } + var s string + for _, f := range field { + s += f.Hex() + "," + } + s = strings.TrimSuffix(s, ",") + return s, nil +} + +// HashMeddler encodes or decodes a common.Hash field to or from its hex string representation +type HashMeddler struct{} + +// PreRead is called before a Scan operation for fields that have the HashMeddler +func (b HashMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { + // give a pointer to a string to grab the raw data + return new(string), nil +} + +// PostRead is called after a Scan operation for fields that have the HashMeddler +func (b HashMeddler) PostRead(fieldPtr, scanTarget interface{}) error { + ptr, ok := scanTarget.(*string) + if !ok { + return errors.New("scanTarget is not *string") + } + if ptr == nil { + return fmt.Errorf("HashMeddler.PostRead: nil pointer") + } + field, ok := fieldPtr.(*common.Hash) + if !ok { + return errors.New("fieldPtr is not *common.Hash") + } + *field = common.HexToHash(*ptr) + return nil +} + +// PreWrite is called before an Insert or Update operation for fields that have the HashMeddler +func (b HashMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { + field, ok := fieldPtr.(common.Hash) + if !ok { + return nil, errors.New("fieldPtr is not common.Hash") + } + return field.Hex(), nil +} diff --git a/db/migrations.go b/db/migrations.go new file mode 100644 index 00000000..1a56874e --- /dev/null +++ b/db/migrations.go @@ -0,0 +1,48 @@ +package db + +import ( + "fmt" + "strings" + + "github.com/0xPolygon/cdk/db/types" + "github.com/0xPolygon/cdk/log" + _ "github.com/mattn/go-sqlite3" + migrate
"github.com/rubenv/sql-migrate" +) + +const ( + upDownSeparator = "-- +migrate Up" + dbPrefixReplacer = "/*dbprefix*/" +) + +// RunMigrations will execute pending migrations if needed to keep +// the database updated with the latest changes in either direction, +// up or down. +func RunMigrations(dbPath string, migrations []types.Migration) error { + db, err := NewSQLiteDB(dbPath) + if err != nil { + return fmt.Errorf("error creating DB %w", err) + } + migs := &migrate.MemoryMigrationSource{Migrations: []*migrate.Migration{}} + for _, m := range migrations { + prefixed := strings.ReplaceAll(m.SQL, dbPrefixReplacer, m.Prefix) + splitted := strings.Split(prefixed, upDownSeparator) + migs.Migrations = append(migs.Migrations, &migrate.Migration{ + Id: m.Prefix + m.ID, + Up: []string{splitted[1]}, + Down: []string{splitted[0]}, + }) + } + + log.Debugf("running migrations:") + for _, m := range migs.Migrations { + log.Debugf("%+v", m.Id) + } + nMigrations, err := migrate.Exec(db, "sqlite3", migs, migrate.Up) + if err != nil { + return fmt.Errorf("error executing migration %w", err) + } + + log.Infof("successfully ran %d migrations", nMigrations) + return nil +} diff --git a/db/sqlite.go b/db/sqlite.go new file mode 100644 index 00000000..e30e9e26 --- /dev/null +++ b/db/sqlite.go @@ -0,0 +1,27 @@ +package db + +import ( + "database/sql" + + _ "github.com/mattn/go-sqlite3" +) + +const ( + UniqueConstrain = 1555 +) + +// NewSQLiteDB creates a new SQLite DB +func NewSQLiteDB(dbPath string) (*sql.DB, error) { + initMeddler() + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + return nil, err + } + _, err = db.Exec(` + PRAGMA foreign_keys = ON; + pragma journal_mode = WAL; + pragma synchronous = normal; + pragma journal_size_limit = 6144000; + `) + return db, err +} diff --git a/db/tx.go b/db/tx.go new file mode 100644 index 00000000..926da07c --- /dev/null +++ b/db/tx.go @@ -0,0 +1,60 @@ +package db + +import ( + "context" +) + +type SQLTxer interface { + Querier + Commit() error + Rollback() error +} + +type Txer interface { + SQLTxer + AddRollbackCallback(cb func()) + AddCommitCallback(cb func()) +} + +type Tx struct { + SQLTxer + rollbackCallbacks []func() + commitCallbacks []func() +} + +func NewTx(ctx context.Context, db DBer) (Txer, error) { + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + return &Tx{ + SQLTxer: tx, + }, nil +} + +func (s *Tx) AddRollbackCallback(cb func()) { + s.rollbackCallbacks = append(s.rollbackCallbacks, cb) +} +func (s *Tx) AddCommitCallback(cb func()) { + s.commitCallbacks = append(s.commitCallbacks, cb) +} + +func (s *Tx) Commit() error { + if err := s.SQLTxer.Commit(); err != nil { + return err + } + for _, cb := range s.commitCallbacks { + cb() + } + return nil +} + +func (s *Tx) Rollback() error { + if err := s.SQLTxer.Rollback(); err != nil { + return err + } + for _, cb := range s.rollbackCallbacks { + cb() + } + return nil +} diff --git a/db/types/types.go b/db/types/types.go new file mode 100644 index 00000000..ade19092 --- /dev/null +++ b/db/types/types.go @@ -0,0 +1,7 @@ +package types + +type Migration struct { + ID string + SQL string + Prefix string +} diff --git a/etherman/aggregator.go b/etherman/aggregator.go index 87384a95..4197c7a2 100644 --- a/etherman/aggregator.go +++ b/etherman/aggregator.go @@ -16,7 +16,9 @@ import ( ) // BuildTrustedVerifyBatchesTxData builds a []bytes to be sent to the PoE SC method TrustedVerifyBatches. 
-func (etherMan *Client) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch, newVerifiedBatch uint64, inputs *ethmanTypes.FinalProofInputs, beneficiary common.Address) (to *common.Address, data []byte, err error) { +func (etherMan *Client) BuildTrustedVerifyBatchesTxData( + lastVerifiedBatch, newVerifiedBatch uint64, inputs *ethmanTypes.FinalProofInputs, beneficiary common.Address, +) (to *common.Address, data []byte, err error) { opts, err := etherMan.generateRandomAuth() if err != nil { return nil, nil, fmt.Errorf("failed to build trusted verify batches, err: %w", err) @@ -36,6 +38,7 @@ func (etherMan *Client) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch, newVe proof, err := convertProof(inputs.FinalProof.Proof) if err != nil { log.Errorf("error converting proof. Error: %v, Proof: %s", err, inputs.FinalProof.Proof) + return nil, nil, err } @@ -56,6 +59,7 @@ func (etherMan *Client) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch, newVe if parsedErr, ok := TryParseError(err); ok { err = parsedErr } + return nil, nil, err } @@ -63,16 +67,19 @@ func (etherMan *Client) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch, newVe } // GetBatchAccInputHash gets the batch accumulated input hash from the ethereum -func (etherman *Client) GetBatchAccInputHash(ctx context.Context, batchNumber uint64) (common.Hash, error) { - rollupData, err := etherman.Contracts.Banana.RollupManager.GetRollupSequencedBatches(&bind.CallOpts{Pending: false}, etherman.RollupID, batchNumber) +func (etherMan *Client) GetBatchAccInputHash(ctx context.Context, batchNumber uint64) (common.Hash, error) { + rollupData, err := etherMan.Contracts.Banana.RollupManager.GetRollupSequencedBatches( + &bind.CallOpts{Pending: false}, etherMan.RollupID, batchNumber, + ) if err != nil { return common.Hash{}, err } + return rollupData.AccInputHash, nil } // GetRollupId returns the rollup id -func (etherMan *Client) GetRollupId() uint32 { +func (etherMan *Client) GetRollupId() uint32 { //nolint:stylecheck return etherMan.RollupID } @@ -109,6 +116,7 @@ func convertProof(p string) ([24][32]byte, error) { copy(aux[:], p) proof[i] = aux } + return proof, nil } @@ -117,5 +125,6 @@ func DecodeBytes(val *string) ([]byte, error) { if val == nil { return []byte{}, nil } + return hex.DecodeString(strings.TrimPrefix(*val, "0x")) } diff --git a/etherman/contracts/base.go b/etherman/contracts/base.go index c2aabd02..acc19e76 100644 --- a/etherman/contracts/base.go +++ b/etherman/contracts/base.go @@ -41,15 +41,23 @@ func (e *ContractBase) String() string { return e.Version() + "/" + e.Name() + "@" + e.Address().String() } -func NewContractMagic[C any, T any](constructor contractConstructorFunc[T], address common.Address, backend bind.ContractBackend, name NameType, version VersionType) (*C, error) { +func NewContractMagic[C any, T any]( + constructor contractConstructorFunc[T], + address common.Address, + backend bind.ContractBackend, + name NameType, + version VersionType, +) (*C, error) { contractBind, err := constructor(address, backend) if err != nil { log.Errorf("failed to bind contract %s at address %s. 
Err:%w", name, address.String(), err) + return nil, err } tmp := new(C) values := reflect.ValueOf(tmp).Elem() values.FieldByIndex([]int{0}).Set(reflect.ValueOf(contractBind)) values.FieldByIndex([]int{1}).Set(reflect.ValueOf(NewContractBase(address, backend, name, version))) + return tmp, nil } diff --git a/etherman/contracts/contracts_banana.go b/etherman/contracts/contracts_banana.go index 39e3eb12..d3d28f90 100644 --- a/etherman/contracts/contracts_banana.go +++ b/etherman/contracts/contracts_banana.go @@ -30,17 +30,23 @@ type ContractsBanana struct { } func NewContractsBanana(cfg config.L1Config, backend bind.ContractBackend) (*ContractsBanana, error) { - - ger, err := NewContractMagic[GlobalExitRootBananaType](polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2, cfg.GlobalExitRootManagerAddr, backend, ContractNameGlobalExitRoot, VersionBanana) + ger, err := NewContractMagic[GlobalExitRootBananaType]( + polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2, + cfg.GlobalExitRootManagerAddr, + backend, ContractNameGlobalExitRoot, VersionBanana) if err != nil { return nil, err } - rollup, err := NewContractMagic[RollupBananaType](polygonvalidiumetrog.NewPolygonvalidiumetrog, cfg.ZkEVMAddr, backend, ContractNameRollup, VersionBanana) + rollup, err := NewContractMagic[RollupBananaType]( + polygonvalidiumetrog.NewPolygonvalidiumetrog, cfg.ZkEVMAddr, + backend, ContractNameRollup, VersionBanana) if err != nil { return nil, err } - rollupManager, err := NewContractMagic[RollupManagerBananaType](polygonrollupmanager.NewPolygonrollupmanager, cfg.RollupManagerAddr, backend, ContractNameRollupManager, VersionBanana) + rollupManager, err := NewContractMagic[RollupManagerBananaType]( + polygonrollupmanager.NewPolygonrollupmanager, cfg.RollupManagerAddr, + backend, ContractNameRollupManager, VersionBanana) if err != nil { return nil, err } @@ -53,6 +59,6 @@ func NewContractsBanana(cfg config.L1Config, backend bind.ContractBackend) (*Con } func (c *ContractsBanana) String() string { - return "RollupManager: " + c.RollupManager.String() + "\nGlobalExitRoot: " + c.GlobalExitRoot.String() + "\nRollup: " + c.Rollup.String() - + return "RollupManager: " + c.RollupManager.String() + "\nGlobalExitRoot: " + + c.GlobalExitRoot.String() + "\nRollup: " + c.Rollup.String() } diff --git a/etherman/contracts/contracts_elderberry.go b/etherman/contracts/contracts_elderberry.go index 45f53d14..3a3bf574 100644 --- a/etherman/contracts/contracts_elderberry.go +++ b/etherman/contracts/contracts_elderberry.go @@ -30,16 +30,34 @@ type ContractsElderberry struct { } func NewContractsElderberry(cfg config.L1Config, backend bind.ContractBackend) (*ContractsElderberry, error) { - ger, err := NewContractMagic[GlobalExitRootElderberryType](polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2, cfg.GlobalExitRootManagerAddr, backend, ContractNameGlobalExitRoot, VersionElderberry) + ger, err := NewContractMagic[GlobalExitRootElderberryType]( + polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2, + cfg.GlobalExitRootManagerAddr, + backend, + ContractNameGlobalExitRoot, + VersionElderberry, + ) if err != nil { return nil, err } - rollup, err := NewContractMagic[RollupElderberryType](polygonvalidiumetrog.NewPolygonvalidiumetrog, cfg.ZkEVMAddr, backend, ContractNameRollup, VersionElderberry) + rollup, err := NewContractMagic[RollupElderberryType]( + polygonvalidiumetrog.NewPolygonvalidiumetrog, + cfg.ZkEVMAddr, + backend, + ContractNameRollup, + VersionElderberry, + ) if err != nil { return nil, err } 
- rollupManager, err := NewContractMagic[RollupManagerElderberryType](polygonrollupmanager.NewPolygonrollupmanager, cfg.RollupManagerAddr, backend, ContractNameRollupManager, VersionElderberry) + rollupManager, err := NewContractMagic[RollupManagerElderberryType]( + polygonrollupmanager.NewPolygonrollupmanager, + cfg.RollupManagerAddr, + backend, + ContractNameRollupManager, + VersionElderberry, + ) if err != nil { return nil, err } @@ -52,6 +70,6 @@ func NewContractsElderberry(cfg config.L1Config, backend bind.ContractBackend) ( } func (c *ContractsElderberry) String() string { - return "RollupManager: " + c.RollupManager.String() + "\nGlobalExitRoot: " + c.GlobalExitRoot.String() + "\nRollup: " + c.Rollup.String() - + return "RollupManager: " + c.RollupManager.String() + "\nGlobalExitRoot: " + + c.GlobalExitRoot.String() + "\nRollup: " + c.Rollup.String() } diff --git a/etherman/errors.go b/etherman/errors.go index bb7123fb..a2d748e7 100644 --- a/etherman/errors.go +++ b/etherman/errors.go @@ -10,14 +10,19 @@ var ( ErrGasRequiredExceedsAllowance = errors.New("gas required exceeds allowance") // ErrContentLengthTooLarge content length is too large ErrContentLengthTooLarge = errors.New("content length too large") - //ErrTimestampMustBeInsideRange Timestamp must be inside range + // ErrTimestampMustBeInsideRange Timestamp must be inside range ErrTimestampMustBeInsideRange = errors.New("timestamp must be inside range") - //ErrInsufficientAllowance insufficient allowance + // ErrInsufficientAllowance insufficient allowance ErrInsufficientAllowance = errors.New("insufficient allowance") // ErrBothGasPriceAndMaxFeeGasAreSpecified both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified - ErrBothGasPriceAndMaxFeeGasAreSpecified = errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") - // ErrMaxFeeGasAreSpecifiedButLondonNotActive maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet - ErrMaxFeeGasAreSpecifiedButLondonNotActive = errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet") + ErrBothGasPriceAndMaxFeeGasAreSpecified = errors.New( + "both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified", + ) + // ErrMaxFeeGasAreSpecifiedButLondonNotActive maxFeePerGas or maxPriorityFeePerGas + // specified but london fork is not active yet + ErrMaxFeeGasAreSpecifiedButLondonNotActive = errors.New( + "maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet", + ) // ErrNoSigner no signer to authorize the transaction with ErrNoSigner = errors.New("no signer to authorize the transaction with") // ErrMissingTrieNode means that a node is missing on the trie @@ -48,5 +53,6 @@ func TryParseError(err error) (error, bool) { } } } + return parsedError, exists } diff --git a/etherman/errors_test.go b/etherman/errors_test.go index cfc02ccc..91ca6c50 100644 --- a/etherman/errors_test.go +++ b/etherman/errors_test.go @@ -19,7 +19,7 @@ func TestTryParseWithExactMatch(t *testing.T) { func TestTryParseWithContains(t *testing.T) { expected := ErrTimestampMustBeInsideRange - smartContractErr := fmt.Errorf(" execution reverted: ProofOfEfficiency::sequenceBatches: %s", expected) + smartContractErr := fmt.Errorf(" execution reverted: ProofOfEfficiency::sequenceBatches: %w", expected) actualErr, ok := TryParseError(smartContractErr) diff --git a/etherman/etherman.go b/etherman/etherman.go index 707fac5b..4f9e1c81 100644 --- a/etherman/etherman.go +++ b/etherman/etherman.go @@ -71,7 +71,7 @@ type 
L1Config struct { // PolAddr Address of the L1 Pol token Contract PolAddr common.Address `json:"polTokenAddress" mapstructure:"PolAddr"` // GlobalExitRootManagerAddr Address of the L1 GlobalExitRootManager contract - GlobalExitRootManagerAddr common.Address `json:"polygonZkEVMGlobalExitRootAddress" mapstructure:"GlobalExitRootManagerAddr"` + GlobalExitRootManagerAddr common.Address `json:"polygonZkEVMGlobalExitRootAddress" mapstructure:"GlobalExitRootManagerAddr"` //nolint:lll } // Client is a simple implementation of EtherMan. @@ -93,11 +93,13 @@ func NewClient(cfg config.Config, l1Config config.L1Config, commonConfig cdkcomm ethClient, err := ethclient.Dial(cfg.EthermanConfig.URL) if err != nil { log.Errorf("error connecting to %s: %+v", cfg.EthermanConfig.URL, err) + return nil, err } L1chainID, err := ethClient.ChainID(context.Background()) if err != nil { log.Errorf("error getting L1chainID from %s: %+v", cfg.EthermanConfig.URL, err) + return nil, err } log.Infof("L1ChainID: %d", L1chainID.Uint64()) @@ -110,10 +112,14 @@ func NewClient(cfg config.Config, l1Config config.L1Config, commonConfig cdkcomm rollupID, err := contracts.Banana.RollupManager.RollupAddressToID(&bind.CallOpts{Pending: false}, l1Config.ZkEVMAddr) if err != nil { log.Errorf("error getting rollupID from %s : %+v", contracts.Banana.RollupManager.String(), err) + return nil, err } if rollupID == 0 { - return nil, errors.New("rollupID is 0, is not a valid value. Check that rollup Address is correct " + l1Config.ZkEVMAddr.String()) + return nil, errors.New( + "rollupID is 0, which is not a valid value. Check that the rollup address is correct: " + + l1Config.ZkEVMAddr.String(), + ) } log.Infof("rollupID: %d (obtained from SMC: %s)", rollupID, contracts.Banana.RollupManager.String()) @@ -149,7 +155,9 @@ type Order struct { } // WaitTxToBeMined waits for an L1 tx to be mined.
It will return error if the tx is reverted or timeout is exceeded -func (etherMan *Client) WaitTxToBeMined(ctx context.Context, tx *types.Transaction, timeout time.Duration) (bool, error) { +func (etherMan *Client) WaitTxToBeMined( + ctx context.Context, tx *types.Transaction, timeout time.Duration, +) (bool, error) { // err := operations.WaitTxToBeMined(ctx, etherMan.EthClient, tx, timeout) // if errors.Is(err, context.DeadlineExceeded) { // return false, nil @@ -167,6 +175,7 @@ func (etherMan *Client) GetSendSequenceFee(numBatches uint64) (*big.Int, error) return nil, err } fee := new(big.Int).Mul(f, new(big.Int).SetUint64(numBatches)) + return fee, nil } @@ -188,17 +197,23 @@ func (etherMan *Client) EthBlockByNumber(ctx context.Context, blockNumber uint64 if errors.Is(err, ethereum.NotFound) || err.Error() == "block does not exist in blockchain" { return nil, ErrNotFound } + return nil, err } + return block, nil } // GetLatestBatchNumber function allows to retrieve the latest proposed batch in the smc func (etherMan *Client) GetLatestBatchNumber() (uint64, error) { - rollupData, err := etherMan.Contracts.Banana.RollupManager.RollupIDToRollupData(&bind.CallOpts{Pending: false}, etherMan.RollupID) + rollupData, err := etherMan.Contracts.Banana.RollupManager.RollupIDToRollupData( + &bind.CallOpts{Pending: false}, + etherMan.RollupID, + ) if err != nil { return 0, err } + return rollupData.LastBatchSequenced, nil } @@ -223,6 +238,7 @@ func (etherMan *Client) getBlockNumber(ctx context.Context, blockNumber rpc.Bloc if err != nil || header == nil { return 0, err } + return header.Number.Uint64(), nil } @@ -232,15 +248,20 @@ func (etherMan *Client) GetLatestBlockTimestamp(ctx context.Context) (uint64, er if err != nil || header == nil { return 0, err } + return header.Time, nil } // GetLatestVerifiedBatchNum gets latest verified batch from ethereum func (etherMan *Client) GetLatestVerifiedBatchNum() (uint64, error) { - rollupData, err := etherMan.Contracts.Banana.RollupManager.RollupIDToRollupData(&bind.CallOpts{Pending: false}, etherMan.RollupID) + rollupData, err := etherMan.Contracts.Banana.RollupManager.RollupIDToRollupData( + &bind.CallOpts{Pending: false}, + etherMan.RollupID, + ) if err != nil { return 0, err } + return rollupData.LastVerifiedBatch, nil } @@ -261,14 +282,19 @@ func (etherMan *Client) GetTrustedSequencerURL() (string, error) { // GetL2ChainID returns L2 Chain ID func (etherMan *Client) GetL2ChainID() (uint64, error) { - rollupData, err := etherMan.Contracts.Banana.RollupManager.RollupIDToRollupData(&bind.CallOpts{Pending: false}, etherMan.RollupID) + rollupData, err := etherMan.Contracts.Banana.RollupManager.RollupIDToRollupData( + &bind.CallOpts{Pending: false}, + etherMan.RollupID, + ) log.Debug("chainID read from rollupManager: ", rollupData.ChainID) if err != nil { log.Debug("error from rollupManager: ", err) + return 0, err } else if rollupData.ChainID == 0 { return rollupData.ChainID, fmt.Errorf("error: chainID received is 0") } + return rollupData.ChainID, nil } @@ -283,7 +309,9 @@ func (etherMan *Client) CurrentNonce(ctx context.Context, account common.Address } // EstimateGas returns the estimated gas for the tx -func (etherMan *Client) EstimateGas(ctx context.Context, from common.Address, to *common.Address, value *big.Int, data []byte) (uint64, error) { +func (etherMan *Client) EstimateGas( + ctx context.Context, from common.Address, to *common.Address, value *big.Int, data []byte, +) (uint64, error) { return etherMan.EthClient.EstimateGas(ctx, 
ethereum.CallMsg{ From: from, To: to, @@ -305,15 +333,18 @@ func (etherMan *Client) CheckTxWasMined(ctx context.Context, txHash common.Hash) } // SignTx tries to sign a transaction accordingly to the provided sender -func (etherMan *Client) SignTx(ctx context.Context, sender common.Address, tx *types.Transaction) (*types.Transaction, error) { +func (etherMan *Client) SignTx( + ctx context.Context, sender common.Address, tx *types.Transaction, +) (*types.Transaction, error) { auth, err := etherMan.getAuthByAddress(sender) - if err == ErrNotFound { + if errors.Is(err, ErrNotFound) { return nil, ErrPrivateKeyNotFound } signedTx, err := auth.Signer(auth.From, tx) if err != nil { return nil, err } + return signedTx, nil } @@ -342,6 +373,7 @@ func (etherMan *Client) GetRevertMessage(ctx context.Context, tx *types.Transact func (etherMan *Client) AddOrReplaceAuth(auth bind.TransactOpts) error { log.Infof("added or replaced authorization for address: %v", auth.From.String()) etherMan.auth[auth.From] = auth + return nil } @@ -354,6 +386,7 @@ func (etherMan *Client) LoadAuthFromKeyStore(path, password string) (*bind.Trans log.Infof("loaded authorization for address: %v", auth.From.String()) etherMan.auth[auth.From] = auth + return &auth, pk, nil } @@ -371,6 +404,7 @@ func newKeyFromKeystore(path, password string) (*keystore.Key, error) { if err != nil { return nil, err } + return key, nil } @@ -388,6 +422,7 @@ func newAuthFromKeystore(path, password string, chainID uint64) (bind.TransactOp if err != nil { return bind.TransactOpts{}, nil, err } + return *auth, key.PrivateKey, nil } @@ -397,6 +432,7 @@ func (etherMan *Client) getAuthByAddress(addr common.Address) (bind.TransactOpts if !found { return bind.TransactOpts{}, ErrNotFound } + return auth, nil } @@ -406,6 +442,7 @@ func (etherMan *Client) GetLatestBlockHeader(ctx context.Context) (*types.Header if err != nil || header == nil { return nil, err } + return header, nil } @@ -433,7 +470,10 @@ func (etherMan *Client) GetL1InfoRoot(indexL1InfoRoot uint32) (common.Hash, erro ) if indexL1InfoRoot > 0 { - lastL1InfoTreeRoot, err = etherMan.Contracts.Banana.GlobalExitRoot.L1InfoRootMap(&bind.CallOpts{Pending: false}, indexL1InfoRoot) + lastL1InfoTreeRoot, err = etherMan.Contracts.Banana.GlobalExitRoot.L1InfoRootMap( + &bind.CallOpts{Pending: false}, + indexL1InfoRoot, + ) if err != nil { log.Errorf("error calling SC globalexitroot L1InfoLeafMap: %v", err) } diff --git a/go.mod b/go.mod index 0f229b12..a2ca38f4 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.4 require ( github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f github.com/0xPolygon/cdk-data-availability v0.0.9 - github.com/0xPolygon/cdk-rpc v0.0.0-20240419104226-c0a62ba0f49d + github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.7.0 @@ -17,8 +17,10 @@ require ( github.com/jackc/pgconn v1.14.3 github.com/jackc/pgx/v4 v4.18.3 github.com/ledgerwatch/erigon-lib v1.0.0 + github.com/mattn/go-sqlite3 v1.14.23 github.com/mitchellh/mapstructure v1.5.0 github.com/rubenv/sql-migrate v1.6.1 + github.com/russross/meddler v1.0.1 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 github.com/urfave/cli/v2 v2.27.2 @@ -30,6 +32,7 @@ require ( golang.org/x/sync v0.7.0 google.golang.org/grpc v1.64.0 google.golang.org/protobuf v1.34.2 + modernc.org/sqlite v1.32.0 ) 
require ( @@ -59,6 +62,7 @@ require ( github.com/deckarep/golang-set/v2 v2.6.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/didip/tollbooth/v6 v6.1.2 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/erigontech/mdbx-go v0.27.14 // indirect github.com/ethereum/c-kzg-4844 v1.0.0 // indirect github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 // indirect @@ -79,6 +83,7 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.1 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.1-0.20180906183839-65a6292f0157 // indirect github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect @@ -106,6 +111,7 @@ require ( github.com/miguelmota/go-solidity-sha3 v0.1.1 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/onsi/gomega v1.27.10 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect @@ -116,6 +122,7 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/rs/cors v1.7.0 // indirect @@ -142,12 +149,18 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/multierr v1.10.0 // indirect golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect - golang.org/x/sys v0.21.0 // indirect + golang.org/x/sys v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a // indirect + modernc.org/libc v1.60.0 // indirect + modernc.org/mathutil v1.6.0 // indirect + modernc.org/memory v1.8.0 // indirect + modernc.org/strutil v1.2.0 // indirect + modernc.org/token v1.1.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index 92207638..818a9b5d 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f h1 github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f/go.mod h1:mFlcEjsm2YBBsu8atHJ3zyVnwM+Z/fMXpVmIJge+WVU= github.com/0xPolygon/cdk-data-availability v0.0.9 h1:KkP+hJH9nY5fljpSNtW2pfP5YQCJOsSRzfnl0yT78rI= github.com/0xPolygon/cdk-data-availability v0.0.9/go.mod h1:5A+CU4FGeyG8zTDJc0khMzRxPzFzmpRydeEWmLztgh4= -github.com/0xPolygon/cdk-rpc v0.0.0-20240419104226-c0a62ba0f49d h1:sxh6hZ2jF/sxxj2jd5o1vuNNCZjYmn4aRG9SRlVaEFs= -github.com/0xPolygon/cdk-rpc v0.0.0-20240419104226-c0a62ba0f49d/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= +github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 h1:Jri+ydl8PudddGacnVLatrCuAG9e1Ut8W4J0GoawssU= +github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= 
github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUxbnIoyumKvlwr0OtpTYMo= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 h1:QElCysO7f2xaknY/RDjxcs7IVmcgORfsCX2g+YD0Ko4= @@ -89,6 +89,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/didip/tollbooth/v6 v6.1.2 h1:Kdqxmqw9YTv0uKajBUiWQg+GURL/k4vy9gmLCL01PjQ= github.com/didip/tollbooth/v6 v6.1.2/go.mod h1:xjcse6CTHCLuOkzsWrEgdy9WPJFv+p/x6v+MyfP+O9s= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/erigontech/mdbx-go v0.27.14 h1:IVVeQVCAjZRpAR8bThlP2ISxrOwdV35NZdGwAgotaRw= github.com/erigontech/mdbx-go v0.27.14/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4= github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= @@ -164,6 +166,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo= +github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -174,6 +178,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDa github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.1-0.20180906183839-65a6292f0157 h1:uyodBE3xDz0ynKs1tLBU26wOQoEkAqqiY18DbZ+FZrA= github.com/hashicorp/hcl v1.0.1-0.20180906183839-65a6292f0157/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hermeznetwork/tracerr v0.3.2 h1:QB3TlQxO/4XHyixsg+nRZPuoel/FFQlQ7oAoHDD5l1c= @@ -296,8 +302,9 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= -github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.7/go.mod 
h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0= +github.com/mattn/go-sqlite3 v1.14.23/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/miguelmota/go-solidity-sha3 v0.1.1 h1:3Y08sKZDtudtE5kbTBPC9RYJznoSYyWI9VD6mghU0CA= github.com/miguelmota/go-solidity-sha3 v0.1.1/go.mod h1:sax1FvQF+f71j8W1uUHMZn8NxKyl5rYLks2nqj8RFEw= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -308,6 +315,8 @@ github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8oh github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -348,6 +357,8 @@ github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSz github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -363,6 +374,8 @@ github.com/rubenv/sql-migrate v1.6.1 h1:bo6/sjsan9HaXAsNxYP/jCEDUGibHp8JmOBw7NTG github.com/rubenv/sql-migrate v1.6.1/go.mod h1:tPzespupJS0jacLfhbwto/UjSX+8h2FdWB7ar+QlHa0= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/meddler v1.0.1 h1:JLR7Z4M4iGm1nr7DIURBq18UW8cTrm+qArUFgOhELo8= +github.com/russross/meddler v1.0.1/go.mod h1:GzGDChbFHuzxlFwt8gnJMRRNyFSQDSudmy2kHh7GYnQ= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= @@ -472,6 +485,8 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= 
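A side note on the `//nolint` churn visible in the `hex` and `l1infotree` hunks below: golangci-lint renamed its magic-number linter from `gomnd` to `mnd`, so inline suppressions have to be updated in lockstep with the linter bump. Illustrative only (hypothetical package name, same directive syntax as the diff):

```go
package hexenc

import "strconv"

// AppendHexUint64 mirrors hex.EncodeUint64 from this diff: the constants 2
// (length of the "0x" prefix) and 10 (capacity guess) are deliberate magic
// numbers, so the renamed mnd linter is silenced inline.
func AppendHexUint64(i uint64) string {
	enc := make([]byte, 2, 10) //nolint:mnd
	copy(enc, "0x")
	return string(strconv.AppendUint(enc, i, 16))
}
```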
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -524,8 +539,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -552,6 +567,8 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -595,5 +612,31 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ= +modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ= +modernc.org/ccgo/v4 v4.21.0 h1:kKPI3dF7RIag8YcToh5ZwDcVMIv6VGa0ED5cvh0LMW4= +modernc.org/ccgo/v4 v4.21.0/go.mod h1:h6kt6H/A2+ew/3MW/p6KEoQmrq/i3pr0J/SiwiaF/g0= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.5.0 h1:bJ9ChznK1L1mUtAQtxi0wi5AtAs5jQuw4PrPHO5pb6M= +modernc.org/gc/v2 v2.5.0/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= +modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a h1:CfbpOLEo2IwNzJdMvE8aiRbPMxoTpgAJeyePh0SmO8M= +modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= +modernc.org/libc v1.60.0 h1:XeRF1gXky7JE5E8IErtYAdKj+ykZPdYUsgJNQ8RFWIA= +modernc.org/libc v1.60.0/go.mod 
h1:xJuobKuNxKH3RUatS7GjR+suWj+5c2K7bi4m/S5arOY= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E= +modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= +modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= +modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= +modernc.org/sqlite v1.32.0 h1:6BM4uGza7bWypsw4fdLRsLxut6bHe4c58VeqjRgST8s= +modernc.org/sqlite v1.32.0/go.mod h1:UqoylwmTb9F+IqXERT8bW9zzOWN8qwAIcLdzeBZs4hA= +modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/hex/hex.go b/hex/hex.go index 4699eb20..c7e1f860 100644 --- a/hex/hex.go +++ b/hex/hex.go @@ -53,7 +53,7 @@ func DecodeHex(str string) ([]byte, error) { func MustDecodeHex(str string) []byte { buf, err := DecodeHex(str) if err != nil { - panic(fmt.Errorf("could not decode hex: %v", err)) + panic(fmt.Errorf("could not decode hex: %w", err)) } return buf @@ -62,13 +62,15 @@ func MustDecodeHex(str string) []byte { // DecodeUint64 type-checks and converts a hex string to a uint64 func DecodeUint64(str string) uint64 { i := DecodeBig(str) + return i.Uint64() } // EncodeUint64 encodes a number as a hex string with 0x prefix. 
func EncodeUint64(i uint64) string { - enc := make([]byte, 2, 10) //nolint:gomnd + enc := make([]byte, 2, 10) //nolint:mnd copy(enc, "0x") + return string(strconv.AppendUint(enc, i, Base)) } @@ -81,9 +83,9 @@ func DecodeNibble(in byte) uint64 { case in >= '0' && in <= '9': return uint64(in - '0') case in >= 'A' && in <= 'F': - return uint64(in - 'A' + 10) //nolint:gomnd + return uint64(in - 'A' + 10) //nolint:mnd case in >= 'a' && in <= 'f': - return uint64(in - 'a' + 10) //nolint:gomnd + return uint64(in - 'a' + 10) //nolint:mnd default: return BadNibble } @@ -117,5 +119,6 @@ func IsValid(s string) bool { return false } } + return true } diff --git a/l1bridge2infoindexsync/downloader.go b/l1bridge2infoindexsync/downloader.go index f14fcf8e..f4db8422 100644 --- a/l1bridge2infoindexsync/downloader.go +++ b/l1bridge2infoindexsync/downloader.go @@ -6,6 +6,7 @@ import ( "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rpc" @@ -34,6 +35,7 @@ func (d *downloader) getLastFinalizedL1Block(ctx context.Context) (uint64, error if err != nil { return 0, err } + return b.NumberU64(), nil } @@ -50,6 +52,7 @@ func (d *downloader) getLastL1InfoIndexUntilBlock(ctx context.Context, blockNum if err != nil { return 0, err } + return info.L1InfoTreeIndex, nil } @@ -58,9 +61,10 @@ func (d *downloader) getMainnetExitRootAtL1InfoTreeIndex(ctx context.Context, in if err != nil { return common.Hash{}, err } + return leaf.MainnetExitRoot, nil } -func (d *downloader) getBridgeIndex(ctx context.Context, mainnetExitRoot common.Hash) (uint32, error) { - return d.l1Bridge.GetBridgeIndexByRoot(ctx, mainnetExitRoot) +func (d *downloader) getBridgeIndex(ctx context.Context, mainnetExitRoot common.Hash) (types.Root, error) { + return d.l1Bridge.GetBridgeRootByHash(ctx, mainnetExitRoot) } diff --git a/l1bridge2infoindexsync/driver.go b/l1bridge2infoindexsync/driver.go index ce681bf0..921a0c41 100644 --- a/l1bridge2infoindexsync/driver.go +++ b/l1bridge2infoindexsync/driver.go @@ -2,6 +2,7 @@ package l1bridge2infoindexsync import ( "context" + "errors" "time" "github.com/0xPolygon/cdk/l1infotreesync" @@ -43,8 +44,10 @@ func (d *driver) sync(ctx context.Context) { attempts++ log.Errorf("error getting last processed block and index: %v", err) d.rh.Handle("GetLastProcessedBlockAndL1InfoTreeIndex", attempts) + continue } + break } for { @@ -59,13 +62,16 @@ func (d *driver) sync(ctx context.Context) { attempts++ log.Errorf("error getting target sync block: %v", err) d.rh.Handle("getTargetSynchronizationBlock", attempts) + continue } + break } if shouldWait { log.Debugf("waiting for syncers to catch up") time.Sleep(d.waitForSyncersPeriod) + continue } @@ -75,20 +81,24 @@ func (d *driver) sync(ctx context.Context) { for { lastL1InfoTreeIndex, err = d.downloader.getLastL1InfoIndexUntilBlock(ctx, syncUntilBlock) if err != nil { - if err == l1infotreesync.ErrNotFound || err == l1infotreesync.ErrBlockNotProcessed { + if errors.Is(err, l1infotreesync.ErrNotFound) || errors.Is(err, l1infotreesync.ErrBlockNotProcessed) { log.Debugf("l1 info tree index not ready, querying until block %d: %s", syncUntilBlock, err) + break } attempts++ log.Errorf("error getting last l1 info tree index: %v", err) d.rh.Handle("getLastL1InfoIndexUntilBlock", attempts) + continue } found = true + break } if !found { time.Sleep(d.waitForSyncersPeriod) + continue } @@ -108,9 +118,11 @@ func 
(d *driver) sync(ctx context.Context) { attempts++ log.Errorf("error getting relation: %v", err) d.rh.Handle("getRelation", attempts) + continue } relations = append(relations, relation) + break } } @@ -122,8 +134,10 @@ func (d *driver) sync(ctx context.Context) { attempts++ log.Errorf("error processing block: %v", err) d.rh.Handle("processUntilBlock", attempts) + continue } + break } @@ -135,8 +149,11 @@ func (d *driver) sync(ctx context.Context) { } } -func (d *driver) getTargetSynchronizationBlock(ctx context.Context, lpbProcessor uint64) (syncUntilBlock uint64, shouldWait bool, err error) { - lastFinalised, err := d.downloader.getLastFinalizedL1Block(ctx) // NOTE: if this had configurable finality, it would be needed to deal with reorgs +func (d *driver) getTargetSynchronizationBlock( + ctx context.Context, lpbProcessor uint64, +) (syncUntilBlock uint64, shouldWait bool, err error) { + // NOTE: if this had configurable finality, it would be needed to deal with reorgs + lastFinalised, err := d.downloader.getLastFinalizedL1Block(ctx) if err != nil { return } @@ -146,8 +163,10 @@ func (d *driver) getTargetSynchronizationBlock(ctx context.Context, lpbProcessor "should wait because the last processed block (%d) is greater or equal than the %s (%d)", blockToCheck, blockType, lastProcessed) shouldWait = true + return true } + return false } if checkProcessedBlockFn(lpbProcessor, lastFinalised, "last finalised") { @@ -180,6 +199,7 @@ func (d *driver) getTargetSynchronizationBlock(ctx context.Context, lpbProcessor log.Debugf("target sync block is the last processed block from bridge (%d)", lpbBridge) syncUntilBlock = lpbBridge } + return } @@ -189,13 +209,13 @@ func (d *driver) getRelation(ctx context.Context, l1InfoIndex uint32) (bridge2L1 return bridge2L1InfoRelation{}, err } - bridgeIndex, err := d.downloader.getBridgeIndex(ctx, mer) + bridgeRoot, err := d.downloader.getBridgeIndex(ctx, mer) if err != nil { return bridge2L1InfoRelation{}, err } return bridge2L1InfoRelation{ - bridgeIndex: bridgeIndex, + bridgeIndex: bridgeRoot.Index, l1InfoTreeIndex: l1InfoIndex, }, nil } diff --git a/l1bridge2infoindexsync/e2e_test.go b/l1bridge2infoindexsync/e2e_test.go index 2aa8e38f..e134c1ab 100644 --- a/l1bridge2infoindexsync/e2e_test.go +++ b/l1bridge2infoindexsync/e2e_test.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "math/big" + "path" "strconv" "testing" "time" @@ -37,7 +38,7 @@ func newSimulatedClient(authDeployer, authCaller *bind.TransactOpts) ( err error, ) { ctx := context.Background() - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd + balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) genesisAlloc := map[common.Address]types.Account{ authDeployer.From: { Balance: balance, @@ -46,27 +47,26 @@ func newSimulatedClient(authDeployer, authCaller *bind.TransactOpts) ( Balance: balance, }, } - blockGasLimit := uint64(999999999999999999) //nolint:gomnd + blockGasLimit := uint64(999999999999999999) client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) if err != nil { - return + return nil, common.Address{}, common.Address{}, nil, nil, err } client.Commit() nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) if err != nil { - return + return nil, common.Address{}, common.Address{}, nil, nil, err } precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1) bridgeABI, err 
:= polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() if err != nil { - return + return nil, common.Address{}, common.Address{}, nil, nil, err } if bridgeABI == nil { - err = errors.New("GetABI returned nil") - return + return nil, common.Address{}, common.Address{}, nil, nil, errors.New("GetABI returned nil") } dataCallProxy, err := bridgeABI.Pack("initialize", uint32(0), // networkIDMainnet @@ -77,7 +77,7 @@ func newSimulatedClient(authDeployer, authCaller *bind.TransactOpts) ( []byte{}, // gasTokenMetadata ) if err != nil { - return + return nil, common.Address{}, common.Address{}, nil, nil, err } bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( authDeployer, @@ -87,39 +87,41 @@ func newSimulatedClient(authDeployer, authCaller *bind.TransactOpts) ( dataCallProxy, ) if err != nil { - return + return nil, common.Address{}, common.Address{}, nil, nil, err } client.Commit() bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) if err != nil { - return + return nil, common.Address{}, common.Address{}, nil, nil, err } checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{}) if err != nil { - return + return nil, common.Address{}, common.Address{}, nil, nil, err } if precalculatedAddr != checkGERAddr { err = errors.New("error deploying bridge") + return } gerAddr, _, gerContract, err = polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2( authDeployer, client.Client(), authCaller.From, bridgeAddr, ) if err != nil { - return + return nil, common.Address{}, common.Address{}, nil, nil, err } client.Commit() if precalculatedAddr != gerAddr { - err = errors.New("error calculating addr") + return nil, common.Address{}, common.Address{}, nil, nil, errors.New("error calculating addr") } - return + + return client, gerAddr, bridgeAddr, gerContract, bridgeContract, nil } func TestE2E(t *testing.T) { ctx := context.Background() - dbPathBridgeSync := t.TempDir() - dbPathL1Sync := t.TempDir() + dbPathBridgeSync := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPathL1Sync := path.Join(t.TempDir(), "file::memory:?cache=shared") dbPathReorg := t.TempDir() dbPathL12InfoSync := t.TempDir() @@ -186,6 +188,7 @@ func TestE2E(t *testing.T) { // Wait for block to be finalised updateAtBlock, err := client.Client().BlockNumber(ctx) + require.NoError(t, err) for { lastFinalisedBlock, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) require.NoError(t, err) @@ -199,13 +202,14 @@ func TestE2E(t *testing.T) { // Wait for syncer to catch up syncerUpToDate := false var errMsg string + lb, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) + require.NoError(t, err) for i := 0; i < 10; i++ { lpb, err := bridge2InfoSync.GetLastProcessedBlock(ctx) require.NoError(t, err) - lb, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - require.NoError(t, err) if lpb == lb.NumberU64() { syncerUpToDate = true + break } time.Sleep(time.Millisecond * 100) diff --git a/l1bridge2infoindexsync/l1bridge2infoindexsync.go b/l1bridge2infoindexsync/l1bridge2infoindexsync.go index b1c8fc55..c24bebba 100644 --- a/l1bridge2infoindexsync/l1bridge2infoindexsync.go +++ b/l1bridge2infoindexsync/l1bridge2infoindexsync.go @@ -47,11 +47,16 @@ func (s *L1Bridge2InfoIndexSync) Start(ctx context.Context) { s.driver.sync(ctx) } +// GetLastProcessedBlock retrieves the last processed block number by the processor. 
func (s *L1Bridge2InfoIndexSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) { lpb, _, err := s.processor.GetLastProcessedBlockAndL1InfoTreeIndex(ctx) + return lpb, err } -func (s *L1Bridge2InfoIndexSync) GetL1InfoTreeIndexByDepositCount(ctx context.Context, depositCount uint32) (uint32, error) { +// GetL1InfoTreeIndexByDepositCount retrieves the L1 Info Tree index for a given deposit count. +func (s *L1Bridge2InfoIndexSync) GetL1InfoTreeIndexByDepositCount( + ctx context.Context, depositCount uint32, +) (uint32, error) { return s.processor.getL1InfoTreeIndexByBridgeIndex(ctx, depositCount) } diff --git a/l1bridge2infoindexsync/processor.go b/l1bridge2infoindexsync/processor.go index 9b86ad9b..bfe9f3a6 100644 --- a/l1bridge2infoindexsync/processor.go +++ b/l1bridge2infoindexsync/processor.go @@ -39,11 +39,13 @@ func (lp *lastProcessed) MarshalBinary() ([]byte, error) { } func (lp *lastProcessed) UnmarshalBinary(data []byte) error { - if len(data) != 12 { - return fmt.Errorf("expected len %d, actual len %d", 12, len(data)) + const expectedDataLength = 12 + if len(data) != expectedDataLength { + return fmt.Errorf("expected len %d, actual len %d", expectedDataLength, len(data)) } lp.block = common.BytesToUint64(data[:8]) lp.index = common.BytesToUint32(data[8:]) + return nil } @@ -61,6 +63,7 @@ func newProcessor(dbPath string) (*processor, error) { if err != nil { return nil, err } + return &processor{ db: db, }, nil @@ -74,6 +77,7 @@ func (p *processor) GetLastProcessedBlockAndL1InfoTreeIndex(ctx context.Context) return 0, 0, err } defer tx.Rollback() + return p.getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx) } @@ -87,19 +91,24 @@ func (p *processor) getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx kv.Tx) (uin if err := lp.UnmarshalBinary(lastProcessedBytes); err != nil { return 0, 0, err } + return lp.block, lp.index, nil } } -func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndex(ctx context.Context, blockNum uint64, index uint32) error { +func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndex( + ctx context.Context, blockNum uint64, index uint32, +) error { tx, err := p.db.BeginRw(ctx) if err != nil { return err } if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx(tx, blockNum, index); err != nil { tx.Rollback() + return err } + return tx.Commit() } @@ -112,10 +121,13 @@ func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndexWithTx(tx kv.RwTx, if err != nil { return err } + return tx.Put(lastProcessedTable, lastProcessedKey, value) } -func (p *processor) processUntilBlock(ctx context.Context, lastProcessedBlock uint64, relations []bridge2L1InfoRelation) error { +func (p *processor) processUntilBlock( + ctx context.Context, lastProcessedBlock uint64, relations []bridge2L1InfoRelation, +) error { tx, err := p.db.BeginRw(ctx) if err != nil { return err @@ -125,6 +137,7 @@ func (p *processor) processUntilBlock(ctx context.Context, lastProcessedBlock ui _, lastIndex, err := p.getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx) if err != nil { tx.Rollback() + return err } if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx( @@ -133,13 +146,15 @@ func (p *processor) processUntilBlock(ctx context.Context, lastProcessedBlock ui lastIndex, ); err != nil { tx.Rollback() + return err } + return tx.Commit() } for _, relation := range relations { - if _, err := p.getL1InfoTreeIndexByBridgeIndexWithTx(tx, relation.bridgeIndex); err != ErrNotFound { + if _, err := p.getL1InfoTreeIndexByBridgeIndexWithTx(tx, relation.bridgeIndex); !errors.Is(err, 
ErrNotFound) { // Note that indexes could be repeated as the L1 Info tree update can be produced by a rollup and not mainnet. // Hence if the index already exist, do not update as it's better to have the lowest index possible for the relation continue @@ -150,6 +165,7 @@ func (p *processor) processUntilBlock(ctx context.Context, lastProcessedBlock ui common.Uint32ToBytes(relation.l1InfoTreeIndex), ); err != nil { tx.Rollback() + return err } } @@ -160,6 +176,7 @@ func (p *processor) processUntilBlock(ctx context.Context, lastProcessedBlock ui relations[len(relations)-1].l1InfoTreeIndex, ); err != nil { tx.Rollback() + return err } @@ -184,5 +201,6 @@ func (p *processor) getL1InfoTreeIndexByBridgeIndexWithTx(tx kv.Tx, depositCount if indexBytes == nil { return 0, ErrNotFound } + return common.BytesToUint32(indexBytes), nil } diff --git a/l1infotree/hash.go b/l1infotree/hash.go index b07c3f10..5a33f5a3 100644 --- a/l1infotree/hash.go +++ b/l1infotree/hash.go @@ -13,9 +13,10 @@ func Hash(data ...[32]byte) [32]byte { var res [32]byte hash := sha3.NewLegacyKeccak256() for _, d := range data { - hash.Write(d[:]) //nolint:errcheck,gosec + hash.Write(d[:]) } copy(res[:], hash.Sum(nil)) + return res } @@ -23,19 +24,22 @@ func generateZeroHashes(height uint8) [][32]byte { var zeroHashes = [][32]byte{ common.Hash{}, } - // This generates a leaf = HashZero in position 0. In the rest of the positions that are equivalent to the ascending levels, - // we set the hashes of the nodes. So all nodes from level i=5 will have the same value and same children nodes. + // This generates a leaf = HashZero in position 0. In the rest of the positions that + // are equivalent to the ascending levels, we set the hashes of the nodes. + // So all nodes from level i=5 will have the same value and same children nodes. for i := 1; i <= int(height); i++ { zeroHashes = append(zeroHashes, Hash(zeroHashes[i-1], zeroHashes[i-1])) } + return zeroHashes } // HashLeafData calculates the keccak hash of the leaf values. func HashLeafData(ger, prevBlockHash common.Hash, minTimestamp uint64) [32]byte { var res [32]byte - t := make([]byte, 8) //nolint:gomnd + t := make([]byte, 8) //nolint:mnd binary.BigEndian.PutUint64(t, minTimestamp) copy(res[:], keccak256.Hash(ger.Bytes(), prevBlockHash.Bytes(), t)) + return res } diff --git a/l1infotree/tree.go b/l1infotree/tree.go index 6a5ad3bc..f3ad6d36 100644 --- a/l1infotree/tree.go +++ b/l1infotree/tree.go @@ -9,6 +9,7 @@ import ( // L1InfoTree provides methods to compute L1InfoTree type L1InfoTree struct { + logger *log.Logger height uint8 zeroHashes [][32]byte count uint32 @@ -17,8 +18,9 @@ type L1InfoTree struct { } // NewL1InfoTree creates new L1InfoTree. -func NewL1InfoTree(height uint8, initialLeaves [][32]byte) (*L1InfoTree, error) { +func NewL1InfoTree(logger *log.Logger, height uint8, initialLeaves [][32]byte) (*L1InfoTree, error) { mt := &L1InfoTree{ + logger: logger, zeroHashes: generateZeroHashes(height), height: height, count: uint32(len(initialLeaves)), @@ -26,30 +28,33 @@ func NewL1InfoTree(height uint8, initialLeaves [][32]byte) (*L1InfoTree, error) var err error mt.siblings, mt.currentRoot, err = mt.initSiblings(initialLeaves) if err != nil { - log.Error("error initializing siblings. Error: ", err) + mt.logger.Error("error initializing siblings. 
Error: ", err) + return nil, err } - log.Debug("Initial count: ", mt.count) - log.Debug("Initial root: ", mt.currentRoot) + mt.logger.Debug("Initial count: ", mt.count) + mt.logger.Debug("Initial root: ", mt.currentRoot) return mt, nil } // ResetL1InfoTree resets the L1InfoTree. func (mt *L1InfoTree) ResetL1InfoTree(initialLeaves [][32]byte) (*L1InfoTree, error) { - log.Info("Resetting L1InfoTree...") + const defaultTreeHeight = 32 + mt.logger.Info("Resetting L1InfoTree...") newMT := &L1InfoTree{ - zeroHashes: generateZeroHashes(32), // nolint:gomnd - height: 32, // nolint:gomnd + zeroHashes: generateZeroHashes(defaultTreeHeight), + height: defaultTreeHeight, count: uint32(len(initialLeaves)), } var err error newMT.siblings, newMT.currentRoot, err = newMT.initSiblings(initialLeaves) if err != nil { - log.Error("error initializing siblings. Error: ", err) + mt.logger.Error("error initializing siblings. Error: ", err) + return nil, err } - log.Debug("Reset initial count: ", newMT.count) - log.Debug("Reset initial root: ", newMT.currentRoot) + mt.logger.Debug("Reset initial count: ", newMT.count) + mt.logger.Debug("Reset initial root: ", newMT.currentRoot) return newMT, nil } @@ -59,11 +64,12 @@ func buildIntermediate(leaves [][32]byte) ([][][]byte, [][32]byte) { hashes [][32]byte ) for i := 0; i < len(leaves); i += 2 { - var left, right int = i, i + 1 + var left, right = i, i + 1 hash := Hash(leaves[left], leaves[right]) nodes = append(nodes, [][]byte{hash[:], leaves[left][:], leaves[right][:]}) hashes = append(hashes, hash) } + return nodes, hashes } @@ -106,7 +112,7 @@ func (mt *L1InfoTree) ComputeMerkleProof(gerIndex uint32, leaves [][32]byte) ([] if index >= uint32(len(leaves)) { siblings = append(siblings, mt.zeroHashes[h]) } else { - if index%2 == 1 { //If it is odd + if index%2 == 1 { // If it is odd siblings = append(siblings, leaves[index-1]) } else { // It is even siblings = append(siblings, leaves[index+1]) @@ -117,14 +123,14 @@ func (mt *L1InfoTree) ComputeMerkleProof(gerIndex uint32, leaves [][32]byte) ([] hashes [][32]byte ) for i := 0; i < len(leaves); i += 2 { - var left, right int = i, i + 1 + var left, right = i, i + 1 hash := Hash(leaves[left], leaves[right]) nsi = append(nsi, [][]byte{hash[:], leaves[left][:], leaves[right][:]}) hashes = append(hashes, hash) } // Find the index of the leaf in the next level of the tree. 
// Divide the index by 2 to find the position in the upper level - index = uint32(float64(index) / 2) //nolint:gomnd + index = uint32(float64(index) / 2) //nolint:mnd ns = nsi leaves = hashes } @@ -165,6 +171,7 @@ func (mt *L1InfoTree) AddLeaf(index uint32, leaf [32]byte) (common.Hash, error) } mt.currentRoot = cur mt.count++ + return cur, nil } @@ -183,9 +190,10 @@ func (mt *L1InfoTree) initSiblings(initialLeaves [][32]byte) ([][32]byte, common } root, err := mt.BuildL1InfoRoot(initialLeaves) if err != nil { - log.Error("error calculating initial root: ", err) + mt.logger.Error("error calculating initial root: ", err) return nil, [32]byte{}, err } + return siblings, root, nil } diff --git a/l1infotreesync/config.go b/l1infotreesync/config.go index 1b1d8014..64318fae 100644 --- a/l1infotreesync/config.go +++ b/l1infotreesync/config.go @@ -11,7 +11,7 @@ type Config struct { RollupManagerAddr common.Address `mapstructure:"RollupManagerAddr"` SyncBlockChunkSize uint64 `mapstructure:"SyncBlockChunkSize"` // BlockFinality indicates the status of the blocks that will be queried in order to sync - BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` + BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll URLRPCL1 string `mapstructure:"URLRPCL1"` WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"` InitialBlock uint64 `mapstructure:"InitialBlock"` diff --git a/l1infotreesync/downloader.go b/l1infotreesync/downloader.go index bc4305bc..2051f7b5 100644 --- a/l1infotreesync/downloader.go +++ b/l1infotreesync/downloader.go @@ -15,11 +15,15 @@ import ( ) var ( - updateL1InfoTreeSignatureV1 = crypto.Keccak256Hash([]byte("UpdateL1InfoTree(bytes32,bytes32)")) - updateL1InfoTreeSignatureV2 = crypto.Keccak256Hash([]byte("UpdateL1InfoTreeV2(bytes32,uint32,uint256,uint64)")) - verifyBatchesSignature = crypto.Keccak256Hash([]byte("VerifyBatches(uint32,uint64,bytes32,bytes32,address)")) - verifyBatchesTrustedAggregatorSignature = crypto.Keccak256Hash([]byte("VerifyBatchesTrustedAggregator(uint32,uint64,bytes32,bytes32,address)")) - initL1InfoRootMapSignature = crypto.Keccak256Hash([]byte("InitL1InfoRootMap(uint32,bytes32)")) + updateL1InfoTreeSignatureV1 = crypto.Keccak256Hash([]byte("UpdateL1InfoTree(bytes32,bytes32)")) + updateL1InfoTreeSignatureV2 = crypto.Keccak256Hash([]byte("UpdateL1InfoTreeV2(bytes32,uint32,uint256,uint64)")) + verifyBatchesSignature = crypto.Keccak256Hash( + []byte("VerifyBatches(uint32,uint64,bytes32,bytes32,address)"), + ) + verifyBatchesTrustedAggregatorSignature = crypto.Keccak256Hash( + []byte("VerifyBatchesTrustedAggregator(uint32,uint64,bytes32,bytes32,address)"), + ) + initL1InfoRootMapSignature = crypto.Keccak256Hash([]byte("InitL1InfoRootMap(uint32,bytes32)")) ) type EthClienter interface { @@ -43,7 +47,7 @@ func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Addr init, err := ger.ParseInitL1InfoRootMap(l) if err != nil { return fmt.Errorf( - "error parsing log %+v using ger.ParseInitL1InfoRootMap: %v", + "error parsing log %+v using ger.ParseInitL1InfoRootMap: %w", l, err, ) } @@ -51,69 +55,77 @@ func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Addr LeafCount: init.LeafCount, CurrentL1InfoRoot: init.CurrentL1InfoRoot, }}) + return nil } appender[updateL1InfoTreeSignatureV1] = func(b 
*sync.EVMBlock, l types.Log) error { l1InfoTreeUpdate, err := ger.ParseUpdateL1InfoTree(l) if err != nil { return fmt.Errorf( - "error parsing log %+v using ger.ParseUpdateL1InfoTree: %v", + "error parsing log %+v using ger.ParseUpdateL1InfoTree: %w", l, err, ) } b.Events = append(b.Events, Event{UpdateL1InfoTree: &UpdateL1InfoTree{ + BlockPosition: uint64(l.Index), MainnetExitRoot: l1InfoTreeUpdate.MainnetExitRoot, RollupExitRoot: l1InfoTreeUpdate.RollupExitRoot, ParentHash: b.ParentHash, Timestamp: b.Timestamp, }}) + return nil } // TODO: integrate this event to perform sanity checks - appender[updateL1InfoTreeSignatureV2] = func(b *sync.EVMBlock, l types.Log) error { + appender[updateL1InfoTreeSignatureV2] = func(b *sync.EVMBlock, l types.Log) error { //nolint:unparam l1InfoTreeUpdate, err := ger.ParseUpdateL1InfoTreeV2(l) if err != nil { return fmt.Errorf( - "error parsing log %+v using ger.ParseUpdateL1InfoTreeV2: %v", + "error parsing log %+v using ger.ParseUpdateL1InfoTreeV2: %w", l, err, ) } log.Infof("updateL1InfoTreeSignatureV2: expected root: %s", common.Bytes2Hex(l1InfoTreeUpdate.CurrentL1InfoRoot[:])) + return nil } appender[verifyBatchesSignature] = func(b *sync.EVMBlock, l types.Log) error { verifyBatches, err := rm.ParseVerifyBatches(l) if err != nil { return fmt.Errorf( - "error parsing log %+v using rm.ParseVerifyBatches: %v", + "error parsing log %+v using rm.ParseVerifyBatches: %w", l, err, ) } b.Events = append(b.Events, Event{VerifyBatches: &VerifyBatches{ - RollupID: verifyBatches.RollupID, - NumBatch: verifyBatches.NumBatch, - StateRoot: verifyBatches.StateRoot, - ExitRoot: verifyBatches.ExitRoot, - Aggregator: verifyBatches.Aggregator, + BlockPosition: uint64(l.Index), + RollupID: verifyBatches.RollupID, + NumBatch: verifyBatches.NumBatch, + StateRoot: verifyBatches.StateRoot, + ExitRoot: verifyBatches.ExitRoot, + Aggregator: verifyBatches.Aggregator, }}) + return nil } appender[verifyBatchesTrustedAggregatorSignature] = func(b *sync.EVMBlock, l types.Log) error { verifyBatches, err := rm.ParseVerifyBatchesTrustedAggregator(l) if err != nil { return fmt.Errorf( - "error parsing log %+v using rm.ParseVerifyBatches: %v", + "error parsing log %+v using rm.ParseVerifyBatches: %w", l, err, ) } b.Events = append(b.Events, Event{VerifyBatches: &VerifyBatches{ - RollupID: verifyBatches.RollupID, - NumBatch: verifyBatches.NumBatch, - StateRoot: verifyBatches.StateRoot, - ExitRoot: verifyBatches.ExitRoot, - Aggregator: verifyBatches.Aggregator, + BlockPosition: uint64(l.Index), + RollupID: verifyBatches.RollupID, + NumBatch: verifyBatches.NumBatch, + StateRoot: verifyBatches.StateRoot, + ExitRoot: verifyBatches.ExitRoot, + Aggregator: verifyBatches.Aggregator, }}) + return nil } diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 562f0e39..90f7f091 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "math/big" + "path" "strconv" "testing" "time" @@ -33,14 +34,14 @@ func newSimulatedClient(auth *bind.TransactOpts) ( err error, ) { ctx := context.Background() - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd + balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) address := auth.From genesisAlloc := map[common.Address]types.Account{ address: { Balance: balance, }, } - blockGasLimit := uint64(999999999999999999) //nolint:gomnd + blockGasLimit := uint64(999999999999999999) client = simulated.NewBackend(genesisAlloc, 
simulated.WithBlockGasLimit(blockGasLimit)) nonce, err := client.Client().PendingNonceAt(ctx, auth.From) @@ -63,12 +64,13 @@ func newSimulatedClient(auth *bind.TransactOpts) ( if precalculatedAddr != gerAddr { err = errors.New("error calculating addr") } + return } func TestE2E(t *testing.T) { ctx := context.Background() - dbPath := t.TempDir() + dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") privateKey, err := crypto.GenerateKey() require.NoError(t, err) auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) @@ -107,7 +109,7 @@ func TestE2E(t *testing.T) { require.Equal(t, g, expectedRoot) actualRoot, err := syncer.GetL1InfoTreeRootByIndex(ctx, uint32(i)) require.NoError(t, err) - require.Equal(t, common.Hash(expectedRoot), actualRoot) + require.Equal(t, common.Hash(expectedRoot), actualRoot.Hash) } // Update 3 rollups (verify batches event) 3 times @@ -128,41 +130,11 @@ func TestE2E(t *testing.T) { require.NoError(t, err) actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot, fmt.Sprintf("rollupID: %d, i: %d", rollupID, i)) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash, fmt.Sprintf("rollupID: %d, i: %d", rollupID, i)) } } } -func TestFinalised(t *testing.T) { - ctx := context.Background() - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - client, _, _, _, _, err := newSimulatedClient(auth) - require.NoError(t, err) - for i := 0; i < 100; i++ { - client.Commit() - } - - n4, err := client.Client().HeaderByNumber(ctx, big.NewInt(-4)) - require.NoError(t, err) - fmt.Println("-4", n4.Number) - n3, err := client.Client().HeaderByNumber(ctx, big.NewInt(-3)) - require.NoError(t, err) - fmt.Println("-3", n3.Number) - n2, err := client.Client().HeaderByNumber(ctx, big.NewInt(-2)) - require.NoError(t, err) - fmt.Println("-2", n2.Number) - n1, err := client.Client().HeaderByNumber(ctx, big.NewInt(-1)) - require.NoError(t, err) - fmt.Println("-1", n1.Number) - n0, err := client.Client().HeaderByNumber(ctx, nil) - require.NoError(t, err) - fmt.Println("0", n0.Number) - fmt.Printf("amount of blocks latest - finalised: %d", n0.Number.Uint64()-n3.Number.Uint64()) -} - func TestStressAndReorgs(t *testing.T) { const ( totalIterations = 200 // Have tested with much larger number (+10k) @@ -174,7 +146,7 @@ func TestStressAndReorgs(t *testing.T) { ) ctx := context.Background() - dbPathSyncer := t.TempDir() + dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") dbPathReorg := t.TempDir() privateKey, err := crypto.GenerateKey() require.NoError(t, err) @@ -215,20 +187,22 @@ func TestStressAndReorgs(t *testing.T) { if targetReorgBlockNum < currentBlockNum { // we are dealing with uints... 
reorgBlock, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(targetReorgBlockNum))) require.NoError(t, err) - client.Fork(reorgBlock.Hash()) + err = client.Fork(reorgBlock.Hash()) + require.NoError(t, err) } } } syncerUpToDate := false var errMsg string + lb, err := client.Client().BlockNumber(ctx) + require.NoError(t, err) for i := 0; i < 50; i++ { lpb, err := syncer.GetLastProcessedBlock(ctx) require.NoError(t, err) - lb, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) if lpb == lb { syncerUpToDate = true + break } time.Sleep(time.Millisecond * 100) @@ -241,18 +215,18 @@ func TestStressAndReorgs(t *testing.T) { require.NoError(t, err) actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) // Assert L1 Info tree root expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) require.NoError(t, err) expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) require.NoError(t, err) - index, actualL1InfoRoot, err := syncer.GetLastL1InfoTreeRootAndIndex(ctx) + lastRoot, err := syncer.GetLastL1InfoTreeRoot(ctx) require.NoError(t, err) - info, err := syncer.GetInfoByIndex(ctx, index) - require.NoError(t, err, fmt.Sprintf("index: %d", index)) + info, err := syncer.GetInfoByIndex(ctx, lastRoot.Index) + require.NoError(t, err, fmt.Sprintf("index: %d", lastRoot.Index)) - require.Equal(t, common.Hash(expectedL1InfoRoot), actualL1InfoRoot) + require.Equal(t, common.Hash(expectedL1InfoRoot), lastRoot.Hash) require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) } diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index 8cd3ee70..546a8ead 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -8,6 +8,7 @@ import ( "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/sync" "github.com/0xPolygon/cdk/tree" + "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" ) @@ -36,7 +37,7 @@ func New( retryAfterErrorPeriod time.Duration, maxRetryAttemptsAfterError int, ) (*L1InfoTreeSync, error) { - processor, err := newProcessor(ctx, dbPath) + processor, err := newProcessor(dbPath) if err != nil { return nil, err } @@ -80,6 +81,7 @@ func New( if err != nil { return nil, err } + return &L1InfoTreeSync{ processor: processor, driver: driver, @@ -92,15 +94,20 @@ func (s *L1InfoTreeSync) Start(ctx context.Context) { } // GetL1InfoTreeMerkleProof creates a merkle proof for the L1 Info tree -func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) ([32]common.Hash, common.Hash, error) { +func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) (types.Proof, types.Root, error) { return s.processor.GetL1InfoTreeMerkleProof(ctx, index) } // GetRollupExitTreeMerkleProof creates a merkle proof for the rollup exit tree -func (s *L1InfoTreeSync) GetRollupExitTreeMerkleProof(ctx context.Context, networkID uint32, root common.Hash) ([32]common.Hash, error) { +func (s *L1InfoTreeSync) GetRollupExitTreeMerkleProof( + ctx context.Context, + networkID uint32, + root common.Hash, +) (types.Proof, error) { if networkID == 0 { return tree.EmptyProof, nil } + return s.processor.rollupExitTree.GetProof(ctx, networkID-1, root) } @@ -116,24 +123,18 @@ func (s *L1InfoTreeSync) GetInfoByIndex(ctx 
context.Context, index uint32) (*L1I } // GetL1InfoTreeRootByIndex returns the root of the L1 info tree at the moment the leaf with the given index was added -func (s *L1InfoTreeSync) GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (common.Hash, error) { - tx, err := s.processor.db.BeginRo(ctx) - if err != nil { - return common.Hash{}, err - } - defer tx.Rollback() - - return s.processor.l1InfoTree.GetRootByIndex(tx, index) +func (s *L1InfoTreeSync) GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (types.Root, error) { + return s.processor.l1InfoTree.GetRootByIndex(ctx, index) } // GetLastRollupExitRoot return the last rollup exit root processed -func (s *L1InfoTreeSync) GetLastRollupExitRoot(ctx context.Context) (common.Hash, error) { +func (s *L1InfoTreeSync) GetLastRollupExitRoot(ctx context.Context) (types.Root, error) { return s.processor.rollupExitTree.GetLastRoot(ctx) } -// GetLastL1InfoTreeRootAndIndex return the last root and index processed from the L1 Info tree -func (s *L1InfoTreeSync) GetLastL1InfoTreeRootAndIndex(ctx context.Context) (uint32, common.Hash, error) { - return s.processor.l1InfoTree.GetLastIndexAndRoot(ctx) +// GetLastL1InfoTreeRoot return the last root and index processed from the L1 Info tree +func (s *L1InfoTreeSync) GetLastL1InfoTreeRoot(ctx context.Context) (types.Root, error) { + return s.processor.l1InfoTree.GetLastRoot(ctx) } // GetLastProcessedBlock return the last processed block @@ -141,9 +142,12 @@ func (s *L1InfoTreeSync) GetLastProcessedBlock(ctx context.Context) (uint64, err return s.processor.GetLastProcessedBlock(ctx) } -func (s *L1InfoTreeSync) GetLocalExitRoot(ctx context.Context, networkID uint32, rollupExitRoot common.Hash) (common.Hash, error) { +func (s *L1InfoTreeSync) GetLocalExitRoot( + ctx context.Context, networkID uint32, rollupExitRoot common.Hash, +) (common.Hash, error) { if networkID == 0 { return common.Hash{}, errors.New("network 0 is not a rollup, and it's not part of the rollup exit tree") } + return s.processor.rollupExitTree.GetLeaf(ctx, networkID-1, rollupExitRoot) } diff --git a/l1infotreesync/migrations/l1infotreesync0001.sql b/l1infotreesync/migrations/l1infotreesync0001.sql new file mode 100644 index 00000000..39a45dd4 --- /dev/null +++ b/l1infotreesync/migrations/l1infotreesync0001.sql @@ -0,0 +1,22 @@ +-- +migrate Down +DROP TABLE IF EXISTS block; +DROP TABLE IF EXISTS claim; +DROP TABLE IF EXISTS bridge; + +-- +migrate Up +CREATE TABLE block ( + num BIGINT PRIMARY KEY +); + +CREATE TABLE l1info_leaf ( + block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + position INTEGER NOT NULL, + previous_block_hash VARCHAR NOT NULL, + timestamp INTEGER NOT NULL, + mainnet_exit_root VARCHAR NOT NULL, + rollup_exit_root VARCHAR NOT NULL, + global_exit_root VARCHAR NOT NULL, + hash VARCHAR NOT NULL, + PRIMARY KEY (block_num, block_pos) +); diff --git a/l1infotreesync/migrations/migrations.go b/l1infotreesync/migrations/migrations.go new file mode 100644 index 00000000..768dde37 --- /dev/null +++ b/l1infotreesync/migrations/migrations.go @@ -0,0 +1,39 @@ +package migrations + +import ( + _ "embed" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/db/types" + treeMigrations "github.com/0xPolygon/cdk/tree/migrations" +) + +const ( + RollupExitTreePrefix = "rollup_exit_" + L1InfoTreePrefix = "l1_info_" +) + +//go:embed l1infotreesync0001.sql +var mig001 string + +func RunMigrations(dbPath string) error { + migrations := []types.Migration{ + { + ID: 
"l1infotreesync0001", + SQL: mig001, + }, + } + for _, tm := range treeMigrations.Migrations { + migrations = append(migrations, types.Migration{ + ID: tm.ID, + SQL: tm.SQL, + Prefix: RollupExitTreePrefix, + }) + migrations = append(migrations, types.Migration{ + ID: tm.ID, + SQL: tm.SQL, + Prefix: L1InfoTreePrefix, + }) + } + return db.RunMigrations(dbPath, migrations) +} diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index 286ee4fc..c76d7aac 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -2,55 +2,38 @@ package l1infotreesync import ( "context" + "database/sql" "encoding/binary" - "encoding/json" "errors" "fmt" - "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/l1infotreesync/migrations" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" "github.com/0xPolygon/cdk/tree" + treeTypes "github.com/0xPolygon/cdk/tree/types" ethCommon "github.com/ethereum/go-ethereum/common" "github.com/iden3/go-iden3-crypto/keccak256" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "github.com/russross/meddler" "golang.org/x/crypto/sha3" ) -const ( - dbPrefix = "l1infotreesync" - l1InfoTreeSuffix = "-l1infotree" - rollupExitTreeSuffix = "-rollupexittree" - - // infoTable stores the information of L1 Info Tree (the leaves) - // Key: index (uint32 converted to bytes) - // Value: JSON of storeLeaf struct - infoTable = dbPrefix + "-info" - // blockTable stores the first and last index of L1 Info Tree that have been updated on - // Value: JSON of blockWithLeafs - blockTable = dbPrefix + "-block" - // lastBlockTable used to store the last block processed. This is needed to know the last processed blcok - lastBlockTable = dbPrefix + "-lastBlock" - - treeHeight uint8 = 32 -) - var ( ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet") ErrNotFound = errors.New("not found") ErrNoBlock0 = errors.New("blockNum must be greater than 0") - lastBlockKey = []byte("lb") ) type processor struct { - db kv.RwDB + db *sql.DB l1InfoTree *tree.AppendOnlyTree rollupExitTree *tree.UpdatableTree } // UpdateL1InfoTree representation of the UpdateL1InfoTree event type UpdateL1InfoTree struct { + BlockPosition uint64 MainnetExitRoot ethCommon.Hash RollupExitRoot ethCommon.Hash ParentHash ethCommon.Hash @@ -59,11 +42,12 @@ type UpdateL1InfoTree struct { // VerifyBatches representation of the VerifyBatches and VerifyBatchesTrustedAggregator events type VerifyBatches struct { - RollupID uint32 - NumBatch uint64 - StateRoot ethCommon.Hash - ExitRoot ethCommon.Hash - Aggregator ethCommon.Address + BlockPosition uint64 + RollupID uint32 + NumBatch uint64 + StateRoot ethCommon.Hash + ExitRoot ethCommon.Hash + Aggregator ethCommon.Address } type InitL1InfoRootMap struct { @@ -79,105 +63,63 @@ type Event struct { // L1InfoTreeLeaf representation of a leaf of the L1 Info tree type L1InfoTreeLeaf struct { - L1InfoTreeIndex uint32 - PreviousBlockHash ethCommon.Hash - BlockNumber uint64 - Timestamp uint64 - MainnetExitRoot ethCommon.Hash - RollupExitRoot ethCommon.Hash - GlobalExitRoot ethCommon.Hash -} - -type storeLeaf struct { - BlockNumber uint64 - MainnetExitRoot ethCommon.Hash - RollupExitRoot ethCommon.Hash - ParentHash ethCommon.Hash - Index uint32 - Timestamp uint64 + BlockNumber uint64 `meddler:"block_num"` + BlockPosition uint64 `meddler:"block_pos"` + L1InfoTreeIndex uint32 `meddler:"position"` + PreviousBlockHash ethCommon.Hash `meddler:"previous_block_hash,hash"` 
+ Timestamp uint64 `meddler:"timestamp"` + MainnetExitRoot ethCommon.Hash `meddler:"mainnet_exit_root,hash"` + RollupExitRoot ethCommon.Hash `meddler:"rollup_exit_root,hash"` + GlobalExitRoot ethCommon.Hash `meddler:"global_exit_root,hash"` + Hash ethCommon.Hash `meddler:"hash,hash"` } // Hash as expected by the tree -func (l *storeLeaf) Hash() ethCommon.Hash { +func (l *L1InfoTreeLeaf) hash() ethCommon.Hash { var res [32]byte - t := make([]byte, 8) //nolint:gomnd + t := make([]byte, 8) //nolint:mnd binary.BigEndian.PutUint64(t, l.Timestamp) - copy(res[:], keccak256.Hash(l.GlobalExitRoot().Bytes(), l.ParentHash.Bytes(), t)) + copy(res[:], keccak256.Hash(l.globalExitRoot().Bytes(), l.PreviousBlockHash.Bytes(), t)) return res } -type blockWithLeafs struct { - // inclusive - FirstIndex uint32 - // not inclusive - LastIndex uint32 -} - // GlobalExitRoot returns the GER -func (l *storeLeaf) GlobalExitRoot() ethCommon.Hash { +func (l *L1InfoTreeLeaf) globalExitRoot() ethCommon.Hash { var gerBytes [32]byte hasher := sha3.NewLegacyKeccak256() hasher.Write(l.MainnetExitRoot[:]) hasher.Write(l.RollupExitRoot[:]) copy(gerBytes[:], hasher.Sum(nil)) + return gerBytes } -func newProcessor(ctx context.Context, dbPath string) (*processor, error) { - tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg { - cfg := kv.TableCfg{ - infoTable: {}, - blockTable: {}, - lastBlockTable: {}, - } - tree.AddTables(cfg, dbPrefix+rollupExitTreeSuffix) - tree.AddTables(cfg, dbPrefix+l1InfoTreeSuffix) - return cfg - } - db, err := mdbx.NewMDBX(nil). - Path(dbPath). - WithTableCfg(tableCfgFunc). - Open() +func newProcessor(dbPath string) (*processor, error) { + err := migrations.RunMigrations(dbPath) if err != nil { return nil, err } - p := &processor{ - db: db, - } - - l1InfoTree, err := tree.NewAppendOnlyTree(ctx, db, dbPrefix+l1InfoTreeSuffix) - if err != nil { - return nil, err - } - p.l1InfoTree = l1InfoTree - rollupExitTree, err := tree.NewUpdatableTree(ctx, db, dbPrefix+rollupExitTreeSuffix) + db, err := db.NewSQLiteDB(dbPath) if err != nil { return nil, err } - p.rollupExitTree = rollupExitTree - return p, nil + return &processor{ + db: db, + l1InfoTree: tree.NewAppendOnlyTree(db, migrations.L1InfoTreePrefix), + rollupExitTree: tree.NewUpdatableTree(db, migrations.RollupExitTreePrefix), + }, nil } // GetL1InfoTreeMerkleProof creates a merkle proof for the L1 Info tree -func (p *processor) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) ([32]ethCommon.Hash, ethCommon.Hash, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return tree.EmptyProof, ethCommon.Hash{}, err - } - defer tx.Rollback() - - root, err := p.l1InfoTree.GetRootByIndex(tx, index) - if err != nil { - return tree.EmptyProof, ethCommon.Hash{}, err - } - - proof, err := p.l1InfoTree.GetProof(ctx, index, root) +func (p *processor) GetL1InfoTreeMerkleProof( + ctx context.Context, index uint32, +) (treeTypes.Proof, treeTypes.Root, error) { + root, err := p.l1InfoTree.GetRootByIndex(ctx, index) if err != nil { - return tree.EmptyProof, ethCommon.Hash{}, err + return treeTypes.Proof{}, treeTypes.Root{}, err } - - // TODO: check if we need to return root or wat - return proof, root, nil + proof, err := p.l1InfoTree.GetProof(ctx, root.Index, root.Hash) + return proof, root, err } // GetLatestInfoUntilBlock returns the most recent L1InfoTreeLeaf that occurred before or at blockNum. 
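The processor rewrite that follows drops the hand-rolled JSON/MDBX serialization in favor of `meddler`, which maps `l1info_leaf` columns through the struct tags declared on `L1InfoTreeLeaf` above (the `,hash` suffix looks like a custom meddler type this repo registers for `common.Hash` columns — an assumption, since the registration is not shown in this hunk). A minimal sketch of the read/write pattern the new code uses:

```go
package l1infotreesync

import (
	"database/sql"
	"errors"

	"github.com/russross/meddler"
)

// latestLeaf sketches the read side: meddler scans one row into the struct,
// guided by its `meddler:"..."` tags.
func latestLeaf(db *sql.DB) (*L1InfoTreeLeaf, error) {
	leaf := &L1InfoTreeLeaf{}
	err := meddler.QueryRow(
		db, leaf,
		`SELECT * FROM l1info_leaf ORDER BY block_num DESC, block_pos DESC LIMIT 1;`,
	)
	if errors.Is(err, sql.ErrNoRows) {
		return nil, ErrNotFound
	}
	return leaf, err
}

// insertLeaf sketches the write side ProcessBlock relies on: column values
// are pulled from the tagged struct fields.
func insertLeaf(db *sql.DB, leaf *L1InfoTreeLeaf) error {
	return meddler.Insert(db, "l1info_leaf", leaf)
}
```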
@@ -186,11 +128,16 @@ func (p *processor) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64 if blockNum == 0 { return nil, ErrNoBlock0 } - tx, err := p.db.BeginRo(ctx) + tx, err := p.db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true}) if err != nil { return nil, err } - defer tx.Rollback() + defer func() { + if err := tx.Rollback(); err != nil { + log.Warnf("error rolling back tx: %v", err) + } + }() + lpb, err := p.getLastProcessedBlockWithTx(tx) if err != nil { return nil, err @@ -198,289 +145,179 @@ func (p *processor) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64 if lpb < blockNum { return nil, ErrBlockNotProcessed } - iter, err := tx.RangeDescend(blockTable, common.Uint64ToBytes(blockNum), common.Uint64ToBytes(0), 1) - if err != nil { - return nil, fmt.Errorf( - "error calling RangeDescend(blockTable, %d, 0, 1): %w", blockNum, err, - ) - } - k, v, err := iter.Next() + + info := &L1InfoTreeLeaf{} + err = meddler.QueryRow( + tx, info, + `SELECT * FROM l1info_leaf ORDER BY block_num DESC, block_pos DESC LIMIT 1;`, + ) if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, ErrNotFound + } return nil, err } - if k == nil { - return nil, ErrNotFound - } - blk := blockWithLeafs{} - if err := json.Unmarshal(v, &blk); err != nil { - return nil, err - } - return p.getInfoByIndexWithTx(tx, blk.LastIndex-1) + return info, nil } // GetInfoByIndex returns the value of a leaf (not the hash) of the L1 info tree func (p *processor) GetInfoByIndex(ctx context.Context, index uint32) (*L1InfoTreeLeaf, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - return p.getInfoByIndexWithTx(tx, index) + return p.getInfoByIndexWithTx(p.db, index) } -func (p *processor) getInfoByIndexWithTx(tx kv.Tx, index uint32) (*L1InfoTreeLeaf, error) { - infoBytes, err := tx.GetOne(infoTable, common.Uint32ToBytes(index)) - if err != nil { - return nil, err - } - if infoBytes == nil { - return nil, ErrNotFound - } - var info storeLeaf - if err := json.Unmarshal(infoBytes, &info); err != nil { - return nil, err - } - return &L1InfoTreeLeaf{ - L1InfoTreeIndex: info.Index, - PreviousBlockHash: info.ParentHash, - BlockNumber: info.BlockNumber, - Timestamp: info.Timestamp, - MainnetExitRoot: info.MainnetExitRoot, - RollupExitRoot: info.RollupExitRoot, - GlobalExitRoot: info.GlobalExitRoot(), - }, nil +func (p *processor) getInfoByIndexWithTx(tx db.DBer, index uint32) (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + return info, meddler.QueryRow( + tx, info, + `SELECT * FROM l1info_leaf WHERE position = $1;`, index, + ) } // GetLastProcessedBlock returns the last processed block func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer tx.Rollback() - return p.getLastProcessedBlockWithTx(tx) + return p.getLastProcessedBlockWithTx(p.db) } -func (p *processor) getLastProcessedBlockWithTx(tx kv.Tx) (uint64, error) { - blockNumBytes, err := tx.GetOne(lastBlockTable, lastBlockKey) - if err != nil { - return 0, err - } else if blockNumBytes == nil { +func (p *processor) getLastProcessedBlockWithTx(tx db.Querier) (uint64, error) { + var lastProcessedBlock uint64 + row := tx.QueryRow("SELECT num FROM BLOCK ORDER BY num DESC LIMIT 1;") + err := row.Scan(&lastProcessedBlock) + if errors.Is(err, sql.ErrNoRows) { return 0, nil } - return common.BytesToUint64(blockNumBytes), nil + return lastProcessedBlock, err } // Reorg triggers a purge and reset process 
// Reorg triggers a purge and reset process on the processor to leave it in a state
// as if the last block processed was firstReorgedBlock-1
 func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
-	tx, err := p.db.BeginRw(ctx)
+	tx, err := db.NewTx(ctx, p.db)
 	if err != nil {
 		return err
 	}
+	defer func() {
+		if err != nil {
+			if errRllbck := tx.Rollback(); errRllbck != nil {
+				log.Errorf("error while rolling back tx %v", errRllbck)
+			}
+		}
+	}()
 
-	c, err := tx.Cursor(blockTable)
+	_, err = tx.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock)
 	if err != nil {
 		return err
 	}
-	defer c.Close()
-	firstKey := common.Uint64ToBytes(firstReorgedBlock)
-	firstReorgedL1InfoTreeIndex := int64(-1)
-	for blkKey, blkValue, err := c.Seek(firstKey); blkKey != nil; blkKey, blkValue, err = c.Next() {
-		if err != nil {
-			tx.Rollback()
-			return err
-		}
-		var blk blockWithLeafs
-		if err := json.Unmarshal(blkValue, &blk); err != nil {
-			tx.Rollback()
-			return err
-		}
-		for i := blk.FirstIndex; i < blk.LastIndex; i++ {
-			if firstReorgedL1InfoTreeIndex == -1 {
-				firstReorgedL1InfoTreeIndex = int64(i)
-			}
-			if err := p.deleteLeaf(tx, i); err != nil {
-				tx.Rollback()
-				return err
-			}
-		}
-		if err := tx.Delete(blockTable, blkKey); err != nil {
-			tx.Rollback()
-			return err
-		}
-	}
-	if err := p.updateLastProcessedBlock(tx, firstReorgedBlock-1); err != nil {
-		tx.Rollback()
+	if err = p.l1InfoTree.Reorg(tx, firstReorgedBlock); err != nil {
 		return err
 	}
-	var rollbackL1InfoTree func()
-	if firstReorgedL1InfoTreeIndex != -1 {
-		rollbackL1InfoTree, err = p.l1InfoTree.Reorg(tx, uint32(firstReorgedL1InfoTreeIndex))
-		if err != nil {
-			tx.Rollback()
-			rollbackL1InfoTree()
-			return err
-		}
-	}
-	if err := tx.Commit(); err != nil {
-		rollbackL1InfoTree()
+
+	if err = p.rollupExitTree.Reorg(tx, firstReorgedBlock); err != nil {
 		return err
 	}
-	return nil
-}
-
-func (p *processor) deleteLeaf(tx kv.RwTx, index uint32) error {
-	if err := tx.Delete(infoTable, common.Uint32ToBytes(index)); err != nil {
+	if err := tx.Commit(); err != nil {
 		return err
 	}
+
 	return nil
 }
 
 // ProcessBlock processes the events of the block to build the rollup exit tree and the l1 info tree
 // and updates the last processed block (can be called without events for that purpose)
 func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error {
-	tx, err := p.db.BeginRw(ctx)
+	tx, err := db.NewTx(ctx, p.db)
 	if err != nil {
 		return err
 	}
-	events := make([]Event, len(b.Events))
-	rollupExitTreeRollback := func() {}
-	l1InfoTreeRollback := func() {}
-	rollback := func() {
-		tx.Rollback()
-		rollupExitTreeRollback()
-		l1InfoTreeRollback()
-	}
-	l1InfoTreeLeavesToAdd := []tree.Leaf{}
-	rollupExitTreeLeavesToAdd := []tree.Leaf{}
-	if len(b.Events) > 0 {
-		var initialL1InfoIndex uint32
-		var l1InfoLeavesAdded uint32
-		lastIndex, err := p.getLastIndex(tx)
-		if err == ErrNotFound {
-			initialL1InfoIndex = 0
-		} else if err != nil {
-			rollback()
-			return err
-		} else {
-			initialL1InfoIndex = lastIndex + 1
-		}
-		for _, e := range b.Events {
-			event := e.(Event)
-			events = append(events, event)
-			if event.UpdateL1InfoTree != nil {
-				index := initialL1InfoIndex + l1InfoLeavesAdded
-				leafToStore := storeLeaf{
-					BlockNumber:     b.Num,
-					Index:           index,
-					MainnetExitRoot: event.UpdateL1InfoTree.MainnetExitRoot,
-					RollupExitRoot:  event.UpdateL1InfoTree.RollupExitRoot,
-					ParentHash:      event.UpdateL1InfoTree.ParentHash,
-					Timestamp:       event.UpdateL1InfoTree.Timestamp,
-				}
-				if err := p.storeLeafInfo(tx, leafToStore); err != nil {
-					rollback()
-					return err
-				}
-				l1InfoTreeLeavesToAdd =
append(l1InfoTreeLeavesToAdd, tree.Leaf{ - Index: leafToStore.Index, - Hash: leafToStore.Hash(), - }) - l1InfoLeavesAdded++ + defer func() { + if err != nil { + if errRllbck := tx.Rollback(); errRllbck != nil { + log.Errorf("error while rolling back tx %v", errRllbck) } + } + }() - if event.VerifyBatches != nil { - rollupExitTreeLeavesToAdd = append(rollupExitTreeLeavesToAdd, tree.Leaf{ - Index: event.VerifyBatches.RollupID - 1, - Hash: event.VerifyBatches.ExitRoot, - }) - } + if _, err := tx.Exec(`INSERT INTO block (num) VALUES ($1)`, b.Num); err != nil { + return fmt.Errorf("err: %w", err) + } - if event.InitL1InfoRootMap != nil { - // TODO: indicate that l1 Info tree indexes before the one on this - // event are not safe to use - log.Debugf("TODO: handle InitL1InfoRootMap event") - } + var initialL1InfoIndex uint32 + var l1InfoLeavesAdded uint32 + lastIndex, err := p.getLastIndex(tx) + + switch { + case errors.Is(err, ErrNotFound): + initialL1InfoIndex = 0 + err = nil + case err != nil: + return fmt.Errorf("err: %w", err) + default: + initialL1InfoIndex = lastIndex + 1 + } + + for _, e := range b.Events { + event, ok := e.(Event) + if !ok { + return errors.New("failed to convert from sync.Block.Event into Event") } - if l1InfoLeavesAdded > 0 { - bwl := blockWithLeafs{ - FirstIndex: initialL1InfoIndex, - LastIndex: initialL1InfoIndex + l1InfoLeavesAdded, + if event.UpdateL1InfoTree != nil { + index := initialL1InfoIndex + l1InfoLeavesAdded + info := &L1InfoTreeLeaf{ + BlockNumber: b.Num, + BlockPosition: event.UpdateL1InfoTree.BlockPosition, + L1InfoTreeIndex: index, + PreviousBlockHash: event.UpdateL1InfoTree.ParentHash, + Timestamp: event.UpdateL1InfoTree.Timestamp, + MainnetExitRoot: event.UpdateL1InfoTree.MainnetExitRoot, + RollupExitRoot: event.UpdateL1InfoTree.RollupExitRoot, } - blockValue, err := json.Marshal(bwl) + info.GlobalExitRoot = info.globalExitRoot() + info.Hash = info.hash() + err = meddler.Insert(tx, "l1info_leaf", info) if err != nil { - rollback() - return err - } - if err := tx.Put(blockTable, common.Uint64ToBytes(b.Num), blockValue); err != nil { - rollback() - return err + return fmt.Errorf("err: %w", err) } - l1InfoTreeRollback, err = p.l1InfoTree.AddLeaves(tx, l1InfoTreeLeavesToAdd) + err = p.l1InfoTree.AddLeaf(tx, info.BlockNumber, info.BlockPosition, treeTypes.Leaf{ + Index: info.L1InfoTreeIndex, + Hash: info.Hash, + }) if err != nil { - rollback() - return err + return fmt.Errorf("err: %w", err) } + l1InfoLeavesAdded++ } - if len(rollupExitTreeLeavesToAdd) > 0 { - rollupExitTreeRollback, err = p.rollupExitTree.UpseartLeaves(tx, rollupExitTreeLeavesToAdd, b.Num) + if event.VerifyBatches != nil { + err = p.rollupExitTree.UpsertLeaf(tx, b.Num, event.VerifyBatches.BlockPosition, treeTypes.Leaf{ + Index: event.VerifyBatches.RollupID - 1, + Hash: event.VerifyBatches.ExitRoot, + }) if err != nil { - rollback() - return err + return fmt.Errorf("err: %w", err) } } - } - if err := p.updateLastProcessedBlock(tx, b.Num); err != nil { - rollback() - return err + + if event.InitL1InfoRootMap != nil { + // TODO: indicate that l1 Info tree indexes before the one on this + // event are not safe to use + log.Debugf("TODO: handle InitL1InfoRootMap event") + } } if err := tx.Commit(); err != nil { - rollback() - return err + return fmt.Errorf("err: %w", err) } - log.Infof("block %d processed with events: %+v", b.Num, events) + log.Infof("block %d processed with %d events", b.Num, len(b.Events)) return nil } -func (p *processor) getLastIndex(tx kv.Tx) (uint32, error) { - bNum, 
err := p.getLastProcessedBlockWithTx(tx)
-	if err != nil {
-		return 0, err
-	}
-	if bNum == 0 {
-		return 0, nil
-	}
-	iter, err := tx.RangeDescend(blockTable, common.Uint64ToBytes(bNum), common.Uint64ToBytes(0), 1)
-	if err != nil {
-		return 0, err
-	}
-	_, blkBytes, err := iter.Next()
-	if err != nil {
-		return 0, err
-	}
-	if blkBytes == nil {
+func (p *processor) getLastIndex(tx db.Querier) (uint32, error) {
+	var lastProcessedIndex uint32
+	row := tx.QueryRow("SELECT position FROM l1info_leaf ORDER BY block_num DESC, block_pos DESC LIMIT 1;")
+	err := row.Scan(&lastProcessedIndex)
+	if errors.Is(err, sql.ErrNoRows) {
 		return 0, ErrNotFound
 	}
-	var blk blockWithLeafs
-	if err := json.Unmarshal(blkBytes, &blk); err != nil {
-		return 0, err
-	}
-	return blk.LastIndex - 1, nil
-}
-
-func (p *processor) storeLeafInfo(tx kv.RwTx, leaf storeLeaf) error {
-	leafValue, err := json.Marshal(leaf)
-	if err != nil {
-		return err
-	}
-	return tx.Put(infoTable, common.Uint32ToBytes(leaf.Index), leafValue)
-}
-
-func (p *processor) updateLastProcessedBlock(tx kv.RwTx, blockNum uint64) error {
-	blockNumBytes := common.Uint64ToBytes(blockNum)
-	return tx.Put(lastBlockTable, lastBlockKey, blockNumBytes)
+	return lastProcessedIndex, err
 }
diff --git a/lastgersync/config.go b/lastgersync/config.go
index 9db63bec..36b12ab6 100644
--- a/lastgersync/config.go
+++ b/lastgersync/config.go
@@ -9,7 +9,7 @@ type Config struct {
 	// DBPath path of the DB
 	DBPath string `mapstructure:"DBPath"`
 	// BlockFinality indicates the status of the blocks that will be queried in order to sync
-	BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"`
+	BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll
 	// InitialBlockNum is the first block that will be queried when starting the synchronization from scratch.
 	// It should be a number equal to or below the creation of the bridge contract
 	InitialBlockNum uint64 `mapstructure:"InitialBlockNum"`
@@ -22,6 +22,7 @@ type Config struct {
 	MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"`
 	// WaitForNewBlocksPeriod time that will be waited when the synchronizer has reached the latest block
 	WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"`
-	// DownloadBufferSize buffer of events to be porcessed. When reached will stop downloading events until the processing catches up
+	// DownloadBufferSize buffer of events to be processed. When the buffer limit is reached,
+	// downloading will stop until the processing catches up.
 	DownloadBufferSize int `mapstructure:"DownloadBufferSize"`
 }
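The rewritten DownloadBufferSize comment describes plain channel backpressure: once the buffered events channel is full, the downloader blocks on send until the processor drains it. A toy, self-contained illustration of that behaviour (all names are invented for the example):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const downloadBufferSize = 2 // counterpart of the config field above

	events := make(chan int, downloadBufferSize)

	// Downloader: the send blocks as soon as the buffer holds
	// downloadBufferSize undelivered events, so downloading
	// effectively pauses until processing catches up.
	go func() {
		for n := 1; n <= 6; n++ {
			events <- n
			fmt.Println("downloaded event", n)
		}
		close(events)
	}()

	// Deliberately slow processor.
	for n := range events {
		time.Sleep(50 * time.Millisecond)
		fmt.Println("processed event", n)
	}
}
```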
diff --git a/lastgersync/e2e_test.go b/lastgersync/e2e_test.go
index 888f4b84..979d55a2 100644
--- a/lastgersync/e2e_test.go
+++ b/lastgersync/e2e_test.go
@@ -40,7 +40,7 @@ func TestE2E(t *testing.T) {
 		_, err := env.GERL1Contract.UpdateExitRoot(env.AuthL1, common.HexToHash(strconv.Itoa(i)))
 		require.NoError(t, err)
 		env.L1Client.Commit()
-		time.Sleep(time.Millisecond * 50)
+		time.Sleep(time.Millisecond * 150)
 		expectedGER, err := env.GERL1Contract.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false})
 		require.NoError(t, err)
 		isInjected, err := env.AggOracleSender.IsGERAlreadyInjected(expectedGER)
@@ -57,6 +57,7 @@ func TestE2E(t *testing.T) {
 		require.NoError(t, err)
 		if lpb == lb {
 			syncerUpToDate = true
+			break
 		}
 		time.Sleep(time.Millisecond * 100)
diff --git a/lastgersync/evmdownloader.go b/lastgersync/evmdownloader.go
index 717eb095..91e05c7a 100644
--- a/lastgersync/evmdownloader.go
+++ b/lastgersync/evmdownloader.go
@@ -2,6 +2,7 @@ package lastgersync
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"math/big"
 	"time"
@@ -45,6 +46,7 @@ func newDownloader(
 	if err != nil {
 		return nil, err
 	}
+
 	return &downloader{
 		EVMDownloaderImplementation: sync.NewEVMDownloaderImplementation(
 			"lastgersync", l2Client, blockFinality, waitForNewBlocksPeriod, nil, nil, nil, rh,
@@ -65,14 +67,16 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC
 	)
 	for {
 		lastIndex, err = d.processor.getLastIndex(ctx)
-		if err == ErrNotFound {
+		if errors.Is(err, ErrNotFound) {
 			lastIndex = 0
 		} else if err != nil {
 			log.Errorf("error getting last index: %v", err)
 			attempts++
 			d.rh.Handle("getLastIndex", attempts)
+
 			continue
 		}
+
 		break
 	}
 
 	for {
@@ -80,6 +84,7 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC
 		case <-ctx.Done():
 			log.Debug("closing channel")
 			close(downloadedCh)
+
 			return
 		default:
 		}
@@ -93,12 +98,13 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC
 				log.Errorf("error getting GERs: %v", err)
 				attempts++
 				d.rh.Handle("getGERsFromIndex", attempts)
+
 				continue
 			}
+
 			break
 		}
 
-		attempts = 0
 		blockHeader := d.GetBlockHeader(ctx, lastBlock)
 		block := &sync.EVMBlock{
 			EVMBlockHeader: sync.EVMBlockHeader{
@@ -111,26 +117,30 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC
 		d.setGreatestGERInjectedFromList(block, gers)
 
 		downloadedCh <- *block
-		if block.Events != nil {
-			lastIndex = block.Events[0].(Event).L1InfoTreeIndex
+		if len(block.Events) > 0 {
+			event, ok := block.Events[0].(Event)
+			if !ok {
+				log.Errorf("unexpected type %T in events", block.Events[0])
+			}
+			lastIndex = event.L1InfoTreeIndex
 		}
 	}
 }
 
 func (d *downloader) getGERsFromIndex(ctx context.Context, fromL1InfoTreeIndex uint32) ([]Event, error) {
-	lastIndex, _, err := d.l1InfoTreesync.GetLastL1InfoTreeRootAndIndex(ctx)
-	if err == tree.ErrNotFound {
+	lastRoot, err := d.l1InfoTreesync.GetLastL1InfoTreeRoot(ctx)
+	if errors.Is(err, tree.ErrNotFound) {
 		return nil, nil
 	}
 	if err != nil {
-		return nil, fmt.Errorf("error calling GetLastL1InfoTreeRootAndIndex: %v", err)
+		return nil, fmt.Errorf("error calling GetLastL1InfoTreeRoot: %w", err)
 	}
 
 	gers := []Event{}
-	for i := fromL1InfoTreeIndex; i <= lastIndex; i++ {
+	for i := fromL1InfoTreeIndex; i <= lastRoot.Index; i++ {
 		info, err := d.l1InfoTreesync.GetInfoByIndex(ctx, i)
 		if err != nil {
-			return nil, fmt.Errorf("error calling GetInfoByIndex: %v", err)
+			return nil, fmt.Errorf("error calling GetInfoByIndex: %w", err)
 		}
 		gers = append(gers, Event{
L1InfoTreeIndex: i, @@ -155,11 +165,13 @@ func (d *downloader) setGreatestGERInjectedFromList(b *sync.EVMBlock, list []Eve event.GlobalExitRoot.Hex(), err, ) d.rh.Handle("GlobalExitRootMap", attempts) + continue } if timestamp.Cmp(big.NewInt(0)) == 1 { b.Events = []interface{}{event} } + break } } diff --git a/lastgersync/lastgersync.go b/lastgersync/lastgersync.go index 2d7ef8cb..1b40bfcf 100644 --- a/lastgersync/lastgersync.go +++ b/lastgersync/lastgersync.go @@ -2,11 +2,9 @@ package lastgersync import ( "context" - "time" "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/sync" "github.com/ethereum/go-ethereum/common" @@ -75,7 +73,9 @@ func (s *LastGERSync) Start(ctx context.Context) { s.driver.Sync(ctx) } -func (s *LastGERSync) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, atOrAfterL1InfoTreeIndex uint32) (injectedL1InfoTreeIndex uint32, ger common.Hash, err error) { +func (s *LastGERSync) GetFirstGERAfterL1InfoTreeIndex( + ctx context.Context, atOrAfterL1InfoTreeIndex uint32, +) (injectedL1InfoTreeIndex uint32, ger common.Hash, err error) { return s.processor.GetFirstGERAfterL1InfoTreeIndex(ctx, atOrAfterL1InfoTreeIndex) } diff --git a/lastgersync/processor.go b/lastgersync/processor.go index 88e89be9..628ea04a 100644 --- a/lastgersync/processor.go +++ b/lastgersync/processor.go @@ -7,6 +7,7 @@ import ( "math" "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" ethCommon "github.com/ethereum/go-ethereum/common" "github.com/ledgerwatch/erigon-lib/kv" @@ -41,11 +42,13 @@ func (b *blockWithGERs) MarshalBinary() ([]byte, error) { } func (b *blockWithGERs) UnmarshalBinary(data []byte) error { - if len(data) != 8 { - return fmt.Errorf("expected len %d, actual len %d", 8, len(data)) + const expectedDataLength = 8 + if len(data) != expectedDataLength { + return fmt.Errorf("expected len %d, actual len %d", expectedDataLength, len(data)) } b.FirstIndex = common.BytesToUint32(data[:4]) b.LastIndex = common.BytesToUint32(data[4:]) + return nil } @@ -60,6 +63,7 @@ func newProcessor(dbPath string) (*processor, error) { gerTable: {}, blockTable: {}, } + return cfg } db, err := mdbx.NewMDBX(nil). 
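blockWithGERs above packs FirstIndex and LastIndex into exactly 8 bytes (4 bytes each), and UnmarshalBinary rejects any other length. A standalone round-trip of that layout, assuming common.Uint32ToBytes/BytesToUint32 are plain big-endian conversions:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	first, last := uint32(10), uint32(14)

	// Marshal: FirstIndex in data[:4], LastIndex in data[4:].
	data := make([]byte, 8)
	binary.BigEndian.PutUint32(data[:4], first)
	binary.BigEndian.PutUint32(data[4:], last)

	// Unmarshal: enforce the expected length first, as the
	// expectedDataLength check above does.
	if len(data) != 8 {
		panic(fmt.Sprintf("expected len %d, actual len %d", 8, len(data)))
	}
	fmt.Println(binary.BigEndian.Uint32(data[:4]), binary.BigEndian.Uint32(data[4:])) // 10 14
}
```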
@@ -69,6 +73,7 @@ func newProcessor(dbPath string) (*processor, error) { if err != nil { return nil, err } + return &processor{ db: db, }, nil @@ -82,6 +87,7 @@ func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { return 0, err } defer tx.Rollback() + return p.getLastProcessedBlockWithTx(tx) } @@ -107,6 +113,7 @@ func (p *processor) getLastIndexWithTx(tx kv.Tx) (uint32, error) { if k == nil { return 0, ErrNotFound } + return common.BytesToUint32(k), nil } @@ -134,18 +141,24 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { var lastIndex int64 if lenEvents > 0 { li, err := p.getLastIndexWithTx(tx) - if err == ErrNotFound { + switch { + case errors.Is(err, ErrNotFound): lastIndex = -1 - } else if err != nil { + + case err != nil: tx.Rollback() return err - } else { + + default: lastIndex = int64(li) } } for _, e := range block.Events { - event := e.(Event) + event, ok := e.(Event) + if !ok { + log.Errorf("unexpected type %T in events", e) + } if int64(event.L1InfoTreeIndex) < lastIndex { continue } @@ -156,28 +169,49 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { event.GlobalExitRoot[:], ); err != nil { tx.Rollback() + return err } } if lenEvents > 0 { + firstEvent, ok := block.Events[0].(Event) + if !ok { + log.Errorf("unexpected type %T in events", block.Events[0]) + tx.Rollback() + + return fmt.Errorf("unexpected type %T in events", block.Events[0]) + } + + lastEvent, ok := block.Events[lenEvents-1].(Event) + if !ok { + log.Errorf("unexpected type %T in events", block.Events[lenEvents-1]) + tx.Rollback() + + return fmt.Errorf("unexpected type %T in events", block.Events[lenEvents-1]) + } + bwg := blockWithGERs{ - FirstIndex: block.Events[0].(Event).L1InfoTreeIndex, - LastIndex: block.Events[lenEvents-1].(Event).L1InfoTreeIndex + 1, + FirstIndex: firstEvent.L1InfoTreeIndex, + LastIndex: lastEvent.L1InfoTreeIndex + 1, } + data, err := bwg.MarshalBinary() if err != nil { tx.Rollback() + return err } if err = tx.Put(blockTable, common.Uint64ToBytes(block.Num), data); err != nil { tx.Rollback() + return err } } if err := p.updateLastProcessedBlockWithTx(tx, block.Num); err != nil { tx.Rollback() + return err } @@ -193,26 +227,31 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { iter, err := tx.Range(blockTable, common.Uint64ToBytes(firstReorgedBlock), nil) if err != nil { tx.Rollback() + return err } for bNumBytes, bWithGERBytes, err := iter.Next(); bNumBytes != nil; bNumBytes, bWithGERBytes, err = iter.Next() { if err != nil { tx.Rollback() + return err } if err := tx.Delete(blockTable, bNumBytes); err != nil { tx.Rollback() + return err } bWithGER := &blockWithGERs{} if err := bWithGER.UnmarshalBinary(bWithGERBytes); err != nil { tx.Rollback() + return err } for i := bWithGER.FirstIndex; i < bWithGER.LastIndex; i++ { if err := tx.Delete(gerTable, common.Uint32ToBytes(i)); err != nil { tx.Rollback() + return err } } @@ -220,6 +259,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { if err := p.updateLastProcessedBlockWithTx(tx, firstReorgedBlock-1); err != nil { tx.Rollback() + return err } @@ -228,7 +268,9 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { // GetFirstGERAfterL1InfoTreeIndex returns the first GER injected on the chain that is related to l1InfoTreeIndex // or greater -func (p *processor) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, l1InfoTreeIndex uint32) (uint32, 
ethCommon.Hash, error) {
+func (p *processor) GetFirstGERAfterL1InfoTreeIndex(
+	ctx context.Context, l1InfoTreeIndex uint32,
+) (uint32, ethCommon.Hash, error) {
 	tx, err := p.db.BeginRo(ctx)
 	if err != nil {
 		return 0, ethCommon.Hash{}, err
@@ -246,5 +288,6 @@ func (p *processor) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, l1InfoT
 	if l1InfoIndexBytes == nil {
 		return 0, ethCommon.Hash{}, ErrNotFound
 	}
+
 	return common.BytesToUint32(l1InfoIndexBytes), ethCommon.BytesToHash(ger), nil
 }
diff --git a/log/config.go b/log/config.go
index 2f166ee9..4ebbf502 100644
--- a/log/config.go
+++ b/log/config.go
@@ -3,11 +3,13 @@ package log
 // Config for log
 type Config struct {
 	// Environment defining the log format ("production" or "development").
-	// In development mode enables development mode (which makes DPanicLevel logs panic), uses a console encoder, writes to standard error, and disables sampling. Stacktraces are automatically included on logs of WarnLevel and above.
+	// In development mode enables development mode (which makes DPanicLevel logs panic),
+	// uses a console encoder, writes to standard error, and disables sampling.
+	// Stacktraces are automatically included on logs of WarnLevel and above.
 	// Check [here](https://pkg.go.dev/go.uber.org/zap@v1.24.0#NewDevelopmentConfig)
 	Environment LogEnvironment `mapstructure:"Environment" jsonschema:"enum=production,enum=development"`
 	// Level of log. The lower the value, the more logs are generated
-	Level string `mapstructure:"Level" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=dpanic,enum=panic,enum=fatal"`
+	Level string `mapstructure:"Level" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=dpanic,enum=panic,enum=fatal"` //nolint:lll
 	// Outputs
 	Outputs []string `mapstructure:"Outputs"`
 }
diff --git a/log/log.go b/log/log.go
index 6f776fa6..b6d9d1cc 100644
--- a/log/log.go
+++ b/log/log.go
@@ -30,17 +30,18 @@ type Logger struct {
 // root logger
 var log atomic.Pointer[Logger]
 
-func getDefaultLog() *Logger {
+func GetDefaultLogger() *Logger {
 	l := log.Load()
 	if l != nil {
 		return l
 	}
 	// default level: debug
-	zapLogger, _, err := NewLogger(Config{
-		Environment: EnvironmentDevelopment,
-		Level:       "debug",
-		Outputs:     []string{"stderr"},
-	})
+	zapLogger, _, err := NewLogger(
+		Config{
+			Environment: EnvironmentDevelopment,
+			Level:       "debug",
+			Outputs:     []string{"stderr"},
+		})
 	if err != nil {
 		panic(err)
 	}
@@ -70,7 +71,7 @@ func NewLogger(cfg Config) (*zap.SugaredLogger, *zap.AtomicLevel, error) {
 	var level zap.AtomicLevel
 	err := level.UnmarshalText([]byte(cfg.Level))
 	if err != nil {
-		return nil, nil, fmt.Errorf("error on setting log level: %s", err)
+		return nil, nil, fmt.Errorf("error on setting log level: %w", err)
 	}
 
 	var zapCfg zap.Config
@@ -93,17 +94,17 @@ func NewLogger(cfg Config) (*zap.SugaredLogger, *zap.AtomicLevel, error) {
 	if err != nil {
 		return nil, nil, err
 	}
-	defer logger.Sync() //nolint:gosec,errcheck
+	defer logger.Sync() //nolint:errcheck
 
 	// skip 2 callers: one for our wrapper methods and one for the package functions
-	withOptions := logger.WithOptions(zap.AddCallerSkip(2)) //nolint:gomnd
+	withOptions := logger.WithOptions(zap.AddCallerSkip(2)) //nolint:mnd
 	return withOptions.Sugar(), &level, nil
 }
 
 // WithFields returns a new Logger (derived from the root one) with additional
 // fields as per keyValuePairs. The root Logger instance is not affected.
 func WithFields(keyValuePairs ...interface{}) *Logger {
-	l := getDefaultLog().WithFields(keyValuePairs...)
+	l := GetDefaultLogger().WithFields(keyValuePairs...)
 	// since we are returning a new instance, remove one caller from the
 	// stack, because we'll be calling the returned Logger methods
@@ -121,6 +122,11 @@ func (l *Logger) WithFields(keyValuePairs ...interface{}) *Logger {
 	}
 }
 
+// GetSugaredLogger is a getter function that returns the underlying zap.SugaredLogger instance.
+func (l *Logger) GetSugaredLogger() *zap.SugaredLogger {
+	return l.x
+}
+
 func sprintStackTrace(st []tracerr.Frame) string {
 	builder := strings.Builder{}
 	// Skip deepest frame because it belongs to the go runtime and we don't
@@ -199,72 +205,57 @@ func (l *Logger) Errorf(template string, args ...interface{}) {
 
 // Debug calls log.Debug on the root Logger.
 func Debug(args ...interface{}) {
-	getDefaultLog().Debug(args...)
+	GetDefaultLogger().Debug(args...)
 }
 
 // Info calls log.Info on the root Logger.
 func Info(args ...interface{}) {
-	getDefaultLog().Info(args...)
+	GetDefaultLogger().Info(args...)
 }
 
 // Warn calls log.Warn on the root Logger.
 func Warn(args ...interface{}) {
-	getDefaultLog().Warn(args...)
+	GetDefaultLogger().Warn(args...)
 }
 
 // Error calls log.Error on the root Logger.
 func Error(args ...interface{}) {
 	args = appendStackTraceMaybeArgs(args)
-	getDefaultLog().Error(args...)
+	GetDefaultLogger().Error(args...)
 }
 
 // Fatal calls log.Fatal on the root Logger.
 func Fatal(args ...interface{}) {
 	args = appendStackTraceMaybeArgs(args)
-	getDefaultLog().Fatal(args...)
+	GetDefaultLogger().Fatal(args...)
 }
 
 // Debugf calls log.Debugf on the root Logger.
 func Debugf(template string, args ...interface{}) {
-	getDefaultLog().Debugf(template, args...)
+	GetDefaultLogger().Debugf(template, args...)
 }
 
 // Infof calls log.Infof on the root Logger.
 func Infof(template string, args ...interface{}) {
-	getDefaultLog().Infof(template, args...)
+	GetDefaultLogger().Infof(template, args...)
 }
 
 // Warnf calls log.Warnf on the root Logger.
 func Warnf(template string, args ...interface{}) {
-	getDefaultLog().Warnf(template, args...)
+	GetDefaultLogger().Warnf(template, args...)
 }
 
 // Fatalf calls log.Fatalf on the root Logger.
 func Fatalf(template string, args ...interface{}) {
 	args = appendStackTraceMaybeArgs(args)
-	getDefaultLog().Fatalf(template, args...)
+	GetDefaultLogger().Fatalf(template, args...)
 }
 
 // Errorf calls log.Errorf on the root logger and stores the error message into
 // the ErrorFile.
 func Errorf(template string, args ...interface{}) {
 	args = appendStackTraceMaybeArgs(args)
-	getDefaultLog().Errorf(template, args...)
-}
-
-// appendStackTraceMaybeKV will append the stacktrace to the KV
-func appendStackTraceMaybeKV(msg string, kv []interface{}) string {
-	for i := range kv {
-		if i%2 == 0 {
-			continue
-		}
-		if err, ok := kv[i].(error); ok {
-			err = tracerr.Wrap(err)
-			st := tracerr.StackTrace(err)
-			return fmt.Sprintf("%v: %v%v\n", msg, err, sprintStackTrace(st))
-		}
-	}
-	return msg
+	GetDefaultLogger().Errorf(template, args...)
 }
 
 // Debugw calls log.Debugw
@@ -294,27 +285,46 @@ func (l *Logger) Fatalw(msg string, kv ...interface{}) {
 
 // Debugw calls log.Debugw on the root Logger.
 func Debugw(msg string, kv ...interface{}) {
-	getDefaultLog().Debugw(msg, kv...)
+	GetDefaultLogger().Debugw(msg, kv...)
 }
 
 // Infow calls log.Infow on the root Logger.
 func Infow(msg string, kv ...interface{}) {
-	getDefaultLog().Infow(msg, kv...)
+	GetDefaultLogger().Infow(msg, kv...)
 }
 
 // Warnw calls log.Warnw on the root Logger.
 func Warnw(msg string, kv ...interface{}) {
-	getDefaultLog().Warnw(msg, kv...)
+ GetDefaultLogger().Warnw(msg, kv...) } // Errorw calls log.Errorw on the root Logger. func Errorw(msg string, kv ...interface{}) { msg = appendStackTraceMaybeKV(msg, kv) - getDefaultLog().Errorw(msg, kv...) + GetDefaultLogger().Errorw(msg, kv...) } // Fatalw calls log.Fatalw on the root Logger. func Fatalw(msg string, kv ...interface{}) { msg = appendStackTraceMaybeKV(msg, kv) - getDefaultLog().Fatalw(msg, kv...) + GetDefaultLogger().Fatalw(msg, kv...) +} + +// appendStackTraceMaybeKV will append the stacktrace to the KV +func appendStackTraceMaybeKV(msg string, kv []interface{}) string { + for i := range kv { + if i%2 == 0 { + continue + } + if err, ok := kv[i].(error); ok { + err = tracerr.Wrap(err) + st := tracerr.StackTrace(err) + return fmt.Sprintf("%v: %v%v\n", msg, err, sprintStackTrace(st)) + } + } + return msg +} + +func (l *Logger) IsEnabledLogLevel(lvl zapcore.Level) bool { + return l.x.Level().Enabled(lvl) } diff --git a/log/log_test.go b/log/log_test.go index 9d33bcd0..9a596608 100644 --- a/log/log_test.go +++ b/log/log_test.go @@ -20,7 +20,7 @@ func TestLog(t *testing.T) { cfg := Config{ Environment: EnvironmentDevelopment, Level: "debug", - Outputs: []string{"stderr"}, //[]string{"stdout", "test.log"} + Outputs: []string{"stderr"}, // []string{"stdout", "test.log"} } Init(cfg) diff --git a/merkletree/key.go b/merkletree/key.go index 1534f462..7926df60 100644 --- a/merkletree/key.go +++ b/merkletree/key.go @@ -52,7 +52,8 @@ func defaultCapIn() ([4]uint64, error) { // KeyEthAddrBalance returns the key of balance leaf: // hk0: H([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0]) -// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 0, 0], [hk0[0], hk0[1], hk0[2], hk0[3]]) +// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 0, 0], +// [hk0[0], hk0[1], hk0[2], hk0[3]]) func KeyEthAddrBalance(ethAddr common.Address) ([]byte, error) { capIn, err := defaultCapIn() if err != nil { @@ -64,7 +65,8 @@ func KeyEthAddrBalance(ethAddr common.Address) ([]byte, error) { // KeyEthAddrNonce returns the key of nonce leaf: // hk0: H([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0]) -// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 1, 0], [hk0[0], hk0[1], hk0[2], hk0[3]] +// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 1, 0], +// [hk0[0], hk0[1], hk0[2], hk0[3]] func KeyEthAddrNonce(ethAddr common.Address) ([]byte, error) { capIn, err := defaultCapIn() if err != nil { @@ -76,7 +78,8 @@ func KeyEthAddrNonce(ethAddr common.Address) ([]byte, error) { // KeyContractCode returns the key of contract code leaf: // hk0: H([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0]) -// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 2, 0], [hk0[0], hk0[1], hk0[2], hk0[3]] +// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 2, 0], +// [hk0[0], hk0[1], hk0[2], hk0[3]] func KeyContractCode(ethAddr common.Address) ([]byte, error) { capIn, err := defaultCapIn() if err != nil { @@ -87,8 +90,10 @@ func KeyContractCode(ethAddr common.Address) ([]byte, error) { } // KeyContractStorage returns the key of contract storage position leaf: -// hk0: H([stoPos[0:4], stoPos[4:8], stoPos[8:12], stoPos[12:16], stoPos[16:20], stoPos[20:24], stoPos[24:28], stoPos[28:32], [0, 0, 0, 0]) -// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 3, 0], [hk0[0], hk0[1], hk0[2], hk0[3]) +// hk0: H([stoPos[0:4], 
stoPos[4:8], stoPos[8:12], stoPos[12:16], stoPos[16:20], stoPos[20:24],
+// stoPos[24:28], stoPos[28:32], [0, 0, 0, 0])
+// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 3, 0],
+// [hk0[0], hk0[1], hk0[2], hk0[3])
 func KeyContractStorage(ethAddr common.Address, storagePos []byte) ([]byte, error) {
 	storageBI := new(big.Int).SetBytes(storagePos)
@@ -122,14 +127,14 @@ func HashContractBytecode(code []byte) ([]uint64, error) {
 	)
 
 	// add 0x01
-	code = append(code, 0x01) // nolint:gomnd
+	code = append(code, 0x01) //nolint:mnd
 
 	// add padding
-	for len(code)%(56) != 0 { // nolint:gomnd
-		code = append(code, 0x00) // nolint:gomnd
+	for len(code)%(56) != 0 {
+		code = append(code, 0x00) //nolint:mnd
 	}
 
-	code[len(code)-1] = code[len(code)-1] | 0x80 // nolint:gomnd
+	code[len(code)-1] = code[len(code)-1] | 0x80 //nolint:mnd
 
 	numHashes := int(math.Ceil(float64(len(code)) / float64(maxBytesToAdd)))
@@ -190,7 +195,8 @@ func HashContractBytecode(code []byte) ([]uint64, error) {
 
 // KeyCodeLength returns the key of code length leaf:
 // hk0: H([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0])
-// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 4, 0], [hk0[0], hk0[1], hk0[2], hk0[3]]
+// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 4, 0],
+// [hk0[0], hk0[1], hk0[2], hk0[3]]
 func KeyCodeLength(ethAddr common.Address) ([]byte, error) {
 	capIn, err := defaultCapIn()
 	if err != nil {
diff --git a/merkletree/split.go b/merkletree/split.go
index 63fcae33..e264807a 100644
--- a/merkletree/split.go
+++ b/merkletree/split.go
@@ -17,16 +17,16 @@ const wordLength = 64
 
 // scalar2fea splits a *big.Int into array of 32bit uint64 values.
 func scalar2fea(value *big.Int) []uint64 {
-	val := make([]uint64, 8)                          //nolint:gomnd
-	mask, _ := new(big.Int).SetString("FFFFFFFF", 16) //nolint:gomnd
+	val := make([]uint64, 8)                          //nolint:mnd
+	mask, _ := new(big.Int).SetString("FFFFFFFF", 16) //nolint:mnd
 	val[0] = new(big.Int).And(value, mask).Uint64()
-	val[1] = new(big.Int).And(new(big.Int).Rsh(value, 32), mask).Uint64()  //nolint:gomnd
-	val[2] = new(big.Int).And(new(big.Int).Rsh(value, 64), mask).Uint64()  //nolint:gomnd
-	val[3] = new(big.Int).And(new(big.Int).Rsh(value, 96), mask).Uint64()  //nolint:gomnd
-	val[4] = new(big.Int).And(new(big.Int).Rsh(value, 128), mask).Uint64() //nolint:gomnd
-	val[5] = new(big.Int).And(new(big.Int).Rsh(value, 160), mask).Uint64() //nolint:gomnd
-	val[6] = new(big.Int).And(new(big.Int).Rsh(value, 192), mask).Uint64() //nolint:gomnd
-	val[7] = new(big.Int).And(new(big.Int).Rsh(value, 224), mask).Uint64() //nolint:gomnd
+	val[1] = new(big.Int).And(new(big.Int).Rsh(value, 32), mask).Uint64()  //nolint:mnd
+	val[2] = new(big.Int).And(new(big.Int).Rsh(value, 64), mask).Uint64()  //nolint:mnd
+	val[3] = new(big.Int).And(new(big.Int).Rsh(value, 96), mask).Uint64()  //nolint:mnd
+	val[4] = new(big.Int).And(new(big.Int).Rsh(value, 128), mask).Uint64() //nolint:mnd
+	val[5] = new(big.Int).And(new(big.Int).Rsh(value, 160), mask).Uint64() //nolint:mnd
+	val[6] = new(big.Int).And(new(big.Int).Rsh(value, 192), mask).Uint64() //nolint:mnd
+	val[7] = new(big.Int).And(new(big.Int).Rsh(value, 224), mask).Uint64() //nolint:mnd
 
 	return val
 }
@@ -55,13 +55,11 @@ func H4ToString(h4 []uint64) string {
 
 // StringToh4 converts a hex string into an array of 4 Scalars of 64 bits.
func StringToh4(str string) ([]uint64, error) { - if strings.HasPrefix(str, "0x") { // nolint - str = str[2:] - } + str = strings.TrimPrefix(str, "0x") bi, ok := new(big.Int).SetString(str, hex.Base) if !ok { - return nil, fmt.Errorf("Could not convert %q into big int", str) + return nil, fmt.Errorf("could not convert %q into big int", str) } return scalarToh4(bi), nil @@ -71,7 +69,7 @@ func StringToh4(str string) ([]uint64, error) { func scalarToh4(s *big.Int) []uint64 { b := ScalarToFilledByteSlice(s) - r := make([]uint64, 4) //nolint:gomnd + r := make([]uint64, 4) //nolint:mnd f, _ := hex.DecodeHex("0xFFFFFFFFFFFFFFFF") fbe := binary.BigEndian.Uint64(f) diff --git a/reorgdetector/reorgdetector.go b/reorgdetector/reorgdetector.go index 22c4693e..7a995bac 100644 --- a/reorgdetector/reorgdetector.go +++ b/reorgdetector/reorgdetector.go @@ -134,7 +134,7 @@ func (rd *ReorgDetector) detectReorgInTrackedList(ctx context.Context) error { headersCacheLock.Lock() currentHeader, ok := headersCache[hdr.Num] if !ok || currentHeader == nil { - if currentHeader, err = rd.client.HeaderByNumber(ctx, big.NewInt(int64(hdr.Num))); err != nil { + if currentHeader, err = rd.client.HeaderByNumber(ctx, new(big.Int).SetUint64(hdr.Num)); err != nil { headersCacheLock.Unlock() return fmt.Errorf("failed to get the header: %w", err) } diff --git a/reorgdetector/reorgdetector_test.go b/reorgdetector/reorgdetector_test.go index 7adec4ca..7efe0892 100644 --- a/reorgdetector/reorgdetector_test.go +++ b/reorgdetector/reorgdetector_test.go @@ -18,9 +18,9 @@ import ( func newSimulatedL1(t *testing.T, auth *bind.TransactOpts) *simulated.Backend { t.Helper() - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd + balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) - blockGasLimit := uint64(999999999999999999) //nolint:gomnd + blockGasLimit := uint64(999999999999999999) client := simulated.NewBackend(map[common.Address]types.Account{ auth.From: { Balance: balance, diff --git a/rpc/bridge.go b/rpc/bridge.go index 0b550e72..23c67409 100644 --- a/rpc/bridge.go +++ b/rpc/bridge.go @@ -22,10 +22,13 @@ const ( // BRIDGE is the namespace of the bridge service BRIDGE = "bridge" meterName = "github.com/0xPolygon/cdk/rpc" + + zeroHex = "0x0" ) // BridgeEndpoints contains implementations for the "bridge" RPC endpoints type BridgeEndpoints struct { + logger *log.Logger meter metric.Meter readTimeout time.Duration writeTimeout time.Duration @@ -40,6 +43,7 @@ type BridgeEndpoints struct { // NewBridgeEndpoints returns InteropEndpoints func NewBridgeEndpoints( + logger *log.Logger, writeTimeout time.Duration, readTimeout time.Duration, networkID uint32, @@ -52,6 +56,7 @@ func NewBridgeEndpoints( ) *BridgeEndpoints { meter := otel.Meter(meterName) return &BridgeEndpoints{ + logger: logger, meter: meter, readTimeout: readTimeout, writeTimeout: writeTimeout, @@ -74,7 +79,7 @@ func (b *BridgeEndpoints) L1InfoTreeIndexForBridge(networkID uint32, depositCoun c, merr := b.meter.Int64Counter("l1_info_tree_index_for_bridge") if merr != nil { - log.Warnf("failed to create l1_info_tree_index_for_bridge counter: %s", merr) + b.logger.Warnf("failed to create l1_info_tree_index_for_bridge counter: %s", merr) } c.Add(ctx, 1) @@ -83,20 +88,26 @@ func (b *BridgeEndpoints) L1InfoTreeIndexForBridge(networkID uint32, depositCoun // TODO: special treatment of the error when not found, // as it's expected that it will take some time for the L1 Info tree to be updated if err != nil { - return "0x0", 
rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get l1InfoTreeIndex, error: %s", err))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get l1InfoTreeIndex, error: %s", err))
 		}
 		return l1InfoTreeIndex, nil
 	}
 	if networkID == b.networkID {
 		// TODO: special treatment of the error when not found,
 		// as it's expected that it will take some time for the L1 Info tree to be updated
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("TODO: batchsync / certificatesync missing implementation"))
+		return zeroHex, rpc.NewRPCError(
+			rpc.DefaultErrorCode,
+			"TODO: batchsync / certificatesync missing implementation",
+		)
 	}
-	return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support network %d", networkID))
+	return zeroHex, rpc.NewRPCError(
+		rpc.DefaultErrorCode,
+		fmt.Sprintf("this client does not support network %d", networkID),
+	)
 }
 
 // InjectedInfoAfterIndex returns the first GER injected onto the network that is linked
-// to the given index or greater. This call is usefull to understand when a bridge is ready to be claimed
+// to the given index or greater. This call is useful to understand when a bridge is ready to be claimed
 // on its destination network
 func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (interface{}, rpc.Error) {
 	ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout)
@@ -104,29 +115,32 @@ func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeInd
 	c, merr := b.meter.Int64Counter("injected_info_after_index")
 	if merr != nil {
-		log.Warnf("failed to create injected_info_after_index counter: %s", merr)
+		b.logger.Warnf("failed to create injected_info_after_index counter: %s", merr)
 	}
 	c.Add(ctx, 1)
 
 	if networkID == 0 {
 		info, err := b.l1InfoTree.GetInfoByIndex(ctx, l1InfoTreeIndex)
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
+			return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
 		}
 		return info, nil
 	}
 	if networkID == b.networkID {
 		injectedL1InfoTreeIndex, _, err := b.injectedGERs.GetFirstGERAfterL1InfoTreeIndex(ctx, l1InfoTreeIndex)
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
+			return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
 		}
 		info, err := b.l1InfoTree.GetInfoByIndex(ctx, injectedL1InfoTreeIndex)
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
+			return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
 		}
 		return info, nil
 	}
-	return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support network %d", networkID))
+	return zeroHex, rpc.NewRPCError(
+		rpc.DefaultErrorCode,
+		fmt.Sprintf("this client does not support network %d", networkID),
+	)
 }
 
 type ClaimProof struct {
@@ -138,42 +152,57 @@ type ClaimProof struct {
 // ClaimProof returns the proofs needed to claim a bridge. NetworkID and depositCount refer to the bridge origin
 // while globalExitRoot should be already injected on the destination network.
// This call needs to be done to a client of the same network where the bridge tx was sent
-func (b *BridgeEndpoints) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (interface{}, rpc.Error) {
+func (b *BridgeEndpoints) ClaimProof(
+	networkID uint32, depositCount uint32, l1InfoTreeIndex uint32,
+) (interface{}, rpc.Error) {
 	ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout)
 	defer cancel()
 
 	c, merr := b.meter.Int64Counter("claim_proof")
 	if merr != nil {
-		log.Warnf("failed to create claim_proof counter: %s", merr)
+		b.logger.Warnf("failed to create claim_proof counter: %s", merr)
 	}
 	c.Add(ctx, 1)
 
 	info, err := b.l1InfoTree.GetInfoByIndex(ctx, l1InfoTreeIndex)
 	if err != nil {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get info from the tree: %s", err))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get info from the tree: %s", err))
 	}
 	proofRollupExitRoot, err := b.l1InfoTree.GetRollupExitTreeMerkleProof(ctx, networkID, info.GlobalExitRoot)
 	if err != nil {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get rollup exit proof, error: %s", err))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get rollup exit proof, error: %s", err))
 	}
 	var proofLocalExitRoot [32]common.Hash
-	if networkID == 0 {
+	switch {
+	case networkID == 0:
 		proofLocalExitRoot, err = b.bridgeL1.GetProof(ctx, depositCount, info.MainnetExitRoot)
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit proof, error: %s", err))
+			return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit proof, error: %s", err))
 		}
-	} else if networkID == b.networkID {
+
+	case networkID == b.networkID:
 		localExitRoot, err := b.l1InfoTree.GetLocalExitRoot(ctx, networkID, info.RollupExitRoot)
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit root from rollup exit tree, error: %s", err))
+			return zeroHex, rpc.NewRPCError(
+				rpc.DefaultErrorCode,
+				fmt.Sprintf("failed to get local exit root from rollup exit tree, error: %s", err),
+			)
 		}
 		proofLocalExitRoot, err = b.bridgeL2.GetProof(ctx, depositCount, localExitRoot)
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit proof, error: %s", err))
+			return zeroHex, rpc.NewRPCError(
+				rpc.DefaultErrorCode,
+				fmt.Sprintf("failed to get local exit proof, error: %s", err),
+			)
 		}
-	} else {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support network %d", networkID))
+
+	default:
+		return zeroHex, rpc.NewRPCError(
+			rpc.DefaultErrorCode,
+			fmt.Sprintf("this client does not support network %d", networkID),
+		)
 	}
+
 	return ClaimProof{
 		ProofLocalExitRoot:  proofLocalExitRoot,
 		ProofRollupExitRoot: proofRollupExitRoot,
@@ -189,18 +218,21 @@ func (b *BridgeEndpoints) SponsorClaim(claim claimsponsor.Claim) (interface{}, r
 	c, merr := b.meter.Int64Counter("sponsor_claim")
 	if merr != nil {
-		log.Warnf("failed to create sponsor_claim counter: %s", merr)
+		b.logger.Warnf("failed to create sponsor_claim counter: %s", merr)
 	}
 	c.Add(ctx, 1)
 
 	if b.sponsor == nil {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support claim sponsoring"))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, "this client does not support claim sponsoring")
 	}
 	if claim.DestinationNetwork != b.networkID {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client only sponsors claims for network %d", b.networkID))
+		return zeroHex, rpc.NewRPCError(
+			rpc.DefaultErrorCode,
+			fmt.Sprintf("this client only sponsors claims for network %d", b.networkID),
+		)
 	}
 	if err := b.sponsor.AddClaimToQueue(ctx, &claim); err != nil {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("error adding claim to the queue %s", err))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("error adding claim to the queue %s", err))
 	}
 	return nil, nil
 }
@@ -213,16 +245,16 @@ func (b *BridgeEndpoints) GetSponsoredClaimStatus(globalIndex *big.Int) (interfa
 	c, merr := b.meter.Int64Counter("get_sponsored_claim_status")
 	if merr != nil {
-		log.Warnf("failed to create get_sponsored_claim_status counter: %s", merr)
+		b.logger.Warnf("failed to create get_sponsored_claim_status counter: %s", merr)
 	}
 	c.Add(ctx, 1)
 
 	if b.sponsor == nil {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support claim sponsoring"))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, "this client does not support claim sponsoring")
 	}
 	claim, err := b.sponsor.GetClaim(ctx, globalIndex)
 	if err != nil {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get claim status, error: %s", err))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get claim status, error: %s", err))
 	}
 	return claim.Status, nil
 }
diff --git a/rpc/bridge_client.go b/rpc/bridge_client.go
index 0063e660..04d57700 100644
--- a/rpc/bridge_client.go
+++ b/rpc/bridge_client.go
@@ -34,9 +34,11 @@ func (c *Client) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32)
 }
 
 // InjectedInfoAfterIndex returns the first GER injected onto the network that is linked
-// to the given index or greater. This call is usefull to understand when a bridge is ready to be claimed
+// to the given index or greater. This call is useful to understand when a bridge is ready to be claimed
 // on its destination network
-func (c *Client) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) {
+func (c *Client) InjectedInfoAfterIndex(
+	networkID uint32, l1InfoTreeIndex uint32,
+) (*l1infotreesync.L1InfoTreeLeaf, error) {
 	response, err := rpc.JSONRPCCall(c.url, "bridge_injectedInfoAfterIndex", networkID, l1InfoTreeIndex)
 	if err != nil {
 		return nil, err
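On the client side, InjectedInfoAfterIndex (per its comment) answers "is this bridge claimable yet?". A hedged polling sketch against the wrapper above; NewClient and the endpoint URL are assumptions for the example, while the method signature matches bridge_client.go:

```go
package main

import (
	"fmt"
	"time"

	"github.com/0xPolygon/cdk/rpc"
)

func main() {
	// Assumed constructor and URL; only InjectedInfoAfterIndex's
	// signature is taken from the diff above.
	client := rpc.NewClient("http://localhost:5576")

	const networkID, l1InfoTreeIndex = uint32(1), uint32(42)
	for {
		info, err := client.InjectedInfoAfterIndex(networkID, l1InfoTreeIndex)
		if err == nil {
			fmt.Printf("claimable: GER %s injected at index %d\n",
				info.GlobalExitRoot, info.L1InfoTreeIndex)
			return
		}
		// Not injected yet (or a transient error): retry.
		time.Sleep(time.Second)
	}
}
```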
diff --git a/sequencesender/config.go b/sequencesender/config.go
index 68093db7..3e138e49 100644
--- a/sequencesender/config.go
+++ b/sequencesender/config.go
@@ -14,8 +14,9 @@ type Config struct {
 	WaitPeriodSendSequence types.Duration `mapstructure:"WaitPeriodSendSequence"`
 	// LastBatchVirtualizationTimeMaxWaitPeriod is time since sequences should be sent
 	LastBatchVirtualizationTimeMaxWaitPeriod types.Duration `mapstructure:"LastBatchVirtualizationTimeMaxWaitPeriod"`
-	// L1BlockTimestampMargin is the time difference (margin) that must exists between last L1 block and last L2 block in the sequence before
-	// to send the sequence to L1. If the difference is lower than this value then sequencesender will wait until the difference is equal or greater
+	// L1BlockTimestampMargin is the time difference (margin) that must exist between last L1 block
+	// and last L2 block in the sequence before sending the sequence to L1. If the difference is
+	// lower than this value, then sequencesender will wait until the difference is equal to or greater
 	L1BlockTimestampMargin types.Duration `mapstructure:"L1BlockTimestampMargin"`
 	// MaxTxSizeForL1 is the maximum size a single transaction can have. This field has
 	// non-trivial consequences: larger transactions than 128KB are significantly harder and
@@ -62,7 +63,7 @@ type Config struct {
 	// MaxBatchesForL1 is the maximum amount of batches to be sequenced in a single L1 tx
 	MaxBatchesForL1 uint64 `mapstructure:"MaxBatchesForL1"`
 	// BlockFinality indicates the status of the blocks that will be queried in order to sync
-	BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"`
+	BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll
 
 	// RPCURL is the URL of the RPC server
 	RPCURL string `mapstructure:"RPCURL"`
diff --git a/sequencesender/seqsendertypes/types.go b/sequencesender/seqsendertypes/types.go
index 21b5834e..5d903dc5 100644
--- a/sequencesender/seqsendertypes/types.go
+++ b/sequencesender/seqsendertypes/types.go
@@ -5,7 +5,7 @@ import (
 )
 
 type Batch interface {
-	//underlyingType *ethmantypes.Batch
+	// underlyingType *ethmantypes.Batch
 	DeepCopy() Batch
 	LastCoinbase() common.Address
 	ForcedBatchTimestamp() uint64
@@ -40,7 +40,4 @@ type Sequence interface {
 	String() string
 	// WRITE
 	SetLastVirtualBatchNumber(batchNumber uint64)
-	//SetL1InfoRoot(hash common.Hash)
-	//SetOldAccInputHash(hash common.Hash)
-	//SetAccInputHash(hash common.Hash)
 }
diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go
index b29d8ad3..c1378bb9 100644
--- a/sequencesender/sequencesender.go
+++ b/sequencesender/sequencesender.go
@@ -22,6 +22,7 @@ import (
 // SequenceSender represents a sequence sender
 type SequenceSender struct {
 	cfg          Config
+	logger       *log.Logger
 	ethTxManager *ethtxmanager.Client
 	etherman     *etherman.Client
 	currentNonce uint64
@@ -49,10 +50,12 @@ type sequenceData struct {
 }
 
 // New inits sequence sender
-func New(cfg Config, etherman *etherman.Client, txBuilder txbuilder.TxBuilder) (*SequenceSender, error) {
+func New(cfg Config, logger *log.Logger,
+	etherman *etherman.Client, txBuilder txbuilder.TxBuilder) (*SequenceSender, error) {
 	// Create sequencesender
 	s := SequenceSender{
 		cfg:             cfg,
+		logger:          logger,
 		etherman:        etherman,
 		ethTransactions: make(map[common.Hash]*ethTxData),
 		ethTxData:       make(map[common.Hash][]byte),
@@ -61,12 +64,13 @@ func New(cfg Config, etherman *etherman.Client, txBuilder txbuilder.TxBuilder) (
 		seqSendingStopped: false,
 		TxBuilder:         txBuilder,
 	}
-	log.Infof("Seq_sender: %s", txBuilder.String())
+
+	logger.Infof("TxBuilder configuration: %s", txBuilder.String())
 
 	// Restore pending sent sequences
 	err := s.loadSentSequencesTransactions()
 	if err != nil {
-		log.Fatalf("error restoring sent sequences from file", err)
+		s.logger.Fatalf("error restoring sent sequences from file: %v", err)
 		return nil, err
 	}
 
@@ -79,7 +83,7 @@ func New(cfg Config, etherman *etherman.Client, txBuilder txbuilder.TxBuilder) (
 	s.ethTxManager, err = ethtxmanager.New(cfg.EthTxManager)
 	if err != nil {
-		log.Fatalf("error creating ethtxmanager client: %v", err)
+		s.logger.Fatalf("error creating ethtxmanager client: %v", err)
 		return nil, err
 	}
 
@@ -96,22 +100,22 @@ func (s *SequenceSender) Start(ctx context.Context) {
 	s.nonceMutex.Lock()
 	s.currentNonce, err =
s.etherman.CurrentNonce(ctx, s.cfg.L2Coinbase) if err != nil { - log.Fatalf("failed to get current nonce from %v, error: %v", s.cfg.L2Coinbase, err) + s.logger.Fatalf("failed to get current nonce from %v, error: %v", s.cfg.L2Coinbase, err) } else { - log.Infof("current nonce for %v is %d", s.cfg.L2Coinbase, s.currentNonce) + s.logger.Infof("current nonce for %v is %d", s.cfg.L2Coinbase, s.currentNonce) } s.nonceMutex.Unlock() // Get latest virtual state batch from L1 err = s.getLatestVirtualBatch() if err != nil { - log.Fatalf("error getting latest sequenced batch, error: %v", err) + s.logger.Fatalf("error getting latest sequenced batch, error: %v", err) } // Sync all monitored sent L1 tx err = s.syncAllEthTxResults(ctx) if err != nil { - log.Fatalf("failed to sync monitored tx results, error: %v", err) + s.logger.Fatalf("failed to sync monitored tx results, error: %v", err) } // Current batch to sequence @@ -138,16 +142,16 @@ func (s *SequenceSender) batchRetrieval(ctx context.Context) error { for { select { case <-ctx.Done(): - log.Info("context cancelled, stopping batch retrieval") + s.logger.Info("context cancelled, stopping batch retrieval") return ctx.Err() default: // Try to retrieve batch from RPC rpcBatch, err := s.getBatchFromRPC(currentBatchNumber) if err != nil { if err == state.ErrNotFound { - log.Infof("batch %d not found in RPC", currentBatchNumber) + s.logger.Infof("batch %d not found in RPC", currentBatchNumber) } else { - log.Errorf("error getting batch %d from RPC: %v", currentBatchNumber, err) + s.logger.Errorf("error getting batch %d from RPC: %v", currentBatchNumber, err) } <-ticker.C continue @@ -155,7 +159,7 @@ func (s *SequenceSender) batchRetrieval(ctx context.Context) error { // Check if the batch is closed if !rpcBatch.IsClosed() { - log.Infof("batch %d is not closed yet", currentBatchNumber) + s.logger.Infof("batch %d is not closed yet", currentBatchNumber) <-ticker.C continue } @@ -180,7 +184,7 @@ func (s *SequenceSender) populateSequenceData(rpcBatch *rpcbatch.RPCBatch, batch // Decode batch to retrieve the l1 info tree index batchRaw, err := state.DecodeBatchV2(rpcBatch.L2Data()) if err != nil { - log.Errorf("Failed to decode batch data, err: %v", err) + s.logger.Errorf("Failed to decode batch data, err: %v", err) return err } @@ -238,7 +242,7 @@ func (s *SequenceSender) purgeSequences() { } delete(s.sequenceData, toPurge[i]) } - log.Infof("batches purged count: %d, fromBatch: %d, toBatch: %d", len(toPurge), firstPurged, lastPurged) + s.logger.Infof("batches purged count: %d, fromBatch: %d, toBatch: %d", len(toPurge), firstPurged, lastPurged) } s.mutexSequence.Unlock() } @@ -246,14 +250,14 @@ func (s *SequenceSender) purgeSequences() { // tryToSendSequence checks if there is a sequence and it's worth it to send to L1 func (s *SequenceSender) tryToSendSequence(ctx context.Context) { // Update latest virtual batch - log.Infof("updating virtual batch") + s.logger.Infof("updating virtual batch") err := s.getLatestVirtualBatch() if err != nil { return } // Update state of transactions - log.Infof("updating tx results") + s.logger.Infof("updating tx results") countPending, err := s.syncEthTxResults(ctx) if err != nil { return @@ -261,22 +265,22 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { // Check if the sequence sending is stopped if s.seqSendingStopped { - log.Warnf("sending is stopped!") + s.logger.Warnf("sending is stopped!") return } // Check if reached the maximum number of pending transactions if countPending >= s.cfg.MaxPendingTx { 
-		log.Infof("max number of pending txs (%d) reached. Waiting for some to be completed", countPending)
+		s.logger.Infof("max number of pending txs (%d) reached. Waiting for some to be completed", countPending)
 		return
 	}
 
 	// Check if should send sequence to L1
-	log.Infof("getting sequences to send")
+	s.logger.Infof("getting sequences to send")
 	sequence, err := s.getSequencesToSend(ctx)
 	if err != nil || sequence == nil || sequence.Len() == 0 {
 		if err != nil {
-			log.Errorf("error getting sequences: %v", err)
+			s.logger.Errorf("error getting sequences: %v", err)
 		}
 		return
 	}
@@ -286,33 +290,35 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) {
 	lastBatch := sequence.LastBatch()
 	lastL2BlockTimestamp := lastBatch.LastL2BLockTimestamp()
 
-	log.Debugf(sequence.String())
-	log.Infof("sending sequences to L1. From batch %d to batch %d", firstBatch.BatchNumber(), lastBatch.BatchNumber())
+	s.logger.Debugf(sequence.String())
+	s.logger.Infof("sending sequences to L1. From batch %d to batch %d", firstBatch.BatchNumber(), lastBatch.BatchNumber())
 
-	// Wait until last L1 block timestamp is L1BlockTimestampMargin seconds above the timestamp of the last L2 block in the sequence
+	// Wait until last L1 block timestamp is L1BlockTimestampMargin seconds above the timestamp
+	// of the last L2 block in the sequence
 	timeMargin := int64(s.cfg.L1BlockTimestampMargin.Seconds())
 	for {
 		// Get header of the last L1 block
 		lastL1BlockHeader, err := s.etherman.GetLatestBlockHeader(ctx)
 		if err != nil {
-			log.Errorf("failed to get last L1 block timestamp, err: %v", err)
+			s.logger.Errorf("failed to get last L1 block timestamp, err: %v", err)
 			return
 		}
 
 		elapsed, waitTime := s.marginTimeElapsed(lastL2BlockTimestamp, lastL1BlockHeader.Time, timeMargin)
 
 		if !elapsed {
-			log.Infof("waiting at least %d seconds to send sequences, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) in the sequence is lower than %d seconds",
+			s.logger.Infof("waiting at least %d seconds to send sequences, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) in the sequence is lower than %d seconds",
 				waitTime, lastL1BlockHeader.Number, lastL1BlockHeader.Time, lastBatch.BatchNumber(), lastL2BlockTimestamp, timeMargin)
 			time.Sleep(time.Duration(waitTime) * time.Second)
 		} else {
-			log.Infof("continuing, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) in the sequence is greater than %d seconds",
+			s.logger.Infof("continuing, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) in the sequence is greater than %d seconds",
 				lastL1BlockHeader.Number, lastL1BlockHeader.Time, lastBatch.BatchNumber(), lastL2BlockTimestamp, timeMargin)
 			break
 		}
 	}
 
-	// Sanity check: Wait also until current time is L1BlockTimestampMargin seconds above the timestamp of the last L2 block in the sequence
+	// Sanity check: Wait also until current time is L1BlockTimestampMargin seconds above the
+	// timestamp of the last L2 block in the sequence
 	for {
 		currentTime := uint64(time.Now().Unix())
@@ -320,43 +326,43 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) {
 
 		// Wait if the time difference is less than L1BlockTimestampMargin
 		if !elapsed {
-			log.Infof("waiting at least %d seconds to send sequences, time difference between now (ts: %d) and last L2 block %d (ts: %d) in the sequence is lower than %d seconds",
+			s.logger.Infof("waiting at least %d seconds to send sequences, time difference between now (ts: %d) and last L2 block %d (ts: %d) in the sequence is lower than %d seconds",
 				waitTime, currentTime, lastBatch.BatchNumber(), lastL2BlockTimestamp, timeMargin)
 			time.Sleep(time.Duration(waitTime) * time.Second)
 		} else {
-			log.Infof("[SeqSender]sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) in the sequence is also greater than %d seconds",
+			s.logger.Infof("[SeqSender]sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) in the sequence is also greater than %d seconds",
+				currentTime, lastBatch.BatchNumber(), lastL2BlockTimestamp, timeMargin)
 			break
 		}
 	}
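Both wait loops rely on the same small computation: given the last L2 block timestamp, a reference time (the latest L1 block first, then the wall clock), and the configured margin, decide whether enough time has elapsed and how long to wait otherwise. A self-contained sketch of that check; marginTimeElapsed's real signature may differ:

```go
package main

import "fmt"

// marginElapsed reports whether refTime is at least timeMargin seconds
// past l2BlockTimestamp and, if not, how many seconds are still missing.
func marginElapsed(l2BlockTimestamp, refTime uint64, timeMargin int64) (bool, int64) {
	diff := int64(refTime) - int64(l2BlockTimestamp)
	if diff < timeMargin {
		return false, timeMargin - diff
	}
	return true, 0
}

func main() {
	ok, wait := marginElapsed(1_700_000_000, 1_700_000_010, 30)
	fmt.Println(ok, wait) // false 20: sleep 20 more seconds before sequencing
}
```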
sequence is lower than %d seconds", waitTime, currentTime, lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin) time.Sleep(time.Duration(waitTime) * time.Second) } else { - log.Infof("[SeqSender]sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) in the sequence is also greater than %d seconds", + s.logger.Infof("[SeqSender]sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) in the sequence is also greater than %d seconds", currentTime, lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin) break } } // Send sequences to L1 - log.Debugf(sequence.String()) - log.Infof("sending sequences to L1. From batch %d to batch %d", firstBatch.BatchNumber(), lastBatch.BatchNumber()) + s.logger.Debugf(sequence.String()) + s.logger.Infof("sending sequences to L1. From batch %d to batch %d", firstBatch.BatchNumber(), lastBatch.BatchNumber()) tx, err := s.TxBuilder.BuildSequenceBatchesTx(ctx, sequence) if err != nil { - log.Errorf("error building sequenceBatches tx: %v", err) + s.logger.Errorf("error building sequenceBatches tx: %v", err) return } // Get latest virtual state batch from L1 err = s.getLatestVirtualBatch() if err != nil { - log.Fatalf("error getting latest sequenced batch, error: %v", err) + s.logger.Fatalf("error getting latest sequenced batch, error: %v", err) } sequence.SetLastVirtualBatchNumber(s.latestVirtualBatchNumber) txToEstimateGas, err := s.TxBuilder.BuildSequenceBatchesTx(ctx, sequence) if err != nil { - log.Errorf("error building sequenceBatches tx to estimate gas: %v", err) + s.logger.Errorf("error building sequenceBatches tx to estimate gas: %v", err) return } gas, err := s.etherman.EstimateGas(ctx, s.cfg.SenderAddress, tx.To(), nil, txToEstimateGas.Data()) if err != nil { - log.Errorf("error estimating gas: ", err) + s.logger.Errorf("error estimating gas: ", err) return } @@ -385,7 +391,10 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes // Check if the next batch belongs to a new forkid, in this case we need to stop sequencing as we need to // wait the upgrade of forkid is completed and s.cfg.NumBatchForkIdUpgrade is disabled (=0) again if (s.cfg.ForkUpgradeBatchNumber != 0) && (batchNumber == (s.cfg.ForkUpgradeBatchNumber + 1)) { - return nil, fmt.Errorf("aborting sequencing process as we reached the batch %d where a new forkid is applied (upgrade)", s.cfg.ForkUpgradeBatchNumber+1) + return nil, fmt.Errorf( + "aborting sequencing process as we reached the batch %d where a new forkid is applied (upgrade)", + s.cfg.ForkUpgradeBatchNumber+1, + ) } // New potential batch to add to the sequence @@ -393,7 +402,10 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes // If the coinbase changes, the sequence ends here if len(sequenceBatches) > 0 && batch.LastCoinbase() != prevCoinbase { - log.Infof("batch with different coinbase (batch %v, sequence %v), sequence will be sent to this point", prevCoinbase, batch.LastCoinbase) + s.logger.Infof( + "batch with different coinbase (batch %v, sequence %v), sequence will be sent to this point", + prevCoinbase, batch.LastCoinbase, + ) return s.TxBuilder.NewSequence(ctx, sequenceBatches, s.cfg.L2Coinbase) } prevCoinbase = batch.LastCoinbase() @@ -409,25 +421,29 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes return newSeq, nil } - // Check if the current batch is the last before a change to a new forkid, in this case we need to close and send the sequence to L1 + 
// Check if the current batch is the last before a change to a new forkid + // In this case we need to close and send the sequence to L1 if (s.cfg.ForkUpgradeBatchNumber != 0) && (batchNumber == (s.cfg.ForkUpgradeBatchNumber)) { - log.Infof("sequence should be sent to L1, as we have reached the batch %d from which a new forkid is applied (upgrade)", s.cfg.ForkUpgradeBatchNumber) + s.logger.Infof("sequence should be sent to L1, as we have reached the batch %d "+ + "from which a new forkid is applied (upgrade)", + s.cfg.ForkUpgradeBatchNumber, + ) return s.TxBuilder.NewSequence(ctx, sequenceBatches, s.cfg.L2Coinbase) } } // Reached the latest batch. Decide if it's worth sending the sequence, or wait for new batches if len(sequenceBatches) == 0 { - log.Infof("no batches to be sequenced") + s.logger.Infof("no batches to be sequenced") return nil, nil } if s.latestVirtualTime.Before(time.Now().Add(-s.cfg.LastBatchVirtualizationTimeMaxWaitPeriod.Duration)) { - log.Infof("sequence should be sent, too much time without sending anything to L1") + s.logger.Infof("sequence should be sent, too much time without sending anything to L1") return s.TxBuilder.NewSequence(ctx, sequenceBatches, s.cfg.L2Coinbase) } - log.Infof("not enough time has passed since last batch was virtualized and the sequence could be bigger") + s.logger.Infof("not enough time has passed since last batch was virtualized and the sequence could be bigger") return nil, nil } @@ -441,22 +457,24 @@ func (s *SequenceSender) getLatestVirtualBatch() error { s.latestVirtualBatchNumber, err = s.etherman.GetLatestBatchNumber() if err != nil { - log.Errorf("error getting latest virtual batch, error: %v", err) + s.logger.Errorf("error getting latest virtual batch, error: %v", err) return errors.New("fail to get latest virtual batch") } - log.Infof("latest virtual batch is %d", s.latestVirtualBatchNumber) + s.logger.Infof("latest virtual batch is %d", s.latestVirtualBatchNumber) return nil } // marginTimeElapsed checks if the time between currentTime and l2BlockTimestamp is greater than timeMargin. // If it's greater returns true, otherwise it returns false and the waitTime needed to achieve this timeMargin -func (s *SequenceSender) marginTimeElapsed(l2BlockTimestamp uint64, currentTime uint64, timeMargin int64) (bool, int64) { +func (s *SequenceSender) marginTimeElapsed( + l2BlockTimestamp uint64, currentTime uint64, timeMargin int64, +) (bool, int64) { // Check the time difference between L2 block and currentTime var timeDiff int64 if l2BlockTimestamp >= currentTime { - //L2 block timestamp is above currentTime, negative timeDiff. We do in this way to avoid uint64 overflow + // L2 block timestamp is above currentTime, negative timeDiff. We do it this way to avoid uint64 overflow timeDiff = int64(-(l2BlockTimestamp - currentTime)) } else { timeDiff = int64(currentTime - l2BlockTimestamp) @@ -465,7 +483,7 @@ func (s *SequenceSender) marginTimeElapsed(l2BlockTimestamp uint64, currentTime // Check if the time difference is less than timeMargin (L1BlockTimestampMargin) if timeDiff < timeMargin { var waitTime int64 - if timeDiff < 0 { //L2 block timestamp is above currentTime + if timeDiff < 0 { // L2 block timestamp is above currentTime waitTime = timeMargin + (-timeDiff) } else { waitTime = timeMargin - timeDiff @@ -480,8 +498,8 @@ func (s *SequenceSender) marginTimeElapsed(l2BlockTimestamp uint64, currentTime func (s *SequenceSender) logFatalf(template string, args ...interface{}) { s.seqSendingStopped = true for { - log.Errorf(template, args...)
- log.Errorf("sequence sending stopped.") + s.logger.Errorf(template, args...) + s.logger.Errorf("sequence sending stopped.") time.Sleep(10 * time.Second) } } diff --git a/sequencesender/txbuilder/banana_base.go b/sequencesender/txbuilder/banana_base.go index 15a25f56..7b451ed8 100644 --- a/sequencesender/txbuilder/banana_base.go +++ b/sequencesender/txbuilder/banana_base.go @@ -34,6 +34,7 @@ type l1Client interface { } type TxBuilderBananaBase struct { + logger *log.Logger rollupContract rollupBananaBaseContractor globalExitRootContract globalExitRootBananaContractor l1InfoTree l1InfoSyncer @@ -43,6 +44,7 @@ type TxBuilderBananaBase struct { } func NewTxBuilderBananaBase( + logger *log.Logger, rollupContract rollupBananaBaseContractor, gerContract globalExitRootBananaContractor, l1InfoTree l1InfoSyncer, @@ -51,6 +53,7 @@ func NewTxBuilderBananaBase( opts bind.TransactOpts, ) *TxBuilderBananaBase { return &TxBuilderBananaBase{ + logger: logger, rollupContract: rollupContract, globalExitRootContract: gerContract, l1InfoTree: l1InfoTree, @@ -58,7 +61,6 @@ func NewTxBuilderBananaBase( blockFinality: blockFinality, opts: opts, } - } func (t *TxBuilderBananaBase) NewBatchFromL2Block(l2Block *datastream.L2Block) seqsendertypes.Batch { @@ -72,11 +74,10 @@ func (t *TxBuilderBananaBase) NewBatchFromL2Block(l2Block *datastream.L2Block) s return NewBananaBatch(batch) } -func (t *TxBuilderBananaBase) NewSequence(ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address) (seqsendertypes.Sequence, error) { - ethBatches, err := toEthermanBatches(batches) - if err != nil { - return nil, err - } +func (t *TxBuilderBananaBase) NewSequence( + ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address, +) (seqsendertypes.Sequence, error) { + ethBatches := toEthermanBatches(batches) sequence := etherman.NewSequenceBanana(ethBatches, coinbase) var greatestL1Index uint32 for _, b := range sequence.Batches { @@ -86,11 +87,11 @@ func (t *TxBuilderBananaBase) NewSequence(ctx context.Context, batches []seqsend } header, err := t.ethClient.HeaderByNumber(ctx, t.blockFinality) if err != nil { - return nil, fmt.Errorf("error calling HeaderByNumber, with block finality %d: %v", t.blockFinality.Int64(), err) + return nil, fmt.Errorf("error calling HeaderByNumber, with block finality %d: %w", t.blockFinality.Int64(), err) } info, err := t.l1InfoTree.GetLatestInfoUntilBlock(ctx, header.Number.Uint64()) if err != nil { - return nil, fmt.Errorf("error calling GetLatestInfoUntilBlock with block num %d: %v", header.Number.Uint64(), err) + return nil, fmt.Errorf("error calling GetLatestInfoUntilBlock with block num %d: %w", header.Number.Uint64(), err) } if info.L1InfoTreeIndex >= greatestL1Index { sequence.CounterL1InfoRoot = info.L1InfoTreeIndex + 1 @@ -113,7 +114,7 @@ func (t *TxBuilderBananaBase) NewSequence(ctx context.Context, batches []seqsend return nil, err } - oldAccInputHash := common.BytesToHash(accInputHash[:]) //copy it + oldAccInputHash := common.BytesToHash(accInputHash[:]) // copy it for _, batch := range sequence.Batches { infoRootHash := sequence.L1InfoRoot @@ -126,7 +127,9 @@ func (t *TxBuilderBananaBase) NewSequence(ctx context.Context, batches []seqsend blockHash = batch.ForcedBlockHashL1 } - accInputHash = cdkcommon.CalculateAccInputHash(accInputHash, batch.L2Data, infoRootHash, timestamp, batch.LastCoinbase, blockHash) + accInputHash = cdkcommon.CalculateAccInputHash( + t.logger, accInputHash, batch.L2Data, infoRootHash, timestamp, batch.LastCoinbase, blockHash, + ) } 
sequence.OldAccInputHash = oldAccInputHash @@ -156,17 +159,14 @@ func convertToSequenceBanana(sequences seqsendertypes.Sequence) (etherman.Sequen } for _, batch := range sequences.Batches() { - ethBatch, err := toEthermanBatch(batch) - if err != nil { - return etherman.SequenceBanana{}, err - } + ethBatch := toEthermanBatch(batch) ethermanSequence.Batches = append(ethermanSequence.Batches, ethBatch) } return ethermanSequence, nil } -func toEthermanBatch(batch seqsendertypes.Batch) (etherman.Batch, error) { +func toEthermanBatch(batch seqsendertypes.Batch) etherman.Batch { return etherman.Batch{ L2Data: batch.L2Data(), LastCoinbase: batch.LastCoinbase(), @@ -177,18 +177,14 @@ func toEthermanBatch(batch seqsendertypes.Batch) (etherman.Batch, error) { L1InfoTreeIndex: batch.L1InfoTreeIndex(), LastL2BLockTimestamp: batch.LastL2BLockTimestamp(), GlobalExitRoot: batch.GlobalExitRoot(), - }, nil + } } -func toEthermanBatches(batch []seqsendertypes.Batch) ([]etherman.Batch, error) { +func toEthermanBatches(batch []seqsendertypes.Batch) []etherman.Batch { result := make([]etherman.Batch, len(batch)) for i, b := range batch { - var err error - result[i], err = toEthermanBatch(b) - if err != nil { - return nil, err - } + result[i] = toEthermanBatch(b) } - return result, nil + return result } diff --git a/sequencesender/txbuilder/banana_base_test.go b/sequencesender/txbuilder/banana_base_test.go index 6ad08b80..af4b05c0 100644 --- a/sequencesender/txbuilder/banana_base_test.go +++ b/sequencesender/txbuilder/banana_base_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" @@ -31,7 +32,7 @@ func TestBananaBaseNewSequenceEmpty(t *testing.T) { require.NotNil(t, seq) require.NoError(t, err) // TODO check values - //require.Equal(t, lastAcc, seq.LastAccInputHash()) + // require.Equal(t, lastAcc, seq.LastAccInputHash()) } func TestBananaBaseNewBatchFromL2Block(t *testing.T) { @@ -88,12 +89,15 @@ type testDataBananaBase struct { } func newBananaBaseTestData(t *testing.T) *testDataBananaBase { + t.Helper() + zkevmContractMock := mocks_txbuilder.NewRollupBananaBaseContractor(t) gerContractMock := mocks_txbuilder.NewGlobalExitRootBananaContractor(t) opts := bind.TransactOpts{} l1Client := mocks_txbuilder.NewL1Client(t) l1InfoSyncer := mocks_txbuilder.NewL1InfoSyncer(t) sut := txbuilder.NewTxBuilderBananaBase( + log.GetDefaultLogger(), zkevmContractMock, gerContractMock, l1InfoSyncer, l1Client, big.NewInt(0), opts, diff --git a/sequencesender/txbuilder/banana_types.go b/sequencesender/txbuilder/banana_types.go index 5a38cab0..c09095b6 100644 --- a/sequencesender/txbuilder/banana_types.go +++ b/sequencesender/txbuilder/banana_types.go @@ -57,8 +57,12 @@ func (b *BananaSequence) Len() int { } func (b *BananaSequence) String() string { - res := fmt.Sprintf("Seq/Banana: L2Coinbase: %s, OldAccInputHash: %x, AccInputHash: %x, L1InfoRoot: %x, MaxSequenceTimestamp: %d, IndexL1InfoRoot: %d", - b.L2Coinbase().String(), b.OldAccInputHash.String(), b.AccInputHash.String(), b.L1InfoRoot().String(), b.MaxSequenceTimestamp(), b.IndexL1InfoRoot()) + res := fmt.Sprintf( + "Seq/Banana: L2Coinbase: %s, OldAccInputHash: %x, AccInputHash: %x, L1InfoRoot: %x, "+ + "MaxSequenceTimestamp: %d, IndexL1InfoRoot: %d", + b.L2Coinbase().String(), b.OldAccInputHash.String(), b.AccInputHash.String(), 
b.L1InfoRoot().String(), + b.MaxSequenceTimestamp(), b.IndexL1InfoRoot(), + ) for i, batch := range b.Batches() { res += fmt.Sprintf("\n\tBatch %d: %s", i, batch.String()) @@ -127,8 +131,12 @@ func (b *BananaBatch) L1InfoTreeIndex() uint32 { } func (b *BananaBatch) String() string { - return fmt.Sprintf("Batch/Banana: LastCoinbase: %s, ForcedBatchTimestamp: %d, ForcedGlobalExitRoot: %x, ForcedBlockHashL1: %x, L2Data: %x, LastL2BLockTimestamp: %d, BatchNumber: %d, GlobalExitRoot: %x, L1InfoTreeIndex: %d", - b.LastCoinbase().String(), b.ForcedBatchTimestamp(), b.ForcedGlobalExitRoot().String(), b.ForcedBlockHashL1().String(), b.L2Data(), b.LastL2BLockTimestamp(), b.BatchNumber(), b.GlobalExitRoot().String(), b.L1InfoTreeIndex(), + return fmt.Sprintf("Batch/Banana: LastCoinbase: %s, ForcedBatchTimestamp: %d, ForcedGlobalExitRoot: %x, "+ + "ForcedBlockHashL1: %x, L2Data: %x, LastL2BLockTimestamp: %d, BatchNumber: %d, "+ + "GlobalExitRoot: %x, L1InfoTreeIndex: %d", + b.LastCoinbase().String(), b.ForcedBatchTimestamp(), b.ForcedGlobalExitRoot().String(), + b.ForcedBlockHashL1().String(), b.L2Data(), b.LastL2BLockTimestamp(), b.BatchNumber(), + b.GlobalExitRoot().String(), b.L1InfoTreeIndex(), ) } diff --git a/sequencesender/txbuilder/banana_validium.go b/sequencesender/txbuilder/banana_validium.go index 9ab1b929..68fa6762 100644 --- a/sequencesender/txbuilder/banana_validium.go +++ b/sequencesender/txbuilder/banana_validium.go @@ -25,10 +25,19 @@ type TxBuilderBananaValidium struct { type rollupBananaValidiumContractor interface { rollupBananaBaseContractor - SequenceBatchesValidium(opts *bind.TransactOpts, batches []polygonvalidiumetrog.PolygonValidiumEtrogValidiumBatchData, indexL1InfoRoot uint32, maxSequenceTimestamp uint64, expectedFinalAccInputHash [32]byte, l2Coinbase common.Address, dataAvailabilityMessage []byte) (*types.Transaction, error) + SequenceBatchesValidium( + opts *bind.TransactOpts, + batches []polygonvalidiumetrog.PolygonValidiumEtrogValidiumBatchData, + indexL1InfoRoot uint32, + maxSequenceTimestamp uint64, + expectedFinalAccInputHash [32]byte, + l2Coinbase common.Address, + dataAvailabilityMessage []byte, + ) (*types.Transaction, error) } func NewTxBuilderBananaValidium( + logger *log.Logger, rollupContract rollupBananaValidiumContractor, gerContract globalExitRootBananaContractor, da dataavailability.SequenceSenderBanana, opts bind.TransactOpts, maxBatchesForL1 uint64, @@ -36,15 +45,20 @@ func NewTxBuilderBananaValidium( ethClient l1Client, blockFinality *big.Int, ) *TxBuilderBananaValidium { + txBuilderBase := *NewTxBuilderBananaBase(logger, rollupContract, + gerContract, l1InfoTree, ethClient, blockFinality, opts) + return &TxBuilderBananaValidium{ - TxBuilderBananaBase: *NewTxBuilderBananaBase(rollupContract, gerContract, l1InfoTree, ethClient, blockFinality, opts), + TxBuilderBananaBase: txBuilderBase, da: da, condNewSeq: NewConditionalNewSequenceNumBatches(maxBatchesForL1), rollupContract: rollupContract, } } -func (t *TxBuilderBananaValidium) NewSequenceIfWorthToSend(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64) (seqsendertypes.Sequence, error) { +func (t *TxBuilderBananaValidium) NewSequenceIfWorthToSend( + ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64, +) (seqsendertypes.Sequence, error) { return t.condNewSeq.NewSequenceIfWorthToSend(ctx, t, sequenceBatches, l2Coinbase) } @@ -55,32 +69,34 @@ func (t *TxBuilderBananaValidium) SetCondNewSeq(cond 
CondNewSequence) CondNewSeq { return previous } -func (t *TxBuilderBananaValidium) BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*types.Transaction, error) { +func (t *TxBuilderBananaValidium) BuildSequenceBatchesTx( + ctx context.Context, sequences seqsendertypes.Sequence, +) (*types.Transaction, error) { // TODO: param sender // Post sequences to DA backend var dataAvailabilityMessage []byte var err error ethseq, err := convertToSequenceBanana(sequences) if err != nil { - log.Error("error converting sequences to etherman: ", err) + t.logger.Error("error converting sequences to etherman: ", err) return nil, err } dataAvailabilityMessage, err = t.da.PostSequenceBanana(ctx, ethseq) if err != nil { - log.Error("error posting sequences to the data availability protocol: ", err) + t.logger.Error("error posting sequences to the data availability protocol: ", err) return nil, err } if dataAvailabilityMessage == nil { err := fmt.Errorf("data availability message is nil") - log.Error("error posting sequences to the data availability protocol: ", err.Error()) + t.logger.Error("error posting sequences to the data availability protocol: ", err.Error()) return nil, err } // Build sequence data tx, err := t.internalBuildSequenceBatchesTx(ethseq, dataAvailabilityMessage) if err != nil { - log.Errorf("error estimating new sequenceBatches to add to ethtxmanager: ", err) + t.logger.Errorf("error estimating new sequenceBatches to add to ethtxmanager: %v", err) return nil, err } return tx, nil @@ -100,7 +116,9 @@ func (t *TxBuilderBananaValidium) internalBuildSequenceBatchesTx(sequence etherm return t.sequenceBatchesValidium(newopts, sequence, dataAvailabilityMessage) } -func (t *TxBuilderBananaValidium) sequenceBatchesValidium(opts bind.TransactOpts, sequence etherman.SequenceBanana, dataAvailabilityMessage []byte) (*types.Transaction, error) { +func (t *TxBuilderBananaValidium) sequenceBatchesValidium( + opts bind.TransactOpts, sequence etherman.SequenceBanana, dataAvailabilityMessage []byte, +) (*types.Transaction, error) { batches := make([]polygonvalidiumetrog.PolygonValidiumEtrogValidiumBatchData, len(sequence.Batches)) for i, batch := range sequence.Batches { var ger common.Hash @@ -116,12 +134,15 @@ func (t *TxBuilderBananaValidium) sequenceBatchesValidium(opts bind.TransactOpts } } - log.Infof("building banana sequence tx. AccInputHash: %s", sequence.AccInputHash.Hex()) - tx, err := t.rollupContract.SequenceBatchesValidium(&opts, batches, sequence.CounterL1InfoRoot, sequence.MaxSequenceTimestamp, sequence.AccInputHash, sequence.L2Coinbase, dataAvailabilityMessage) + t.logger.Infof("building banana sequence tx. 
AccInputHash: %s", sequence.AccInputHash.Hex()) + tx, err := t.rollupContract.SequenceBatchesValidium( + &opts, batches, sequence.CounterL1InfoRoot, sequence.MaxSequenceTimestamp, + sequence.AccInputHash, sequence.L2Coinbase, dataAvailabilityMessage, + ) if err != nil { - log.Debugf("Batches to send: %+v", batches) - log.Debug("l2CoinBase: ", sequence.L2Coinbase) - log.Debug("Sequencer address: ", opts.From) + t.logger.Debugf("Batches to send: %+v", batches) + t.logger.Debug("l2CoinBase: ", sequence.L2Coinbase) + t.logger.Debug("Sequencer address: ", opts.From) } return tx, err diff --git a/sequencesender/txbuilder/banana_validium_test.go b/sequencesender/txbuilder/banana_validium_test.go index 97ec2286..8f764595 100644 --- a/sequencesender/txbuilder/banana_validium_test.go +++ b/sequencesender/txbuilder/banana_validium_test.go @@ -9,6 +9,7 @@ import ( "github.com/0xPolygon/cdk/dataavailability/mocks_da" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" @@ -16,7 +17,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -45,7 +45,6 @@ func TestBananaValidiumBuildSequenceBatchesTxSequenceErrorsFromDA(t *testing.T) testData.da.EXPECT().PostSequenceBanana(ctx, mock.Anything).Return(nil, fmt.Errorf("test error")) _, err = testData.sut.BuildSequenceBatchesTx(ctx, seq) require.Error(t, err, "error posting sequences to the data availability protocol: test error") - } func TestBananaValidiumBuildSequenceBatchesTxSequenceDAOk(t *testing.T) { @@ -59,8 +58,8 @@ func TestBananaValidiumBuildSequenceBatchesTxSequenceDAOk(t *testing.T) { ctx := context.TODO() daMessage := []byte{1} testData.da.EXPECT().PostSequenceBanana(ctx, mock.Anything).Return(daMessage, nil) - inner := ðtypes.LegacyTx{} - seqBatchesTx := ethtypes.NewTx(inner) + inner := &types.LegacyTx{} + seqBatchesTx := types.NewTx(inner) testData.rollupContract.EXPECT().SequenceBatchesValidium(mock.MatchedBy(func(opts *bind.TransactOpts) bool { return opts.NoSend == true }), mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, daMessage).Return(seqBatchesTx, nil).Once() @@ -81,6 +80,8 @@ type testDataBananaValidium struct { } func newBananaValidiumTestData(t *testing.T, maxBatchesForL1 uint64) *testDataBananaValidium { + t.Helper() + zkevmContractMock := mocks_txbuilder.NewRollupBananaValidiumContractor(t) gerContractMock := mocks_txbuilder.NewGlobalExitRootBananaContractor(t) condMock := mocks_txbuilder.NewCondNewSequence(t) @@ -90,6 +91,7 @@ func newBananaValidiumTestData(t *testing.T, maxBatchesForL1 uint64) *testDataBa opts := bind.TransactOpts{} sut := txbuilder.NewTxBuilderBananaValidium( + log.GetDefaultLogger(), zkevmContractMock, gerContractMock, daMock, diff --git a/sequencesender/txbuilder/banana_zkevm.go b/sequencesender/txbuilder/banana_zkevm.go index c0216d52..42668323 100644 --- a/sequencesender/txbuilder/banana_zkevm.go +++ b/sequencesender/txbuilder/banana_zkevm.go @@ -21,7 +21,14 @@ type TxBuilderBananaZKEVM struct { type rollupBananaZKEVMContractor interface { rollupBananaBaseContractor - SequenceBatches(opts *bind.TransactOpts, batches 
[]polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, indexL1InfoRoot uint32, maxSequenceTimestamp uint64, expectedFinalAccInputHash [32]byte, l2Coinbase common.Address) (*types.Transaction, error) + SequenceBatches( + opts *bind.TransactOpts, + batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, + indexL1InfoRoot uint32, + maxSequenceTimestamp uint64, + expectedFinalAccInputHash [32]byte, + l2Coinbase common.Address, + ) (*types.Transaction, error) } type globalExitRootBananaZKEVMContractor interface { @@ -29,6 +36,7 @@ type globalExitRootBananaZKEVMContractor interface { } func NewTxBuilderBananaZKEVM( + logger *log.Logger, rollupContract rollupBananaZKEVMContractor, gerContract globalExitRootBananaZKEVMContractor, opts bind.TransactOpts, @@ -37,14 +45,19 @@ func NewTxBuilderBananaZKEVM( ethClient l1Client, blockFinality *big.Int, ) *TxBuilderBananaZKEVM { + txBuilderBase := *NewTxBuilderBananaBase(logger, rollupContract, + gerContract, l1InfoTree, ethClient, blockFinality, opts) + return &TxBuilderBananaZKEVM{ - TxBuilderBananaBase: *NewTxBuilderBananaBase(rollupContract, gerContract, l1InfoTree, ethClient, blockFinality, opts), + TxBuilderBananaBase: txBuilderBase, condNewSeq: NewConditionalNewSequenceMaxSize(maxTxSizeForL1), rollupContract: rollupContract, } } -func (t *TxBuilderBananaZKEVM) NewSequenceIfWorthToSend(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64) (seqsendertypes.Sequence, error) { +func (t *TxBuilderBananaZKEVM) NewSequenceIfWorthToSend( + ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64, +) (seqsendertypes.Sequence, error) { return t.condNewSeq.NewSequenceIfWorthToSend(ctx, t, sequenceBatches, l2Coinbase) } @@ -55,11 +68,13 @@ func (t *TxBuilderBananaZKEVM) SetCondNewSeq(cond CondNewSequence) CondNewSequen return previous } -func (t *TxBuilderBananaZKEVM) BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*types.Transaction, error) { +func (t *TxBuilderBananaZKEVM) BuildSequenceBatchesTx( + ctx context.Context, sequences seqsendertypes.Sequence, +) (*types.Transaction, error) { var err error ethseq, err := convertToSequenceBanana(sequences) if err != nil { - log.Error("error converting sequences to etherman: ", err) + t.logger.Error("error converting sequences to etherman: ", err) return nil, err } newopts := t.opts @@ -72,13 +87,15 @@ func (t *TxBuilderBananaZKEVM) BuildSequenceBatchesTx(ctx context.Context, seque // Build sequence data tx, err := t.sequenceBatchesRollup(newopts, ethseq) if err != nil { - log.Errorf("error estimating new sequenceBatches to add to ethtxmanager: ", err) + t.logger.Errorf("error estimating new sequenceBatches to add to ethtxmanager: %v", err) return nil, err } return tx, nil } -func (t *TxBuilderBananaZKEVM) sequenceBatchesRollup(opts bind.TransactOpts, sequence etherman.SequenceBanana) (*types.Transaction, error) { +func (t *TxBuilderBananaZKEVM) sequenceBatchesRollup( + opts bind.TransactOpts, sequence etherman.SequenceBanana, +) (*types.Transaction, error) { batches := make([]polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, len(sequence.Batches)) for i, batch := range sequence.Batches { var ger common.Hash @@ -94,11 +111,13 @@ func (t *TxBuilderBananaZKEVM) sequenceBatchesRollup(opts bind.TransactOpts, seq } } - tx, err := t.rollupContract.SequenceBatches(&opts, batches, sequence.CounterL1InfoRoot, sequence.MaxSequenceTimestamp, sequence.AccInputHash, sequence.L2Coinbase)
+ tx, err := t.rollupContract.SequenceBatches( + &opts, batches, sequence.CounterL1InfoRoot, sequence.MaxSequenceTimestamp, sequence.AccInputHash, sequence.L2Coinbase, + ) if err != nil { - log.Debugf("Batches to send: %+v", batches) - log.Debug("l2CoinBase: ", sequence.L2Coinbase) - log.Debug("Sequencer address: ", opts.From) + t.logger.Debugf("Batches to send: %+v", batches) + t.logger.Debug("l2CoinBase: ", sequence.L2Coinbase) + t.logger.Debug("Sequencer address: ", opts.From) } return tx, err diff --git a/sequencesender/txbuilder/banana_zkevm_test.go b/sequencesender/txbuilder/banana_zkevm_test.go index 10043b8a..a4ff4bd7 100644 --- a/sequencesender/txbuilder/banana_zkevm_test.go +++ b/sequencesender/txbuilder/banana_zkevm_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" @@ -15,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -43,8 +43,8 @@ func TestBananaZkevmBuildSequenceBatchesTxOk(t *testing.T) { seq, err := newSequenceBananaZKEVMForTest(testData) require.NoError(t, err) - inner := &ethtypes.LegacyTx{} - tx := ethtypes.NewTx(inner) + inner := &types.LegacyTx{} + tx := types.NewTx(inner) // It checks that SequenceBatches is not going to be sent testData.rollupContract.EXPECT().SequenceBatches(mock.MatchedBy(func(opts *bind.TransactOpts) bool { @@ -82,6 +82,8 @@ type testDataBananaZKEVM struct { } func newBananaZKEVMTestData(t *testing.T, maxTxSizeForL1 uint64) *testDataBananaZKEVM { + t.Helper() + zkevmContractMock := mocks_txbuilder.NewRollupBananaZKEVMContractor(t) gerContractMock := mocks_txbuilder.NewGlobalExitRootBananaContractor(t) condMock := mocks_txbuilder.NewCondNewSequence(t) @@ -89,6 +91,7 @@ func newBananaZKEVMTestData(t *testing.T, maxTxSizeForL1 uint64) *testDataBanana l1Client := mocks_txbuilder.NewL1Client(t) l1InfoSyncer := mocks_txbuilder.NewL1InfoSyncer(t) sut := txbuilder.NewTxBuilderBananaZKEVM( + log.GetDefaultLogger(), zkevmContractMock, gerContractMock, opts, diff --git a/sequencesender/txbuilder/elderberry_base.go b/sequencesender/txbuilder/elderberry_base.go index c076d7d9..8e61e174 100644 --- a/sequencesender/txbuilder/elderberry_base.go +++ b/sequencesender/txbuilder/elderberry_base.go @@ -4,6 +4,7 @@ import ( "context" "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/state/datastream" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -11,12 +12,14 @@ ) type TxBuilderElderberryBase struct { - opts bind.TransactOpts + logger *log.Logger + opts bind.TransactOpts } -func NewTxBuilderElderberryBase(opts bind.TransactOpts) *TxBuilderElderberryBase { +func NewTxBuilderElderberryBase(logger *log.Logger, opts bind.TransactOpts) *TxBuilderElderberryBase { return &TxBuilderElderberryBase{ - opts: opts, + logger: logger, + opts: opts, } } @@ -25,7 +28,9 @@ func (t *TxBuilderElderberryBase) SetAuth(auth *bind.TransactOpts) { t.opts = *auth } -func (t *TxBuilderElderberryBase) NewSequence(ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address)
(seqsendertypes.Sequence, error) { +func (t *TxBuilderElderberryBase) NewSequence( + ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address, +) (seqsendertypes.Sequence, error) { seq := ElderberrySequence{ l2Coinbase: coinbase, batches: batches, diff --git a/sequencesender/txbuilder/elderberry_base_test.go b/sequencesender/txbuilder/elderberry_base_test.go index c2507907..806a47f8 100644 --- a/sequencesender/txbuilder/elderberry_base_test.go +++ b/sequencesender/txbuilder/elderberry_base_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/state/datastream" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -13,7 +14,7 @@ import ( func TestElderberryBaseNewSequence(t *testing.T) { opts := bind.TransactOpts{} - sut := NewTxBuilderElderberryBase(opts) + sut := NewTxBuilderElderberryBase(log.GetDefaultLogger(), opts) require.NotNil(t, sut) seq, err := sut.NewSequence(context.TODO(), nil, common.Address{}) require.NotNil(t, seq) @@ -36,7 +37,6 @@ func TestElderberryBaseNewBatchFromL2Block(t *testing.T) { require.Equal(t, l2Block.L1InfotreeIndex, batch.L1InfoTreeIndex()) require.Equal(t, common.BytesToAddress(l2Block.Coinbase), batch.LastCoinbase()) require.Equal(t, common.BytesToHash(l2Block.GlobalExitRoot), batch.GlobalExitRoot()) - } func TestElderberryBasegetLastSequencedBatchNumberEmpty(t *testing.T) { @@ -92,8 +92,10 @@ func TestElderberryBaseGetLastSequencedBatchFirstBatchIsZeroThrowAPanic(t *testi } func newElderberryBaseSUT(t *testing.T) *TxBuilderElderberryBase { + t.Helper() + opts := bind.TransactOpts{} - sut := NewTxBuilderElderberryBase(opts) + sut := NewTxBuilderElderberryBase(log.GetDefaultLogger(), opts) require.NotNil(t, sut) return sut } diff --git a/sequencesender/txbuilder/elderberry_validium.go b/sequencesender/txbuilder/elderberry_validium.go index c2acc649..62973b02 100644 --- a/sequencesender/txbuilder/elderberry_validium.go +++ b/sequencesender/txbuilder/elderberry_validium.go @@ -26,20 +26,31 @@ type TxBuilderElderberryValidium struct { } type rollupElderberryValidiumContractor interface { - SequenceBatchesValidium(opts *bind.TransactOpts, batches []polygonvalidiumetrog.PolygonValidiumEtrogValidiumBatchData, maxSequenceTimestamp uint64, initSequencedBatch uint64, l2Coinbase common.Address, dataAvailabilityMessage []byte) (*types.Transaction, error) + SequenceBatchesValidium( + opts *bind.TransactOpts, + batches []polygonvalidiumetrog.PolygonValidiumEtrogValidiumBatchData, + maxSequenceTimestamp uint64, + initSequencedBatch uint64, + l2Coinbase common.Address, + dataAvailabilityMessage []byte, + ) (*types.Transaction, error) } -func NewTxBuilderElderberryValidium(zkevm contracts.RollupElderberryType, +func NewTxBuilderElderberryValidium( + logger *log.Logger, + zkevm contracts.RollupElderberryType, da dataavailability.SequenceSenderElderberry, opts bind.TransactOpts, maxBatchesForL1 uint64) *TxBuilderElderberryValidium { return &TxBuilderElderberryValidium{ da: da, - TxBuilderElderberryBase: *NewTxBuilderElderberryBase(opts), + TxBuilderElderberryBase: *NewTxBuilderElderberryBase(logger, opts), condNewSeq: NewConditionalNewSequenceNumBatches(maxBatchesForL1), rollupContract: zkevm, } } -func (t *TxBuilderElderberryValidium) NewSequenceIfWorthToSend(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64) (seqsendertypes.Sequence, error) { +func (t *TxBuilderElderberryValidium) 
NewSequenceIfWorthToSend( + ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64, +) (seqsendertypes.Sequence, error) { return t.condNewSeq.NewSequenceIfWorthToSend(ctx, t, sequenceBatches, l2Coinbase) } @@ -50,19 +61,21 @@ func (t *TxBuilderElderberryValidium) SetCondNewSeq(cond CondNewSequence) CondNe return previous } -func (t *TxBuilderElderberryValidium) BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*types.Transaction, error) { +func (t *TxBuilderElderberryValidium) BuildSequenceBatchesTx( + ctx context.Context, sequences seqsendertypes.Sequence, +) (*types.Transaction, error) { if sequences == nil || sequences.Len() == 0 { return nil, fmt.Errorf("can't sequence an empty sequence") } batchesData := convertToBatchesData(sequences) dataAvailabilityMessage, err := t.da.PostSequenceElderberry(ctx, batchesData) if err != nil { - log.Error("error posting sequences to the data availability protocol: ", err) + t.logger.Error("error posting sequences to the data availability protocol: ", err) return nil, err } if dataAvailabilityMessage == nil { err := fmt.Errorf("data availability message is nil") - log.Error("error posting sequences to the data availability protocol: ", err.Error()) + t.logger.Error("error posting sequences to the data availability protocol: ", err.Error()) return nil, err } newopts := t.opts @@ -87,13 +100,16 @@ func (t *TxBuilderElderberryValidium) buildSequenceBatchesTxValidium(opts *bind. batches[i] = polygonvalidiumetrog.PolygonValidiumEtrogValidiumBatchData{ TransactionsHash: crypto.Keccak256Hash(seq.L2Data()), ForcedGlobalExitRoot: ger, - ForcedTimestamp: uint64(seq.ForcedBatchTimestamp()), + ForcedTimestamp: seq.ForcedBatchTimestamp(), ForcedBlockHashL1: seq.ForcedBlockHashL1(), } } lastSequencedBatchNumber := getLastSequencedBatchNumber(sequences) - log.Infof("SequenceBatchesValidium(from=%s, len(batches)=%d, MaxSequenceTimestamp=%d, lastSequencedBatchNumber=%d, L2Coinbase=%s, dataAvailabilityMessage=%s)", - t.opts.From.String(), len(batches), sequences.MaxSequenceTimestamp(), lastSequencedBatchNumber, sequences.L2Coinbase().String(), hex.EncodeToString(dataAvailabilityMessage)) + t.logger.Infof("SequenceBatchesValidium(from=%s, len(batches)=%d, MaxSequenceTimestamp=%d, "+ + "lastSequencedBatchNumber=%d, L2Coinbase=%s, dataAvailabilityMessage=%s)", + t.opts.From.String(), len(batches), sequences.MaxSequenceTimestamp(), lastSequencedBatchNumber, + sequences.L2Coinbase().String(), hex.EncodeToString(dataAvailabilityMessage), + ) tx, err := t.rollupContract.SequenceBatchesValidium(opts, batches, sequences.MaxSequenceTimestamp(), lastSequencedBatchNumber, sequences.L2Coinbase(), dataAvailabilityMessage) if err != nil { diff --git a/sequencesender/txbuilder/elderberry_validium_test.go b/sequencesender/txbuilder/elderberry_validium_test.go index adc8456a..6ca80a58 100644 --- a/sequencesender/txbuilder/elderberry_validium_test.go +++ b/sequencesender/txbuilder/elderberry_validium_test.go @@ -10,6 +10,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry/polygonvalidiumetrog" "github.com/0xPolygon/cdk/dataavailability/mocks_da" "github.com/0xPolygon/cdk/etherman/contracts" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state/datastream" @@ -98,6 +99,8 @@ type testDataElderberryValidium struct { } func newElderberryValidiumSUT(t *testing.T) 
*testDataElderberryValidium { + t.Helper() + zkevmContract, err := contracts.NewContractMagic[contracts.RollupElderberryType](polygonvalidiumetrog.NewPolygonvalidiumetrog, common.Address{}, nil, contracts.ContractNameRollup, contracts.VersionElderberry) require.NoError(t, err) privateKey, err := crypto.HexToECDSA("64e679029f5032046955d41713dcc4b565de77ab891748d31bcf38864b54c175") @@ -107,7 +110,7 @@ func newElderberryValidiumSUT(t *testing.T) *testDataElderberryValidium { da := mocks_da.NewSequenceSenderElderberry(t) - sut := txbuilder.NewTxBuilderElderberryValidium(*zkevmContract, da, *opts, uint64(100)) + sut := txbuilder.NewTxBuilderElderberryValidium(log.GetDefaultLogger(), *zkevmContract, da, *opts, uint64(100)) require.NotNil(t, sut) return &testDataElderberryValidium{ mockDA: da, diff --git a/sequencesender/txbuilder/elderberry_zkevm.go b/sequencesender/txbuilder/elderberry_zkevm.go index 870be1bb..a4d3bb56 100644 --- a/sequencesender/txbuilder/elderberry_zkevm.go +++ b/sequencesender/txbuilder/elderberry_zkevm.go @@ -21,18 +21,29 @@ type TxBuilderElderberryZKEVM struct { } type rollupElderberryZKEVMContractor interface { - SequenceBatches(opts *bind.TransactOpts, batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, maxSequenceTimestamp uint64, initSequencedBatch uint64, l2Coinbase common.Address) (*types.Transaction, error) + SequenceBatches( + opts *bind.TransactOpts, + batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, + maxSequenceTimestamp uint64, + initSequencedBatch uint64, + l2Coinbase common.Address, + ) (*types.Transaction, error) } -func NewTxBuilderElderberryZKEVM(zkevm rollupElderberryZKEVMContractor, opts bind.TransactOpts, maxTxSizeForL1 uint64) *TxBuilderElderberryZKEVM { +func NewTxBuilderElderberryZKEVM( + logger *log.Logger, zkevm rollupElderberryZKEVMContractor, + opts bind.TransactOpts, maxTxSizeForL1 uint64, +) *TxBuilderElderberryZKEVM { return &TxBuilderElderberryZKEVM{ - TxBuilderElderberryBase: *NewTxBuilderElderberryBase(opts), + TxBuilderElderberryBase: *NewTxBuilderElderberryBase(logger, opts), condNewSeq: NewConditionalNewSequenceMaxSize(maxTxSizeForL1), rollupContract: zkevm, } } -func (t *TxBuilderElderberryZKEVM) NewSequenceIfWorthToSend(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64) (seqsendertypes.Sequence, error) { +func (t *TxBuilderElderberryZKEVM) NewSequenceIfWorthToSend( + ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64, +) (seqsendertypes.Sequence, error) { return t.condNewSeq.NewSequenceIfWorthToSend(ctx, t, sequenceBatches, l2Coinbase) } @@ -43,7 +54,9 @@ func (t *TxBuilderElderberryZKEVM) SetCondNewSeq(cond CondNewSequence) CondNewSe return previous } -func (t *TxBuilderElderberryZKEVM) BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*types.Transaction, error) { +func (t *TxBuilderElderberryZKEVM) BuildSequenceBatchesTx( + ctx context.Context, sequences seqsendertypes.Sequence, +) (*types.Transaction, error) { newopts := t.opts newopts.NoSend = true @@ -55,7 +68,9 @@ func (t *TxBuilderElderberryZKEVM) BuildSequenceBatchesTx(ctx context.Context, s return t.sequenceBatchesRollup(newopts, sequences) } -func (t *TxBuilderElderberryZKEVM) sequenceBatchesRollup(opts bind.TransactOpts, sequences seqsendertypes.Sequence) (*types.Transaction, error) { +func (t *TxBuilderElderberryZKEVM) sequenceBatchesRollup( + opts bind.TransactOpts, sequences seqsendertypes.Sequence, +) 
(*types.Transaction, error) { if sequences == nil || sequences.Len() == 0 { return nil, fmt.Errorf("can't sequence an empty sequence") } @@ -69,13 +84,15 @@ func (t *TxBuilderElderberryZKEVM) sequenceBatchesRollup(opts bind.TransactOpts, batches[i] = polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData{ Transactions: seq.L2Data(), ForcedGlobalExitRoot: ger, - ForcedTimestamp: uint64(seq.ForcedBatchTimestamp()), + ForcedTimestamp: seq.ForcedBatchTimestamp(), // TODO: Check that it is ok to use ForcedBlockHashL1 instead of PrevBlockHash ForcedBlockHashL1: seq.ForcedBlockHashL1(), } } lastSequencedBatchNumber := getLastSequencedBatchNumber(sequences) - tx, err := t.rollupContract.SequenceBatches(&opts, batches, sequences.MaxSequenceTimestamp(), lastSequencedBatchNumber, sequences.L2Coinbase()) + tx, err := t.rollupContract.SequenceBatches( + &opts, batches, sequences.MaxSequenceTimestamp(), lastSequencedBatchNumber, sequences.L2Coinbase(), + ) if err != nil { t.warningMessage(batches, sequences.L2Coinbase(), &opts) if parsedErr, ok := etherman.TryParseError(err); ok { @@ -86,8 +103,9 @@ func (t *TxBuilderElderberryZKEVM) sequenceBatchesRollup(opts bind.TransactOpts, return tx, err } -func (t *TxBuilderElderberryZKEVM) warningMessage(batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, l2Coinbase common.Address, opts *bind.TransactOpts) { - log.Warnf("Sequencer address: ", opts.From, "l2CoinBase: ", l2Coinbase, " Batches to send: %+v", batches) +func (t *TxBuilderElderberryZKEVM) warningMessage( + batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, l2Coinbase common.Address, opts *bind.TransactOpts) { + t.logger.Warnf("Sequencer address: %s, l2CoinBase: %s, Batches to send: %+v", opts.From, l2Coinbase, batches) } func (t *TxBuilderElderberryZKEVM) String() string { diff --git a/sequencesender/txbuilder/elderberry_zkevm_test.go b/sequencesender/txbuilder/elderberry_zkevm_test.go index 94623048..3544a700 100644 --- a/sequencesender/txbuilder/elderberry_zkevm_test.go +++ b/sequencesender/txbuilder/elderberry_zkevm_test.go @@ -8,6 +8,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry/polygonvalidiumetrog" "github.com/0xPolygon/cdk/etherman/contracts" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state/datastream" @@ -20,7 +21,7 @@ import ( func TestElderberryZkevmName(t *testing.T) { zkevmContract := contracts.RollupElderberryType{} opts := bind.TransactOpts{} - sut := txbuilder.NewTxBuilderElderberryZKEVM(zkevmContract, opts, 100) + sut := txbuilder.NewTxBuilderElderberryZKEVM(log.GetDefaultLogger(), zkevmContract, opts, 100) require.NotNil(t, sut) require.True(t, strings.Contains(sut.String(), "Elderberry")) require.True(t, strings.Contains(sut.String(), "ZKEVM")) @@ -29,7 +30,7 @@ func TestElderberryZkevmNewSequence(t *testing.T) { zkevmContract := contracts.RollupElderberryType{} opts := bind.TransactOpts{} - sut := txbuilder.NewTxBuilderElderberryZKEVM(zkevmContract, opts, 100) + sut := txbuilder.NewTxBuilderElderberryZKEVM(log.GetDefaultLogger(), zkevmContract, opts, 100) require.NotNil(t, sut) seq, err := sut.NewSequence(context.TODO(), nil, common.Address{}) require.NoError(t, err) @@ -98,13 +99,15 @@ func TestElderberryZkevmNewSequenceIfWorthToSend(t *testing.T) { } func newElderberryZkevmSUT(t *testing.T) *txbuilder.TxBuilderElderberryZKEVM { + t.Helper() + zkevmContract, err :=
contracts.NewContractMagic[contracts.RollupElderberryType](polygonvalidiumetrog.NewPolygonvalidiumetrog, common.Address{}, nil, contracts.ContractNameRollup, contracts.VersionElderberry) require.NoError(t, err) privateKey, err := crypto.HexToECDSA("64e679029f5032046955d41713dcc4b565de77ab891748d31bcf38864b54c175") require.NoError(t, err) opts, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1)) require.NoError(t, err) - sut := txbuilder.NewTxBuilderElderberryZKEVM(*zkevmContract, *opts, 100) + sut := txbuilder.NewTxBuilderElderberryZKEVM(log.GetDefaultLogger(), *zkevmContract, *opts, 100) require.NotNil(t, sut) return sut } diff --git a/sequencesender/txbuilder/interface.go b/sequencesender/txbuilder/interface.go index bde8b634..f9a6275d 100644 --- a/sequencesender/txbuilder/interface.go +++ b/sequencesender/txbuilder/interface.go @@ -2,6 +2,7 @@ package txbuilder import ( "context" + "fmt" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/state/datastream" @@ -10,20 +11,28 @@ ) type TxBuilder interface { + // Stringer interface + fmt.Stringer + // BuildSequenceBatchesTx Builds a sequence of batches transaction BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*ethtypes.Transaction, error) // NewSequence Creates a new sequence - NewSequence(ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address) (seqsendertypes.Sequence, error) + NewSequence( + ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address, + ) (seqsendertypes.Sequence, error) // NewSequenceIfWorthToSend Creates a new sequence if it is worth sending - NewSequenceIfWorthToSend(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64) (seqsendertypes.Sequence, error) + NewSequenceIfWorthToSend( + ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64, + ) (seqsendertypes.Sequence, error) // NewBatchFromL2Block Creates a new batch from the L2 block from a datastream NewBatchFromL2Block(l2Block *datastream.L2Block) seqsendertypes.Batch - //SetCondNewSeq Allows to override the condition to send a new sequence, returns previous one + // SetCondNewSeq Allows to override the condition to send a new sequence, returns previous one SetCondNewSeq(cond CondNewSequence) CondNewSequence - String() string } type CondNewSequence interface { - //NewSequenceIfWorthToSend Return nil, nil if the sequence is not worth sending - NewSequenceIfWorthToSend(ctx context.Context, txBuilder TxBuilder, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address) (seqsendertypes.Sequence, error) + // NewSequenceIfWorthToSend Return nil, nil if the sequence is not worth sending + NewSequenceIfWorthToSend( + ctx context.Context, txBuilder TxBuilder, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, + ) (seqsendertypes.Sequence, error) } diff --git a/sequencesender/txbuilder/interface_test.go b/sequencesender/txbuilder/interface_test.go index 6db0216a..428f04b7 100644 --- a/sequencesender/txbuilder/interface_test.go +++ b/sequencesender/txbuilder/interface_test.go @@ -17,6 +17,8 @@ These are auxiliary test functions based on the common behaviour of the interfaces */ func testSequenceIfWorthToSendNoNewSeq(t *testing.T, sut txbuilder.TxBuilder) { + t.Helper() + cond := mocks_txbuilder.NewCondNewSequence(t) sut.SetCondNewSeq(cond) cond.EXPECT().NewSequenceIfWorthToSend(mock.Anything, mock.Anything, mock.Anything,
mock.Anything).Return(nil, nil).Once() @@ -26,6 +28,8 @@ func testSequenceIfWorthToSendNoNewSeq(t *testing.T, sut txbuilder.TxBuilder) { } func testSequenceIfWorthToSendErr(t *testing.T, sut txbuilder.TxBuilder) { + t.Helper() + cond := mocks_txbuilder.NewCondNewSequence(t) sut.SetCondNewSeq(cond) returnErr := fmt.Errorf("test-error") @@ -36,6 +40,8 @@ func testSequenceIfWorthToSendErr(t *testing.T, sut txbuilder.TxBuilder) { } func testSetCondNewSeq(t *testing.T, sut txbuilder.TxBuilder) { + t.Helper() + cond := mocks_txbuilder.NewCondNewSequence(t) sut.SetCondNewSeq(cond) cond2 := mocks_txbuilder.NewCondNewSequence(t) diff --git a/sequencesender/txbuilder/validium_cond_num_batches.go b/sequencesender/txbuilder/validium_cond_num_batches.go index 11329a06..35173d8e 100644 --- a/sequencesender/txbuilder/validium_cond_num_batches.go +++ b/sequencesender/txbuilder/validium_cond_num_batches.go @@ -20,8 +20,10 @@ func NewConditionalNewSequenceNumBatches(maxBatchesForL1 uint64) *ConditionalNew } } -func (c *ConditionalNewSequenceNumBatches) NewSequenceIfWorthToSend(ctx context.Context, txBuilder TxBuilder, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address) (seqsendertypes.Sequence, error) { - if c.maxBatchesForL1 != MaxBatchesForL1Disabled && len(sequenceBatches) >= int(c.maxBatchesForL1) { +func (c *ConditionalNewSequenceNumBatches) NewSequenceIfWorthToSend( + ctx context.Context, txBuilder TxBuilder, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, +) (seqsendertypes.Sequence, error) { + if c.maxBatchesForL1 != MaxBatchesForL1Disabled && uint64(len(sequenceBatches)) >= c.maxBatchesForL1 { log.Infof( "sequence should be sent to L1, because MaxBatchesForL1 (%d) has been reached", c.maxBatchesForL1, diff --git a/sequencesender/txbuilder/zkevm_cond_max_size.go b/sequencesender/txbuilder/zkevm_cond_max_size.go index 5c931f8e..66eb9446 100644 --- a/sequencesender/txbuilder/zkevm_cond_max_size.go +++ b/sequencesender/txbuilder/zkevm_cond_max_size.go @@ -27,7 +27,9 @@ func NewConditionalNewSequenceMaxSize(maxTxSizeForL1 uint64) *ConditionalNewSequ } } -func (c *ConditionalNewSequenceMaxSize) NewSequenceIfWorthToSend(ctx context.Context, txBuilder TxBuilder, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address) (seqsendertypes.Sequence, error) { +func (c *ConditionalNewSequenceMaxSize) NewSequenceIfWorthToSend( + ctx context.Context, txBuilder TxBuilder, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, +) (seqsendertypes.Sequence, error) { if c.maxTxSizeForL1 == MaxTxSizeForL1Disabled { log.Debugf("maxTxSizeForL1 is %d, so is disabled", MaxTxSizeForL1Disabled) return nil, nil @@ -59,7 +61,6 @@ func (c *ConditionalNewSequenceMaxSize) NewSequenceIfWorthToSend(ctx context.Con sequenceBatches, err = handleEstimateGasSendSequenceErr(sequence.Batches(), err) if sequenceBatches != nil { // Handling the error gracefully, re-processing the sequence as a sanity check - //sequence, err = s.newSequenceBanana(sequenceBatches, s.cfg.L2Coinbase) sequence, err = txBuilder.NewSequence(ctx, sequenceBatches, l2Coinbase) if err != nil { return nil, err @@ -68,19 +69,27 @@ func (c *ConditionalNewSequenceMaxSize) NewSequenceIfWorthToSend(ctx context.Con txReduced, err := txBuilder.BuildSequenceBatchesTx(ctx, sequence) log.Debugf("After reducing batches: (txSize %d -> %d)", tx.Size(), txReduced.Size()) if err == nil && txReduced != nil && txReduced.Size() > c.maxTxSizeForL1 { - log.Warnf("After reducing batches: (txSize %d -> %d) is still too big > %d", 
tx.Size(), txReduced.Size(), c.maxTxSizeForL1) + log.Warnf("After reducing batches: (txSize %d -> %d) is still too big > %d", + tx.Size(), txReduced.Size(), c.maxTxSizeForL1, + ) } return sequence, err } return sequence, err } - log.Debugf("Current size:%d < max_size:%d num_batches: %d, no sequence promoted yet", tx.Size(), c.maxTxSizeForL1, sequence.Len()) + log.Debugf( + "Current size:%d < max_size:%d num_batches: %d, no sequence promoted yet", + tx.Size(), c.maxTxSizeForL1, sequence.Len(), + ) return nil, nil } -// handleEstimateGasSendSequenceErr handles an error on the estimate gas. Results: (nil,nil)=requires waiting, (nil,error)=no handled gracefully, (seq,nil) handled gracefully -func handleEstimateGasSendSequenceErr(sequenceBatches []seqsendertypes.Batch, err error) ([]seqsendertypes.Batch, error) { +// handleEstimateGasSendSequenceErr handles an error on the estimate gas. +// Results: (nil,nil)=requires waiting, (nil,error)=not handled gracefully, (seq,nil)=handled gracefully +func handleEstimateGasSendSequenceErr( + sequenceBatches []seqsendertypes.Batch, err error, +) ([]seqsendertypes.Batch, error) { // Insufficient allowance if errors.Is(err, etherman.ErrInsufficientAllowance) { return nil, err @@ -89,12 +98,15 @@ func handleEstimateGasSendSequenceErr(sequenceBatches []seqsendertypes.Batch, er if isDataForEthTxTooBig(err) { errMsg = fmt.Sprintf("caused the L1 tx to be too big: %v", err) } - adjustMsg := "" + var adjustMsg string if len(sequenceBatches) > 1 { lastPrevious := sequenceBatches[len(sequenceBatches)-1].BatchNumber() sequenceBatches = sequenceBatches[:len(sequenceBatches)-1] lastCurrent := sequenceBatches[len(sequenceBatches)-1].BatchNumber() - adjustMsg = fmt.Sprintf("removing last batch: old BatchNumber:%d -> %d, new length: %d", lastPrevious, lastCurrent, len(sequenceBatches)) + adjustMsg = fmt.Sprintf( + "removing last batch: old BatchNumber:%d -> %d, new length: %d", + lastPrevious, lastCurrent, len(sequenceBatches), + ) } else { sequenceBatches = nil adjustMsg = "removing all batches" diff --git a/sonar-project.properties b/sonar-project.properties index 973e1bc7..559f7073 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -1,2 +1,30 @@ +# ===================================================== +# Standard properties +# ===================================================== + sonar.projectKey=0xPolygon_cdk +sonar.projectName=cdk sonar.organization=0xpolygon + +sonar.sources=. +sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql + +sonar.tests=.
+sonar.test.inclusions=**/*_test.go +sonar.test.exclusions=**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml +sonar.issue.enforceSemantic=true + +# ===================================================== +# Meta-data for the project +# ===================================================== + +sonar.links.homepage=https://github.com/0xPolygon/cdk +sonar.links.ci=https://github.com/0xPolygon/cdk/actions +sonar.links.scm=https://github.com/0xPolygon/cdk +sonar.links.issue=https://github.com/0xPolygon/cdk/issues + +# ===================================================== +# Properties specific to Go +# ===================================================== + +sonar.go.coverage.reportPaths=coverage.out diff --git a/state/encoding_batch_v2.go b/state/encoding_batch_v2.go index 8e3561bd..f058f072 100644 --- a/state/encoding_batch_v2.go +++ b/state/encoding_batch_v2.go @@ -99,8 +99,11 @@ const ( ) var ( - // ErrBatchV2DontStartWithChangeL2Block is returned when the batch start directly with a trsansaction (without a changeL2Block) - ErrBatchV2DontStartWithChangeL2Block = errors.New("batch v2 must start with changeL2Block before Tx (suspect a V1 Batch or a ForcedBatch?))") + // ErrBatchV2DontStartWithChangeL2Block is returned when the batch starts directly + // with a transaction (without a preceding changeL2Block) + ErrBatchV2DontStartWithChangeL2Block = errors.New( + "batch v2 must start with changeL2Block before Tx (suspect a V1 Batch or a ForcedBatch?)", + ) // ErrInvalidBatchV2 is returned when the batch is invalid. ErrInvalidBatchV2 = errors.New("invalid batch v2") // ErrInvalidRLP is returned when the rlp is invalid. @@ -206,7 +209,10 @@ func (tx L2TxRaw) Encode(batchData []byte) ([]byte, error) { // DecodeBatchV2 decodes a batch of transactions from a byte slice. func DecodeBatchV2(txsData []byte) (*BatchRawV2, error) { - // The transactions is not RLP encoded. Is the raw bytes in this form: 1 byte for the transaction type (always 0b for changeL2Block) + 4 bytes for deltaTimestamp + for bytes for indexL1InfoTree + // The transactions are not RLP encoded. They are in raw byte format as follows: + // 1 byte for the transaction type (always 0b for changeL2Block) + // 4 bytes for deltaTimestamp + // 4 bytes for indexL1InfoTree. var err error var blocks []L2BlockRaw var currentBlock *L2BlockRaw @@ -258,7 +264,10 @@ func DecodeForcedBatchV2(txsData []byte) (*ForcedBatchRawV2, error) { } // Sanity check, this should never happen if len(efficiencyPercentages) != len(txs) { - return nil, fmt.Errorf("error decoding len(efficiencyPercentages) != len(txs). len(efficiencyPercentages)=%d, len(txs)=%d : %w", len(efficiencyPercentages), len(txs), ErrInvalidRLP) + return nil, fmt.Errorf( + "error decoding len(efficiencyPercentages) != len(txs). len(efficiencyPercentages)=%d, len(txs)=%d : %w", + len(efficiencyPercentages), len(txs), ErrInvalidRLP, + ) } forcedBatch := ForcedBatchRawV2{} for i := range txs { @@ -311,12 +320,22 @@ func DecodeTxRLP(txsData []byte, offset int) (int, *L2TxRaw, error) { var rlpFields [][]byte err = rlp.DecodeBytes(txInfo, &rlpFields) if err != nil { - log.Error("error decoding tx Bytes: ", err, ". fullDataTx: ", hex.EncodeToString(fullDataTx), "\n tx: ", hex.EncodeToString(txInfo), "\n Txs received: ", hex.EncodeToString(txsData)) + log.Error( + "error decoding tx Bytes: ", err, + ". 
fullDataTx: ", hex.EncodeToString(fullDataTx), + "\n tx: ", hex.EncodeToString(txInfo), + "\n Txs received: ", hex.EncodeToString(txsData), + ) return 0, nil, err } legacyTx, err := RlpFieldsToLegacyTx(rlpFields, vData, rData, sData) if err != nil { - log.Debug("error creating tx from rlp fields: ", err, ". fullDataTx: ", hex.EncodeToString(fullDataTx), "\n tx: ", hex.EncodeToString(txInfo), "\n Txs received: ", hex.EncodeToString(txsData)) + log.Debug( + "error creating tx from rlp fields: ", err, + ". fullDataTx: ", hex.EncodeToString(fullDataTx), + "\n tx: ", hex.EncodeToString(txInfo), + "\n Txs received: ", hex.EncodeToString(txsData), + ) return 0, nil, err } @@ -348,7 +367,11 @@ func decodeRLPListLengthFromOffset(txsData []byte, offset int) (uint64, error) { return 0, fmt.Errorf("not enough data to get length: %w", ErrInvalidRLP) } - n, err := strconv.ParseUint(hex.EncodeToString(txsData[pos64+1:pos64+1+lengthInByteOfSize]), hex.Base, hex.BitSize64) // +1 is the header. For example 0xf7 + n, err := strconv.ParseUint( + hex.EncodeToString(txsData[pos64+1:pos64+1+lengthInByteOfSize]), // +1 is the header. For example 0xf7 + hex.Base, + hex.BitSize64, + ) if err != nil { log.Debug("error parsing length: ", err) return 0, fmt.Errorf("error parsing length value: %w", err) diff --git a/state/encoding_batch_v2_test.go b/state/encoding_batch_v2_test.go index 30b16d23..d263b5c7 100644 --- a/state/encoding_batch_v2_test.go +++ b/state/encoding_batch_v2_test.go @@ -117,8 +117,8 @@ func TestDecodeBatchV2(t *testing.T) { require.NoError(t, err) batchL2Data2, err := hex.DecodeString(codedL2Block2) require.NoError(t, err) - batch := append(batchL2Data, batchL2Data2...) - decodedBatch, err := DecodeBatchV2(batch) + batchL2Data = append(batchL2Data, batchL2Data2...) + decodedBatch, err := DecodeBatchV2(batchL2Data) require.NoError(t, err) require.Equal(t, 2, len(decodedBatch.Blocks)) require.Equal(t, uint32(0x73e6af6f), decodedBatch.Blocks[0].DeltaTimestamp) diff --git a/state/errors.go b/state/errors.go index 5a394a24..4bd68605 100644 --- a/state/errors.go +++ b/state/errors.go @@ -36,11 +36,15 @@ var ( ErrDBTxNil = errors.New("the method requires a dbTx that is not nil") // ErrExistingTxGreaterThanProcessedTx indicates that we have more txs stored // in db than the txs we want to process. - ErrExistingTxGreaterThanProcessedTx = errors.New("there are more transactions in the database than in the processed transaction set") + ErrExistingTxGreaterThanProcessedTx = errors.New( + "there are more transactions in the database than in the processed transaction set", + ) // ErrOutOfOrderProcessedTx indicates the the processed transactions of an // ongoing batch are not in the same order as the transactions stored in the // database for the same batch. - ErrOutOfOrderProcessedTx = errors.New("the processed transactions are not in the same order as the stored transactions") + ErrOutOfOrderProcessedTx = errors.New( + "the processed transactions are not in the same order as the stored transactions", + ) // ErrInsufficientFundsForTransfer is returned if the transaction sender doesn't // have enough funds for transfer(topmost call only). 
ErrInsufficientFundsForTransfer = errors.New("insufficient funds for transfer") diff --git a/state/forkid.go b/state/forkid.go index 27617313..13fec671 100644 --- a/state/forkid.go +++ b/state/forkid.go @@ -2,11 +2,11 @@ package state const ( // FORKID_BLUEBERRY is the fork id 4 - FORKID_BLUEBERRY = 4 + FORKID_BLUEBERRY = 4 //nolint:stylecheck // FORKID_DRAGONFRUIT is the fork id 5 - FORKID_DRAGONFRUIT = 5 + FORKID_DRAGONFRUIT = 5 //nolint:stylecheck // FORKID_INCABERRY is the fork id 6 - FORKID_INCABERRY = 6 + FORKID_INCABERRY = 6 //nolint:stylecheck // FORKID_ETROG is the fork id 7 - FORKID_ETROG = 7 + FORKID_ETROG = 7 //nolint:stylecheck ) diff --git a/state/helper.go b/state/helper.go index c717fb56..7f2b64be 100644 --- a/state/helper.go +++ b/state/helper.go @@ -63,7 +63,7 @@ func prepareRLPTxData(tx *types.Transaction) ([]byte, error) { return nil, err } - newV := new(big.Int).Add(big.NewInt(ether155V), big.NewInt(int64(sign))) + newV := new(big.Int).Add(big.NewInt(ether155V), new(big.Int).SetUint64(sign)) newRPadded := fmt.Sprintf("%064s", r.Text(hex.Base)) newSPadded := fmt.Sprintf("%064s", s.Text(hex.Base)) newVPadded := fmt.Sprintf("%02s", newV.Text(hex.Base)) @@ -99,7 +99,11 @@ func DecodeTxs(txsData []byte, forkID uint64) ([]*types.Transaction, []byte, []u log.Debug("error parsing length: ", err) return []*types.Transaction{}, txsData, []uint8{}, err } - n, err := strconv.ParseUint(hex.EncodeToString(txsData[pos+1:pos+1+num-f7]), hex.Base, hex.BitSize64) // +1 is the header. For example 0xf7 + n, err := strconv.ParseUint( + hex.EncodeToString(txsData[pos+1:pos+1+num-f7]), // +1 is the header. For example 0xf7 + hex.Base, + hex.BitSize64, + ) if err != nil { log.Debug("error parsing length: ", err) return []*types.Transaction{}, txsData, []uint8{}, err @@ -153,13 +157,23 @@ func DecodeTxs(txsData []byte, forkID uint64) ([]*types.Transaction, []byte, []u var rlpFields [][]byte err = rlp.DecodeBytes(txInfo, &rlpFields) if err != nil { - log.Error("error decoding tx Bytes: ", err, ". fullDataTx: ", hex.EncodeToString(fullDataTx), "\n tx: ", hex.EncodeToString(txInfo), "\n Txs received: ", hex.EncodeToString(txsData)) + log.Error( + "error decoding tx Bytes: ", err, + ". fullDataTx: ", hex.EncodeToString(fullDataTx), + "\n tx: ", hex.EncodeToString(txInfo), + "\n Txs received: ", hex.EncodeToString(txsData), + ) return []*types.Transaction{}, txsData, []uint8{}, ErrInvalidData } legacyTx, err := RlpFieldsToLegacyTx(rlpFields, vData, rData, sData) if err != nil { - log.Debug("error creating tx from rlp fields: ", err, ". fullDataTx: ", hex.EncodeToString(fullDataTx), "\n tx: ", hex.EncodeToString(txInfo), "\n Txs received: ", hex.EncodeToString(txsData)) + log.Debug( + "error creating tx from rlp fields: ", err, ". 
fullDataTx: ", + hex.EncodeToString(fullDataTx), + "\n tx: ", hex.EncodeToString(txInfo), + "\n Txs received: ", hex.EncodeToString(txsData), + ) return []*types.Transaction{}, txsData, []uint8{}, err } diff --git a/state/pgstatestorage/batch.go b/state/pgstatestorage/batch.go index db818df5..6273f064 100644 --- a/state/pgstatestorage/batch.go +++ b/state/pgstatestorage/batch.go @@ -11,9 +11,17 @@ import ( // AddBatch stores a batch func (p *PostgresStorage) AddBatch(ctx context.Context, dbBatch *state.DBBatch, dbTx pgx.Tx) error { - const addInputHashSQL = "INSERT INTO aggregator.batch (batch_num, batch, datastream, witness) VALUES ($1, $2, $3, $4) ON CONFLICT (batch_num) DO UPDATE SET batch = $2, datastream = $3, witness = $4" + const addInputHashSQL = ` + INSERT INTO aggregator.batch (batch_num, batch, datastream, witness) + VALUES ($1, $2, $3, $4) + ON CONFLICT (batch_num) DO UPDATE + SET batch = $2, datastream = $3, witness = $4 + ` e := p.getExecQuerier(dbTx) - _, err := e.Exec(ctx, addInputHashSQL, dbBatch.Batch.BatchNumber, &dbBatch.Batch, common.Bytes2Hex(dbBatch.Datastream), common.Bytes2Hex(dbBatch.Witness)) + _, err := e.Exec( + ctx, addInputHashSQL, dbBatch.Batch.BatchNumber, &dbBatch.Batch, + common.Bytes2Hex(dbBatch.Datastream), common.Bytes2Hex(dbBatch.Witness), + ) return err } @@ -38,7 +46,9 @@ func (p *PostgresStorage) GetBatch(ctx context.Context, batchNumber uint64, dbTx } // DeleteBatchesOlderThanBatchNumber deletes batches previous to the given batch number -func (p *PostgresStorage) DeleteBatchesOlderThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { +func (p *PostgresStorage) DeleteBatchesOlderThanBatchNumber( + ctx context.Context, batchNumber uint64, dbTx pgx.Tx, +) error { const deleteBatchesSQL = "DELETE FROM aggregator.batch WHERE batch_num < $1" e := p.getExecQuerier(dbTx) _, err := e.Exec(ctx, deleteBatchesSQL, batchNumber) @@ -46,7 +56,9 @@ func (p *PostgresStorage) DeleteBatchesOlderThanBatchNumber(ctx context.Context, } // DeleteBatchesNewerThanBatchNumber deletes batches previous to the given batch number -func (p *PostgresStorage) DeleteBatchesNewerThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { +func (p *PostgresStorage) DeleteBatchesNewerThanBatchNumber( + ctx context.Context, batchNumber uint64, dbTx pgx.Tx, +) error { const deleteBatchesSQL = "DELETE FROM aggregator.batch WHERE batch_num > $1" e := p.getExecQuerier(dbTx) _, err := e.Exec(ctx, deleteBatchesSQL, batchNumber) diff --git a/state/pgstatestorage/proof.go b/state/pgstatestorage/proof.go index 98668a44..fa32fc99 100644 --- a/state/pgstatestorage/proof.go +++ b/state/pgstatestorage/proof.go @@ -25,7 +25,9 @@ func (p *PostgresStorage) CheckProofExistsForBatch(ctx context.Context, batchNum } // CheckProofContainsCompleteSequences checks if a recursive proof contains complete sequences -func (p *PostgresStorage) CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) (bool, error) { +func (p *PostgresStorage) CheckProofContainsCompleteSequences( + ctx context.Context, proof *state.Proof, dbTx pgx.Tx, +) (bool, error) { const getProofContainsCompleteSequencesSQL = ` SELECT EXISTS (SELECT 1 FROM aggregator.sequence s1 WHERE s1.from_batch_num = $1) AND EXISTS (SELECT 1 FROM aggregator.sequence s2 WHERE s2.to_batch_num = $2) @@ -40,7 +42,9 @@ func (p *PostgresStorage) CheckProofContainsCompleteSequences(ctx context.Contex } // GetProofReadyToVerify return the proof that is ready to verify -func (p 
*PostgresStorage) GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Proof, error) { +func (p *PostgresStorage) GetProofReadyToVerify( + ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx, +) (*state.Proof, error) { const getProofReadyToVerifySQL = ` SELECT p.batch_num, @@ -59,11 +63,15 @@ func (p *PostgresStorage) GetProofReadyToVerify(ctx context.Context, lastVerfied EXISTS (SELECT 1 FROM aggregator.sequence s2 WHERE s2.to_batch_num = p.batch_num_final) ` - var proof *state.Proof = &state.Proof{} + var proof = &state.Proof{} e := p.getExecQuerier(dbTx) row := e.QueryRow(ctx, getProofReadyToVerifySQL, lastVerfiedBatchNumber+1) - err := row.Scan(&proof.BatchNumber, &proof.BatchNumberFinal, &proof.Proof, &proof.ProofID, &proof.InputProver, &proof.Prover, &proof.ProverID, &proof.GeneratingSince, &proof.CreatedAt, &proof.UpdatedAt) + err := row.Scan( + &proof.BatchNumber, &proof.BatchNumberFinal, &proof.Proof, &proof.ProofID, + &proof.InputProver, &proof.Prover, &proof.ProverID, &proof.GeneratingSince, + &proof.CreatedAt, &proof.UpdatedAt, + ) if errors.Is(err, pgx.ErrNoRows) { return nil, state.ErrNotFound @@ -77,8 +85,8 @@ func (p *PostgresStorage) GetProofReadyToVerify(ctx context.Context, lastVerfied // GetProofsToAggregate return the next to proof that it is possible to aggregate func (p *PostgresStorage) GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) (*state.Proof, *state.Proof, error) { var ( - proof1 *state.Proof = &state.Proof{} - proof2 *state.Proof = &state.Proof{} + proof1 = &state.Proof{} + proof2 = &state.Proof{} ) // TODO: add comments to explain the query @@ -130,8 +138,13 @@ func (p *PostgresStorage) GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) e := p.getExecQuerier(dbTx) row := e.QueryRow(ctx, getProofsToAggregateSQL) err := row.Scan( - &proof1.BatchNumber, &proof1.BatchNumberFinal, &proof1.Proof, &proof1.ProofID, &proof1.InputProver, &proof1.Prover, &proof1.ProverID, &proof1.GeneratingSince, &proof1.CreatedAt, &proof1.UpdatedAt, - &proof2.BatchNumber, &proof2.BatchNumberFinal, &proof2.Proof, &proof2.ProofID, &proof2.InputProver, &proof2.Prover, &proof2.ProverID, &proof2.GeneratingSince, &proof2.CreatedAt, &proof2.UpdatedAt) + &proof1.BatchNumber, &proof1.BatchNumberFinal, &proof1.Proof, &proof1.ProofID, + &proof1.InputProver, &proof1.Prover, &proof1.ProverID, &proof1.GeneratingSince, + &proof1.CreatedAt, &proof1.UpdatedAt, + &proof2.BatchNumber, &proof2.BatchNumberFinal, &proof2.Proof, &proof2.ProofID, + &proof2.InputProver, &proof2.Prover, &proof2.ProverID, &proof2.GeneratingSince, + &proof2.CreatedAt, &proof2.UpdatedAt, + ) if errors.Is(err, pgx.ErrNoRows) { return nil, nil, state.ErrNotFound @@ -144,25 +157,51 @@ func (p *PostgresStorage) GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) // AddGeneratedProof adds a generated proof to the storage func (p *PostgresStorage) AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { - const addGeneratedProofSQL = "INSERT INTO aggregator.proof (batch_num, batch_num_final, proof, proof_id, input_prover, prover, prover_id, generating_since, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)" + const addGeneratedProofSQL = ` + INSERT INTO aggregator.proof ( + batch_num, batch_num_final, proof, proof_id, input_prover, prover, + prover_id, generating_since, created_at, updated_at + ) VALUES ( + $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 + ) + ` e := p.getExecQuerier(dbTx) now := 
time.Now().UTC().Round(time.Microsecond) - _, err := e.Exec(ctx, addGeneratedProofSQL, proof.BatchNumber, proof.BatchNumberFinal, proof.Proof, proof.ProofID, proof.InputProver, proof.Prover, proof.ProverID, proof.GeneratingSince, now, now) + _, err := e.Exec( + ctx, addGeneratedProofSQL, proof.BatchNumber, proof.BatchNumberFinal, proof.Proof, proof.ProofID, + proof.InputProver, proof.Prover, proof.ProverID, proof.GeneratingSince, now, now, + ) return err } // UpdateGeneratedProof updates a generated proof in the storage func (p *PostgresStorage) UpdateGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { - const addGeneratedProofSQL = "UPDATE aggregator.proof SET proof = $3, proof_id = $4, input_prover = $5, prover = $6, prover_id = $7, generating_since = $8, updated_at = $9 WHERE batch_num = $1 AND batch_num_final = $2" + const addGeneratedProofSQL = ` + UPDATE aggregator.proof + SET proof = $3, + proof_id = $4, + input_prover = $5, + prover = $6, + prover_id = $7, + generating_since = $8, + updated_at = $9 + WHERE batch_num = $1 + AND batch_num_final = $2 + ` e := p.getExecQuerier(dbTx) now := time.Now().UTC().Round(time.Microsecond) - _, err := e.Exec(ctx, addGeneratedProofSQL, proof.BatchNumber, proof.BatchNumberFinal, proof.Proof, proof.ProofID, proof.InputProver, proof.Prover, proof.ProverID, proof.GeneratingSince, now) + _, err := e.Exec( + ctx, addGeneratedProofSQL, proof.BatchNumber, proof.BatchNumberFinal, proof.Proof, proof.ProofID, + proof.InputProver, proof.Prover, proof.ProverID, proof.GeneratingSince, now, + ) return err } // DeleteGeneratedProofs deletes from the storage the generated proofs falling // inside the batch numbers range. -func (p *PostgresStorage) DeleteGeneratedProofs(ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx pgx.Tx) error { +func (p *PostgresStorage) DeleteGeneratedProofs( + ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx pgx.Tx, +) error { const deleteGeneratedProofSQL = "DELETE FROM aggregator.proof WHERE batch_num >= $1 AND batch_num_final <= $2" e := p.getExecQuerier(dbTx) _, err := e.Exec(ctx, deleteGeneratedProofSQL, batchNumber, batchNumberFinal) @@ -218,9 +257,9 @@ func toPostgresInterval(duration string) (string, error) { return "", state.ErrUnsupportedDuration } - isMoreThanOne := duration[0] != '1' || len(duration) > 2 //nolint:gomnd + isMoreThanOne := duration[0] != '1' || len(duration) > 2 //nolint:mnd if isMoreThanOne { - pgUnit = pgUnit + "s" + pgUnit += "s" } return fmt.Sprintf("%s %s", duration[:len(duration)-1], pgUnit), nil diff --git a/state/pgstatestorage/sequence.go b/state/pgstatestorage/sequence.go index 12b19f7e..7d5be9fb 100644 --- a/state/pgstatestorage/sequence.go +++ b/state/pgstatestorage/sequence.go @@ -9,7 +9,11 @@ import ( // AddSequence stores the sequence information to allow the aggregator verify sequences. 
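The pluralization tweak above (pgUnit += "s") is easiest to see by example: toPostgresInterval keeps the Postgres unit singular only when the numeric prefix is exactly "1". A hypothetical table-driven check, assuming the usual m→minute/h→hour unit mapping that sits outside this hunk:

package pgstatestorage

import "testing"

// TestToPostgresInterval is an illustrative sketch, not part of this change.
// It pins the rule isMoreThanOne := duration[0] != '1' || len(duration) > 2.
func TestToPostgresInterval(t *testing.T) {
	cases := map[string]string{
		"1m":  "1 minute",   // exactly one: singular
		"5m":  "5 minutes",  // leading digit is not '1': plural
		"15m": "15 minutes", // starts with '1' but longer than two chars: plural
		"1h":  "1 hour",
	}
	for in, want := range cases {
		got, err := toPostgresInterval(in)
		if err != nil || got != want {
			t.Errorf("toPostgresInterval(%q) = %q, %v; want %q", in, got, err, want)
		}
	}
}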
func (p *PostgresStorage) AddSequence(ctx context.Context, sequence state.Sequence, dbTx pgx.Tx) error { - const addSequenceSQL = "INSERT INTO aggregator.sequence (from_batch_num, to_batch_num) VALUES($1, $2) ON CONFLICT (from_batch_num) DO UPDATE SET to_batch_num = $2" + const addSequenceSQL = ` + INSERT INTO aggregator.sequence (from_batch_num, to_batch_num) + VALUES($1, $2) + ON CONFLICT (from_batch_num) DO UPDATE SET to_batch_num = $2 + ` e := p.getExecQuerier(dbTx) _, err := e.Exec(ctx, addSequenceSQL, sequence.FromBatchNumber, sequence.ToBatchNumber) diff --git a/state/types.go b/state/types.go index d5a8d155..62c0f502 100644 --- a/state/types.go +++ b/state/types.go @@ -14,7 +14,7 @@ type ZKCounters struct { UsedArithmetics uint32 UsedBinaries uint32 UsedSteps uint32 - UsedSha256Hashes_V2 uint32 + UsedSha256Hashes_V2 uint32 //nolint:stylecheck } // BatchResources is a struct that contains the ZKEVM resources used by a batch/tx diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go index 31613cf3..c9c4e661 100644 --- a/sync/evmdownloader.go +++ b/sync/evmdownloader.go @@ -143,7 +143,9 @@ func NewEVMDownloaderImplementation( } } -func (d *EVMDownloaderImplementation) WaitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64) { +func (d *EVMDownloaderImplementation) WaitForNewBlocks( + ctx context.Context, lastBlockSeen uint64, +) (newLastBlock uint64) { attempts := 0 ticker := time.NewTicker(d.waitForNewBlocksPeriod) defer ticker.Stop() @@ -175,8 +177,10 @@ func (d *EVMDownloaderImplementation) GetEventsByBlockRange(ctx context.Context, b := d.GetBlockHeader(ctx, l.BlockNumber) if b.Hash != l.BlockHash { d.log.Infof( - "there has been a block hash change between the event query and the block query for block %d: %s vs %s. Retrtying.", - l.BlockNumber, b.Hash, l.BlockHash) + "there has been a block hash change between the event query and the block query "+ + "for block %d: %s vs %s. 
Retrying.", + l.BlockNumber, b.Hash, l.BlockHash, + ) return d.GetEventsByBlockRange(ctx, fromBlock, toBlock) } blocks = append(blocks, EVMBlock{ @@ -242,7 +246,7 @@ func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, to func (d *EVMDownloaderImplementation) GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader { attempts := 0 for { - header, err := d.ethClient.HeaderByNumber(ctx, big.NewInt(int64(blockNum))) + header, err := d.ethClient.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNum)) if err != nil { attempts++ d.log.Errorf("error getting block header for block %d, err: %v", blockNum, err) diff --git a/sync/evmdownloader_test.go b/sync/evmdownloader_test.go index 15a6608c..59c43b8f 100644 --- a/sync/evmdownloader_test.go +++ b/sync/evmdownloader_test.go @@ -389,6 +389,8 @@ func buildAppender() LogAppenderMap { } func NewTestDownloader(t *testing.T) (*EVMDownloader, *L2Mock) { + t.Helper() + rh := &RetryHandler{ MaxRetryAttemptsAfterError: 5, RetryAfterErrorPeriod: time.Millisecond * 100, diff --git a/sync/evmdriver.go b/sync/evmdriver.go index 2edd2e15..ae7388e0 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -75,7 +75,7 @@ reset: lastProcessedBlock, err = d.processor.GetLastProcessedBlock(ctx) if err != nil { attempts++ - d.log.Error("error geting last processed block: ", err) + d.log.Error("error getting last processed block: ", err) d.rh.Handle("Sync", attempts) continue } @@ -122,7 +122,7 @@ func (d *EVMDriver) handleNewBlock(ctx context.Context, b EVMBlock) { err := d.processor.ProcessBlock(ctx, blockToProcess) if err != nil { attempts++ - d.log.Errorf("error processing events for blcok %d, err: ", b.Num, err) + d.log.Errorf("error processing events for block %d, err: ", b.Num, err) d.rh.Handle("handleNewBlock", attempts) continue } diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go index 74692321..907dac28 100644 --- a/sync/evmdriver_test.go +++ b/sync/evmdriver_test.go @@ -53,8 +53,18 @@ func TestSync(t *testing.T) { } reorg1Completed := reorgSemaphore{} dm.On("Download", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ctx := args.Get(0).(context.Context) - downloadedCh := args.Get(2).(chan EVMBlock) + ctx, ok := args.Get(0).(context.Context) + if !ok { + log.Error("failed to assert type for context") + return + } + + downloadedCh, ok := args.Get(2).(chan EVMBlock) + if !ok { + log.Error("failed to assert type for downloadedCh") + return + } + log.Info("entering mock loop") for { select { @@ -168,7 +178,6 @@ func TestHandleNewBlock(t *testing.T) { pm.On("ProcessBlock", ctx, Block{Num: b3.Num, Events: b3.Events}). Return(nil).Once() driver.handleNewBlock(ctx, b3) - } func TestHandleReorg(t *testing.T) { diff --git a/test/Makefile b/test/Makefile index 86bd147e..0c50ec35 100644 --- a/test/Makefile +++ b/test/Makefile @@ -25,10 +25,12 @@ generate-mocks-da: ## Generates mocks for dataavailability, using mockery tool .PHONY: test-e2e-elderberry-validium test-e2e-elderberry-validium: stop ## Runs e2e tests checking elderberry/validium ./run-e2e.sh cdk-validium + bats . .PHONY: test-e2e-elderberry-rollup test-e2e-elderberry-rollup: stop ## Runs e2e tests checking elderberry/rollup ./run-e2e.sh rollup + bats . 
.PHONY: stop stop: diff --git a/test/access-list-e2e.bats b/test/access-list-e2e.bats new file mode 100644 index 00000000..c47b004a --- /dev/null +++ b/test/access-list-e2e.bats @@ -0,0 +1,121 @@ +setup() { + load 'helpers/common-setup' + load 'helpers/common' + _common_setup + + readonly enclave=${ENCLAVE:-cdk-v1} + readonly sequencer=${KURTOSIS_NODE:-cdk-erigon-sequencer-001} + readonly node=${KURTOSIS_NODE:-cdk-erigon-node-001} + readonly rpc_url=${RPC_URL:-$(kurtosis port print "$enclave" "$node" http-rpc)} + readonly key=${SENDER_key:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + readonly receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} + readonly data_dir=${ACL_DATA_DIR:-"/home/erigon/data/dynamic-kurtosis-sequencer/txpool/acls"} + readonly kurtosis_sequencer_wrapper=${KURTOSIS_WRAPPER:-"kurtosis service exec $enclave $sequencer"} +} + +teardown() { + run set_acl_mode "disabled" +} + +# Helper function to add address to acl dynamically +add_to_access_list() { + local acl_type="$1" + local policy="$2" + local sender=$(cast wallet address "$key") + + run $kurtosis_sequencer_wrapper "acl add --datadir $data_dir --address $sender --type $acl_type --policy $policy" +} + +# Helper function to set the acl mode command dynamically +set_acl_mode() { + local mode="$1" + + run $kurtosis_sequencer_wrapper "acl mode --datadir $data_dir --mode $mode" +} + +@test "Test Block List - Sending regular transaction when address not in block list" { + local value="10ether" + run set_acl_mode "blocklist" + run sendTx $key $receiver $value + + assert_success + assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" +} + +@test "Test Block List - Sending contracts deploy transaction when address not in block list" { + local contract_artifact="./contracts/erc20mock/ERC20Mock.json" + run set_acl_mode "blocklist" + run deployContract $key $contract_artifact + + assert_success + + contract_addr=$(echo "$output" | tail -n 1) + assert_output --regexp "0x[a-fA-F0-9]{40}" +} + +@test "Test Block List - Sending regular transaction when address is in block list" { + local value="10ether" + + run set_acl_mode "blocklist" + run add_to_access_list "blocklist" "sendTx" + + run sendTx $key $receiver $value + + assert_failure + assert_output --partial "sender disallowed to send tx by ACL policy" +} + +@test "Test Block List - Sending contracts deploy transaction when address is in block list" { + local contract_artifact="./contracts/erc20mock/ERC20Mock.json" + + run set_acl_mode "blocklist" + run add_to_access_list "blocklist" "deploy" + run deployContract $key $contract_artifact + + assert_failure + assert_output --partial "sender disallowed to deploy contract by ACL policy" +} + +@test "Test Allow List - Sending regular transaction when address not in allow list" { + local value="10ether" + + run set_acl_mode "allowlist" + run sendTx $key $receiver $value + + assert_failure + assert_output --partial "sender disallowed to send tx by ACL policy" +} + +@test "Test Allow List - Sending contracts deploy transaction when address not in allow list" { + local contract_artifact="./contracts/erc20mock/ERC20Mock.json" + + run set_acl_mode "allowlist" + run deployContract $key $contract_artifact + + assert_failure + assert_output --partial "sender disallowed to deploy contract by ACL policy" +} + +@test "Test Allow List - Sending regular transaction when address is in allow list" { + local value="10ether" + + run set_acl_mode "allowlist" + run 
add_to_access_list "allowlist" "sendTx" + run sendTx $key $receiver $value + + assert_success + assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" +} + +@test "Test Allow List - Sending contracts deploy transaction when address is in allow list" { + local contract_artifact="./contracts/erc20mock/ERC20Mock.json" + + run set_acl_mode "allowlist" + run add_to_access_list "allowlist" "deploy" + run deployContract $key $contract_artifact + + assert_success + + contract_addr=$(echo "$output" | tail -n 1) + assert_output --regexp "0x[a-fA-F0-9]{40}" +} diff --git a/test/basic-e2e.bats b/test/basic-e2e.bats new file mode 100644 index 00000000..cbd845f5 --- /dev/null +++ b/test/basic-e2e.bats @@ -0,0 +1,49 @@ +setup() { + load 'helpers/common-setup' + load 'helpers/common' + _common_setup + + readonly enclave=${ENCLAVE:-cdk-v1} + readonly node=${KURTOSIS_NODE:-cdk-erigon-node-001} + readonly rpc_url=${RPC_URL:-$(kurtosis port print "$enclave" "$node" http-rpc)} + readonly private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + readonly receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} +} + +@test "Send EOA transaction" { + local value="10ether" + + run sendTx "$private_key" "$receiver" "$value" + assert_success + assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" +} + +@test "Deploy ERC20Mock contract" { + local contract_artifact="./contracts/erc20mock/ERC20Mock.json" + + # Deploy ERC20Mock + run deployContract "$private_key" "$contract_artifact" + assert_success + contract_addr=$(echo "$output" | tail -n 1) + + # Mint ERC20 tokens + local mintFnSig="function mint(address receiver, uint256 amount)" + local amount="5" + + run sendTx "$private_key" "$contract_addr" "$mintFnSig" "$receiver" "$amount" + assert_success + assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" + + # Assert that balance is correct + local balanceOfFnSig="function balanceOf(address) (uint256)" + run queryContract "$contract_addr" "$balanceOfFnSig" "$receiver" + assert_success + receiverBalance=$(echo "$output" | tail -n 1) + + # Convert balance and amount to a standard format for comparison (e.g., remove any leading/trailing whitespace) + receiverBalance=$(echo "$receiverBalance" | xargs) + amount=$(echo "$amount" | xargs) + + # Check if the balance is equal to the amount + assert_equal "$receiverBalance" "$amount" +} diff --git a/test/bridge-e2e.bats b/test/bridge-e2e.bats new file mode 100644 index 00000000..98443b3b --- /dev/null +++ b/test/bridge-e2e.bats @@ -0,0 +1,71 @@ +setup() { + load 'helpers/common-setup' + _common_setup + + readonly data_availability_mode=${DATA_AVAILABILITY_MODE:-"cdk-validium"} + $PROJECT_ROOT/test/scripts/kurtosis_prepare_params_yml.sh ../kurtosis-cdk $data_availability_mode + [ $? -ne 0 ] && echo "Error preparing params.yml" && exit 1 + + # Check if the genesis file is already downloaded + if [ ! -f "./tmp/cdk/genesis/genesis.json" ]; then + mkdir -p ./tmp/cdk + kurtosis files download cdk-v1 genesis ./tmp/cdk/genesis + [ $? 
-ne 0 ] && echo "Error downloading genesis file" && exit 1 + fi + # Extract the bridge proxy address from the genesis file + readonly bridge_default_address=$(jq -r ".genesis[] | select(.contractName == \"PolygonZkEVMBridge proxy\") | .address" ./tmp/cdk/genesis/genesis.json) + + readonly skey=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + readonly destination_net=${DESTINATION_NET:-"1"} + readonly destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} + readonly ether_value=${ETHER_VALUE:-"0.0200000054"} + readonly token_addr=${TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} + readonly is_forced=${IS_FORCED:-"true"} + readonly bridge_addr=${BRIDGE_ADDRESS:-$bridge_default_address} + readonly meta_bytes=${META_BYTES:-"0x"} + + readonly l1_rpc_url=${L1_ETH_RPC_URL:-"$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)"} + readonly l2_rpc_url=${L2_ETH_RPC_URL:-"$(kurtosis port print cdk-v1 cdk-erigon-node-001 http-rpc)"} + readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print cdk-v1 zkevm-bridge-service-001 rpc)"} + + readonly dry_run=${DRY_RUN:-"false"} + + readonly amount=$(cast to-wei $ether_value ether) + readonly current_addr="$(cast wallet address --private-key $skey)" + readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID()(uint32)') + readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID()(uint32)') +} + +@test "Run deposit" { + load 'helpers/lxly-bridge-test' + echo "Running LxLy deposit" >&3 + run deposit + assert_success + assert_output --partial 'transactionHash' +} + +@test "Run claim" { + load 'helpers/lxly-bridge-test' + echo "Running LxLy claim" + + # The script timeout (in seconds). + timeout="120" + start_time=$(date +%s) + end_time=$((start_time + timeout)) + + while true; do + current_time=$(date +%s) + if ((current_time > end_time)); then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Exiting... Timeout reached!" 
+ exit 1 + fi + + run claim + if [ $status -eq 0 ]; then + break + fi + sleep 10 + done + + assert_success +} diff --git a/test/config/test.kurtosis_template.toml b/test/config/test.kurtosis_template.toml index 971f06a9..a44df41e 100644 --- a/test/config/test.kurtosis_template.toml +++ b/test/config/test.kurtosis_template.toml @@ -72,7 +72,6 @@ AggLayerTxTimeout = "5m" AggLayerURL = "" MaxWitnessRetrievalWorkers = 2 SyncModeOnlyEnabled = false -UseFullWitness = false SequencerPrivateKey = {} [Aggregator.DB] Name = "aggregator_db" diff --git a/test/contracts/abi/claimmock.abi b/test/contracts/abi/claimmock.abi index 2b75f658..0fd2ea8e 100644 --- a/test/contracts/abi/claimmock.abi +++ b/test/contracts/abi/claimmock.abi @@ -1 +1 @@ -[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"globalIndex","type":"uint256"},{"indexed":false,"internalType":"uint32","name":"originNetwork","type":"uint32"},{"indexed":false,"internalType":"address","name":"originAddress","type":"address"},{"indexed":false,"internalType":"address","name":"destinationAddress","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"ClaimEvent","type":"event"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originTokenAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"}],"name":"claimAsset","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"}],"name":"claimMessage","outputs":[],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file 
+[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"globalIndex","type":"uint256"},{"indexed":false,"internalType":"uint32","name":"originNetwork","type":"uint32"},{"indexed":false,"internalType":"address","name":"originAddress","type":"address"},{"indexed":false,"internalType":"address","name":"destinationAddress","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"ClaimEvent","type":"event"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originTokenAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"}],"name":"claimAsset","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"}],"name":"claimMessage","outputs":[],"stateMutability":"payable","type":"function"}] \ No newline at end of file diff --git a/test/contracts/abi/claimmockcaller.abi b/test/contracts/abi/claimmockcaller.abi index b2c6e2b9..21bf6ebc 100644 --- a/test/contracts/abi/claimmockcaller.abi +++ b/test/contracts/abi/claimmockcaller.abi @@ -1 +1 @@ -[{"inputs":[{"internalType":"contract 
IClaimMock","name":"_claimMock","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originTokenAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"}],"name":"claimAsset","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"}],"name":"claimMessage","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"claimMock","outputs":[{"internalType":"contract IClaimMock","name":"","type":"address"}],"stateMutability":"view","type":"function"}] \ No newline at end of file +[{"inputs":[{"internalType":"contract 
IClaimMock","name":"_claimMock","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"bytes","name":"claim1","type":"bytes"},{"internalType":"bytes","name":"claim2","type":"bytes"},{"internalType":"bool[2]","name":"reverted","type":"bool[2]"}],"name":"claim2Bytes","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originTokenAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"},{"internalType":"bool","name":"reverted","type":"bool"}],"name":"claimAsset","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"claim","type":"bytes"},{"internalType":"bool","name":"reverted","type":"bool"}],"name":"claimBytes","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"},{"internalType":"bool","name":"reverted","type":"bool"}],"name":"claimMessage","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"claimMock","outputs":[{"internalType":"contract IClaimMock","name":"","type":"address"}],"stateMutability":"view","type":"function"}] \ No newline at end of file diff --git a/test/contracts/bin/claimmock.bin b/test/contracts/bin/claimmock.bin index 62d961e8..006fd65c 100644 --- a/test/contracts/bin/claimmock.bin +++ b/test/contracts/bin/claimmock.bin @@ -1 +1 @@ 
-608060405234801561001057600080fd5b50610240806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063ccaa2d111461003b578063f5efcd791461003b575b600080fd5b61004e610049366004610102565b610050565b005b604080518b815263ffffffff891660208201526001600160a01b0388811682840152861660608201526080810185905290517f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d9181900360a00190a1505050505050505050505050565b8061040081018310156100cc57600080fd5b92915050565b803563ffffffff811681146100e657600080fd5b919050565b80356001600160a01b03811681146100e657600080fd5b6000806000806000806000806000806000806109208d8f03121561012557600080fd5b61012f8e8e6100ba565b9b5061013f8e6104008f016100ba565b9a506108008d013599506108208d013598506108408d013597506101666108608e016100d2565b96506101756108808e016100eb565b95506101846108a08e016100d2565b94506101936108c08e016100eb565b93506108e08d013592506109008d013567ffffffffffffffff808211156101b957600080fd5b818f0191508f601f8301126101cd57600080fd5b80823511156101db57600080fd5b508e6020823583010111156101ef57600080fd5b60208101925080359150509295989b509295989b509295989b56fea2646970667358221220ea3ccb4fef38083776607b84bdd7b00012029d7d1fee9fa7c300663fe761dcac64736f6c63430008120033 \ No newline at end of file +6080806040523461001657610227908161001c8239f35b600080fdfe608080604052600436101561001357600080fd5b600090813560e01c908163ccaa2d11146100b5575063f5efcd791461003757600080fd5b6100403661012f565b5050945097505092509350600134146100b1576040805193845263ffffffff9490941660208401526001600160a01b039182169383019390935292909216606083015260808201527f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d9060a090a180f35b8580fd5b7f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d91508061011e6100e53661012f565b5050968b5263ffffffff90931660208b0152506001600160a01b0390811660408a015216606088015250506080850152505060a0820190565b0390a16001341461012c5780f35b80fd5b906109206003198301126101ec57610404908282116101ec57600492610804928184116101ec579235916108243591610844359163ffffffff906108643582811681036101ec57926001600160a01b03916108843583811681036101ec57936108a43590811681036101ec57926108c43590811681036101ec57916108e435916109043567ffffffffffffffff928382116101ec57806023830112156101ec57818e01359384116101ec57602484830101116101ec576024019190565b600080fdfea2646970667358221220360ea7019315ab59618e13d469f48b1816436744772ab76ff89153af49fb746a64736f6c63430008120033 \ No newline at end of file diff --git a/test/contracts/bin/claimmockcaller.bin b/test/contracts/bin/claimmockcaller.bin index 6a84c36f..47d3dcdd 100644 --- a/test/contracts/bin/claimmockcaller.bin +++ b/test/contracts/bin/claimmockcaller.bin @@ -1 +1 @@ 
-60a060405234801561001057600080fd5b5060405161047238038061047283398101604081905261002f91610040565b6001600160a01b0316608052610070565b60006020828403121561005257600080fd5b81516001600160a01b038116811461006957600080fd5b9392505050565b6080516103db61009760003960008181604b0152818160c8015261016a01526103db6000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806383f5b00614610046578063ccaa2d1114610089578063f5efcd791461009e575b600080fd5b61006d7f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b03909116815260200160405180910390f35b61009c6100973660046101fd565b6100b1565b005b61009c6100ac3660046101fd565b610153565b60405163ccaa2d1160e01b81526001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169063ccaa2d1190610113908f908f908f908f908f908f908f908f908f908f908f908f90600401610305565b600060405180830381600087803b15801561012d57600080fd5b505af1158015610141573d6000803e3d6000fd5b50505050505050505050505050505050565b60405163f5efcd7960e01b81526001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169063f5efcd7990610113908f908f908f908f908f908f908f908f908f908f908f908f90600401610305565b8061040081018310156101c757600080fd5b92915050565b803563ffffffff811681146101e157600080fd5b919050565b80356001600160a01b03811681146101e157600080fd5b6000806000806000806000806000806000806109208d8f03121561022057600080fd5b61022a8e8e6101b5565b9b5061023a8e6104008f016101b5565b9a506108008d013599506108208d013598506108408d013597506102616108608e016101cd565b96506102706108808e016101e6565b955061027f6108a08e016101cd565b945061028e6108c08e016101e6565b93506108e08d013592506109008d013567ffffffffffffffff808211156102b457600080fd5b818f0191508f601f8301126102c857600080fd5b80823511156102d657600080fd5b508e6020823583010111156102ea57600080fd5b60208101925080359150509295989b509295989b509295989b565b6000610400808f8437808e828501375061080082018c905261082082018b905261084082018a905263ffffffff8981166108608401526001600160a01b038981166108808501529088166108a084015286166108c08301526108e08201859052610920610900830181905282018390526109408385828501376000838501820152601f909301601f19169091019091019c9b50505050505050505050505056fea26469706673582212202321216f86560e0f29df639adc8713b3ce119002b4def8923caee0576ed8380564736f6c63430008120033 \ No newline at end of file 
+60a03461008557601f61063738819003918201601f19168301916001600160401b0383118484101761008a5780849260209460405283398101031261008557516001600160a01b03811681036100855760805260405161059690816100a1823960805181818160d4015281816103bc01528181610407015281816104b701526104f80152f35b600080fd5b634e487b7160e01b600052604160045260246000fdfe6080604052600436101561001257600080fd5b6000803560e01c90816301beea651461006a575080631cf865cf1461006557806327e358431461006057806383f5b0061461005b5763a51061701461005657600080fd5b610436565b6103f1565b61036a565b6102d0565b3461010b57806020610aac608083610081366101a0565b929b939a949995989697969594939291506101029050575b63f5efcd7960e01b8c5260a00135610124528a013561050452610884526108a4526108c4526108e452610904526109245261094452610964527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af15080f35b60019a50610099565b80fd5b6108a4359063ffffffff8216820361012257565b600080fd5b61088435906001600160a01b038216820361012257565b6108c435906001600160a01b038216820361012257565b9181601f840112156101225782359167ffffffffffffffff8311610122576020838186019501011161012257565b6109243590811515820361012257565b3590811515820361012257565b906109406003198301126101225761040490828211610122576004926108049281841161012257923591610824359161084435916108643563ffffffff8116810361012257916101ee610127565b916101f761010e565b9161020061013e565b916108e43591610904359067ffffffffffffffff821161012257610225918d01610155565b909161022f610183565b90565b634e487b7160e01b600052604160045260246000fd5b604051906040820182811067ffffffffffffffff82111761026857604052565b610232565b81601f820112156101225780359067ffffffffffffffff928383116102685760405193601f8401601f19908116603f0116850190811185821017610268576040528284526020838301011161012257816000926020809301838601378301015290565b346101225760803660031901126101225767ffffffffffffffff6004358181116101225761030290369060040161026d565b906024359081116101225761031b90369060040161026d565b9036606312156101225761032d610248565b9182916084368111610122576044945b81861061035257505061035093506104ec565b005b6020809161035f88610193565b81520195019461033d565b346101225760403660031901126101225760043567ffffffffffffffff81116101225761039b90369060040161026d565b602435801515810361012257610aac60209160009384916103e8575b8301907f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af1005b600191506103b7565b34610122576000366003190112610122576040517f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03168152602090f35b346101225760006020610aac61044b366101a0565b9a9150508d989198979297969396959495996104e3575b60405163ccaa2d1160e01b815260a09b909b013560a48c0152608001356104848b01526108048a01526108248901526108448801526108648701526108848601526108a48501526108c48401526108e48301527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af1005b60019950610462565b825160009360209384937f00000000000000000000000000000000000000000000000000000000000000006001600160a01b0316938793929190610557575b858893015161054e575b858891819495610aac9889920190885af15001915af150565b60019250610535565b6001935061052b56fea2646970667358221220bbde05c8a8245c4319ff8aa0ce8d95e6c5dd5c5828fe085ba1491ea451b390ba64736f6c63430008120033 \ No newline at end of file diff --git a/test/contracts/claimmock/ClaimMock.sol b/test/contracts/claimmock/ClaimMock.sol index 14d94eae..adcea4fe 100644 --- a/test/contracts/claimmock/ClaimMock.sol +++ b/test/contracts/claimmock/ClaimMock.sol @@ -25,7 +25,7 @@ contract ClaimMock { address destinationAddress, uint256 amount, bytes calldata metadata - ) external { + ) 
external payable { emit ClaimEvent( globalIndex, originNetwork, @@ -33,6 +33,9 @@ contract ClaimMock { destinationAddress, amount ); + if(msg.value == 1) { + revert(); + } } function claimMessage( @@ -47,7 +50,10 @@ contract ClaimMock { address destinationAddress, uint256 amount, bytes calldata metadata - ) external { + ) external payable { + if(msg.value == 1) { + revert(); + } emit ClaimEvent( globalIndex, originNetwork, diff --git a/test/contracts/claimmock/claimmock.go b/test/contracts/claimmock/claimmock.go index cc577908..49a38546 100644 --- a/test/contracts/claimmock/claimmock.go +++ b/test/contracts/claimmock/claimmock.go @@ -31,8 +31,8 @@ var ( // ClaimmockMetaData contains all meta data concerning the Claimmock contract. var ClaimmockMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"originAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"ClaimEvent\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originTokenAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"}],\"name\":\"claimAsset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"}],\"name\":\"claimMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: 
"0x608060405234801561001057600080fd5b50610240806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063ccaa2d111461003b578063f5efcd791461003b575b600080fd5b61004e610049366004610102565b610050565b005b604080518b815263ffffffff891660208201526001600160a01b0388811682840152861660608201526080810185905290517f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d9181900360a00190a1505050505050505050505050565b8061040081018310156100cc57600080fd5b92915050565b803563ffffffff811681146100e657600080fd5b919050565b80356001600160a01b03811681146100e657600080fd5b6000806000806000806000806000806000806109208d8f03121561012557600080fd5b61012f8e8e6100ba565b9b5061013f8e6104008f016100ba565b9a506108008d013599506108208d013598506108408d013597506101666108608e016100d2565b96506101756108808e016100eb565b95506101846108a08e016100d2565b94506101936108c08e016100eb565b93506108e08d013592506109008d013567ffffffffffffffff808211156101b957600080fd5b818f0191508f601f8301126101cd57600080fd5b80823511156101db57600080fd5b508e6020823583010111156101ef57600080fd5b60208101925080359150509295989b509295989b509295989b56fea2646970667358221220ea3ccb4fef38083776607b84bdd7b00012029d7d1fee9fa7c300663fe761dcac64736f6c63430008120033", + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"originAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"ClaimEvent\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originTokenAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"}],\"name\":\"claimAsset\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\
"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"}],\"name\":\"claimMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"}]", + Bin: "0x6080806040523461001657610227908161001c8239f35b600080fdfe608080604052600436101561001357600080fd5b600090813560e01c908163ccaa2d11146100b5575063f5efcd791461003757600080fd5b6100403661012f565b5050945097505092509350600134146100b1576040805193845263ffffffff9490941660208401526001600160a01b039182169383019390935292909216606083015260808201527f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d9060a090a180f35b8580fd5b7f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d91508061011e6100e53661012f565b5050968b5263ffffffff90931660208b0152506001600160a01b0390811660408a015216606088015250506080850152505060a0820190565b0390a16001341461012c5780f35b80fd5b906109206003198301126101ec57610404908282116101ec57600492610804928184116101ec579235916108243591610844359163ffffffff906108643582811681036101ec57926001600160a01b03916108843583811681036101ec57936108a43590811681036101ec57926108c43590811681036101ec57916108e435916109043567ffffffffffffffff928382116101ec57806023830112156101ec57818e01359384116101ec57602484830101116101ec576024019190565b600080fdfea2646970667358221220360ea7019315ab59618e13d469f48b1816436744772ab76ff89153af49fb746a64736f6c63430008120033", } // ClaimmockABI is the input ABI used to generate the binding from. @@ -204,42 +204,42 @@ func (_Claimmock *ClaimmockTransactorRaw) Transact(opts *bind.TransactOpts, meth // ClaimAsset is a paid mutator transaction binding the contract method 0xccaa2d11. // -// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns() +// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) payable returns() func (_Claimmock *ClaimmockTransactor) ClaimAsset(opts *bind.TransactOpts, smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) { return _Claimmock.contract.Transact(opts, "claimAsset", smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata) } // ClaimAsset is a paid mutator transaction binding the contract method 0xccaa2d11. 
// -// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns() +// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) payable returns() func (_Claimmock *ClaimmockSession) ClaimAsset(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) { return _Claimmock.Contract.ClaimAsset(&_Claimmock.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata) } // ClaimAsset is a paid mutator transaction binding the contract method 0xccaa2d11. // -// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns() +// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) payable returns() func (_Claimmock *ClaimmockTransactorSession) ClaimAsset(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) { return _Claimmock.Contract.ClaimAsset(&_Claimmock.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata) } // ClaimMessage is a paid mutator transaction binding the contract method 0xf5efcd79. 
// -// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns() +// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) payable returns() func (_Claimmock *ClaimmockTransactor) ClaimMessage(opts *bind.TransactOpts, smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) { return _Claimmock.contract.Transact(opts, "claimMessage", smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata) } // ClaimMessage is a paid mutator transaction binding the contract method 0xf5efcd79. // -// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns() +// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) payable returns() func (_Claimmock *ClaimmockSession) ClaimMessage(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) { return _Claimmock.Contract.ClaimMessage(&_Claimmock.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata) } // ClaimMessage is a paid mutator transaction binding the contract method 0xf5efcd79. 
 //
-// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
+// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) payable returns()
 func (_Claimmock *ClaimmockTransactorSession) ClaimMessage(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
 	return _Claimmock.Contract.ClaimMessage(&_Claimmock.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata)
 }
diff --git a/test/contracts/claimmockcaller/ClaimMockCaller.sol b/test/contracts/claimmockcaller/ClaimMockCaller.sol
index 3ab2f286..5f82003e 100644
--- a/test/contracts/claimmockcaller/ClaimMockCaller.sol
+++ b/test/contracts/claimmockcaller/ClaimMockCaller.sol
@@ -29,21 +29,32 @@ contract ClaimMockCaller {
         uint32 destinationNetwork,
         address destinationAddress,
         uint256 amount,
-        bytes calldata metadata
+        bytes calldata metadata,
+        bool reverted
     ) external {
-        claimMock.claimAsset(
-            smtProofLocalExitRoot,
-            smtProofRollupExitRoot,
-            globalIndex,
-            mainnetExitRoot,
-            rollupExitRoot,
-            originNetwork,
-            originTokenAddress,
-            destinationNetwork,
-            destinationAddress,
-            amount,
-            metadata
-        );
+        address addr = address(claimMock);
+        uint256 value = 0;
+        if(reverted) {
+            value = 1; // ClaimMock reverts when it is sent exactly 1 wei
+        }
+        bytes4 argSig = bytes4(keccak256("claimAsset(bytes32[32],bytes32[32],uint256,bytes32,bytes32,uint32,address,uint32,address,uint256,bytes)"));
+        bytes32 value1 = smtProofLocalExitRoot[5];
+        bytes32 value2 = smtProofRollupExitRoot[4];
+        assembly {
+            let x := mload(0x40) // load the free memory pointer
+            mstore(x,argSig)
+            mstore(add(x,164),value1) // 4 + 5*32: word for smtProofLocalExitRoot[5]
+            mstore(add(x,1156),value2) // 4 + 1024 + 4*32: word for smtProofRollupExitRoot[4]
+            mstore(add(x,2052),globalIndex) // 4 + 2*1024: first scalar argument
+            mstore(add(x,2084),mainnetExitRoot)
+            mstore(add(x,2116),rollupExitRoot)
+            mstore(add(x,2148),originNetwork)
+            mstore(add(x,2180),originTokenAddress)
+            mstore(add(x,2212),destinationNetwork)
+            mstore(add(x,2244),destinationAddress)
+            mstore(add(x,2276),amount)
+            let success := call(gas(), addr, value, x, 0xaac, 0x20, 0)
+        }
     }
 
     function claimMessage(
@@ -57,20 +68,68 @@ contract ClaimMockCaller {
         uint32 destinationNetwork,
         address destinationAddress,
         uint256 amount,
-        bytes calldata metadata
+        bytes calldata metadata,
+        bool reverted
     ) external {
-        claimMock.claimMessage(
-            smtProofLocalExitRoot,
-            smtProofRollupExitRoot,
-            globalIndex,
-            mainnetExitRoot,
-            rollupExitRoot,
-            originNetwork,
-            originAddress,
-            destinationNetwork,
-            destinationAddress,
-            amount,
-            metadata
-        );
+        address addr = address(claimMock);
+        uint256 value = 0;
+        if(reverted) {
+            value = 1; // ClaimMock reverts when it is sent exactly 1 wei
+        }
+        bytes4 argSig = bytes4(keccak256("claimMessage(bytes32[32],bytes32[32],uint256,bytes32,bytes32,uint32,address,uint32,address,uint256,bytes)"));
+        bytes32 value1 = smtProofLocalExitRoot[5];
+        bytes32 value2 = smtProofRollupExitRoot[4];
+        assembly {
+            let x := mload(0x40) // load the free memory pointer
+            mstore(x,argSig)
+            mstore(add(x,164),value1) // same calldata layout as claimAsset above
+            mstore(add(x,1156),value2)
+            mstore(add(x,2052),globalIndex)
+            mstore(add(x,2084),mainnetExitRoot)
+            mstore(add(x,2116),rollupExitRoot)
+            mstore(add(x,2148),originNetwork)
+            mstore(add(x,2180),originAddress)
+            mstore(add(x,2212),destinationNetwork)
+            mstore(add(x,2244),destinationAddress)
+            mstore(add(x,2276),amount)
+            let success := call(gas(), addr, value, x, 0xaac, 0x20, 0)
+        }
     }
+
+    function claimBytes(
+        bytes memory claim,
+        bool reverted
+    ) external {
+        address addr = address(claimMock);
+        uint256 value = 0;
+        if(reverted) {
+            value = 1;
+        }
+        assembly {
+            let success := call(gas(), addr, value, add(claim, 32), 0xaac, 0x20, 0)
+        }
+    }
+
+    function claim2Bytes(
+        bytes memory claim1,
+        bytes memory claim2,
+        bool[2] memory reverted
+    ) external {
+        address addr = address(claimMock);
+        uint256 value1 = 0;
+        if(reverted[0]) {
+            value1 = 1;
+        }
+        uint256 value2 = 0;
+        if(reverted[1]) {
+            value2 = 1;
+        }
+        assembly {
+            let success1 := call(gas(), addr, value1, add(claim1, 32), 0xaac, 0x20, 0)
+        }
+        assembly {
+            let success2 := call(gas(), addr, value2, add(claim2, 32), 0xaac, 0x20, 0)
+        }
+    }
+
+}
\ No newline at end of file
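// Editor's note (illustrative sketch, not part of this diff): the new
// claimBytes entry point takes the full pre-encoded claim calldata, so a test
// can pack a claim once with the regenerated ClaimMock ABI and replay it
// through the caller contract. The package name and import paths below are
// assumptions based on this repository's layout; only the binding methods
// shown in this diff are relied on.
package claimtest

import (
	"math/big"

	"github.com/0xPolygon/cdk/test/contracts/claimmock"
	"github.com/0xPolygon/cdk/test/contracts/claimmockcaller"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// sendPackedClaim packs a claimAsset call and forwards it via claimBytes.
// When revert is true the caller sends 1 wei, which ClaimMock treats as a
// signal to revert.
func sendPackedClaim(
	auth *bind.TransactOpts,
	caller *claimmockcaller.Claimmockcaller,
	proofLocal, proofRollup [32][32]byte,
	globalIndex, amount *big.Int,
	mainnetExitRoot, rollupExitRoot [32]byte,
	originNetwork, destNetwork uint32,
	originToken, destAddr common.Address,
	metadata []byte,
	revert bool,
) (*types.Transaction, error) {
	mockABI, err := claimmock.ClaimmockMetaData.GetAbi()
	if err != nil {
		return nil, err
	}
	// Encode the complete calldata (selector included) exactly as the mock expects it.
	calldata, err := mockABI.Pack("claimAsset",
		proofLocal, proofRollup, globalIndex, mainnetExitRoot, rollupExitRoot,
		originNetwork, originToken, destNetwork, destAddr, amount, metadata)
	if err != nil {
		return nil, err
	}
	return caller.ClaimBytes(auth, calldata, revert)
}

diff --git a/test/contracts/claimmockcaller/claimmockcaller.go b/test/contracts/claimmockcaller/claimmockcaller.go
index 78acccad..917ce4cc 100644
--- a/test/contracts/claimmockcaller/claimmockcaller.go
+++ b/test/contracts/claimmockcaller/claimmockcaller.go
@@ -31,8 +31,8 @@ var (
 // ClaimmockcallerMetaData contains all meta data concerning the Claimmockcaller contract.
 var ClaimmockcallerMetaData = &bind.MetaData{
-	ABI: 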
"[{\"inputs\":[{\"internalType\":\"contractIClaimMock\",\"name\":\"_claimMock\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originTokenAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"}],\"name\":\"claimAsset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"}],\"name\":\"claimMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"claimMock\",\"outputs\":[{\"internalType\":\"contractIClaimMock\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", - Bin: 
"0x60a060405234801561001057600080fd5b5060405161047238038061047283398101604081905261002f91610040565b6001600160a01b0316608052610070565b60006020828403121561005257600080fd5b81516001600160a01b038116811461006957600080fd5b9392505050565b6080516103db61009760003960008181604b0152818160c8015261016a01526103db6000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806383f5b00614610046578063ccaa2d1114610089578063f5efcd791461009e575b600080fd5b61006d7f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b03909116815260200160405180910390f35b61009c6100973660046101fd565b6100b1565b005b61009c6100ac3660046101fd565b610153565b60405163ccaa2d1160e01b81526001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169063ccaa2d1190610113908f908f908f908f908f908f908f908f908f908f908f908f90600401610305565b600060405180830381600087803b15801561012d57600080fd5b505af1158015610141573d6000803e3d6000fd5b50505050505050505050505050505050565b60405163f5efcd7960e01b81526001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169063f5efcd7990610113908f908f908f908f908f908f908f908f908f908f908f908f90600401610305565b8061040081018310156101c757600080fd5b92915050565b803563ffffffff811681146101e157600080fd5b919050565b80356001600160a01b03811681146101e157600080fd5b6000806000806000806000806000806000806109208d8f03121561022057600080fd5b61022a8e8e6101b5565b9b5061023a8e6104008f016101b5565b9a506108008d013599506108208d013598506108408d013597506102616108608e016101cd565b96506102706108808e016101e6565b955061027f6108a08e016101cd565b945061028e6108c08e016101e6565b93506108e08d013592506109008d013567ffffffffffffffff808211156102b457600080fd5b818f0191508f601f8301126102c857600080fd5b80823511156102d657600080fd5b508e6020823583010111156102ea57600080fd5b60208101925080359150509295989b509295989b509295989b565b6000610400808f8437808e828501375061080082018c905261082082018b905261084082018a905263ffffffff8981166108608401526001600160a01b038981166108808501529088166108a084015286166108c08301526108e08201859052610920610900830181905282018390526109408385828501376000838501820152601f909301601f19169091019091019c9b50505050505050505050505056fea26469706673582212202321216f86560e0f29df639adc8713b3ce119002b4def8923caee0576ed8380564736f6c63430008120033", + ABI: 
"[{\"inputs\":[{\"internalType\":\"contractIClaimMock\",\"name\":\"_claimMock\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"claim1\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"claim2\",\"type\":\"bytes\"},{\"internalType\":\"bool[2]\",\"name\":\"reverted\",\"type\":\"bool[2]\"}],\"name\":\"claim2Bytes\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originTokenAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"},{\"internalType\":\"bool\",\"name\":\"reverted\",\"type\":\"bool\"}],\"name\":\"claimAsset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"claim\",\"type\":\"bytes\"},{\"internalType\":\"bool\",\"name\":\"reverted\",\"type\":\"bool\"}],\"name\":\"claimBytes\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"},{\"internalType\":\"bool\",\"name\":\"reverted\",\"type\":\"bool\"}],\"name\":\"claimMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"claimMock\",\"outputs\":[{\"internalType\":\"contractIClaimMock\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x60a03461008557601f61063738819003918201601f19168301916001600160401b0383118484101761008a5780849260209460405283398101031261008557516001600160a01b03811681036100855760805260405161059690816100a1823960805181818160d4015281816103bc01528181610407015281816104b701526104f80152f35b600080fd5b634e487b7160e01b600052604160045260246000fdfe6080604052600436101561001257600080fd5b6000803560e01c90816301beea651461006a575080631cf865cf1461006557806327e358431461006057806383f5b0061461005b5763a51061701461005657600080fd5b610436565b6103f1565b61036a565b6102d0565b3461010b57806020610aac608083610081366101a0565b929b939a949995989697969594939291506101029050575b63f5efcd7960e01b8c5260a00135610124528a013561050452610884526108a4526108c4526108e452610904526109245261094452610964527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af15080f35b60019a50610099565b80fd5b6108a4359063ffffffff8216820361012257565b600080fd5b61088435906001600160a01b038216820361012257565b6108c435906001600160a01b038216820361012257565b9181601f840112156101225782359167ffffffffffffffff8311610122576020838186019501011161012257565b6109243590811515820361012257565b3590811515820361012257565b906109406003198301126101225761040490828211610122576004926108049281841161012257923591610824359161084435916108643563ffffffff8116810361012257916101ee610127565b916101f761010e565b9161020061013e565b916108e43591610904359067ffffffffffffffff821161012257610225918d01610155565b909161022f610183565b90565b634e487b7160e01b600052604160045260246000fd5b604051906040820182811067ffffffffffffffff82111761026857604052565b610232565b81601f820112156101225780359067ffffffffffffffff928383116102685760405193601f8401601f19908116603f0116850190811185821017610268576040528284526020838301011161012257816000926020809301838601378301015290565b346101225760803660031901126101225767ffffffffffffffff6004358181116101225761030290369060040161026d565b906024359081116101225761031b90369060040161026d565b9036606312156101225761032d610248565b9182916084368111610122576044945b81861061035257505061035093506104ec565b005b6020809161035f88610193565b81520195019461033d565b346101225760403660031901126101225760043567ffffffffffffffff81116101225761039b90369060040161026d565b602435801515810361012257610aac60209160009384916103e8575b8301907f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af1005b600191506103b7565b34610122576000366003190112610122576040517f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03168152602090f35b346101225760006020610aac61044b366101a0565b9a9150508d989198979297969396959495996104e3575b60405163ccaa2d1160e01b815260a09b909b013560a48c0152608001356104848b01526108048a01526108248901526108448801526108648701526108848601526108a48501526108c48401526108e48301527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af1005b60019950610462565b825160009360209384937f00000000000000000000000000000000000000000000000000000000000000006001600160a01b0316938793929190610557575b858893015161054e575b858891819495610aac9889920190885af15001915af150565b60019250610535565b6001935061052b56fea2646970667358221220bbde05c8a8245c4319ff8aa0ce8d95e6c5dd5c5828fe085ba1491ea451b390ba64736f6c63430008120033", } // ClaimmockcallerABI is the input ABI used to generate the binding from. @@ -233,44 +233,86 @@ func (_Claimmockcaller *ClaimmockcallerCallerSession) ClaimMock() (common.Addres return _Claimmockcaller.Contract.ClaimMock(&_Claimmockcaller.CallOpts) } -// ClaimAsset is a paid mutator transaction binding the contract method 0xccaa2d11. 
+// Claim2Bytes is a paid mutator transaction binding the contract method 0x1cf865cf. // -// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns() -func (_Claimmockcaller *ClaimmockcallerTransactor) ClaimAsset(opts *bind.TransactOpts, smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) { - return _Claimmockcaller.contract.Transact(opts, "claimAsset", smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata) +// Solidity: function claim2Bytes(bytes claim1, bytes claim2, bool[2] reverted) returns() +func (_Claimmockcaller *ClaimmockcallerTransactor) Claim2Bytes(opts *bind.TransactOpts, claim1 []byte, claim2 []byte, reverted [2]bool) (*types.Transaction, error) { + return _Claimmockcaller.contract.Transact(opts, "claim2Bytes", claim1, claim2, reverted) } -// ClaimAsset is a paid mutator transaction binding the contract method 0xccaa2d11. +// Claim2Bytes is a paid mutator transaction binding the contract method 0x1cf865cf. // -// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns() -func (_Claimmockcaller *ClaimmockcallerSession) ClaimAsset(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) { - return _Claimmockcaller.Contract.ClaimAsset(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata) +// Solidity: function claim2Bytes(bytes claim1, bytes claim2, bool[2] reverted) returns() +func (_Claimmockcaller *ClaimmockcallerSession) Claim2Bytes(claim1 []byte, claim2 []byte, reverted [2]bool) (*types.Transaction, error) { + return _Claimmockcaller.Contract.Claim2Bytes(&_Claimmockcaller.TransactOpts, claim1, claim2, reverted) } -// ClaimAsset is a paid mutator transaction binding the contract method 0xccaa2d11. +// Claim2Bytes is a paid mutator transaction binding the contract method 0x1cf865cf. 
// -// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns() -func (_Claimmockcaller *ClaimmockcallerTransactorSession) ClaimAsset(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) { - return _Claimmockcaller.Contract.ClaimAsset(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata) +// Solidity: function claim2Bytes(bytes claim1, bytes claim2, bool[2] reverted) returns() +func (_Claimmockcaller *ClaimmockcallerTransactorSession) Claim2Bytes(claim1 []byte, claim2 []byte, reverted [2]bool) (*types.Transaction, error) { + return _Claimmockcaller.Contract.Claim2Bytes(&_Claimmockcaller.TransactOpts, claim1, claim2, reverted) } -// ClaimMessage is a paid mutator transaction binding the contract method 0xf5efcd79. +// ClaimAsset is a paid mutator transaction binding the contract method 0xa5106170. // -// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns() -func (_Claimmockcaller *ClaimmockcallerTransactor) ClaimMessage(opts *bind.TransactOpts, smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) { - return _Claimmockcaller.contract.Transact(opts, "claimMessage", smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata) +// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, bool reverted) returns() +func (_Claimmockcaller *ClaimmockcallerTransactor) ClaimAsset(opts *bind.TransactOpts, smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte, reverted bool) (*types.Transaction, error) { + return _Claimmockcaller.contract.Transact(opts, "claimAsset", smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata, reverted) } -// ClaimMessage is a paid 
mutator transaction binding the contract method 0xf5efcd79. +// ClaimAsset is a paid mutator transaction binding the contract method 0xa5106170. // -// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns() -func (_Claimmockcaller *ClaimmockcallerSession) ClaimMessage(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) { - return _Claimmockcaller.Contract.ClaimMessage(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata) +// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, bool reverted) returns() +func (_Claimmockcaller *ClaimmockcallerSession) ClaimAsset(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte, reverted bool) (*types.Transaction, error) { + return _Claimmockcaller.Contract.ClaimAsset(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata, reverted) } -// ClaimMessage is a paid mutator transaction binding the contract method 0xf5efcd79. +// ClaimAsset is a paid mutator transaction binding the contract method 0xa5106170. 
// -// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns() -func (_Claimmockcaller *ClaimmockcallerTransactorSession) ClaimMessage(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) { - return _Claimmockcaller.Contract.ClaimMessage(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata) +// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, bool reverted) returns() +func (_Claimmockcaller *ClaimmockcallerTransactorSession) ClaimAsset(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte, reverted bool) (*types.Transaction, error) { + return _Claimmockcaller.Contract.ClaimAsset(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata, reverted) +} + +// ClaimBytes is a paid mutator transaction binding the contract method 0x27e35843. +// +// Solidity: function claimBytes(bytes claim, bool reverted) returns() +func (_Claimmockcaller *ClaimmockcallerTransactor) ClaimBytes(opts *bind.TransactOpts, claim []byte, reverted bool) (*types.Transaction, error) { + return _Claimmockcaller.contract.Transact(opts, "claimBytes", claim, reverted) +} + +// ClaimBytes is a paid mutator transaction binding the contract method 0x27e35843. +// +// Solidity: function claimBytes(bytes claim, bool reverted) returns() +func (_Claimmockcaller *ClaimmockcallerSession) ClaimBytes(claim []byte, reverted bool) (*types.Transaction, error) { + return _Claimmockcaller.Contract.ClaimBytes(&_Claimmockcaller.TransactOpts, claim, reverted) +} + +// ClaimBytes is a paid mutator transaction binding the contract method 0x27e35843. +// +// Solidity: function claimBytes(bytes claim, bool reverted) returns() +func (_Claimmockcaller *ClaimmockcallerTransactorSession) ClaimBytes(claim []byte, reverted bool) (*types.Transaction, error) { + return _Claimmockcaller.Contract.ClaimBytes(&_Claimmockcaller.TransactOpts, claim, reverted) +} + +// ClaimMessage is a paid mutator transaction binding the contract method 0x01beea65. 
+// +// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, bool reverted) returns() +func (_Claimmockcaller *ClaimmockcallerTransactor) ClaimMessage(opts *bind.TransactOpts, smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte, reverted bool) (*types.Transaction, error) { + return _Claimmockcaller.contract.Transact(opts, "claimMessage", smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata, reverted) +} + +// ClaimMessage is a paid mutator transaction binding the contract method 0x01beea65. +// +// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, bool reverted) returns() +func (_Claimmockcaller *ClaimmockcallerSession) ClaimMessage(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte, reverted bool) (*types.Transaction, error) { + return _Claimmockcaller.Contract.ClaimMessage(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata, reverted) +} + +// ClaimMessage is a paid mutator transaction binding the contract method 0x01beea65. 
+// +// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, bool reverted) returns() +func (_Claimmockcaller *ClaimmockcallerTransactorSession) ClaimMessage(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte, reverted bool) (*types.Transaction, error) { + return _Claimmockcaller.Contract.ClaimMessage(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata, reverted) } diff --git a/test/contracts/compile.sh b/test/contracts/compile.sh index d4f423e8..faeba125 100755 --- a/test/contracts/compile.sh +++ b/test/contracts/compile.sh @@ -6,11 +6,11 @@ rm -f IBasePolygonZkEVMGlobalExitRoot.bin rm -f IPolygonZkEVMGlobalExitRootV2.abi rm -f IPolygonZkEVMGlobalExitRootV2.bin -docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/claimmock/ClaimMock.sol -o /contracts --abi --bin --overwrite --optimize +docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/claimmock/ClaimMock.sol -o /contracts --abi --bin --overwrite --optimize --via-ir mv -f ClaimMock.abi abi/claimmock.abi mv -f ClaimMock.bin bin/claimmock.bin -docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/claimmockcaller/ClaimMockCaller.sol -o /contracts --abi --bin --overwrite --optimize +docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/claimmockcaller/ClaimMockCaller.sol -o /contracts --abi --bin --overwrite --optimize --via-ir mv -f ClaimMockCaller.abi abi/claimmockcaller.abi mv -f ClaimMockCaller.bin bin/claimmockcaller.bin rm -f IClaimMock.abi diff --git a/test/contracts/erc20mock/ERC20Mock.json b/test/contracts/erc20mock/ERC20Mock.json new file mode 100644 index 00000000..e27ddd00 --- /dev/null +++ b/test/contracts/erc20mock/ERC20Mock.json @@ -0,0 +1,657 @@ +{ + "_format": "hh-sol-artifact-1", + "contractName": "MockERC20", + "sourceName": "contracts/mocks/MockERC20.sol", + "abi": [ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "Paused", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "bytes32", + "name": "previousAdminRole", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "bytes32", + "name": "newAdminRole", + "type": "bytes32" + } + ], + "name": "RoleAdminChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + 
"internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "RoleGranted", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "RoleRevoked", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "Unpaused", + "type": "event" + }, + { + "inputs": [], + "name": "DEFAULT_ADMIN_ROLE", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "MINTER_ROLE", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "PAUSER_ROLE", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "address", + "name": "spender", + "type": "address" + } + ], + "name": "allowance", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "burn", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "burnFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "decimals", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" 
+ }, + { + "internalType": "uint256", + "name": "subtractedValue", + "type": "uint256" + } + ], + "name": "decreaseAllowance", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + } + ], + "name": "getRoleAdmin", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "index", + "type": "uint256" + } + ], + "name": "getRoleMember", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + } + ], + "name": "getRoleMemberCount", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "grantRole", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "hasRole", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "addedValue", + "type": "uint256" + } + ], + "name": "increaseAllowance", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "mint", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "name", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "pause", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "paused", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "renounceRole", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "revokeRole", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes4", + "name": "interfaceId", + "type": "bytes4" + } + ], 
+ "name": "supportsInterface", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "symbol", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "totalSupply", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "transfer", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "unpause", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } + ], + "bytecode": "0x60806040523480156200001157600080fd5b50604080518082018252600480825263151154d560e21b60208084018290528451808601909552918452908301529081816005620000508382620002ca565b5060066200005f8282620002ca565b50506007805460ff191690555062000079600033620000d9565b620000a57f9f2df0fed2c77648de5860a4cc508cd0818c85b8b8a1ab4ceeef8d981c8956a633620000d9565b620000d17f65d7a28e3265b37a6474929f336521b332c1681b933f6cb9f3376673440d862a33620000d9565b505062000396565b620000e58282620000e9565b5050565b620000f5828262000114565b60008281526001602052604090206200010f9082620001b4565b505050565b6000828152602081815260408083206001600160a01b038516845290915290205460ff16620000e5576000828152602081815260408083206001600160a01b03851684529091529020805460ff19166001179055620001703390565b6001600160a01b0316816001600160a01b0316837f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a45050565b6000620001cb836001600160a01b038416620001d4565b90505b92915050565b60008181526001830160205260408120546200021d57508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155620001ce565b506000620001ce565b634e487b7160e01b600052604160045260246000fd5b600181811c908216806200025157607f821691505b6020821081036200027257634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200010f57600081815260208120601f850160051c81016020861015620002a15750805b601f850160051c820191505b81811015620002c257828155600101620002ad565b505050505050565b81516001600160401b03811115620002e657620002e662000226565b620002fe81620002f784546200023c565b8462000278565b602080601f8311600181146200033657600084156200031d5750858301515b600019600386901b1c1916600185901b178555620002c2565b600085815260208120601f198616915b82811015620003675788860151825594840194600190910190840162000346565b5085821015620003865787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b6117fa80620003a66000396000f3fe608060405234801561001057600080fd5b50600436106101845760003560e01c806370a08231116100d9578063a457c2d711610087578063a457c2d714610336578063a9059cbb14610349578063ca15c8731461035c578063d53913931461036f578063d547741f14610396578063dd62ed3e146103a
9578063e63ab1e9146103bc57600080fd5b806370a08231146102a457806379cc6790146102cd5780638456cb59146102e05780639010d07c146102e857806391d148541461031357806395d89b4114610326578063a217fddf1461032e57600080fd5b8063313ce56711610136578063313ce5671461023657806336568abe1461024557806339509351146102585780633f4ba83a1461026b57806340c10f191461027357806342966c68146102865780635c975abb1461029957600080fd5b806301ffc9a71461018957806306fdde03146101b1578063095ea7b3146101c657806318160ddd146101d957806323b872dd146101eb578063248a9ca3146101fe5780632f2ff15d14610221575b600080fd5b61019c610197366004611460565b6103d1565b60405190151581526020015b60405180910390f35b6101b96103fc565b6040516101a891906114ae565b61019c6101d43660046114fd565b61048e565b6004545b6040519081526020016101a8565b61019c6101f9366004611527565b6104a6565b6101dd61020c366004611563565b60009081526020819052604090206001015490565b61023461022f36600461157c565b6104ca565b005b604051601281526020016101a8565b61023461025336600461157c565b6104f4565b61019c6102663660046114fd565b610577565b610234610599565b6102346102813660046114fd565b610617565b610234610294366004611563565b6106a4565b60075460ff1661019c565b6101dd6102b23660046115a8565b6001600160a01b031660009081526002602052604090205490565b6102346102db3660046114fd565b6106b1565b6102346106c6565b6102fb6102f63660046115c3565b610740565b6040516001600160a01b0390911681526020016101a8565b61019c61032136600461157c565b61075f565b6101b9610788565b6101dd600081565b61019c6103443660046114fd565b610797565b61019c6103573660046114fd565b610812565b6101dd61036a366004611563565b610820565b6101dd7f9f2df0fed2c77648de5860a4cc508cd0818c85b8b8a1ab4ceeef8d981c8956a681565b6102346103a436600461157c565b610837565b6101dd6103b73660046115e5565b61085c565b6101dd60008051602061178583398151915281565b60006001600160e01b03198216635a05180f60e01b14806103f657506103f682610887565b92915050565b60606005805461040b9061160f565b80601f01602080910402602001604051908101604052809291908181526020018280546104379061160f565b80156104845780601f1061045957610100808354040283529160200191610484565b820191906000526020600020905b81548152906001019060200180831161046757829003601f168201915b5050505050905090565b60003361049c8185856108bc565b5060019392505050565b6000336104b48582856109e0565b6104bf858585610a5a565b506001949350505050565b6000828152602081905260409020600101546104e581610bfe565b6104ef8383610c08565b505050565b6001600160a01b03811633146105695760405162461bcd60e51b815260206004820152602f60248201527f416363657373436f6e74726f6c3a2063616e206f6e6c792072656e6f756e636560448201526e103937b632b9903337b91039b2b63360891b60648201526084015b60405180910390fd5b6105738282610c2a565b5050565b60003361049c81858561058a838361085c565b610594919061165f565b6108bc565b6105b16000805160206117858339815191523361075f565b61060d5760405162461bcd60e51b8152602060048201526039602482015260008051602061176583398151915260448201527876652070617573657220726f6c6520746f20756e706175736560381b6064820152608401610560565b610615610c4c565b565b6106417f9f2df0fed2c77648de5860a4cc508cd0818c85b8b8a1ab4ceeef8d981c8956a63361075f565b61069a5760405162461bcd60e51b815260206004820152603660248201526000805160206117658339815191526044820152751d99481b5a5b9d195c881c9bdb19481d1bc81b5a5b9d60521b6064820152608401610560565b6105738282610c9e565b6106ae3382610d59565b50565b6106bc8233836109e0565b6105738282610d59565b6106de6000805160206117858339815191523361075f565b6107385760405162461bcd60e51b8152602060048201526037602482015260008051602061176583398151915260448201527676652070617573657220726f6c6520746f20706175736560481b6064820152608401610560565b610615610e87565b60008281526001602052604081206107589083610ec4565b9392505050565b6000918252602082
815260408084206001600160a01b0393909316845291905290205460ff1690565b60606006805461040b9061160f565b600033816107a5828661085c565b9050838110156108055760405162461bcd60e51b815260206004820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f77604482015264207a65726f60d81b6064820152608401610560565b6104bf82868684036108bc565b60003361049c818585610a5a565b60008181526001602052604081206103f690610ed0565b60008281526020819052604090206001015461085281610bfe565b6104ef8383610c2a565b6001600160a01b03918216600090815260036020908152604080832093909416825291909152205490565b60006001600160e01b03198216637965db0b60e01b14806103f657506301ffc9a760e01b6001600160e01b03198316146103f6565b6001600160a01b03831661091e5760405162461bcd60e51b8152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f206164646044820152637265737360e01b6064820152608401610560565b6001600160a01b03821661097f5760405162461bcd60e51b815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f206164647265604482015261737360f01b6064820152608401610560565b6001600160a01b0383811660008181526003602090815260408083209487168084529482529182902085905590518481527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925910160405180910390a3505050565b60006109ec848461085c565b90506000198114610a545781811015610a475760405162461bcd60e51b815260206004820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e63650000006044820152606401610560565b610a5484848484036108bc565b50505050565b6001600160a01b038316610abe5760405162461bcd60e51b815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f206164604482015264647265737360d81b6064820152608401610560565b6001600160a01b038216610b205760405162461bcd60e51b815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201526265737360e81b6064820152608401610560565b610b2b838383610eda565b6001600160a01b03831660009081526002602052604090205481811015610ba35760405162461bcd60e51b815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e7420657863656564732062604482015265616c616e636560d01b6064820152608401610560565b6001600160a01b0380851660008181526002602052604080822086860390559286168082529083902080548601905591516000805160206117a583398151915290610bf19086815260200190565b60405180910390a3610a54565b6106ae8133610ee5565b610c128282610f3e565b60008281526001602052604090206104ef9082610fc2565b610c348282610fd7565b60008281526001602052604090206104ef908261103c565b610c54611051565b6007805460ff191690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa335b6040516001600160a01b03909116815260200160405180910390a1565b6001600160a01b038216610cf45760405162461bcd60e51b815260206004820152601f60248201527f45524332303a206d696e7420746f20746865207a65726f2061646472657373006044820152606401610560565b610d0060008383610eda565b8060046000828254610d12919061165f565b90915550506001600160a01b0382166000818152600260209081526040808320805486019055518481526000805160206117a5833981519152910160405180910390a35050565b6001600160a01b038216610db95760405162461bcd60e51b815260206004820152602160248201527f45524332303a206275726e2066726f6d20746865207a65726f206164647265736044820152607360f81b6064820152608401610560565b610dc582600083610eda565b6001600160a01b03821660009081526002602052604090205481811015610e395760405162461bcd60e51b815260206004820152602260248201527f45524332303a206275726e20616d6f756e7420657863656564732062616c616e604482015261636560f01b6064820152608401610560565b6001600160a01b0383166000818152600260209081526040808320868603905
5600480548790039055518581529192916000805160206117a5833981519152910160405180910390a3505050565b610e8f61109a565b6007805460ff191660011790557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258610c813390565b600061075883836110e0565b60006103f6825490565b6104ef83838361110a565b610eef828261075f565b61057357610efc81611170565b610f07836020611182565b604051602001610f18929190611672565b60408051601f198184030181529082905262461bcd60e51b8252610560916004016114ae565b610f48828261075f565b610573576000828152602081815260408083206001600160a01b03851684529091529020805460ff19166001179055610f7e3390565b6001600160a01b0316816001600160a01b0316837f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a45050565b6000610758836001600160a01b03841661131e565b610fe1828261075f565b15610573576000828152602081815260408083206001600160a01b0385168085529252808320805460ff1916905551339285917ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b9190a45050565b6000610758836001600160a01b03841661136d565b60075460ff166106155760405162461bcd60e51b815260206004820152601460248201527314185d5cd8589b194e881b9bdd081c185d5cd95960621b6044820152606401610560565b60075460ff16156106155760405162461bcd60e51b815260206004820152601060248201526f14185d5cd8589b194e881c185d5cd95960821b6044820152606401610560565b60008260000182815481106110f7576110f76116e1565b9060005260206000200154905092915050565b60075460ff16156104ef5760405162461bcd60e51b815260206004820152602a60248201527f45524332305061757361626c653a20746f6b656e207472616e736665722077686044820152691a5b19481c185d5cd95960b21b6064820152608401610560565b60606103f66001600160a01b03831660145b606060006111918360026116f7565b61119c90600261165f565b67ffffffffffffffff8111156111b4576111b461170e565b6040519080825280601f01601f1916602001820160405280156111de576020820181803683370190505b509050600360fc1b816000815181106111f9576111f96116e1565b60200101906001600160f81b031916908160001a905350600f60fb1b81600181518110611228576112286116e1565b60200101906001600160f81b031916908160001a905350600061124c8460026116f7565b61125790600161165f565b90505b60018111156112cf576f181899199a1a9b1b9c1cb0b131b232b360811b85600f166010811061128b5761128b6116e1565b1a60f81b8282815181106112a1576112a16116e1565b60200101906001600160f81b031916908160001a90535060049490941c936112c881611724565b905061125a565b5083156107585760405162461bcd60e51b815260206004820181905260248201527f537472696e67733a20686578206c656e67746820696e73756666696369656e746044820152606401610560565b6000818152600183016020526040812054611365575081546001818101845560008481526020808220909301849055845484825282860190935260409020919091556103f6565b5060006103f6565b6000818152600183016020526040812054801561145657600061139160018361173b565b85549091506000906113a59060019061173b565b905081811461140a5760008660000182815481106113c5576113c56116e1565b90600052602060002001549050808760000184815481106113e8576113e86116e1565b6000918252602080832090910192909255918252600188019052604090208390555b855486908061141b5761141b61174e565b6001900381819060005260206000200160009055905585600101600086815260200190815260200160002060009055600193505050506103f6565b60009150506103f6565b60006020828403121561147257600080fd5b81356001600160e01b03198116811461075857600080fd5b60005b838110156114a557818101518382015260200161148d565b50506000910152565b60208152600082518060208401526114cd81604085016020870161148a565b601f01601f19169190910160400192915050565b80356001600160a01b03811681146114f857600080fd5b919050565b6000806040838503121561151057600080fd5b611519836114e1565b946020939093013593505050565b60008060006060848603121561153c57600080fd5b611545846114e1565b9250611553
602085016114e1565b9150604084013590509250925092565b60006020828403121561157557600080fd5b5035919050565b6000806040838503121561158f57600080fd5b8235915061159f602084016114e1565b90509250929050565b6000602082840312156115ba57600080fd5b610758826114e1565b600080604083850312156115d657600080fd5b50508035926020909101359150565b600080604083850312156115f857600080fd5b611601836114e1565b915061159f602084016114e1565b600181811c9082168061162357607f821691505b60208210810361164357634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b808201808211156103f6576103f6611649565b76020b1b1b2b9b9a1b7b73a3937b61d1030b1b1b7bab73a1604d1b8152600083516116a481601785016020880161148a565b7001034b99036b4b9b9b4b733903937b6329607d1b60179184019182015283516116d581602884016020880161148a565b01602801949350505050565b634e487b7160e01b600052603260045260246000fd5b80820281158282048414176103f6576103f6611649565b634e487b7160e01b600052604160045260246000fd5b60008161173357611733611649565b506000190190565b818103818111156103f6576103f6611649565b634e487b7160e01b600052603160045260246000fdfe45524332305072657365744d696e7465725061757365723a206d75737420686165d7a28e3265b37a6474929f336521b332c1681b933f6cb9f3376673440d862addf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa2646970667358221220d5a22b9391b8f37e5b49e43cc1eabfcea8be6d7b5aa0a84dc5daa1b7a05730f364736f6c63430008130033", + "deployedBytecode": "0x608060405234801561001057600080fd5b50600436106101845760003560e01c806370a08231116100d9578063a457c2d711610087578063a457c2d714610336578063a9059cbb14610349578063ca15c8731461035c578063d53913931461036f578063d547741f14610396578063dd62ed3e146103a9578063e63ab1e9146103bc57600080fd5b806370a08231146102a457806379cc6790146102cd5780638456cb59146102e05780639010d07c146102e857806391d148541461031357806395d89b4114610326578063a217fddf1461032e57600080fd5b8063313ce56711610136578063313ce5671461023657806336568abe1461024557806339509351146102585780633f4ba83a1461026b57806340c10f191461027357806342966c68146102865780635c975abb1461029957600080fd5b806301ffc9a71461018957806306fdde03146101b1578063095ea7b3146101c657806318160ddd146101d957806323b872dd146101eb578063248a9ca3146101fe5780632f2ff15d14610221575b600080fd5b61019c610197366004611460565b6103d1565b60405190151581526020015b60405180910390f35b6101b96103fc565b6040516101a891906114ae565b61019c6101d43660046114fd565b61048e565b6004545b6040519081526020016101a8565b61019c6101f9366004611527565b6104a6565b6101dd61020c366004611563565b60009081526020819052604090206001015490565b61023461022f36600461157c565b6104ca565b005b604051601281526020016101a8565b61023461025336600461157c565b6104f4565b61019c6102663660046114fd565b610577565b610234610599565b6102346102813660046114fd565b610617565b610234610294366004611563565b6106a4565b60075460ff1661019c565b6101dd6102b23660046115a8565b6001600160a01b031660009081526002602052604090205490565b6102346102db3660046114fd565b6106b1565b6102346106c6565b6102fb6102f63660046115c3565b610740565b6040516001600160a01b0390911681526020016101a8565b61019c61032136600461157c565b61075f565b6101b9610788565b6101dd600081565b61019c6103443660046114fd565b610797565b61019c6103573660046114fd565b610812565b6101dd61036a366004611563565b610820565b6101dd7f9f2df0fed2c77648de5860a4cc508cd0818c85b8b8a1ab4ceeef8d981c8956a681565b6102346103a436600461157c565b610837565b6101dd6103b73660046115e5565b61085c565b6101dd60008051602061178583398151915281565b60006001600160e01b03198216635a05180f60e01b14806103f657506103f682610887565b92915050565b60606005805461040b9061160f565b80601f016020809104026020016040519081016040528092919081815260200182805461043790611
60f565b80156104845780601f1061045957610100808354040283529160200191610484565b820191906000526020600020905b81548152906001019060200180831161046757829003601f168201915b5050505050905090565b60003361049c8185856108bc565b5060019392505050565b6000336104b48582856109e0565b6104bf858585610a5a565b506001949350505050565b6000828152602081905260409020600101546104e581610bfe565b6104ef8383610c08565b505050565b6001600160a01b03811633146105695760405162461bcd60e51b815260206004820152602f60248201527f416363657373436f6e74726f6c3a2063616e206f6e6c792072656e6f756e636560448201526e103937b632b9903337b91039b2b63360891b60648201526084015b60405180910390fd5b6105738282610c2a565b5050565b60003361049c81858561058a838361085c565b610594919061165f565b6108bc565b6105b16000805160206117858339815191523361075f565b61060d5760405162461bcd60e51b8152602060048201526039602482015260008051602061176583398151915260448201527876652070617573657220726f6c6520746f20756e706175736560381b6064820152608401610560565b610615610c4c565b565b6106417f9f2df0fed2c77648de5860a4cc508cd0818c85b8b8a1ab4ceeef8d981c8956a63361075f565b61069a5760405162461bcd60e51b815260206004820152603660248201526000805160206117658339815191526044820152751d99481b5a5b9d195c881c9bdb19481d1bc81b5a5b9d60521b6064820152608401610560565b6105738282610c9e565b6106ae3382610d59565b50565b6106bc8233836109e0565b6105738282610d59565b6106de6000805160206117858339815191523361075f565b6107385760405162461bcd60e51b8152602060048201526037602482015260008051602061176583398151915260448201527676652070617573657220726f6c6520746f20706175736560481b6064820152608401610560565b610615610e87565b60008281526001602052604081206107589083610ec4565b9392505050565b6000918252602082815260408084206001600160a01b0393909316845291905290205460ff1690565b60606006805461040b9061160f565b600033816107a5828661085c565b9050838110156108055760405162461bcd60e51b815260206004820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f77604482015264207a65726f60d81b6064820152608401610560565b6104bf82868684036108bc565b60003361049c818585610a5a565b60008181526001602052604081206103f690610ed0565b60008281526020819052604090206001015461085281610bfe565b6104ef8383610c2a565b6001600160a01b03918216600090815260036020908152604080832093909416825291909152205490565b60006001600160e01b03198216637965db0b60e01b14806103f657506301ffc9a760e01b6001600160e01b03198316146103f6565b6001600160a01b03831661091e5760405162461bcd60e51b8152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f206164646044820152637265737360e01b6064820152608401610560565b6001600160a01b03821661097f5760405162461bcd60e51b815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f206164647265604482015261737360f01b6064820152608401610560565b6001600160a01b0383811660008181526003602090815260408083209487168084529482529182902085905590518481527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925910160405180910390a3505050565b60006109ec848461085c565b90506000198114610a545781811015610a475760405162461bcd60e51b815260206004820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e63650000006044820152606401610560565b610a5484848484036108bc565b50505050565b6001600160a01b038316610abe5760405162461bcd60e51b815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f206164604482015264647265737360d81b6064820152608401610560565b6001600160a01b038216610b205760405162461bcd60e51b815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201526265737360e81b6064820152608401610560565b610b2b8383
83610eda565b6001600160a01b03831660009081526002602052604090205481811015610ba35760405162461bcd60e51b815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e7420657863656564732062604482015265616c616e636560d01b6064820152608401610560565b6001600160a01b0380851660008181526002602052604080822086860390559286168082529083902080548601905591516000805160206117a583398151915290610bf19086815260200190565b60405180910390a3610a54565b6106ae8133610ee5565b610c128282610f3e565b60008281526001602052604090206104ef9082610fc2565b610c348282610fd7565b60008281526001602052604090206104ef908261103c565b610c54611051565b6007805460ff191690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa335b6040516001600160a01b03909116815260200160405180910390a1565b6001600160a01b038216610cf45760405162461bcd60e51b815260206004820152601f60248201527f45524332303a206d696e7420746f20746865207a65726f2061646472657373006044820152606401610560565b610d0060008383610eda565b8060046000828254610d12919061165f565b90915550506001600160a01b0382166000818152600260209081526040808320805486019055518481526000805160206117a5833981519152910160405180910390a35050565b6001600160a01b038216610db95760405162461bcd60e51b815260206004820152602160248201527f45524332303a206275726e2066726f6d20746865207a65726f206164647265736044820152607360f81b6064820152608401610560565b610dc582600083610eda565b6001600160a01b03821660009081526002602052604090205481811015610e395760405162461bcd60e51b815260206004820152602260248201527f45524332303a206275726e20616d6f756e7420657863656564732062616c616e604482015261636560f01b6064820152608401610560565b6001600160a01b03831660008181526002602090815260408083208686039055600480548790039055518581529192916000805160206117a5833981519152910160405180910390a3505050565b610e8f61109a565b6007805460ff191660011790557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258610c813390565b600061075883836110e0565b60006103f6825490565b6104ef83838361110a565b610eef828261075f565b61057357610efc81611170565b610f07836020611182565b604051602001610f18929190611672565b60408051601f198184030181529082905262461bcd60e51b8252610560916004016114ae565b610f48828261075f565b610573576000828152602081815260408083206001600160a01b03851684529091529020805460ff19166001179055610f7e3390565b6001600160a01b0316816001600160a01b0316837f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a45050565b6000610758836001600160a01b03841661131e565b610fe1828261075f565b15610573576000828152602081815260408083206001600160a01b0385168085529252808320805460ff1916905551339285917ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b9190a45050565b6000610758836001600160a01b03841661136d565b60075460ff166106155760405162461bcd60e51b815260206004820152601460248201527314185d5cd8589b194e881b9bdd081c185d5cd95960621b6044820152606401610560565b60075460ff16156106155760405162461bcd60e51b815260206004820152601060248201526f14185d5cd8589b194e881c185d5cd95960821b6044820152606401610560565b60008260000182815481106110f7576110f76116e1565b9060005260206000200154905092915050565b60075460ff16156104ef5760405162461bcd60e51b815260206004820152602a60248201527f45524332305061757361626c653a20746f6b656e207472616e736665722077686044820152691a5b19481c185d5cd95960b21b6064820152608401610560565b60606103f66001600160a01b03831660145b606060006111918360026116f7565b61119c90600261165f565b67ffffffffffffffff8111156111b4576111b461170e565b6040519080825280601f01601f1916602001820160405280156111de576020820181803683370190505b509050600360fc1b816000815181106111f9576111f96116e1565b60200101906001600160f81b031916908160001a905350600f60fb1b816
00181518110611228576112286116e1565b60200101906001600160f81b031916908160001a905350600061124c8460026116f7565b61125790600161165f565b90505b60018111156112cf576f181899199a1a9b1b9c1cb0b131b232b360811b85600f166010811061128b5761128b6116e1565b1a60f81b8282815181106112a1576112a16116e1565b60200101906001600160f81b031916908160001a90535060049490941c936112c881611724565b905061125a565b5083156107585760405162461bcd60e51b815260206004820181905260248201527f537472696e67733a20686578206c656e67746820696e73756666696369656e746044820152606401610560565b6000818152600183016020526040812054611365575081546001818101845560008481526020808220909301849055845484825282860190935260409020919091556103f6565b5060006103f6565b6000818152600183016020526040812054801561145657600061139160018361173b565b85549091506000906113a59060019061173b565b905081811461140a5760008660000182815481106113c5576113c56116e1565b90600052602060002001549050808760000184815481106113e8576113e86116e1565b6000918252602080832090910192909255918252600188019052604090208390555b855486908061141b5761141b61174e565b6001900381819060005260206000200160009055905585600101600086815260200190815260200160002060009055600193505050506103f6565b60009150506103f6565b60006020828403121561147257600080fd5b81356001600160e01b03198116811461075857600080fd5b60005b838110156114a557818101518382015260200161148d565b50506000910152565b60208152600082518060208401526114cd81604085016020870161148a565b601f01601f19169190910160400192915050565b80356001600160a01b03811681146114f857600080fd5b919050565b6000806040838503121561151057600080fd5b611519836114e1565b946020939093013593505050565b60008060006060848603121561153c57600080fd5b611545846114e1565b9250611553602085016114e1565b9150604084013590509250925092565b60006020828403121561157557600080fd5b5035919050565b6000806040838503121561158f57600080fd5b8235915061159f602084016114e1565b90509250929050565b6000602082840312156115ba57600080fd5b610758826114e1565b600080604083850312156115d657600080fd5b50508035926020909101359150565b600080604083850312156115f857600080fd5b611601836114e1565b915061159f602084016114e1565b600181811c9082168061162357607f821691505b60208210810361164357634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b808201808211156103f6576103f6611649565b76020b1b1b2b9b9a1b7b73a3937b61d1030b1b1b7bab73a1604d1b8152600083516116a481601785016020880161148a565b7001034b99036b4b9b9b4b733903937b6329607d1b60179184019182015283516116d581602884016020880161148a565b01602801949350505050565b634e487b7160e01b600052603260045260246000fd5b80820281158282048414176103f6576103f6611649565b634e487b7160e01b600052604160045260246000fd5b60008161173357611733611649565b506000190190565b818103818111156103f6576103f6611649565b634e487b7160e01b600052603160045260246000fdfe45524332305072657365744d696e7465725061757365723a206d75737420686165d7a28e3265b37a6474929f336521b332c1681b933f6cb9f3376673440d862addf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa2646970667358221220d5a22b9391b8f37e5b49e43cc1eabfcea8be6d7b5aa0a84dc5daa1b7a05730f364736f6c63430008130033", + "linkReferences": {}, + "deployedLinkReferences": {} + } + \ No newline at end of file diff --git a/test/e2e.bats b/test/e2e.bats new file mode 100644 index 00000000..c85e33ce --- /dev/null +++ b/test/e2e.bats @@ -0,0 +1,10 @@ +setup() { + load 'helpers/common-setup' + _common_setup +} + +@test "Verify batches" { + echo "Waiting 10 minutes to get some verified batch...." 
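+  # batch_verification_monitor.sh takes, presumably, the verified-batch count
+  # to exceed and a timeout in seconds (0 and 600 here). A minimal sketch of
+  # the poll it performs; the zkevm_verifiedBatchNumber RPC and $l2_rpc_url
+  # names are illustrative assumptions, not the script's actual contents:
+  #   while true; do
+  #     verified=$(cast rpc zkevm_verifiedBatchNumber --rpc-url "$l2_rpc_url" | tr -d '"')
+  #     (( verified > 0 )) && break
+  #     sleep 10
+  #   done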
+ run $PROJECT_ROOT/test/scripts/batch_verification_monitor.sh 0 600 + assert_success +} diff --git a/test/helpers/aggoracle_e2e.go b/test/helpers/aggoracle_e2e.go index fdde39dd..8b5073fb 100644 --- a/test/helpers/aggoracle_e2e.go +++ b/test/helpers/aggoracle_e2e.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "math/big" + "path" "testing" "time" @@ -15,6 +16,7 @@ import ( "github.com/0xPolygon/cdk/aggoracle/chaingersender" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -26,7 +28,13 @@ import ( ) const ( - NetworkIDL2 = uint32(1) + NetworkIDL2 = uint32(1) + chainID = 1337 + initialBalance = "10000000000000000000000000" + blockGasLimit = uint64(999999999999999999) + syncBlockChunkSize = 10 + retries = 3 + periodRetry = time.Millisecond * 100 ) type AggoracleWithEVMChainEnv struct { @@ -51,10 +59,12 @@ type AggoracleWithEVMChainEnv struct { } func SetupAggoracleWithEVMChain(t *testing.T) *AggoracleWithEVMChainEnv { + t.Helper() + ctx := context.Background() l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, rd := CommonSetup(t) sender, l2Client, gerL2Contract, gerL2Addr, bridgeL2Contract, bridgeL2Addr, authL2, ethTxManMockL2 := EVMSetup(t) - oracle, err := aggoracle.New(sender, l1Client.Client(), syncer, etherman.LatestBlock, time.Millisecond*20) + oracle, err := aggoracle.New(log.GetDefaultLogger(), sender, l1Client.Client(), syncer, etherman.LatestBlock, time.Millisecond*20) //nolint:mnd require.NoError(t, err) go oracle.Start(ctx) @@ -90,12 +100,14 @@ func CommonSetup(t *testing.T) ( *bind.TransactOpts, *reorgdetector.ReorgDetector, ) { + t.Helper() + // Config and spin up ctx := context.Background() // Simulated L1 privateKeyL1, err := crypto.GenerateKey() require.NoError(t, err) - authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337)) + authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID)) require.NoError(t, err) l1Client, gerL1Addr, gerL1Contract, bridgeL1Addr, bridgeL1Contract, err := newSimulatedL1(authL1) require.NoError(t, err) @@ -104,8 +116,8 @@ func CommonSetup(t *testing.T) ( reorg, err := reorgdetector.New(l1Client.Client(), reorgdetector.Config{DBPath: dbPathReorgDetector}) require.NoError(t, err) // Syncer - dbPathSyncer := t.TempDir() - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, common.Address{}, 10, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3) + dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, common.Address{}, syncBlockChunkSize, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0, periodRetry, retries) require.NoError(t, err) go syncer.Start(ctx) @@ -122,14 +134,17 @@ func EVMSetup(t *testing.T) ( *bind.TransactOpts, *EthTxManagerMock, ) { + t.Helper() + privateKeyL2, err := crypto.GenerateKey() require.NoError(t, err) - authL2, err := bind.NewKeyedTransactorWithChainID(privateKeyL2, big.NewInt(1337)) + authL2, err := bind.NewKeyedTransactorWithChainID(privateKeyL2, big.NewInt(chainID)) require.NoError(t, err) l2Client, gerL2Addr, gerL2Sc, bridgeL2Addr, bridgeL2Sc, err := newSimulatedEVMAggSovereignChain(authL2) require.NoError(t, err) ethTxManMock := NewEthTxManMock(t, l2Client, authL2) - 
sender, err := chaingersender.NewEVMChainGERSender(gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) + sender, err := chaingersender.NewEVMChainGERSender(log.GetDefaultLogger(), + gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) //nolint:mnd require.NoError(t, err) return sender, l2Client, gerL2Sc, gerL2Addr, bridgeL2Sc, bridgeL2Addr, authL2, ethTxManMock @@ -144,12 +159,18 @@ func newSimulatedL1(auth *bind.TransactOpts) ( err error, ) { ctx := context.Background() + privateKeyL1, err := crypto.GenerateKey() if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to generate private key: %w", err) } - authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337)) - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd + + authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID)) + if err != nil { + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create transactor: %w", err) + } + + balance, _ := new(big.Int).SetString(initialBalance, 10) //nolint:mnd address := auth.From genesisAlloc := map[common.Address]types.Account{ address: { @@ -159,28 +180,29 @@ func newSimulatedL1(auth *bind.TransactOpts) ( Balance: balance, }, } - blockGasLimit := uint64(999999999999999999) //nolint:gomnd + client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy bridge implementation: %w", err) } client.Commit() nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get pending nonce: %w", err) } precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1) bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) } if bridgeABI == nil { err = errors.New("GetABI returned nil") - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) } + dataCallProxy, err := bridgeABI.Pack("initialize", uint32(0), // networkIDMainnet common.Address{}, // gasTokenAddressMainnet" @@ -190,8 +212,9 @@ func newSimulatedL1(auth *bind.TransactOpts) ( []byte{}, // gasTokenMetadata ) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to pack data for proxy initialization: %w", err) } + bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( authDeployer, client.Client(), @@ -200,28 +223,40 @@ func newSimulatedL1(auth *bind.TransactOpts) ( dataCallProxy, ) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err) } client.Commit() + bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create bridge contract instance: %w", err) } + checkGERAddr, err := 
bridgeContract.GlobalExitRootManager(&bind.CallOpts{Pending: false}) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get Global Exit Root Manager: %w", err) } if precalculatedAddr != checkGERAddr { - err = fmt.Errorf("error deploying bridge, unexpected GER addr. Expected %s. Actual %s", precalculatedAddr.Hex(), checkGERAddr.Hex()) + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( + "error deploying bridge, unexpected GER addr. Expected %s. Actual %s", + precalculatedAddr.Hex(), checkGERAddr.Hex(), + ) } gerAddr, _, gerContract, err = gerContractL1.DeployGlobalexitrootnopush0(authDeployer, client.Client(), auth.From, bridgeAddr) - + if err != nil { + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy GER contract: %w", err) + } client.Commit() + if precalculatedAddr != gerAddr { - err = fmt.Errorf("error calculating addr. Expected %s. Actual %s", precalculatedAddr.Hex(), gerAddr.Hex()) + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( + "error calculating GER address. Expected %s. Actual %s", + precalculatedAddr.Hex(), gerAddr.Hex(), + ) } - return + + return client, gerAddr, gerContract, bridgeAddr, bridgeContract, nil } func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( @@ -233,14 +268,21 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( err error, ) { ctx := context.Background() + privateKeyL1, err := crypto.GenerateKey() if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to generate private key: %w", err) } - authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337)) - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd + + authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID)) + if err != nil { + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create transactor: %w", err) + } + + balance, _ := new(big.Int).SetString(initialBalance, 10) //nolint:mnd address := auth.From precalculatedBridgeAddr := crypto.CreateAddress(authDeployer.From, 1) + genesisAlloc := map[common.Address]types.Account{ address: { Balance: balance, @@ -252,28 +294,31 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( Balance: balance, }, } - blockGasLimit := uint64(999999999999999999) //nolint:gomnd + + const blockGasLimit = uint64(999999999999999999) client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy bridge implementation: %w", err) } client.Commit() nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get pending nonce: %w", err) } precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1) + bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) } if bridgeABI == nil { err = errors.New("GetABI returned nil") - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get 
bridge ABI: %w", err) } + dataCallProxy, err := bridgeABI.Pack("initialize", NetworkIDL2, common.Address{}, // gasTokenAddressMainnet" @@ -283,8 +328,9 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( []byte{}, // gasTokenMetadata ) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to pack data for proxy initialization: %w", err) } + bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( authDeployer, client.Client(), @@ -293,40 +339,52 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( dataCallProxy, ) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err) } if bridgeAddr != precalculatedBridgeAddr { - err = fmt.Errorf("error calculating bridge addr. Expected: %s. Actual: %s", precalculatedBridgeAddr, bridgeAddr) - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( + "error calculating bridge addr. Expected: %s. Actual: %s", + precalculatedBridgeAddr, bridgeAddr, + ) } client.Commit() + bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create bridge contract instance: %w", err) } + checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{}) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get Global Exit Root Manager: %w", err) } if precalculatedAddr != checkGERAddr { - err = errors.New("error deploying bridge") + return nil, common.Address{}, nil, common.Address{}, nil, errors.New( + "error deploying bridge, unexpected GER Manager address", + ) } gerAddr, _, gerContract, err = gerContractEVMChain.DeployPessimisticglobalexitrootnopush0(authDeployer, client.Client(), auth.From) if err != nil { - return + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy GER contract: %w", err) } client.Commit() - _GLOBAL_EXIT_ROOT_SETTER_ROLE := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176") - _, err = gerContract.GrantRole(authDeployer, _GLOBAL_EXIT_ROOT_SETTER_ROLE, auth.From) + globalExitRootSetterRole := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176") + _, err = gerContract.GrantRole(authDeployer, globalExitRootSetterRole, auth.From) + if err != nil { + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to grant role to GER contract: %w", err) + } client.Commit() - hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, _GLOBAL_EXIT_ROOT_SETTER_ROLE, auth.From) + + hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, globalExitRootSetterRole, auth.From) if !hasRole { - err = errors.New("failed to set role") + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to set role for GER contract") } + if precalculatedAddr != gerAddr { - err = errors.New("error calculating addr") + return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("error calculating GER address") } - return + + return client, gerAddr, gerContract, bridgeAddr, bridgeContract, nil } diff --git a/test/helpers/common-setup.bash b/test/helpers/common-setup.bash new file mode 100644 index 00000000..b7691366 --- /dev/null +++ b/test/helpers/common-setup.bash @@ -0,0 +1,13 @@ 
+#!/usr/bin/env bash + +_common_setup() { + bats_load_library 'bats-support' + bats_load_library 'bats-assert' + + # get the containing directory of this file + # use $BATS_TEST_FILENAME instead of ${BASH_SOURCE[0]} or $0, + # as those will point to the bats executable's location or the preprocessed file respectively + PROJECT_ROOT="$( cd "$( dirname "$BATS_TEST_FILENAME" )/.." >/dev/null 2>&1 && pwd )" + # make executables in src/ visible to PATH + PATH="$PROJECT_ROOT/src:$PATH" +} diff --git a/test/helpers/common.bash b/test/helpers/common.bash new file mode 100644 index 00000000..15057d17 --- /dev/null +++ b/test/helpers/common.bash @@ -0,0 +1,188 @@ +#!/usr/bin/env bash + +function deployContract() { + local private_key="$1" + local contract_artifact="$2" + + # Check if rpc_url is available + if [[ -z "$rpc_url" ]]; then + echo "Error: rpc_url environment variable is not set." + return 1 + fi + + if [[ ! -f "$contract_artifact" ]]; then + echo "Error: Contract artifact '$contract_artifact' does not exist." + return 1 + fi + + # Get the sender address + local senderAddr=$(cast wallet address "$private_key") + if [[ $? -ne 0 ]]; then + echo "Error: Failed to retrieve sender address." + return 1 + fi + + echo "Attempting to deploy contract artifact '$contract_artifact' to $rpc_url (sender: $senderAddr)" >&3 + + # Get bytecode from the contract artifact + local bytecode=$(jq -r .bytecode "$contract_artifact") + if [[ -z "$bytecode" || "$bytecode" == "null" ]]; then + echo "Error: Failed to read bytecode from $contract_artifact" + return 1 + fi + + # Send the transaction and capture the output + local cast_output=$(cast send --rpc-url "$rpc_url" \ + --private-key "$private_key" \ + --legacy \ + --create "$bytecode" \ + 2>&1) + + # Check if cast send was successful + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction." + echo "$cast_output" + return 1 + fi + + echo "Deploy contract output:" >&3 + echo "$cast_output" >&3 + + # Extract the contract address from the output + local deployed_contract_address=$(echo "$cast_output" | grep 'contractAddress' | sed 's/contractAddress\s\+//') + echo "Deployed contract address: $deployed_contract_address" >&3 + + if [[ -z "$deployed_contract_address" ]]; then + echo "Error: Failed to extract deployed contract address" + echo "$cast_output" + return 1 + fi + + if [[ ! "$deployed_contract_address" =~ ^0x[a-fA-F0-9]{40}$ ]]; then + echo "Error: Invalid contract address $deployed_contract_address" + return 1 + fi + + # Print contract address for return + echo "$deployed_contract_address" + + return 0 +} + +function sendTx() { + # Check if at least 3 arguments are provided + if [[ $# -lt 3 ]]; then + echo "Usage: sendTx <private_key> <account_addr> <value_or_function_sig> [<param1> <param2> ...]" + return 1 + fi + + local private_key="$1" # Sender private key + local account_addr="$2" # Receiver address + local value_or_function_sig="$3" # Value or function signature + + # Error handling: Ensure the receiver is a valid Ethereum address + if [[ ! "$account_addr" =~ ^0x[a-fA-F0-9]{40}$ ]]; then + echo "Error: Invalid receiver address '$account_addr'." + return 1 + fi + + shift 3 # Shift the first 3 arguments (private_key, account_addr, value_or_function_sig) + + local senderAddr + senderAddr=$(cast wallet address "$private_key") + if [[ $?
-ne 0 ]]; then + echo "Error: Failed to extract the sender address for $private_key" + return 1 + fi + + # Check if the first remaining argument is a numeric value (Ether to be transferred) + if [[ "$value_or_function_sig" =~ ^[0-9]+(ether)?$ ]]; then + # Case: EOA transaction (Ether transfer) + echo "Sending EOA transaction (RPC URL: $rpc_url, sender: $senderAddr) to: $account_addr " \ + "with value: $value_or_function_sig" >&3 + + cast_output=$(cast send --rpc-url "$rpc_url" \ + --private-key "$private_key" \ + "$account_addr" --value "$value_or_function_sig" \ + --legacy \ + 2>&1) + else + # Case: Smart contract transaction (contract interaction with function signature and parameters) + local params=("$@") # Collect all remaining arguments as function parameters + + echo "Function signature: '$value_or_function_sig'" >&3 + + # Verify if the function signature starts with "function" + if [[ ! "$value_or_function_sig" =~ ^function\ .+\(.+\)$ ]]; then + echo "Error: Invalid function signature format '$value_or_function_sig'." + return 1 + fi + + echo "Sending smart contract transaction (RPC URL: $rpc_url, sender: $senderAddr) to $account_addr" \ + "with function signature: '$value_or_function_sig' and params: ${params[*]}" >&3 + + # Send the smart contract interaction using cast + cast_output=$(cast send --rpc-url "$rpc_url" \ + --private-key "$private_key" \ + "$account_addr" "$value_or_function_sig" "${params[@]}" \ + --legacy \ + 2>&1) + fi + + # Check if the transaction was successful + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction. The cast send output:" + echo "$cast_output" + return 1 + fi + + # Extract the transaction hash from the output + local tx_hash=$(echo "$cast_output" | grep 'transactionHash' | awk '{print $2}' | tail -n 1) + echo "Tx hash: $tx_hash" + + if [[ -z "$tx_hash" ]]; then + echo "Error: Failed to extract transaction hash." + return 1 + fi + + echo "Transaction successful (transaction hash: $tx_hash)" + + return 0 +} + +function queryContract() { + local addr="$1" # Contract address + local funcSignature="$2" # Function signature + shift 2 # Shift past the first two arguments + local params=("$@") # Collect remaining arguments as parameters array + + echo "Querying state of $addr account (RPC URL: $rpc_url) with function signature: '$funcSignature' and params: ${params[*]}" >&3 + + # Check if rpc_url is available + if [[ -z "$rpc_url" ]]; then + echo "Error: rpc_url environment variable is not set." + return 1 + fi + + # Check if the contract address is valid + if [[ ! "$addr" =~ ^0x[a-fA-F0-9]{40}$ ]]; then + echo "Error: Invalid contract address '$addr'." + return 1 + fi + + # Call the contract using `cast call` + local result + result=$(cast call --rpc-url "$rpc_url" "$addr" "$funcSignature" "${params[@]}" 2>&1) + + # Check if the call was successful + if [[ $? -ne 0 ]]; then + echo "Error: Failed to query contract." 
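+    # cast call exits non-zero on both reverts and RPC failures, so the raw
+    # output is echoed below to surface the underlying reason to the caller.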
+ echo "$result" + return 1 + fi + + # Return the result (contract query response) + echo "$result" + + return 0 +} diff --git a/test/helpers/ethtxmanmock_e2e.go b/test/helpers/ethtxmanmock_e2e.go index b63ecc49..b6753c22 100644 --- a/test/helpers/ethtxmanmock_e2e.go +++ b/test/helpers/ethtxmanmock_e2e.go @@ -20,8 +20,17 @@ func NewEthTxManMock( client *simulated.Backend, auth *bind.TransactOpts, ) *EthTxManagerMock { + t.Helper() + + const ( + ArgToIndex = 1 + ArgDataIndex = 4 + ZeroValue = 0 + ) + ethTxMock := NewEthTxManagerMock(t) - ethTxMock.On("Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + ethTxMock.On( + "Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Run(func(args mock.Arguments) { ctx := context.Background() nonce, err := client.Client().PendingNonceAt(ctx, auth.From) @@ -31,17 +40,17 @@ func NewEthTxManMock( } gas, err := client.Client().EstimateGas(ctx, ethereum.CallMsg{ From: auth.From, - To: args.Get(1).(*common.Address), - Value: big.NewInt(0), - Data: args.Get(4).([]byte), + To: args.Get(ArgToIndex).(*common.Address), + Value: big.NewInt(ZeroValue), + Data: args.Get(ArgDataIndex).([]byte), }) if err != nil { log.Error(err) res, err := client.Client().CallContract(ctx, ethereum.CallMsg{ From: auth.From, - To: args.Get(1).(*common.Address), - Value: big.NewInt(0), - Data: args.Get(4).([]byte), + To: args.Get(ArgToIndex).(*common.Address), + Value: big.NewInt(ZeroValue), + Data: args.Get(ArgDataIndex).([]byte), }, nil) log.Debugf("contract call: %s", res) if err != nil { @@ -53,11 +62,22 @@ func NewEthTxManMock( if err != nil { log.Error(err) } + + to, ok := args.Get(ArgToIndex).(*common.Address) + if !ok { + log.Error("expected *common.Address for ArgToIndex") + return + } + data, ok := args.Get(ArgDataIndex).([]byte) + if !ok { + log.Error("expected []byte for ArgDataIndex") + return + } tx := types.NewTx(&types.LegacyTx{ - To: args.Get(1).(*common.Address), + To: to, Nonce: nonce, - Value: big.NewInt(0), - Data: args.Get(4).([]byte), + Value: big.NewInt(ZeroValue), + Data: data, Gas: gas, GasPrice: price, }) diff --git a/test/helpers/lxly-bridge-test.bash b/test/helpers/lxly-bridge-test.bash new file mode 100644 index 00000000..bbaf45e1 --- /dev/null +++ b/test/helpers/lxly-bridge-test.bash @@ -0,0 +1,77 @@ +#!/usr/bin/env bash +# Error code reference https://hackmd.io/WwahVBZERJKdfK3BbKxzQQ +function deposit () { + readonly deposit_sig='bridgeAsset(uint32,address,uint256,address,bool,bytes)' + + if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then + echo "Checking the current ETH balance: " >&3 + cast balance -e --rpc-url $l1_rpc_url $current_addr >&3 + else + echo "Checking the current token balance for token at $token_addr: " >&3 + cast call --rpc-url $l1_rpc_url $token_addr 'balanceOf(address)(uint256)' $current_addr >&3 + fi + + echo "Attempting to deposit $amount wei to net $destination_net for token $token_addr" >&3 + + if [[ $dry_run == "true" ]]; then + cast calldata $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes + else + if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then + cast send --legacy --private-key $skey --value $amount --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes + else + cast send --legacy --private-key $skey --rpc-url $l1_rpc_url $bridge_addr $deposit_sig 
$destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes + fi + fi +} + +function claim() { + readonly claim_sig="claimAsset(bytes32[32],bytes32[32],uint256,bytes32,bytes32,uint32,address,uint32,address,uint256,bytes)" + readonly bridge_deposit_file=$(mktemp) + readonly claimable_deposit_file=$(mktemp) + echo "Getting full list of deposits" >&3 + curl -s "$bridge_api_url/bridges/$destination_addr?limit=100&offset=0" | jq '.' | tee $bridge_deposit_file + + echo "Looking for claimable deposits" >&3 + jq '[.deposits[] | select(.ready_for_claim == true and .claim_tx_hash == "" and .dest_net == '$destination_net')]' $bridge_deposit_file | tee $claimable_deposit_file + readonly claimable_count=$(jq '. | length' $claimable_deposit_file) + echo "Found $claimable_count claimable deposits" >&3 + + if [[ $claimable_count == 0 ]]; then + echo "We have no claimable deposits at this time" >&3 + exit 1 + fi + + echo "We have $claimable_count claimable deposits on network $destination_net. Let's get this party started." >&3 + readonly current_deposit=$(mktemp) + readonly current_proof=$(mktemp) + while read deposit_idx; do + echo "Starting claim for tx index: "$deposit_idx >&3 + echo "Deposit info:" >&3 + jq --arg idx $deposit_idx '.[($idx | tonumber)]' $claimable_deposit_file | tee $current_deposit >&3 + + curr_deposit_cnt=$(jq -r '.deposit_cnt' $current_deposit) + curr_network_id=$(jq -r '.network_id' $current_deposit) + curl -s "$bridge_api_url/merkle-proof?deposit_cnt=$curr_deposit_cnt&net_id=$curr_network_id" | jq '.' | tee $current_proof + + in_merkle_proof="$(jq -r -c '.proof.merkle_proof' $current_proof | tr -d '"')" + in_rollup_merkle_proof="$(jq -r -c '.proof.rollup_merkle_proof' $current_proof | tr -d '"')" + in_global_index=$(jq -r '.global_index' $current_deposit) + in_main_exit_root=$(jq -r '.proof.main_exit_root' $current_proof) + in_rollup_exit_root=$(jq -r '.proof.rollup_exit_root' $current_proof) + in_orig_net=$(jq -r '.orig_net' $current_deposit) + in_orig_addr=$(jq -r '.orig_addr' $current_deposit) + in_dest_net=$(jq -r '.dest_net' $current_deposit) + in_dest_addr=$(jq -r '.dest_addr' $current_deposit) + in_amount=$(jq -r '.amount' $current_deposit) + in_metadata=$(jq -r '.metadata' $current_deposit) + + if [[ $dry_run == "true" ]]; then + cast calldata $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata + cast call --rpc-url $l2_rpc_url $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata + else + cast send --legacy --rpc-url $l2_rpc_url --private-key $skey $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata + fi + + + done < <(seq 0 $((claimable_count - 1)) ) +} diff --git a/test/run-e2e.sh b/test/run-e2e.sh index a0db5d56..6a29e416 100755 --- a/test/run-e2e.sh +++ b/test/run-e2e.sh @@ -18,11 +18,8 @@ else echo "docker cdk:latest already exists" fi -$BASE_FOLDER/scripts/kurtosis_prepare_params_yml.sh "$KURTOSIS_FOLDER" "elderberry" "cdk-validium" +$BASE_FOLDER/scripts/kurtosis_prepare_params_yml.sh "$KURTOSIS_FOLDER" $DATA_AVAILABILITY_MODE [ $? 
-ne 0 ] && echo "Error preparing params.yml" && exit 1 kurtosis clean --all kurtosis run --enclave cdk-v1 --args-file $DEST_KURTOSIS_PARAMS_YML --image-download always $KURTOSIS_FOLDER -#[ $? -ne 0 ] && echo "Error running kurtosis" && exit 1 -echo "Waiting 10 minutes to get some verified batch...." -scripts/batch_verification_monitor.sh 0 600 diff --git a/test/scripts/env.sh b/test/scripts/env.sh index 184a85d4..b81c18a4 100644 --- a/test/scripts/env.sh +++ b/test/scripts/env.sh @@ -1,6 +1,7 @@ #!/bin/bash ### Common variables ENCLAVE=cdk-v1 +CDK_ERIGON_NODE_NAME=cdk-erigon-node-001 TMP_CDK_FOLDER=tmp/cdk DEST_KURTOSIS_PARAMS_YML=../$TMP_CDK_FOLDER/e2e-params.yml KURTOSIS_VERSION=develop diff --git a/test/scripts/kurtosis_prepare_params_yml.sh b/test/scripts/kurtosis_prepare_params_yml.sh index 66353606..aa57e272 100755 --- a/test/scripts/kurtosis_prepare_params_yml.sh +++ b/test/scripts/kurtosis_prepare_params_yml.sh @@ -6,18 +6,19 @@ if [ -z $DEST_KURTOSIS_PARAMS_YML ]; then exit 1 fi +# If the destination params file already exists, do nothing +if [ -f $DEST_KURTOSIS_PARAMS_YML ]; then + echo "Destination params file already exists" + exit 0 +fi + KURTOSIS_FOLDER=$1 if [ -z $KURTOSIS_FOLDER ]; then echo "Missing param Kurtosis Folder" exit 1 fi -FORK_NAME=$2 -if [ -z $FORK_NAME ]; then - echo "Missing param Fork Name" - exit 1 -fi -DATA_AVAILABILITY_MODE=$3 +DATA_AVAILABILITY_MODE=$2 if [ -z $DATA_AVAILABILITY_MODE ]; then echo "Missing param Data Availability Mode : [rollup, cdk-validium]" exit 1 @@ -25,5 +26,5 @@ fi mkdir -p $(dirname $DEST_KURTOSIS_PARAMS_YML) cp $KURTOSIS_FOLDER/params.yml $DEST_KURTOSIS_PARAMS_YML +yq -Y --in-place ".args.cdk_node_image = \"cdk\"" $DEST_KURTOSIS_PARAMS_YML yq -Y --in-place ".args.data_availability_mode = \"$DATA_AVAILABILITY_MODE\"" $DEST_KURTOSIS_PARAMS_YML -yq -Y --in-place ".args.zkevm_sequence_sender_image = \"cdk:latest\"" $DEST_KURTOSIS_PARAMS_YML diff --git a/translator/translator_impl.go b/translator/translator_impl.go index 1e1a2a6a..cd7fbc42 100644 --- a/translator/translator_impl.go +++ b/translator/translator_impl.go @@ -1,6 +1,6 @@ package translator -import "github.com/0xPolygonHermez/zkevm-synchronizer-l1/log" +import "github.com/0xPolygon/cdk/log" type TranslatorFullMatchRule struct { // If null match any context @@ -21,7 +21,9 @@ func (t *TranslatorFullMatchRule) Translate(contextName string, data string) str return t.NewString } -func NewTranslatorFullMatchRule(contextName *string, fullMatchString string, newString string) *TranslatorFullMatchRule { +func NewTranslatorFullMatchRule( + contextName *string, fullMatchString string, newString string, +) *TranslatorFullMatchRule { return &TranslatorFullMatchRule{ ContextName: contextName, FullMatchString: fullMatchString, @@ -30,11 +32,13 @@ func NewTranslatorFullMatchRule(contextName *string, fullMatchString string, new } type TranslatorImpl struct { + logger *log.Logger FullMatchRules []TranslatorFullMatchRule } -func NewTranslatorImpl() *TranslatorImpl { +func NewTranslatorImpl(logger *log.Logger) *TranslatorImpl { return &TranslatorImpl{ + logger: logger, FullMatchRules: []TranslatorFullMatchRule{}, } } @@ -43,7 +47,7 @@ func (t *TranslatorImpl) Translate(contextName string, data string) string { for _, rule := range t.FullMatchRules { if rule.Match(contextName, data) { translated := rule.Translate(contextName, data) - log.Debugf("Translated (ctxName=%s) %s to %s", contextName, data, translated) + t.logger.Debugf("Translated (ctxName=%s) %s to %s", contextName, data,
translated) return translated } } @@ -58,7 +62,8 @@ func (t *TranslatorImpl) AddConfigRules(cfg Config) { for _, v := range cfg.FullMatchRules { var contextName *string if v.ContextName != "" { - contextName = &v.ContextName + name := v.ContextName + contextName = &name } rule := NewTranslatorFullMatchRule(contextName, v.Old, v.New) t.AddRule(*rule) diff --git a/tree/appendonlytree.go b/tree/appendonlytree.go index 5b714bfb..20d22ec1 100644 --- a/tree/appendonlytree.go +++ b/tree/appendonlytree.go @@ -1,185 +1,114 @@ package tree import ( - "context" + "database/sql" + "errors" "fmt" - dbCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" - "github.com/ledgerwatch/erigon-lib/kv" ) // AppendOnlyTree is a tree where leaves are added sequentially (by index) type AppendOnlyTree struct { *Tree - lastLeftCache [DefaultHeight]common.Hash + lastLeftCache [types.DefaultHeight]common.Hash lastIndex int64 } // NewAppendOnlyTree creates a AppendOnlyTree -func NewAppendOnlyTree(ctx context.Context, db kv.RwDB, dbPrefix string) (*AppendOnlyTree, error) { +func NewAppendOnlyTree(db *sql.DB, dbPrefix string) *AppendOnlyTree { t := newTree(db, dbPrefix) - at := &AppendOnlyTree{Tree: t} - if err := at.initLastLeftCacheAndLastDepositCount(ctx); err != nil { - return nil, err + return &AppendOnlyTree{ + Tree: t, + // -1 is used to indicate no leafs, 0 means the first leaf is added (at index 0) and so on. + // In order to differentiate the "cache not initialised" we need any value smaller than -1 + lastIndex: -2, } - return at, nil } -// AddLeaves adds a list leaves into the tree. The indexes of the leaves must be consecutive, -// starting by the index of the last leaf added +1 -// It returns a function that must be called to rollback the changes done by this interaction -func (t *AppendOnlyTree) AddLeaves(tx kv.RwTx, leaves []Leaf) (func(), error) { - // Sanity check - if len(leaves) == 0 { - return func() {}, nil - } - - backupIndx := t.lastIndex - backupCache := [DefaultHeight]common.Hash{} - copy(backupCache[:], t.lastLeftCache[:]) - rollback := func() { - t.lastIndex = backupIndx - t.lastLeftCache = backupCache - } - - for _, leaf := range leaves { - if err := t.addLeaf(tx, leaf); err != nil { - return rollback, err - } - } - - return rollback, nil -} - -func (t *AppendOnlyTree) addLeaf(tx kv.RwTx, leaf Leaf) error { +func (t *AppendOnlyTree) AddLeaf(tx db.Txer, blockNum, blockPosition uint64, leaf types.Leaf) error { if int64(leaf.Index) != t.lastIndex+1 { - return fmt.Errorf( - "mismatched index. Expected: %d, actual: %d", - t.lastIndex+1, leaf.Index, - ) + // rebuild cache + if err := t.initCache(tx); err != nil { + return err + } + if int64(leaf.Index) != t.lastIndex+1 { + return fmt.Errorf( + "mismatched index. 
Expected: %d, actual: %d", + t.lastIndex+1, leaf.Index, + ) + } } // Calculate new tree nodes currentChildHash := leaf.Hash - newNodes := []treeNode{} - for h := uint8(0); h < DefaultHeight; h++ { - var parent treeNode + newNodes := []types.TreeNode{} + for h := uint8(0); h < types.DefaultHeight; h++ { + var parent types.TreeNode if leaf.Index&(1<<h) > 0 { // Add child to the right - parent = treeNode{ - left: t.lastLeftCache[h], - right: currentChildHash, - } + parent = newTreeNode(t.lastLeftCache[h], currentChildHash) } else { // Add child to the left - parent = treeNode{ - left: currentChildHash, - right: t.zeroHashes[h], - } + parent = newTreeNode(currentChildHash, t.zeroHashes[h]) // Update cache - // TODO: review this part of the logic, skipping ?optimizaton? - // from OG implementation t.lastLeftCache[h] = currentChildHash } - currentChildHash = parent.hash() + currentChildHash = parent.Hash newNodes = append(newNodes, parent) } // store root - t.storeRoot(tx, uint64(leaf.Index), currentChildHash) - root := currentChildHash - if err := tx.Put(t.rootTable, dbCommon.Uint64ToBytes(uint64(leaf.Index)), root[:]); err != nil { + if err := t.storeRoot(tx, types.Root{ + Hash: currentChildHash, + Index: leaf.Index, + BlockNum: blockNum, + BlockPosition: blockPosition, + }); err != nil { return err } + // store nodes if err := t.storeNodes(tx, newNodes); err != nil { return err } t.lastIndex++ + tx.AddRollbackCallback(func() { t.lastIndex-- }) return nil } -// GetRootByIndex returns the root of the tree as it was right after adding the leaf with index -func (t *AppendOnlyTree) GetRootByIndex(tx kv.Tx, index uint32) (common.Hash, error) { - return t.getRootByIndex(tx, uint64(index)) -} - -func (t *AppendOnlyTree) GetIndexByRoot(ctx context.Context, root common.Hash) (uint32, error) { - tx, err := t.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer tx.Rollback() - index, err := t.getIndexByRoot(tx, root) - return uint32(index), err -} - -// GetLastIndexAndRoot returns the last index and root added to the tree -func (t *AppendOnlyTree) GetLastIndexAndRoot(ctx context.Context) (uint32, common.Hash, error) { - tx, err := t.db.BeginRo(ctx) - if err != nil { - return 0, common.Hash{}, err - } - defer tx.Rollback() - i, root, err := t.getLastIndexAndRootWithTx(tx) - if err != nil { - return 0, common.Hash{}, err - } - if i == -1 { - return 0, common.Hash{}, ErrNotFound - } - return uint32(i), root, nil -} - -func (t *AppendOnlyTree) initLastLeftCacheAndLastDepositCount(ctx context.Context) error { - tx, err := t.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - - root, err := t.initLastIndex(tx) +func (t *AppendOnlyTree) initCache(tx db.Txer) error { + siblings := [types.DefaultHeight]common.Hash{} + lastRoot, err := t.getLastRootWithTx(tx) if err != nil { + if errors.Is(err, ErrNotFound) { + t.lastIndex = -1 + t.lastLeftCache = siblings + return nil + } return err } - return t.initLastLeftCache(tx, t.lastIndex, root) -} - -func (t *AppendOnlyTree) initLastIndex(tx kv.Tx) (common.Hash, error) { - lastIndex, root, err := t.getLastIndexAndRootWithTx(tx) - if err != nil { - return common.Hash{}, err - } - t.lastIndex = lastIndex - return root, nil -} - -func (t *AppendOnlyTree) initLastLeftCache(tx kv.Tx, lastIndex int64, lastRoot common.Hash) error { - siblings := [DefaultHeight]common.Hash{} - if lastIndex == -1 { - t.lastLeftCache = siblings - return nil - } - index := lastIndex - - currentNodeHash := lastRoot + t.lastIndex = int64(lastRoot.Index) + currentNodeHash := lastRoot.Hash + index := t.lastIndex // It starts in height-1 because 0 is the level of the leafs - for h := int(DefaultHeight - 1); h >= 0; h-- { + for h := int(types.DefaultHeight - 1); h >= 0; h-- { currentNode, err := t.getRHTNode(tx, currentNodeHash) if err != nil { return fmt.Errorf( - "error getting node %s from the RHT at height %d with root %s: %v", - currentNodeHash.Hex(), h, lastRoot.Hex(), err, + "error getting node %s from the RHT at height %d with root %s: %w", + currentNodeHash.Hex(), h, lastRoot.Hash.Hex(), err, ) } if currentNode == nil { return ErrNotFound } - siblings[h] = currentNode.left + siblings[h] = currentNode.Left if index&(1<<h) > 0 { - currentNodeHash = currentNode.right + currentNodeHash = currentNode.Right } else { - currentNodeHash = currentNode.left + currentNodeHash = currentNode.Left } } @@ -191,42 +120,3 @@ func (t *AppendOnlyTree) initLastLeftCache(tx kv.Tx, lastIndex int64, lastRoot c t.lastLeftCache = siblings return nil } - -// Reorg deletes all the data relevant from firstReorgedIndex (includded) and onwards -// and prepares the tree tfor being used as it was at firstReorgedIndex-1 -// It returns a function that must be called to rollback the changes done by this interaction -func (t *AppendOnlyTree) Reorg(tx kv.RwTx, firstReorgedIndex uint32) (func(), error) { - if t.lastIndex == -1 { - return func() {}, nil - } - // Clean root table - for i := firstReorgedIndex; i <= uint32(t.lastIndex); i++ { - if err := tx.Delete(t.rootTable, dbCommon.Uint64ToBytes(uint64(i))); err != nil { - return func() {}, err - } - } - - // Reset - root := common.Hash{} - if firstReorgedIndex > 0 { - rootBytes, err := tx.GetOne(t.rootTable, dbCommon.Uint64ToBytes(uint64(firstReorgedIndex)-1)) - if err != nil { - return func() {}, err - } - if rootBytes == nil { - return func() {}, ErrNotFound - } - root = common.Hash(rootBytes) - } - err := t.initLastLeftCache(tx, int64(firstReorgedIndex)-1, root) - if err != nil { - return func() {}, err - } - - // Note: not cleaning RHT, not worth it - backupLastIndex := t.lastIndex - t.lastIndex = int64(firstReorgedIndex) - 1 - return func() { - t.lastIndex = backupLastIndex - }, nil -} diff --git a/tree/migrations/migrations.go b/tree/migrations/migrations.go new file mode 100644 index 00000000..dd5847e7 --- /dev/null +++ b/tree/migrations/migrations.go @@ -0,0 +1,22 @@ +package migrations + +import ( + _ "embed" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/db/types" +) + +//go:embed tree0001.sql +var mig001 string + +var Migrations = []types.Migration{ + { + ID: "tree001", + SQL: mig001, + }, +} + +func RunMigrations(dbPath string) error { + return db.RunMigrations(dbPath, Migrations) +} diff --git a/tree/migrations/tree0001.sql b/tree/migrations/tree0001.sql new file mode 100644 index 00000000..f70d048e --- /dev/null +++ b/tree/migrations/tree0001.sql @@ -0,0 +1,17 @@ +-- +migrate Down +DROP TABLE IF EXISTS /*dbprefix*/root; +DROP TABLE IF EXISTS /*dbprefix*/rht; + +-- +migrate Up +CREATE TABLE /*dbprefix*/root ( + hash VARCHAR PRIMARY KEY, + position INTEGER NOT NULL, + block_num BIGINT NOT NULL, + block_position BIGINT NOT NULL +); + +CREATE TABLE /*dbprefix*/rht ( + hash VARCHAR PRIMARY KEY, + left VARCHAR NOT NULL, + right VARCHAR NOT NULL +); diff --git a/tree/testvectors/types.go b/tree/testvectors/types.go index 905718d8..27bc1abb 100644 --- a/tree/testvectors/types.go +++ b/tree/testvectors/types.go @@ -21,13 +21,13 @@ type DepositVectorRaw struct { } func (d *DepositVectorRaw) Hash() common.Hash { - origNet :=
make([]byte, 4) //nolint:gomnd + origNet := make([]byte, 4) //nolint:mnd binary.BigEndian.PutUint32(origNet, d.OriginNetwork) - destNet := make([]byte, 4) //nolint:gomnd + destNet := make([]byte, 4) //nolint:mnd binary.BigEndian.PutUint32(destNet, d.DestinationNetwork) metaHash := keccak256.Hash(common.FromHex(d.Metadata)) - var buf [32]byte //nolint:gomnd + var buf [32]byte amount, _ := big.NewInt(0).SetString(d.Amount, 0) origAddrBytes := common.HexToAddress(d.TokenAddress) destAddrBytes := common.HexToAddress(d.DestinationAddress) diff --git a/tree/tree.go b/tree/tree.go index 77c0e452..2107ba68 100644 --- a/tree/tree.go +++ b/tree/tree.go @@ -2,134 +2,72 @@ package tree import ( "context" + "database/sql" "errors" "fmt" - "math" - dbCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" - "github.com/ledgerwatch/erigon-lib/kv" + "github.com/russross/meddler" "golang.org/x/crypto/sha3" ) -const ( - DefaultHeight uint8 = 32 - rootTableSufix = "-root" - rhtTableSufix = "-rht" - indexTableSufix = "-index" -) - var ( - EmptyProof = [32]common.Hash{} + EmptyProof = types.Proof{} ErrNotFound = errors.New("not found") ) -type Leaf struct { - Index uint32 - Hash common.Hash -} - type Tree struct { - db kv.RwDB + db *sql.DB + zeroHashes []common.Hash rhtTable string rootTable string - indexTable string - zeroHashes []common.Hash -} - -type treeNode struct { - left common.Hash - right common.Hash } -func (n *treeNode) hash() common.Hash { +func newTreeNode(left, right common.Hash) types.TreeNode { var hash common.Hash hasher := sha3.NewLegacyKeccak256() - hasher.Write(n.left[:]) - hasher.Write(n.right[:]) + hasher.Write(left[:]) + hasher.Write(right[:]) copy(hash[:], hasher.Sum(nil)) - return hash -} - -func (n *treeNode) MarshalBinary() ([]byte, error) { - return append(n.left[:], n.right[:]...), nil -} - -func (n *treeNode) UnmarshalBinary(data []byte) error { - if len(data) != 64 { - return fmt.Errorf("expected len %d, actual len %d", 64, len(data)) + return types.TreeNode{ + Hash: hash, + Left: left, + Right: right, } - n.left = common.Hash(data[:32]) - n.right = common.Hash(data[32:]) - return nil -} - -// AddTables add the needed tables for the tree to work in a tableCfg -func AddTables(tableCfg map[string]kv.TableCfgItem, dbPrefix string) { - rootTable := dbPrefix + rootTableSufix - rhtTable := dbPrefix + rhtTableSufix - indexTable := dbPrefix + indexTableSufix - tableCfg[rootTable] = kv.TableCfgItem{} - tableCfg[rhtTable] = kv.TableCfgItem{} - tableCfg[indexTable] = kv.TableCfgItem{} } -func newTree(db kv.RwDB, dbPrefix string) *Tree { - rootTable := dbPrefix + rootTableSufix - rhtTable := dbPrefix + rhtTableSufix - indexTable := dbPrefix + indexTableSufix +func newTree(db *sql.DB, tablePrefix string) *Tree { t := &Tree{ - rhtTable: rhtTable, - rootTable: rootTable, - indexTable: indexTable, db: db, - zeroHashes: generateZeroHashes(DefaultHeight), + zeroHashes: generateZeroHashes(types.DefaultHeight), + rhtTable: tablePrefix + "rht", + rootTable: tablePrefix + "root", } return t } -func (t *Tree) getRootByIndex(tx kv.Tx, index uint64) (common.Hash, error) { - rootBytes, err := tx.GetOne(t.rootTable, dbCommon.Uint64ToBytes(index)) - if err != nil { - return common.Hash{}, err - } - if rootBytes == nil { - return common.Hash{}, ErrNotFound - } - return common.BytesToHash(rootBytes), nil -} - -func (t *Tree) getIndexByRoot(tx kv.Tx, root common.Hash) (uint64, error) { - indexBytes, err := 
tx.GetOne(t.indexTable, root[:])
-	if err != nil {
-		return 0, err
-	}
-	if indexBytes == nil {
-		return 0, ErrNotFound
-	}
-	return dbCommon.BytesToUint64(indexBytes), nil
-}
-
-func (t *Tree) getSiblings(tx kv.Tx, index uint32, root common.Hash) (
+func (t *Tree) getSiblings(tx db.Querier, index uint32, root common.Hash) (
 	siblings [32]common.Hash,
 	hasUsedZeroHashes bool,
 	err error,
 ) {
 	currentNodeHash := root
 	// It starts in height-1 because 0 is the level of the leafs
-	for h := int(DefaultHeight - 1); h >= 0; h-- {
-		var currentNode *treeNode
+	for h := int(types.DefaultHeight - 1); h >= 0; h-- {
+		var currentNode *types.TreeNode
 		currentNode, err = t.getRHTNode(tx, currentNodeHash)
 		if err != nil {
-			if err == ErrNotFound {
+			if errors.Is(err, ErrNotFound) {
 				hasUsedZeroHashes = true
 				siblings[h] = t.zeroHashes[h]
 				err = nil
 				continue
 			} else {
 				err = fmt.Errorf(
-					"height: %d, currentNode: %s, error: %v",
+					"height: %d, currentNode: %s, error: %w",
 					h, currentNodeHash.Hex(), err,
 				)
 				return
@@ -157,11 +95,11 @@ func (t *Tree) getSiblings(tx kv.Tx, index uint32, root common.Hash) (
 	 * Now, let's do AND operation => 100&100=100 which is higher than 0 so we need the left sibling (O5)
 	 */
 		if index&(1<<h) > 0 {
-			siblings[h] = currentNode.left
-			currentNodeHash = currentNode.right
+			siblings[h] = currentNode.Left
+			currentNodeHash = currentNode.Right
 		} else {
-			siblings[h] = currentNode.right
-			currentNodeHash = currentNode.left
+			siblings[h] = currentNode.Right
+			currentNodeHash = currentNode.Left
 		}
 	}
@@ -169,32 +107,30 @@ func (t *Tree) getSiblings(tx kv.Tx, index uint32, root common.Hash) (
 }

 // GetProof returns the merkle proof for a given index and root.
-func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) ([DefaultHeight]common.Hash, error) {
-	tx, err := t.db.BeginRw(ctx)
+func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) (types.Proof, error) {
+	siblings, isErrNotFound, err := t.getSiblings(t.db, index, root)
 	if err != nil {
-		return [DefaultHeight]common.Hash{}, err
-	}
-	defer tx.Rollback()
-	siblings, isErrNotFound, err := t.getSiblings(tx, index, root)
-	if err != nil {
-		return [DefaultHeight]common.Hash{}, err
+		return types.Proof{}, err
 	}
 	if isErrNotFound {
-		return [DefaultHeight]common.Hash{}, ErrNotFound
+		return types.Proof{}, ErrNotFound
 	}
 	return siblings, nil
 }

-func (t *Tree) getRHTNode(tx kv.Tx, nodeHash common.Hash) (*treeNode, error) {
-	nodeBytes, err := tx.GetOne(t.rhtTable, nodeHash[:])
+func (t *Tree) getRHTNode(tx db.Querier, nodeHash common.Hash) (*types.TreeNode, error) {
+	node := &types.TreeNode{}
+	err := meddler.QueryRow(
+		tx, node,
+		fmt.Sprintf(`select * from %s where hash = $1`, t.rhtTable),
+		nodeHash.Hex(),
+	)
 	if err != nil {
-		return nil, err
-	}
-	if nodeBytes == nil {
-		return nil, ErrNotFound
+		if errors.Is(err, sql.ErrNoRows) {
+			return node, ErrNotFound
+		}
+		return node, err
 	}
-	node := &treeNode{}
-	err = node.UnmarshalBinary(nodeBytes)
 	return node, err
 }
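`GetProof` above returns the 32 siblings collected by `getSiblings`. A proof can be checked by hashing from the leaf back up to the root, picking the operand order with the same bit test and the keccak256 pairing used by `newTreeNode`. A hedged, standalone verifier sketch (`hashPair` and `verifyProof` are hypothetical names, not part of this PR):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"golang.org/x/crypto/sha3"
)

// hashPair mirrors newTreeNode: keccak256(left || right).
func hashPair(left, right common.Hash) common.Hash {
	hasher := sha3.NewLegacyKeccak256()
	hasher.Write(left[:])
	hasher.Write(right[:])
	var h common.Hash
	copy(h[:], hasher.Sum(nil))
	return h
}

// verifyProof recomputes the root from a leaf and its 32 siblings; bit h of
// the index says whether the running hash is the right or the left child.
func verifyProof(leaf common.Hash, index uint32, siblings [32]common.Hash, root common.Hash) bool {
	current := leaf
	for h := uint8(0); h < 32; h++ {
		if index&(1<<h) > 0 {
			current = hashPair(siblings[h], current)
		} else {
			current = hashPair(current, siblings[h])
		}
	}
	return current == root
}

func main() {
	// Sanity check against the zero-hash ladder: a zero leaf at index 0 of an
	// empty tree must verify against the empty-tree root.
	zero := make([]common.Hash, 33)
	for i := 1; i <= 32; i++ {
		zero[i] = hashPair(zero[i-1], zero[i-1])
	}
	var siblings [32]common.Hash
	copy(siblings[:], zero[:32])
	fmt.Println(verifyProof(common.Hash{}, 0, siblings, zero[32])) // true
}
```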
@@ -202,8 +138,9 @@ func generateZeroHashes(height uint8) []common.Hash {
 	var zeroHashes = []common.Hash{
 		{},
 	}
-	// This generates a leaf = HashZero in position 0. In the rest of the positions that are equivalent to the ascending levels,
-	// we set the hashes of the nodes. So all nodes from level i=5 will have the same value and same children nodes.
+	// This generates a leaf = HashZero in position 0. In the rest of the positions that are
+	// equivalent to the ascending levels, we set the hashes of the nodes.
+	// So all nodes from level i=5 will have the same value and same children nodes.
 	for i := 1; i <= int(height); i++ {
 		hasher := sha3.NewLegacyKeccak256()
 		hasher.Write(zeroHashes[i-1][:])
@@ -215,86 +152,101 @@ func generateZeroHashes(height uint8) []common.Hash {
 	return zeroHashes
 }

-func (t *Tree) storeNodes(tx kv.RwTx, nodes []treeNode) error {
-	for _, node := range nodes {
-		value, err := node.MarshalBinary()
-		if err != nil {
-			return err
-		}
-		if err := tx.Put(t.rhtTable, node.hash().Bytes(), value); err != nil {
+func (t *Tree) storeNodes(tx db.Txer, nodes []types.TreeNode) error {
+	for i := 0; i < len(nodes); i++ {
+		if err := meddler.Insert(tx, t.rhtTable, &nodes[i]); err != nil {
+			if sqliteErr, ok := db.SQLiteErr(err); ok {
+				if sqliteErr.ExtendedCode == db.UniqueConstrain {
+					// ignore repeated entries. This is expected because the
+					// RHT is not cleaned up after a reorg
+					continue
+				}
+			}
 			return err
 		}
 	}
 	return nil
 }

-func (t *Tree) storeRoot(tx kv.RwTx, rootIndex uint64, root common.Hash) error {
-	if err := tx.Put(t.rootTable, dbCommon.Uint64ToBytes(rootIndex), root[:]); err != nil {
-		return err
-	}
-	return tx.Put(t.indexTable, root[:], dbCommon.Uint64ToBytes(rootIndex))
+func (t *Tree) storeRoot(tx db.Txer, root types.Root) error {
+	return meddler.Insert(tx, t.rootTable, &root)
 }

 // GetLastRoot returns the last processed root
-func (t *Tree) GetLastRoot(ctx context.Context) (common.Hash, error) {
-	tx, err := t.db.BeginRo(ctx)
-	if err != nil {
-		return common.Hash{}, err
-	}
-	defer tx.Rollback()
+func (t *Tree) GetLastRoot(ctx context.Context) (types.Root, error) {
+	return t.getLastRootWithTx(t.db)
+}

-	i, root, err := t.getLastIndexAndRootWithTx(tx)
+func (t *Tree) getLastRootWithTx(tx db.Querier) (types.Root, error) {
+	var root types.Root
+	err := meddler.QueryRow(
+		tx, &root,
+		fmt.Sprintf(`SELECT * FROM %s ORDER BY block_num DESC, block_position DESC LIMIT 1;`, t.rootTable),
+	)
 	if err != nil {
-		return common.Hash{}, err
-	}
-	if i == -1 {
-		return common.Hash{}, ErrNotFound
+		if errors.Is(err, sql.ErrNoRows) {
+			return root, ErrNotFound
+		}
+		return root, err
 	}
 	return root, nil
 }
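`getLastRootWithTx` above relies on the `root` table from `tree0001.sql` holding one row per insertion, so the latest root is simply the row with the highest `(block_num, block_position)` pair. For readers unfamiliar with meddler, a rough `database/sql` equivalent might look like this (a sketch only; it assumes an unprefixed `root` table and the `mattn/go-sqlite3` driver, and `lastRoot` is a hypothetical helper):

```go
package main

import (
	"database/sql"
	"errors"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

var errNotFound = errors.New("not found")

// lastRoot fetches the most recently inserted root without meddler.
func lastRoot(db *sql.DB) (hash string, position uint32, err error) {
	row := db.QueryRow(
		`SELECT hash, position FROM root ORDER BY block_num DESC, block_position DESC LIMIT 1`,
	)
	if err = row.Scan(&hash, &position); errors.Is(err, sql.ErrNoRows) {
		return "", 0, errNotFound
	}
	return hash, position, err
}

func main() {
	db, err := sql.Open("sqlite3", "file::memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// Minimal schema matching tree0001.sql, without the /*dbprefix*/ expansion.
	if _, err := db.Exec(`CREATE TABLE root (
		hash VARCHAR PRIMARY KEY, position INTEGER NOT NULL,
		block_num BIGINT NOT NULL, block_position BIGINT NOT NULL)`); err != nil {
		panic(err)
	}
	if _, err := db.Exec(`INSERT INTO root VALUES ('0x01', 0, 7, 0)`); err != nil {
		panic(err)
	}
	fmt.Println(lastRoot(db)) // 0x01 0 <nil>
}
```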
-// getLastIndexAndRootWithTx return the index and the root associated to the last leaf inserted.
-// If index == -1, it means no leaf added yet
-func (t *Tree) getLastIndexAndRootWithTx(tx kv.Tx) (int64, common.Hash, error) {
-	iter, err := tx.RangeDescend(
-		t.rootTable,
-		dbCommon.Uint64ToBytes(math.MaxUint64),
-		dbCommon.Uint64ToBytes(0),
-		1,
-	)
-	if err != nil {
-		return 0, common.Hash{}, err
+// GetRootByIndex returns the root associated to the index
+func (t *Tree) GetRootByIndex(ctx context.Context, index uint32) (types.Root, error) {
+	var root types.Root
+	if err := meddler.QueryRow(
+		t.db, &root,
+		fmt.Sprintf(`SELECT * FROM %s WHERE position = $1;`, t.rootTable),
+		index,
+	); err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return root, ErrNotFound
+		}
+		return root, err
 	}
+	return root, nil
+}

-	lastIndexBytes, rootBytes, err := iter.Next()
-	if err != nil {
-		return 0, common.Hash{}, err
-	}
-	if lastIndexBytes == nil {
-		return -1, common.Hash{}, nil
+// GetRootByHash returns the root associated to the hash
+func (t *Tree) GetRootByHash(ctx context.Context, hash common.Hash) (types.Root, error) {
+	var root types.Root
+	if err := meddler.QueryRow(
+		t.db, &root,
+		fmt.Sprintf(`SELECT * FROM %s WHERE hash = $1;`, t.rootTable),
+		hash.Hex(),
+	); err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return root, ErrNotFound
+		}
+		return root, err
 	}
-	return int64(dbCommon.BytesToUint64(lastIndexBytes)), common.Hash(rootBytes), nil
+	return root, nil
 }

 func (t *Tree) GetLeaf(ctx context.Context, index uint32, root common.Hash) (common.Hash, error) {
-	tx, err := t.db.BeginRo(ctx)
-	if err != nil {
-		return common.Hash{}, err
-	}
-	defer tx.Rollback()
-
 	currentNodeHash := root
-	for h := int(DefaultHeight - 1); h >= 0; h-- {
-		currentNode, err := t.getRHTNode(tx, currentNodeHash)
+	for h := int(types.DefaultHeight - 1); h >= 0; h-- {
+		currentNode, err := t.getRHTNode(t.db, currentNodeHash)
 		if err != nil {
 			return common.Hash{}, err
 		}
 		if index&(1<<h) > 0 {
-			currentNodeHash = currentNode.right
+			currentNodeHash = currentNode.Right
 		} else {
-			currentNodeHash = currentNode.left
+			currentNodeHash = currentNode.Left
 		}
 	}
 	return currentNodeHash, nil
 }
+
+// Reorg deletes all the relevant data from firstReorgedBlock (included) onwards
+func (t *Tree) Reorg(tx db.Txer, firstReorgedBlock uint64) error {
+	_, err := tx.Exec(
+		fmt.Sprintf(`DELETE FROM %s WHERE block_num >= $1`, t.rootTable),
+		firstReorgedBlock,
+	)
+	return err
+	// NOTE: rht is not cleaned, this could be done in the future as optimization
+}
diff --git a/tree/tree_test.go b/tree/tree_test.go
index 3c27854f..b5278723 100644
--- a/tree/tree_test.go
+++ b/tree/tree_test.go
@@ -1,16 +1,20 @@
-package tree
+package tree_test

 import (
 	"context"
 	"encoding/json"
 	"fmt"
 	"os"
+	"path"
 	"testing"

+	"github.com/0xPolygon/cdk/db"
+	"github.com/0xPolygon/cdk/log"
+	"github.com/0xPolygon/cdk/tree"
+	"github.com/0xPolygon/cdk/tree/migrations"
 	"github.com/0xPolygon/cdk/tree/testvectors"
+	"github.com/0xPolygon/cdk/tree/types"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ledgerwatch/erigon-lib/kv"
-	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
 	"github.com/stretchr/testify/require"
 )

@@ -25,59 +29,46 @@ func TestMTAddLeaf(t *testing.T) {

 	for ti, testVector := range mtTestVectors {
 		t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) {
-			path := t.TempDir()
-			dbPrefix := "foo"
-			tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg {
-				cfg := kv.TableCfg{}
-				AddTables(cfg, dbPrefix)
-				return cfg
-			}
-			db, err := mdbx.NewMDBX(nil).
-				Path(path).
-				WithTableCfg(tableCfgFunc).
- Open() + dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + log.Debug("DB created at: ", dbPath) + err := migrations.RunMigrations(dbPath) + require.NoError(t, err) + treeDB, err := db.NewSQLiteDB(dbPath) require.NoError(t, err) - tree, err := NewAppendOnlyTree(context.Background(), db, dbPrefix) + _, err = treeDB.Exec(`select * from root`) require.NoError(t, err) + merkletree := tree.NewAppendOnlyTree(treeDB, "") // Add exisiting leaves - leaves := []Leaf{} + tx, err := db.NewTx(ctx, treeDB) + require.NoError(t, err) for i, leaf := range testVector.ExistingLeaves { - leaves = append(leaves, Leaf{ + err = merkletree.AddLeaf(tx, uint64(i), 0, types.Leaf{ Index: uint32(i), Hash: common.HexToHash(leaf), }) + require.NoError(t, err) } - tx, err := db.BeginRw(ctx) - require.NoError(t, err) - _, err = tree.AddLeaves(tx, leaves) - require.NoError(t, err) require.NoError(t, tx.Commit()) if len(testVector.ExistingLeaves) > 0 { - txRo, err := tree.db.BeginRo(ctx) - require.NoError(t, err) - _, actualRoot, err := tree.getLastIndexAndRootWithTx(txRo) - txRo.Rollback() + root, err := merkletree.GetLastRoot(ctx) require.NoError(t, err) - require.Equal(t, common.HexToHash(testVector.CurrentRoot), actualRoot) + require.Equal(t, common.HexToHash(testVector.CurrentRoot), root.Hash) } // Add new bridge - tx, err = db.BeginRw(ctx) + tx, err = db.NewTx(ctx, treeDB) require.NoError(t, err) - _, err = tree.AddLeaves(tx, []Leaf{{ + err = merkletree.AddLeaf(tx, uint64(len(testVector.ExistingLeaves)), 0, types.Leaf{ Index: uint32(len(testVector.ExistingLeaves)), Hash: common.HexToHash(testVector.NewLeaf.CurrentHash), - }}) + }) require.NoError(t, err) require.NoError(t, tx.Commit()) - txRo, err := tree.db.BeginRo(ctx) + root, err := merkletree.GetLastRoot(ctx) require.NoError(t, err) - _, actualRoot, err := tree.getLastIndexAndRootWithTx(txRo) - txRo.Rollback() - require.NoError(t, err) - require.Equal(t, common.HexToHash(testVector.NewRoot), actualRoot) + require.Equal(t, common.HexToHash(testVector.NewRoot), root.Hash) }) } } @@ -93,43 +84,30 @@ func TestMTGetProof(t *testing.T) { for ti, testVector := range mtTestVectors { t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { - path := t.TempDir() - dbPrefix := "foo" - tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg { - cfg := kv.TableCfg{} - AddTables(cfg, dbPrefix) - return cfg - } - db, err := mdbx.NewMDBX(nil). - Path(path). - WithTableCfg(tableCfgFunc). 
- Open() + dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + err := migrations.RunMigrations(dbPath) require.NoError(t, err) - tree, err := NewAppendOnlyTree(context.Background(), db, dbPrefix) + treeDB, err := db.NewSQLiteDB(dbPath) require.NoError(t, err) + tre := tree.NewAppendOnlyTree(treeDB, "") - leaves := []Leaf{} + tx, err := db.NewTx(ctx, treeDB) + require.NoError(t, err) for li, leaf := range testVector.Deposits { - leaves = append(leaves, Leaf{ + err = tre.AddLeaf(tx, uint64(li), 0, types.Leaf{ Index: uint32(li), Hash: leaf.Hash(), }) + require.NoError(t, err) } - tx, err := db.BeginRw(ctx) - require.NoError(t, err) - _, err = tree.AddLeaves(tx, leaves) - require.NoError(t, err) require.NoError(t, tx.Commit()) - txRo, err := tree.db.BeginRo(ctx) - require.NoError(t, err) - _, actualRoot, err := tree.getLastIndexAndRootWithTx(txRo) + root, err := tre.GetLastRoot(ctx) require.NoError(t, err) - txRo.Rollback() expectedRoot := common.HexToHash(testVector.ExpectedRoot) - require.Equal(t, expectedRoot, actualRoot) + require.Equal(t, expectedRoot, root.Hash) - proof, err := tree.GetProof(ctx, testVector.Index, expectedRoot) + proof, err := tre.GetProof(ctx, testVector.Index, expectedRoot) require.NoError(t, err) for i, sibling := range testVector.MerkleProof { require.Equal(t, common.HexToHash(sibling), proof[i]) diff --git a/tree/types/types.go b/tree/types/types.go new file mode 100644 index 00000000..bb117342 --- /dev/null +++ b/tree/types/types.go @@ -0,0 +1,27 @@ +package types + +import "github.com/ethereum/go-ethereum/common" + +const ( + DefaultHeight uint8 = 32 +) + +type Leaf struct { + Index uint32 + Hash common.Hash +} + +type Root struct { + Hash common.Hash `meddler:"hash,hash"` + Index uint32 `meddler:"position"` + BlockNum uint64 `meddler:"block_num"` + BlockPosition uint64 `meddler:"block_position"` +} + +type TreeNode struct { + Hash common.Hash `meddler:"hash,hash"` + Left common.Hash `meddler:"left,hash"` + Right common.Hash `meddler:"right,hash"` +} + +type Proof [DefaultHeight]common.Hash diff --git a/tree/updatabletree.go b/tree/updatabletree.go index 5c54deb1..3ed8b881 100644 --- a/tree/updatabletree.go +++ b/tree/updatabletree.go @@ -1,140 +1,68 @@ package tree import ( - "context" - "math" + "database/sql" + "errors" - dbCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" - "github.com/ledgerwatch/erigon-lib/kv" ) // UpdatableTree is a tree that have updatable leaves, and doesn't need to have sequential inserts type UpdatableTree struct { *Tree - lastRoot common.Hash } // NewUpdatableTree returns an UpdatableTree -func NewUpdatableTree(ctx context.Context, db kv.RwDB, dbPrefix string) (*UpdatableTree, error) { - // TODO: Load last root +func NewUpdatableTree(db *sql.DB, dbPrefix string) *UpdatableTree { t := newTree(db, dbPrefix) - tx, err := t.db.BeginRw(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - rootIndex, root, err := t.getLastIndexAndRootWithTx(tx) - if err != nil { - return nil, err - } - if rootIndex == -1 { - root = t.zeroHashes[DefaultHeight] - } ut := &UpdatableTree{ - Tree: t, - lastRoot: root, + Tree: t, } - return ut, nil + return ut } -// UpseartLeaves inserts or updates a list of leaves. The root index will be used to index the resulting -// root after performing all the operations. Root index must be greater than the last used root index, -// but doesn't need to be sequential. 
Great for relating block nums and roots :)
-// It returns a function that must be called to rollback the changes done by this interaction
-func (t *UpdatableTree) UpseartLeaves(tx kv.RwTx, leaves []Leaf, rootIndex uint64) (func(), error) {
-	if len(leaves) == 0 {
-		return func() {}, nil
-	}
-	rootBackup := t.lastRoot
-	rollback := func() {
-		t.lastRoot = rootBackup
-	}
-
-	for _, l := range leaves {
-		if err := t.upsertLeaf(tx, l); err != nil {
-			return rollback, err
+func (t *UpdatableTree) UpsertLeaf(tx db.Txer, blockNum, blockPosition uint64, leaf types.Leaf) error {
+	var rootHash common.Hash
+	root, err := t.getLastRootWithTx(tx)
+	if err != nil {
+		if errors.Is(err, ErrNotFound) {
+			rootHash = t.zeroHashes[types.DefaultHeight]
+		} else {
+			return err
 		}
+	} else {
+		rootHash = root.Hash
 	}
-
-	if err := t.storeRoot(tx, rootIndex, t.lastRoot); err != nil {
-		return rollback, err
-	}
-	return rollback, nil
-}
-
-func (t *UpdatableTree) upsertLeaf(tx kv.RwTx, leaf Leaf) error {
-	siblings, _, err := t.getSiblings(tx, leaf.Index, t.lastRoot)
+	siblings, _, err := t.getSiblings(tx, leaf.Index, rootHash)
 	if err != nil {
 		return err
 	}
 	currentChildHash := leaf.Hash
-	newNodes := []treeNode{}
-	for h := uint8(0); h < DefaultHeight; h++ {
-		var parent treeNode
+	newNodes := []types.TreeNode{}
+	for h := uint8(0); h < types.DefaultHeight; h++ {
+		var parent types.TreeNode
 		if leaf.Index&(1<<h) > 0 {
 			// Add child to the right
-			parent = treeNode{
-				left:  siblings[h],
-				right: currentChildHash,
-			}
+			parent = newTreeNode(siblings[h], currentChildHash)
 		} else {
 			// Add child to the left
-			parent = treeNode{
-				left:  currentChildHash,
-				right: siblings[h],
-			}
+			parent = newTreeNode(currentChildHash, siblings[h])
 		}
-		currentChildHash = parent.hash()
+		currentChildHash = parent.Hash
 		newNodes = append(newNodes, parent)
 	}
-
+	if err := t.storeRoot(tx, types.Root{
+		Hash:          currentChildHash,
+		Index:         leaf.Index,
+		BlockNum:      blockNum,
+		BlockPosition: blockPosition,
+	}); err != nil {
+		return err
+	}
 	if err := t.storeNodes(tx, newNodes); err != nil {
 		return err
 	}
-	t.lastRoot = currentChildHash
 	return nil
 }
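The reworked `UpsertLeaf` above reads its starting root from storage instead of the removed `lastRoot` field, so repeated upserts of the same index simply append newer root rows. A usage sketch assembled from the API shown elsewhere in this diff (`migrations.RunMigrations`, `db.NewSQLiteDB`, `db.NewTx`); treat it as a sketch under those assumptions, not canonical code:

```go
package main

import (
	"context"
	"os"
	"path/filepath"

	"github.com/0xPolygon/cdk/db"
	"github.com/0xPolygon/cdk/tree"
	"github.com/0xPolygon/cdk/tree/migrations"
	"github.com/0xPolygon/cdk/tree/types"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	ctx := context.Background()
	dbPath := filepath.Join(os.TempDir(), "cdk-updatable-example.sqlite")
	defer os.Remove(dbPath)

	if err := migrations.RunMigrations(dbPath); err != nil {
		panic(err)
	}
	treeDB, err := db.NewSQLiteDB(dbPath)
	if err != nil {
		panic(err)
	}
	ut := tree.NewUpdatableTree(treeDB, "")

	tx, err := db.NewTx(ctx, treeDB)
	if err != nil {
		panic(err)
	}
	// Same index, two values: the second upsert supersedes the first, each
	// producing its own root row tagged with (blockNum, blockPosition).
	if err := ut.UpsertLeaf(tx, 1, 0, types.Leaf{Index: 0, Hash: common.HexToHash("01")}); err != nil {
		panic(err)
	}
	if err := ut.UpsertLeaf(tx, 2, 0, types.Leaf{Index: 0, Hash: common.HexToHash("02")}); err != nil {
		panic(err)
	}
	if err := tx.Commit(); err != nil {
		panic(err)
	}

	root, err := ut.GetLastRoot(ctx)
	if err != nil {
		panic(err)
	}
	_ = root // the latest root now commits index 0 -> 0x...02
}
```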
-
-// Reorg deletes all the data relevant from firstReorgedIndex (includded) and onwards
-// and prepares the tree tfor being used as it was at firstReorgedIndex-1.
-// It returns a function that must be called to rollback the changes done by this interaction
-func (t *UpdatableTree) Reorg(tx kv.RwTx, firstReorgedIndex uint64) (func(), error) {
-	iter, err := tx.RangeDescend(
-		t.rootTable,
-		dbCommon.Uint64ToBytes(math.MaxUint64),
-		dbCommon.Uint64ToBytes(0),
-		0,
-	)
-	if err != nil {
-		return func() {}, err
-	}
-	rootBackup := t.lastRoot
-	rollback := func() {
-		t.lastRoot = rootBackup
-	}
-
-	for lastIndexBytes, rootBytes, err := iter.Next(); lastIndexBytes != nil; lastIndexBytes, rootBytes, err = iter.Next() {
-		if err != nil {
-			return rollback, err
-		}
-
-		if dbCommon.BytesToUint64(lastIndexBytes) >= firstReorgedIndex {
-			if err := tx.Delete(t.rootTable, lastIndexBytes); err != nil {
-				return rollback, err
-			}
-		} else {
-			t.lastRoot = common.Hash(rootBytes)
-			return rollback, nil
-		}
-	}
-
-	// no root found after reorg, going back to empty tree
-	t.lastRoot = t.zeroHashes[DefaultHeight]
-	return rollback, nil
-}
-
-// GetRootByRootIndex returns the root of the tree as it was right after adding the leaf with index
-func (t *UpdatableTree) GetRootByRootIndex(tx kv.Tx, rootIndex uint64) (common.Hash, error) {
-	return t.getRootByIndex(tx, rootIndex)
-}
diff --git a/version.go b/version.go
index 2dbd8a0c..ba83eaf5 100644
--- a/version.go
+++ b/version.go
@@ -1,4 +1,4 @@
-package zkevm
+package cdk

 import (
 	"fmt"
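Taken together, the move from MDBX to SQLite plus meddler reduces typical append-only usage to: run migrations, open the database, append leaves inside a transaction, then query roots and proofs. An end-to-end sketch mirroring the updated `tree_test.go` above (error handling abbreviated; the temp-file path is an assumption):

```go
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/0xPolygon/cdk/db"
	"github.com/0xPolygon/cdk/tree"
	"github.com/0xPolygon/cdk/tree/migrations"
	"github.com/0xPolygon/cdk/tree/types"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	ctx := context.Background()
	dbPath := filepath.Join(os.TempDir(), "cdk-append-only-example.sqlite")
	defer os.Remove(dbPath)

	if err := migrations.RunMigrations(dbPath); err != nil {
		panic(err)
	}
	treeDB, err := db.NewSQLiteDB(dbPath)
	if err != nil {
		panic(err)
	}
	aot := tree.NewAppendOnlyTree(treeDB, "")

	// Leaves must be appended with sequential indexes, inside a write tx.
	tx, err := db.NewTx(ctx, treeDB)
	if err != nil {
		panic(err)
	}
	for i := uint32(0); i < 3; i++ {
		leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("%02x", i+1))}
		if err := aot.AddLeaf(tx, uint64(i), 0, leaf); err != nil {
			panic(err)
		}
	}
	if err := tx.Commit(); err != nil {
		panic(err)
	}

	root, err := aot.GetLastRoot(ctx)
	if err != nil {
		panic(err)
	}
	proof, err := aot.GetProof(ctx, 2, root.Hash)
	if err != nil {
		panic(err)
	}
	fmt.Println(root.Hash, len(proof)) // root after 3 leaves, 32 siblings
}
```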