diff --git a/.config/zepter.yaml b/.config/zepter.yaml new file mode 100644 index 0000000000..8c6425f4ff --- /dev/null +++ b/.config/zepter.yaml @@ -0,0 +1,40 @@ +version: + format: 1 + # Minimum zepter version that is expected to work. This is just for printing a nice error + # message when someone tries to use an older version. + binary: 0.13.2 + +# The examples in the following comments assume crate `A` to have a dependency on crate `B`. +workflows: + check: + - [ + "lint", + # Check that `A` activates the features of `B`. + "propagate-feature", + # These are the features to check: + "--features=std,optimism,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench", + # Do not try to add a new section into `[features]` of `A` only because `B` exposes that feature. There are edge cases where this is still needed, but we can add them manually. + "--left-side-feature-missing=ignore", + # Ignore the case that `A` is outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. + "--left-side-outside-workspace=ignore", + # Auxiliary flags: + "--offline", + "--locked", + "--show-path", + "--quiet", + ] + default: + # Running `zepter` with no subcommand will check & fix. + - [$check.0, "--fix"] + +# Will be displayed when any workflow fails: +help: + text: | + Reth uses the Zepter CLI to detect abnormalities in Cargo features, e.g. missing propagation. + + It looks like one or more checks failed; please check the console output. + + You can try to automatically address them by installing zepter (`cargo install zepter --locked`) and simply running `zepter` in the workspace root. + links: + - "https://github.com/paradigmxyz/reth/pull/11888" + - "https://github.com/ggwpez/zepter" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index afb3d67763..488e6c90cf 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -43,6 +43,6 @@ crates/tasks/ @mattsse crates/tokio-util/ @fgimenez @emhane crates/tracing/ @onbjerg crates/transaction-pool/ @mattsse -crates/trie/ @rkrasiuk @Rjected +crates/trie/ @rkrasiuk @Rjected @shekhirin etc/ @Rjected @onbjerg @shekhirin .github/ @onbjerg @gakonst @DaniPopes diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 7630fcc938..b13112951d 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -5,7 +5,10 @@ tool_chain=$1 # Array of crates to compile crates=($(cargo metadata --format-version=1 --no-deps | jq -r '.packages[].name' | grep '^reth' | sort)) + # Array of crates to exclude +# Used with the `contains` function. 
+# shellcheck disable=SC2034 exclude_crates=( # The following are not working yet, but known to be fixable reth-exex-types # https://github.com/paradigmxyz/reth/issues/9946 @@ -32,12 +35,9 @@ exclude_crates=( reth-engine-util reth-eth-wire reth-ethereum-cli - reth-ethereum-engine reth-ethereum-engine-primitives reth-ethereum-payload-builder reth-etl - reth-evm-ethereum - reth-execution-errors reth-exex reth-exex-test-utils reth-ipc @@ -51,7 +51,6 @@ exclude_crates=( reth-node-events reth-node-metrics reth-optimism-cli - reth-optimism-evm reth-optimism-node reth-optimism-payload-builder reth-optimism-rpc @@ -65,9 +64,7 @@ exclude_crates=( reth-rpc-eth-api reth-rpc-eth-types reth-rpc-layer - reth-rpc-types reth-stages - reth-storage-errors reth-engine-local # The following are not supposed to be working reth # all of the crates below diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index 7a212a51dd..ec7bd05490 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -20,7 +20,7 @@ rpc-compat: - eth_getBlockByNumber/get-latest (reth) - eth_getBlockByNumber/get-safe (reth) - # https://github.com/paradigmxyz/reth/issues/8732 +# https://github.com/paradigmxyz/reth/issues/8732 engine-withdrawals: - Withdrawals Fork On Genesis (Paris) (reth) - Withdrawals Fork on Block 1 (Paris) (reth) @@ -41,17 +41,7 @@ engine-withdrawals: - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org (Paris) (reth) - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org Sync (Paris) (reth) -# https://github.com/paradigmxyz/reth/issues/8305 -# https://github.com/paradigmxyz/reth/issues/6217 -engine-api: - - Inconsistent Head in ForkchoiceState (Paris) (reth) - - Invalid NewPayload, StateRoot, Syncing=True, EmptyTxs=True, DynFeeTxs=False (Paris) (reth) - - Invalid NewPayload, StateRoot, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Paris) (reth) - - Invalid NewPayload, PrevRandao, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Paris) (reth) - - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=True, CanonicalReOrg=False, Invalid P9 (Paris) (reth) - - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) - - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=True, CanonicalReOrg=True, Invalid P9 (Paris) (reth) - - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) +engine-api: [] # https://github.com/paradigmxyz/reth/issues/8305 # https://github.com/paradigmxyz/reth/issues/6217 @@ -59,18 +49,11 @@ engine-api: # https://github.com/paradigmxyz/reth/issues/7144 engine-cancun: - Blob Transaction Ordering, Multiple Clients (Cancun) (reth) - - Inconsistent Head in ForkchoiceState (Cancun) (reth) - - Invalid NewPayload, StateRoot, Syncing=True, EmptyTxs=True, DynFeeTxs=False (Cancun) (reth) - - Invalid NewPayload, StateRoot, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - - Invalid NewPayload, PrevRandao, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=True, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) - - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) - - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=True, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) - - Invalid Missing 
Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) - Invalid PayloadAttributes, Missing BeaconRoot, Syncing=True (Cancun) (reth) - - Invalid NewPayload, ParentBeaconBlockRoot, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - Invalid NewPayload, ExcessBlobGas, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, VersionedHashes Version, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, Incomplete VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, Extra VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) -# https://github.com/paradigmxyz/reth/issues/8579 -sync: - - sync reth -> reth +sync: [] diff --git a/.github/assets/hive/expected_failures_experimental.yaml b/.github/assets/hive/expected_failures_experimental.yaml deleted file mode 100644 index d4b3d2bcbd..0000000000 --- a/.github/assets/hive/expected_failures_experimental.yaml +++ /dev/null @@ -1,63 +0,0 @@ -# https://github.com/paradigmxyz/reth/issues/7015 -# https://github.com/paradigmxyz/reth/issues/6332 -rpc-compat: - - debug_getRawBlock/get-invalid-number (reth) - - debug_getRawHeader/get-invalid-number (reth) - - debug_getRawReceipts/get-invalid-number (reth) - - debug_getRawTransaction/get-invalid-hash (reth) - - - eth_call/call-callenv (reth) - - eth_feeHistory/fee-history (reth) - - eth_getStorageAt/get-storage-invalid-key-too-large (reth) - - eth_getStorageAt/get-storage-invalid-key (reth) - - eth_getTransactionReceipt/get-access-list (reth) - - eth_getTransactionReceipt/get-blob-tx (reth) - - eth_getTransactionReceipt/get-dynamic-fee (reth) - - eth_getBlockByHash/get-block-by-hash (reth) - - eth_getBlockByNumber/get-block-n (reth) - - eth_getBlockByNumber/get-finalized (reth) - - eth_getBlockByNumber/get-genesis (reth) - - eth_getBlockByNumber/get-latest (reth) - - eth_getBlockByNumber/get-safe (reth) - -# https://github.com/paradigmxyz/reth/issues/8732 -engine-withdrawals: - - Withdrawals Fork On Genesis (Paris) (reth) - - Withdrawals Fork on Block 1 (Paris) (reth) - - Withdrawals Fork on Block 2 (Paris) (reth) - - Withdrawals Fork on Block 3 (Paris) (reth) - - Withdraw to a single account (Paris) (reth) - - Withdraw to two accounts (Paris) (reth) - - Withdraw many accounts (Paris) (reth) - - Withdraw zero amount (Paris) (reth) - - Empty Withdrawals (Paris) (reth) - - Corrupted Block Hash Payload (INVALID) (Paris) (reth) - - Withdrawals Fork on Block 1 - 8 Block Re-Org NewPayload (Paris) (reth) - - Withdrawals Fork on Block 1 - 8 Block Re-Org, Sync (Paris) (reth) - - Withdrawals Fork on Block 8 - 10 Block Re-Org NewPayload (Paris) (reth) - - Withdrawals Fork on Block 8 - 10 Block Re-Org Sync (Paris) (reth) - - Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org (Paris) (reth) - - Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org Sync (Paris) (reth) - - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org (Paris) (reth) - - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org Sync (Paris) (reth) - -# https://github.com/paradigmxyz/reth/issues/8305 -# https://github.com/paradigmxyz/reth/issues/6217 -engine-api: [] - -# https://github.com/paradigmxyz/reth/issues/8305 -# https://github.com/paradigmxyz/reth/issues/6217 -# 
https://github.com/paradigmxyz/reth/issues/8306 -# https://github.com/paradigmxyz/reth/issues/7144 -engine-cancun: - - Blob Transaction Ordering, Multiple Clients (Cancun) (reth) - - Invalid PayloadAttributes, Missing BeaconRoot, Syncing=True (Cancun) (reth) - - Invalid NewPayload, ExcessBlobGas, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - - Invalid NewPayload, VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - - Invalid NewPayload, VersionedHashes Version, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - - Invalid NewPayload, Incomplete VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - - Invalid NewPayload, Extra VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - -# https://github.com/paradigmxyz/reth/issues/8579 -sync: - - sync reth -> reth diff --git a/.github/workflows/assertoor.yml b/.github/workflows/assertoor.yml deleted file mode 100644 index a5028f7ff3..0000000000 --- a/.github/workflows/assertoor.yml +++ /dev/null @@ -1,230 +0,0 @@ -name: Assertoor Tests - -on: - workflow_dispatch: - schedule: - - cron: '0 0 * * *' - -jobs: - get_tests: - name: "Run assertoor tests on reth pairs" - runs-on: ubuntu-latest - outputs: - test_result: ${{ steps.test_result.outputs.test_result }} - test_status: ${{ steps.test_result.outputs.test_status }} - failed_test_status: ${{ steps.test_result.outputs.failed_test_status }} - if: github.repository == 'paradigmxyz/reth' - steps: - - name: Checkout Repository - uses: actions/checkout@v4 - - name: Setup Kurtosis - shell: bash - run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list - sudo apt update - sudo apt install kurtosis-cli - kurtosis analytics disable - - - name: Run Kurtosis - shell: bash - id: services - run: | - export github_sha=${{ github.sha }} - export github_repository=${{ github.repository }} - - cat etc/assertoor/assertoor-template.yaml | envsubst > etc/assertoor/assertoor.yaml - - kurtosis run github.com/ethpandaops/ethereum-package --enclave assertoor-${{ github.run_id }} --args-file etc/assertoor/assertoor.yaml - - enclave_dump=$(kurtosis enclave inspect assertoor-${{ github.run_id }}) - - assertoor_url=$(echo "$enclave_dump" | grep assertoor | grep http | sed 's/.*\(http:\/\/[0-9.:]\+\).*/\1/') - echo "assertoor_url: ${assertoor_url}" - echo "assertoor_url=${assertoor_url}" >> $GITHUB_OUTPUT - - - name: Await test completion - shell: bash - id: test_result - run: | - assertoor_url="${{ steps.services.outputs.assertoor_url }}" - - YELLOW='\033[1;33m' - GRAY='\033[0;37m' - GREEN='\033[0;32m' - RED='\033[0;31m' - NC='\033[0m' - - # print assertor logs - assertoor_container=$(docker container list | grep assertoor | sed 's/^\([^ ]\+\) .*$/\1/') - docker logs -f $assertoor_container & - - # helper to fetch task status for specific test id - get_tasks_status() { - tasks=$(curl -s ${assertoor_url}/api/v1/test_run/$1 | jq -c ".data.tasks[] | {index, parent_index, name, title, status, result}") - declare -A task_graph_map - task_graph_map[0]="" - - while read task; do - task_id=$(echo "$task" | jq -r ".index") - task_parent=$(echo "$task" | jq -r ".parent_index") - task_name=$(echo "$task" | jq -r ".name") - task_title=$(echo "$task" | jq -r ".title") - task_status=$(echo "$task" | jq -r ".status") - task_result=$(echo "$task" | jq -r ".result") - - task_graph="${task_graph_map[$task_parent]}" - task_graph_map[$task_id]="$task_graph |" - if [ ! 
-z "$task_graph" ]; then - task_graph="${task_graph}- " - fi - - if [ "$task_status" == "pending" ]; then - task_status="${GRAY}pending ${NC}" - elif [ "$task_status" == "running" ]; then - task_status="${YELLOW}running ${NC}" - elif [ "$task_status" == "complete" ]; then - task_status="${GREEN}complete${NC}" - fi - - if [ "$task_result" == "none" ]; then - task_result="${GRAY}none ${NC}" - elif [ "$task_result" == "success" ]; then - task_result="${GREEN}success${NC}" - elif [ "$task_result" == "failure" ]; then - task_result="${RED}failure${NC}" - fi - - echo -e " $(printf '%-4s' "$task_id")\t$task_status\t$task_result\t$(printf '%-50s' "$task_graph$task_name") \t$task_title" - done <<< $(echo "$tasks") - } - - # poll & check test status - final_test_result="" - failed_test_id="" - while true - do - pending_tests=0 - failed_tests=0 - total_tests=0 - running_test="" - - status_lines=() - task_lines="" - status_lines+=("$(date +'%Y-%m-%d %H:%M:%S') Test Status:") - - tests=$(curl -s ${assertoor_url}/api/v1/test_runs | jq -c ".data[] | {run_id, test_id, name, status}") - while read test; do - if [ -z "$test" ]; then - continue - fi - run_id=$(echo "$test" | jq -r ".run_id") - test_id=$(echo "$test" | jq -r ".test_id") - test_name=$(echo "$test" | jq -r ".name") - test_status=$(echo "$test" | jq -r ".status") - - if [ "$test_status" == "pending" ]; then - pending_tests=$(expr $pending_tests + 1) - status_name="${GRAY}pending${NC}" - elif [ "$test_status" == "running" ]; then - pending_tests=$(expr $pending_tests + 1) - running_test="$run_id" - status_name="${YELLOW}running${NC}" - - elif [ "$test_status" == "success" ]; then - status_name="${GREEN}success${NC}" - elif [ "$test_status" == "failure" ]; then - failed_tests=$(expr $failed_tests + 1) - failed_test_id="$run_id" - status_name="${RED}failure${NC}" - else - status_name="$test_status" - fi - status_lines+=(" $(printf '%-3s' "$test_id") $status_name \t$test_name") - total_tests=$(expr $total_tests + 1) - done <<< $(echo "$tests") - - for status_line in "${status_lines[@]}" - do - echo -e "$status_line" - done - - if ! [ -z "$running_test" ]; then - task_lines=$(get_tasks_status "$running_test") - echo "Active Test Task Status:" - echo "$task_lines" - fi - - if [ $failed_tests -gt 0 ]; then - final_test_result="failure" - break - fi - if [ $total_tests -gt 0 ] && [ $pending_tests -le 0 ]; then - final_test_result="success" - break - fi - - sleep 60 - done - - # save test results & status to github output - echo "test_result=$(echo "$final_test_result")" >> $GITHUB_OUTPUT - echo "test_status<> $GITHUB_OUTPUT - for status_line in "${status_lines[@]}" - do - echo -e "$status_line" >> $GITHUB_OUTPUT - done - echo "EOF" >> $GITHUB_OUTPUT - - if ! 
[ -z "$failed_test_id" ]; then - echo "failed_test_status<> $GITHUB_OUTPUT - get_tasks_status "$failed_test_id" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - else - echo "failed_test_status=" >> $GITHUB_OUTPUT - fi - - - name: Generate dump and remove kurtosis enclave - shell: bash - run: | - mkdir -p ./temp/dump - cd ./temp/dump - cp ../../etc/assertoor/assertoor.yaml ./kurtosis-params.yaml - - kurtosis enclave dump assertoor-${{ github.run_id }} - kurtosis enclave rm -f assertoor-${{ github.run_id }} - - - name: Upload dump artifact - uses: actions/upload-artifact@v4 - with: - name: "kurtosis-enclave-dump-${{ github.run_id }}" - path: ./temp/dump - - - name: Return test result - shell: bash - run: | - test_result="${{ steps.test_result.outputs.test_result }}" - test_status=$( - cat <<"EOF" - ${{ steps.test_result.outputs.test_status }} - EOF - ) - failed_test_status=$( - cat <<"EOF" - ${{ steps.test_result.outputs.failed_test_status }} - EOF - ) - - echo "Test Result: $test_result" - echo "$test_status" - - if ! [ "$test_result" == "success" ]; then - echo "" - echo "Failed Test Task Status:" - echo "$failed_test_status" - - echo "" - echo "See 'Await test completion' task for detailed logs about this failure!" - echo "" - - exit 1 # fail action - fi diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index ab031da325..fc7b794ee5 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -10,7 +10,7 @@ env: CARGO_TERM_COLOR: always BASELINE: base IAI_CALLGRIND_RUNNER: iai-callgrind-runner - TOOL_CHAIN: "1.81" + TOOL_CHAIN: "1.82" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} diff --git a/.github/workflows/build-check.yml b/.github/workflows/build-check.yml index 52be427ffc..0f80292ba5 100644 --- a/.github/workflows/build-check.yml +++ b/.github/workflows/build-check.yml @@ -6,7 +6,7 @@ on: env: CARGO_TERM_COLOR: always - TOOL_CHAIN: "1.81" + TOOL_CHAIN: "1.82" jobs: extract-version: diff --git a/.github/workflows/dependencies.yml b/.github/workflows/dependencies.yml index da3574329b..cfe38de4e8 100644 --- a/.github/workflows/dependencies.yml +++ b/.github/workflows/dependencies.yml @@ -10,7 +10,7 @@ on: # Needed so we can run it manually env: - TOOL_CHAIN: "1.81" + TOOL_CHAIN: "1.82" GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} BRANCH: cargo-update TITLE: "chore(deps): weekly `cargo update`" diff --git a/.github/workflows/docker-git.yml b/.github/workflows/docker-git.yml new file mode 100644 index 0000000000..2e3ad59aea --- /dev/null +++ b/.github/workflows/docker-git.yml @@ -0,0 +1,44 @@ +# Publishes the Docker image, only to be used with `workflow_dispatch`. The +# images from this workflow will be tagged with the git sha of the branch used +# and will NOT tag it as `latest`. 
+ +name: docker-git + +on: + workflow_dispatch: {} + +env: + REPO_NAME: ${{ github.repository_owner }}/reth + IMAGE_NAME: ${{ github.repository_owner }}/reth + OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth + CARGO_TERM_COLOR: always + DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/reth + OP_DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/op-reth + DOCKER_USERNAME: ${{ github.actor }} + GIT_SHA: ${{ github.sha }} + +jobs: + build: + name: build and push + runs-on: ubuntu-20.04 + permissions: + packages: write + contents: read + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - uses: taiki-e/install-action@cross + - name: Log in to Docker + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin + - name: Set up Docker builder + run: | + docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 + docker buildx create --use --name cross-builder + - name: Build and push the git-sha-tagged reth image + run: make PROFILE=maxperf GIT_SHA=$GIT_SHA docker-build-push-git-sha + - name: Build and push the git-sha-tagged op-reth image + run: make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME GIT_SHA=$GIT_SHA PROFILE=maxperf op-docker-build-push-git-sha diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 9d1cf113c1..18a4ec6649 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -3,7 +3,7 @@ name: docker on: - workflow_dispatch: { } + workflow_dispatch: {} push: tags: - v* @@ -18,7 +18,7 @@ env: OP_DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/op-reth BSC_DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/bsc-reth DOCKER_USERNAME: ${{ github.actor }} - TOOL_CHAIN: "1.81" + TOOL_CHAIN: "1.82" jobs: build: diff --git a/.github/workflows/eth-sync.yml b/.github/workflows/eth-sync.yml deleted file mode 100644 index 5f9c8135c7..0000000000 --- a/.github/workflows/eth-sync.yml +++ /dev/null @@ -1,56 +0,0 @@ -# Runs an ethereum mainnet sync test. 
- -name: eth-sync-test - -on: - pull_request: - merge_group: - push: - branches: [ main ] - -env: - CARGO_TERM_COLOR: always - TOOL_CHAIN: "1.81" - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - sync: - name: sync / 100k blocks - # Only run sync tests in merge groups - if: github.event_name == 'merge_group' - runs-on: - group: Reth - env: - RUST_LOG: info,sync=error - RUST_BACKTRACE: 1 - timeout-minutes: 60 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - with: - toolchain: ${{ env.TOOL_CHAIN }} - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - name: Build reth - run: | - cargo install --features asm-keccak,jemalloc --path bin/reth - - name: Run sync - run: | - reth node \ - --debug.tip 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 \ - --debug.max-block 100000 \ - --debug.terminate - - name: Verify the target block hash - run: | - reth db get static-file headers 100000 \ - | grep 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 - - name: Run stage unwind for 100 blocks - run: | - reth stage unwind num-blocks 100 - - name: Run stage unwind to block hash - run: | - reth stage unwind to-block 0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 332d0d21ae..46b3580718 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -10,7 +10,7 @@ on: env: CARGO_TERM_COLOR: always - TOOL_CHAIN: "1.81" + TOOL_CHAIN: "1.82" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -85,7 +85,6 @@ jobs: strategy: fail-fast: false matrix: - engine: [regular, experimental] # ethereum/rpc to be deprecated: # https://github.com/ethereum/hive/pull/1117 scenario: @@ -184,7 +183,7 @@ jobs: needs: - prepare-reth - prepare-hive - name: run ${{ matrix.engine }} - ${{ matrix.scenario.sim }}${{ matrix.scenario.limit && format(' - {0}', matrix.scenario.limit) }} + name: run ${{ matrix.scenario.sim }}${{ matrix.scenario.limit && format(' - {0}', matrix.scenario.limit) }} runs-on: group: Reth permissions: @@ -221,11 +220,6 @@ jobs: ref: master path: hivetests - - name: Modify client for experimental engine - if: matrix.engine == 'experimental' - run: | - sed -ie 's/RUST_LOG=info $reth node $FLAGS/RUST_LOG=info $reth node --engine.experimental $FLAGS/' hivetests/clients/reth/reth.sh - - name: Run simulator run: | LIMIT="${{ matrix.scenario.limit }}" @@ -244,8 +238,7 @@ jobs: - name: Parse hive output run: | - FAILURE_FILE="${{ matrix.engine == 'experimental' && '.github/assets/hive/expected_failures_experimental.yaml' || '.github/assets/hive/expected_failures.yaml' }}" - find hivetests/workspace/logs -type f -name "*.json" ! -name "hive.json" | xargs -I {} python .github/assets/hive/parse.py {} --exclusion $FAILURE_FILE + find hivetests/workspace/logs -type f -name "*.json" ! 
-name "hive.json" | xargs -I {} python .github/assets/hive/parse.py {} --exclusion .github/assets/hive/expected_failures.yaml - name: Print simulator output if: ${{ failure() }} diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 883944a36e..28b239e959 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -12,7 +12,7 @@ on: env: CARGO_TERM_COLOR: always SEED: rustethereumethereumrust - TOOL_CHAIN: "1.81" + TOOL_CHAIN: "1.82" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -60,7 +60,7 @@ jobs: - if: matrix.network == 'bsc' name: Run tests run: | - cargo nextest run \ + cargo nextest run --no-run \ --locked -p reth-bsc-node --features "bsc ${{ matrix.extra-features }}" \ -E "kind(test)" diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml index 2e33b6d62b..d2f51225cc 100644 --- a/.github/workflows/kurtosis.yml +++ b/.github/workflows/kurtosis.yml @@ -10,7 +10,7 @@ on: env: CARGO_TERM_COLOR: always - TOOL_CHAIN: "1.81" + TOOL_CHAIN: "1.82" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 74f96ac5f7..3d129eb665 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -8,8 +8,8 @@ on: env: CARGO_TERM_COLOR: always - TOOL_CHAIN: "1.81" - TOOL_CHAIN_NIGHTLY: "nightly-2024-09-09" + TOOL_CHAIN: "1.82" + TOOL_CHAIN_NIGHTLY: "nightly-2024-11-07" jobs: clippy-binaries: @@ -33,6 +33,7 @@ jobs: - uses: dtolnay/rust-toolchain@clippy with: toolchain: ${{ env.TOOL_CHAIN }} + components: clippy - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -210,6 +211,22 @@ jobs: env: RUSTFLAGS: -D warnings + # Check crates correctly propagate features + feature-propagation: + runs-on: ubuntu-latest + timeout-minutes: 20 + steps: + - uses: actions/checkout@v4 + - name: fetch deps + run: | + # Eagerly pull dependencies + time cargo metadata --format-version=1 --locked > /dev/null + - name: run zepter + run: | + cargo install zepter -f --locked + zepter --version + time zepter run check + lint-success: name: lint success runs-on: ubuntu-latest @@ -224,6 +241,7 @@ jobs: - grafana - no-test-deps - features + - feature-propagation timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/.github/workflows/op-sync.yml b/.github/workflows/op-sync.yml deleted file mode 100644 index 907468c676..0000000000 --- a/.github/workflows/op-sync.yml +++ /dev/null @@ -1,58 +0,0 @@ -# Runs a base mainnet sync test. 
- -name: op-sync-test - -on: - pull_request: - merge_group: - push: - branches: [ main ] - -env: - CARGO_TERM_COLOR: always - TOOL_CHAIN: "1.81" - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - sync: - name: op sync / 10k blocks - # Only run sync tests in merge groups - if: github.event_name == 'merge_group' - runs-on: - group: Reth - env: - RUST_LOG: info,sync=error - RUST_BACKTRACE: 1 - timeout-minutes: 60 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - with: - toolchain: ${{ env.TOOL_CHAIN }} - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - name: Build op-reth - run: make install-op - - name: Run sync - # https://basescan.org/block/10000 - run: | - op-reth node \ - --chain base \ - --debug.tip 0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7 \ - --debug.max-block 10000 \ - --debug.terminate - - name: Verify the target block hash - run: | - op-reth db --chain base get static-file headers 10000 \ - | grep 0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7 - - name: Run stage unwind for 100 blocks - run: | - op-reth stage --chain base unwind num-blocks 100 - - name: Run stage unwind to block hash - run: | - op-reth stage --chain base unwind to-block 0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de - diff --git a/.github/workflows/release-dist.yml b/.github/workflows/release-dist.yml index 2142360e03..f7df80e81f 100644 --- a/.github/workflows/release-dist.yml +++ b/.github/workflows/release-dist.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Update Homebrew formula - uses: dawidd6/action-homebrew-bump-formula@v3 + uses: dawidd6/action-homebrew-bump-formula@v4 with: token: ${{ secrets.HOMEBREW }} no_fork: true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 32262d034f..14b79297c2 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,7 +13,7 @@ env: OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth IMAGE_NAME: ${{ github.repository_owner }}/bsc-reth CARGO_TERM_COLOR: always - TOOL_CHAIN: "1.81" + TOOL_CHAIN: "1.82" jobs: extract-version: diff --git a/.github/workflows/stage.yml b/.github/workflows/stage.yml index c35e00144a..2700ff1cb0 100644 --- a/.github/workflows/stage.yml +++ b/.github/workflows/stage.yml @@ -12,7 +12,7 @@ env: CARGO_TERM_COLOR: always FROM_BLOCK: 0 TO_BLOCK: 50000 - TOOL_CHAIN: "1.81" + TOOL_CHAIN: "1.82" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml new file mode 100644 index 0000000000..531d04b2e4 --- /dev/null +++ b/.github/workflows/sync.yml @@ -0,0 +1,63 @@ +# Runs sync tests. 
+ +name: sync test + +on: + merge_group: + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + sync: + name: sync (${{ matrix.chain.bin }}) + runs-on: + group: Reth + env: + RUST_LOG: info,sync=error + RUST_BACKTRACE: 1 + timeout-minutes: 60 + strategy: + matrix: + chain: + - build: install + bin: reth + chain: mainnet + tip: "0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4" + block: 100000 + unwind-target: "0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a" + - build: install-op + bin: op-reth + chain: base + tip: "0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7" + block: 10000 + unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de" + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Build ${{ matrix.chain.bin }} + run: make ${{ matrix.chain.build }} + - name: Run sync + run: | + ${{ matrix.chain.bin }} node \ + --chain ${{ matrix.chain.chain }} \ + --debug.tip ${{ matrix.chain.tip }} \ + --debug.max-block ${{ matrix.chain.block }} \ + --debug.terminate + - name: Verify the target block hash + run: | + ${{ matrix.chain.bin }} db --chain ${{ matrix.chain.chain }} get static-file headers ${{ matrix.chain.block }} \ + | grep ${{ matrix.chain.tip }} + - name: Run stage unwind for 100 blocks + run: | + ${{ matrix.chain.bin }} stage --chain ${{ matrix.chain.chain }} unwind num-blocks 100 + - name: Run stage unwind to block hash + run: | + ${{ matrix.chain.bin }} stage --chain ${{ matrix.chain.chain }} unwind to-block ${{ matrix.chain.unwind-target }} diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 499047e483..6da8248153 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -12,7 +12,7 @@ on: env: CARGO_TERM_COLOR: always SEED: rustethereumethereumrust - TOOL_CHAIN: "1.81" + TOOL_CHAIN: "1.82" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 6850ace928..a25c4c9d4d 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -10,7 +10,7 @@ on: merge_group: env: - TOOL_CHAIN: "1.81" + TOOL_CHAIN: "1.82" jobs: check-reth: diff --git a/CHANGELOG.md b/CHANGELOG.md index da530ccf94..589c9cb3cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## v1.1.0 + +This release merges upstream version v1.1.1 and includes several bug fixes. + +**Key changes:** + +* **Compatibility:** This version is compatible with the BSC mainnet and testnet, as well as the opBNB mainnet and testnet. +* **Bug fixes:** Several bug fixes are included in this release. For detailed code changes, see [v1.0.7...v1.1.0](https://github.com/bnb-chain/reth/compare/v1.0.7...v1.1.0) + ## v1.0.7 This is a hotfix release for the BSC/opBNB mainnet and testnet. 
diff --git a/Cargo.lock b/Cargo.lock index 87a2d5fb3d..2daa8837ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -111,8 +111,8 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-eips", "alloy-primitives", @@ -126,6 +126,26 @@ dependencies = [ "serde_with", ] +[[package]] +name = "alloy-contract" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460ab80ce4bda1c80bcf96fe7460520476f2c7b734581c6567fac2708e2a60ef" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-sol-types", + "alloy-transport", + "futures", + "futures-util", + "thiserror", +] + [[package]] name = "alloy-dyn-abi" version = "0.8.5" @@ -159,13 +179,14 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.1.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" +checksum = "64ffc577390ce50234e02d841214b3dc0bea6aaaae8e04bbf3cb82e9a45da9eb" dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", + "derive_more 1.0.0", "k256", "rand 0.8.5", "serde", @@ -174,8 +195,8 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -185,6 +206,8 @@ dependencies = [ "arbitrary", "c-kzg", "derive_more 1.0.0", + "ethereum_ssz", + "ethereum_ssz_derive", "once_cell", "serde", "sha2 0.10.8", @@ -192,9 +215,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3" +checksum = "dde15e14944a88bd6a57d325e9a49b75558746fe16aaccc79713ae50a6a9574c" dependencies = [ "alloy-primitives", "alloy-serde", @@ -203,8 +226,8 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-primitives", "alloy-serde", @@ -225,8 +248,8 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -238,8 +261,8 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = 
"git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-consensus", "alloy-eips", @@ -258,8 +281,8 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-consensus", "alloy-eips", @@ -270,11 +293,11 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454" +checksum = "27444ea67d360508753022807cdd0b49a95c878924c9c5f8f32668b7d7768245" dependencies = [ - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "k256", "rand 0.8.5", @@ -287,9 +310,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.7" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecb848c43f6b06ae3de2e4a67496cbbabd78ae87db0f1248934f15d76192c6a" +checksum = "fd58d377699e6cfeab52c4a9d28bdc4ef37e2bd235ff2db525071fe37a2e9af5" dependencies = [ "alloy-rlp", "arbitrary", @@ -302,7 +325,7 @@ dependencies = [ "getrandom 0.2.15", "hashbrown 0.15.0", "hex-literal", - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "k256", "keccak-asm", @@ -319,8 +342,8 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-chains", "alloy-consensus", @@ -329,14 +352,14 @@ dependencies = [ "alloy-network", "alloy-network-primitives", "alloy-primitives", - "alloy-pubsub 0.4.2 (git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36)", + "alloy-pubsub 0.5.4 (git+https://github.com/bnb-chain/alloy?tag=v1.0.3)", "alloy-rpc-client", - "alloy-rpc-types-admin 0.4.2 (git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36)", + "alloy-rpc-types-admin 0.5.4 (git+https://github.com/bnb-chain/alloy?tag=v1.0.3)", "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-transport", "alloy-transport-http", - "alloy-transport-ws 0.4.2 (git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36)", + "alloy-transport-ws 0.5.4 (git+https://github.com/bnb-chain/alloy?tag=v1.0.3)", "async-stream", "async-trait", "auto_impl", @@ -344,21 +367,24 @@ dependencies = [ "futures", "futures-utils-wasm", "lru", + "parking_lot", "pin-project", "reqwest 0.12.8", + "schnellru", "serde", "serde_json", "thiserror", "tokio", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-pubsub" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f32cef487122ae75c91eb50154c70801d71fabdb976fec6c49e0af5e6486ab15" +checksum = "96ba46eb69ddf7a9925b81f15229cb74658e6eebe5dd30a5b74e2cd040380573" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -375,8 +401,8 @@ dependencies = [ [[package]] name = "alloy-pubsub" 
-version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -415,15 +441,15 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-json-rpc", "alloy-primitives", - "alloy-pubsub 0.4.2 (git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36)", + "alloy-pubsub 0.5.4 (git+https://github.com/bnb-chain/alloy?tag=v1.0.3)", "alloy-transport", "alloy-transport-http", - "alloy-transport-ws 0.4.2 (git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36)", + "alloy-transport-ws 0.5.4 (git+https://github.com/bnb-chain/alloy?tag=v1.0.3)", "futures", "pin-project", "reqwest 0.12.8", @@ -434,13 +460,14 @@ dependencies = [ "tower 0.5.1", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-rpc-types" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ffc534b7919e18f35e3aa1f507b6f3d9d92ec298463a9f6beaac112809d8d06" +checksum = "eea9bf1abdd506f985a53533f5ac01296bcd6102c5e139bbc5d40bc468d2c916" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -451,11 +478,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb520ed46cc5b7d8c014a73fdd77b6a310383a2a5c0a5ae3c9b8055881f062b7" +checksum = "ea02c25541fb19eaac4278aa5c41d2d7e0245898887e54a74bfc0f3103e99415" dependencies = [ - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "serde", "serde_json", @@ -463,10 +490,10 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ - "alloy-genesis 0.4.2 (git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36)", + "alloy-genesis 0.5.4 (git+https://github.com/bnb-chain/alloy?tag=v1.0.3)", "alloy-primitives", "serde", "serde_json", @@ -474,9 +501,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d780adaa5d95b07ad92006b2feb68ecfa7e2015f7d5976ceaac4c906c73ebd07" +checksum = "2382fc63fb0cf3e02818d547b80cb66cc49a31f8803d0c328402b2008bc13650" dependencies = [ "alloy-primitives", "alloy-serde", @@ -485,9 +512,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a8dc5980fe30203d698627cddb5f0cedc57f900c8b5e1229c8b9448e37acb4a" +checksum = 
"45357a642081c8ce235c0ad990c4e9279f5f18a723545076b38cfcc05cc25234" dependencies = [ "alloy-eips", "alloy-primitives", @@ -499,9 +526,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59d8f8c5bfb160081a772f1f68eb9a37e8929c4ef74e5d01f5b78c2b645a5c5e" +checksum = "a5afe3ab1038f90faf56304aa0adf1e6a8c9844615d8f83967f932f3a70390b1" dependencies = [ "alloy-primitives", "serde", @@ -509,8 +536,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-consensus", "alloy-eips", @@ -518,6 +545,8 @@ dependencies = [ "alloy-rlp", "alloy-serde", "derive_more 1.0.0", + "ethereum_ssz", + "ethereum_ssz_derive", "jsonrpsee-types", "jsonwebtoken", "rand 0.8.5", @@ -527,8 +556,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-consensus", "alloy-eips", @@ -537,6 +566,7 @@ dependencies = [ "alloy-rlp", "alloy-serde", "alloy-sol-types", + "arbitrary", "derive_more 1.0.0", "itertools 0.13.0", "jsonrpsee-types", @@ -546,9 +576,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cec23ce56c869eec5f6b6fd6a8a92b5aa0cfaf8d7be3a96502e537554dc7430" +checksum = "3246948dfa5f5060a9abe04233d741ea656ef076b12958f3242416ce9f375058" dependencies = [ "alloy-eips", "alloy-primitives", @@ -559,9 +589,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "017cad3e5793c5613588c1f9732bcbad77e820ba7d0feaba3527749f856fdbc5" +checksum = "4e5fb6c5c401321f802f69dcdb95b932f30f8158f6798793f914baac5995628e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -573,9 +603,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b230e321c416be7f50530159392b4c41a45596d40d97e185575bcd0b545e521" +checksum = "9ad066b49c3b1b5f64cdd2399177a19926a6a15db2dbf11e2098de621f9e7480" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -585,8 +615,8 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-primitives", "arbitrary", @@ -596,8 +626,8 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = 
"git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-primitives", "async-trait", @@ -609,8 +639,8 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-consensus", "alloy-network", @@ -626,9 +656,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.5" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b" +checksum = "8a1b42ac8f45e2f49f4bcdd72cbfde0bb148f5481d403774ffa546e48b83efc1" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -640,14 +670,14 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "0.8.5" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f" +checksum = "06318f1778e57f36333e850aa71bd1bb5e560c10279e236622faae0470c50412" dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.5.0", + "indexmap 2.6.0", "proc-macro-error2", "proc-macro2", "quote", @@ -658,9 +688,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.8.5" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90" +checksum = "eaebb9b0ad61a41345a22c9279975c0cdd231b97947b10d7aad1cf0a7181e4a5" dependencies = [ "const-hex", "dunce", @@ -696,8 +726,8 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -710,12 +740,13 @@ dependencies = [ "tower 0.5.1", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-transport-http" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -728,12 +759,12 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b90cf9cde7f2fce617da52768ee28f522264b282d148384a4ca0ea85af04fa3a" +checksum = "8073d1186bfeeb8fbdd1292b6f1a0731f3aed8e21e1463905abfae0b96a887a6" dependencies = [ "alloy-json-rpc", - "alloy-pubsub 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-pubsub 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-transport", "bytes 1.7.2", "futures", @@ -747,11 +778,11 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7153b88690de6a50bba81c11e1d706bc41dbb90126d607404d60b763f6a3947f" +checksum 
= "61f27837bb4a1d6c83a28231c94493e814882f0e9058648a97e908a5f3fc9fcf" dependencies = [ - "alloy-pubsub 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-pubsub 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-transport", "futures", "http 1.1.0", @@ -765,10 +796,10 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.4.2" -source = "git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36#718060680134e6bb40d97d3c6fb56fd1950ced36" +version = "0.5.4" +source = "git+https://github.com/bnb-chain/alloy?tag=v1.0.3#576cabc6a05f6e483d10d5d16f69a67da5f0b126" dependencies = [ - "alloy-pubsub 0.4.2 (git+https://github.com/bnb-chain/alloy?rev=718060680134e6bb40d97d3c6fb56fd1950ced36)", + "alloy-pubsub 0.5.4 (git+https://github.com/bnb-chain/alloy?tag=v1.0.3)", "alloy-transport", "futures", "http 1.1.0", @@ -782,13 +813,14 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.6.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9703ce68b97f8faae6f7739d1e003fc97621b856953cbcdbb2b515743f23288" +checksum = "cdd7f8b3a7c65ca09b3c7bdd7c7d72d7423d026f5247eda96af53d24e58315c1" dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", + "arrayvec", "derive_arbitrary", "derive_more 1.0.0", "nybbles", @@ -886,13 +918,13 @@ checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "aquamarine" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21cc1548309245035eb18aa7f0967da6bc65587005170c56e6ef2788a4cf3f4e" +checksum = "0f50776554130342de4836ba542aa85a4ddb361690d7e8df13774d7284c3d5c2" dependencies = [ "include_dir", "itertools 0.10.5", - "proc-macro-error", + "proc-macro-error2", "proc-macro2", "quote", "syn 2.0.79", @@ -1096,6 +1128,9 @@ name = "arrayvec" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +dependencies = [ + "serde", +] [[package]] name = "asn1_der" @@ -1483,7 +1518,7 @@ dependencies = [ "bitflags 2.6.0", "boa_interner", "boa_macros", - "indexmap 2.5.0", + "indexmap 2.6.0", "num-bigint", "rustc-hash 2.0.0", ] @@ -1509,7 +1544,7 @@ dependencies = [ "fast-float", "hashbrown 0.14.5", "icu_normalizer", - "indexmap 2.5.0", + "indexmap 2.6.0", "intrusive-collections", "itertools 0.13.0", "num-bigint", @@ -1555,7 +1590,7 @@ dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.14.5", - "indexmap 2.5.0", + "indexmap 2.6.0", "once_cell", "phf", "rustc-hash 2.0.0", @@ -1654,7 +1689,7 @@ dependencies = [ [[package]] name = "bsc-reth" -version = "1.0.7" +version = "1.1.0" dependencies = [ "clap", "reth-bsc-chainspec", @@ -1681,6 +1716,17 @@ dependencies = [ "regex-automata 0.1.10", ] +[[package]] +name = "bstr" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" +dependencies = [ + "memchr", + "regex-automata 0.4.8", + "serde", +] + [[package]] name = "bumpalo" version = "3.16.0" @@ -2357,6 +2403,12 @@ dependencies = [ "itertools 0.10.5", ] +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + [[package]] name = "crossbeam-channel" version = "0.5.13" @@ -2400,7 +2452,7 @@ 
dependencies = [ "bitflags 2.6.0", "crossterm_winapi", "libc", - "parking_lot 0.12.3", + "parking_lot", "winapi", ] @@ -2413,7 +2465,7 @@ dependencies = [ "bitflags 2.6.0", "crossterm_winapi", "mio 1.0.2", - "parking_lot 0.12.3", + "parking_lot", "rustix", "signal-hook", "signal-hook-mio", @@ -2596,7 +2648,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.10", + "parking_lot_core", ] [[package]] @@ -2610,7 +2662,8 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.10", + "parking_lot_core", + "serde", ] [[package]] @@ -2656,11 +2709,12 @@ dependencies = [ [[package]] name = "delay_map" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" +checksum = "df941644b671f05f59433e481ba0d31ac10e3667de725236a4c0d587c496fba1" dependencies = [ "futures", + "tokio", "tokio-util", ] @@ -2819,9 +2873,9 @@ dependencies = [ [[package]] name = "discv5" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f569b8c367554666c8652305621e8bae3634a2ff5c6378081d5bd8c399c99f23" +checksum = "23e6b70634e26c909d1edbb3142b3eaf3b89da0e52f284f00ca7c80d9901ad9e" dependencies = [ "aes", "aes-gcm", @@ -2840,13 +2894,13 @@ dependencies = [ "lru", "more-asserts", "multiaddr", - "parking_lot 0.11.2", + "parking_lot", "rand 0.8.5", "smallvec", - "socket2 0.4.10", + "socket2", "tokio", "tracing", - "uint", + "uint 0.10.0", "zeroize", ] @@ -2964,7 +3018,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -2975,7 +3029,9 @@ dependencies = [ "reth-evm-ethereum", "reth-primitives", "reth-provider", + "reth-revm", "reth-stages", + "revm", "serde", "serde_json", "thiserror", @@ -3081,6 +3137,47 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "ethereum_serde_utils" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70cbccfccf81d67bff0ab36e591fa536c8a935b078a7b0e58c1d00d418332fc9" +dependencies = [ + "alloy-primitives", + "hex 0.4.3", + "serde", + "serde_derive", + "serde_json", +] + +[[package]] +name = "ethereum_ssz" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfbba28f4f3f32d92c06a64f5bf6c4537b5d4e21f28c689bd2bbaecfea4e0d3e" +dependencies = [ + "alloy-primitives", + "derivative", + "ethereum_serde_utils", + "itertools 0.13.0", + "serde", + "serde_derive", + "smallvec", + "typenum", +] + +[[package]] +name = "ethereum_ssz_derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d37845ba7c16bf4be8be4b5786f03a2ba5f2fda0d7f9e7cb2282f69cff420d7" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "event-listener" version = "2.5.3" @@ -3136,11 +3233,29 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "example-custom-beacon-withdrawals" +version = "0.0.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-sol-macro", + "alloy-sol-types", + "eyre", + "reth", + "reth-chainspec", + "reth-evm", + "reth-evm-ethereum", + "reth-node-ethereum", + "reth-primitives", + "tokio", +] + [[package]] name = "example-custom-dev-node" version = "0.0.0" dependencies = [ - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 
(registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "eyre", "futures-util", @@ -3156,7 +3271,7 @@ dependencies = [ name = "example-custom-engine-types" version = "0.0.0" dependencies = [ - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "alloy-rpc-types", "eyre", @@ -3170,6 +3285,7 @@ dependencies = [ "reth-payload-builder", "reth-primitives", "reth-tracing", + "reth-trie-db", "serde", "thiserror", "tokio", @@ -3179,7 +3295,7 @@ dependencies = [ name = "example-custom-evm" version = "0.0.0" dependencies = [ - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "eyre", "reth", @@ -3197,6 +3313,7 @@ dependencies = [ name = "example-custom-inspector" version = "0.0.0" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types", "clap", @@ -3220,6 +3337,7 @@ dependencies = [ name = "example-custom-payload-builder" version = "0.0.0" dependencies = [ + "alloy-eips", "alloy-primitives", "eyre", "futures-util", @@ -3247,8 +3365,6 @@ dependencies = [ "reth-network", "reth-network-api", "reth-node-ethereum", - "reth-primitives", - "reth-provider", "tokio", "tokio-stream", "tracing", @@ -3273,6 +3389,7 @@ dependencies = [ name = "example-manual-p2p" version = "0.0.0" dependencies = [ + "alloy-consensus", "eyre", "futures", "reth-chainspec", @@ -3337,7 +3454,6 @@ dependencies = [ "reth-discv4", "reth-network", "reth-primitives", - "reth-provider", "reth-tracing", "secp256k1", "serde_json", @@ -3364,10 +3480,10 @@ dependencies = [ name = "example-stateful-precompile" version = "0.0.0" dependencies = [ - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "eyre", - "parking_lot 0.12.3", + "parking_lot", "reth", "reth-chainspec", "reth-node-api", @@ -3732,6 +3848,7 @@ version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ + "serde", "typenum", "version_check", "zeroize", @@ -3852,7 +3969,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3871,7 +3988,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3937,9 +4054,9 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.4" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ "hashbrown 0.14.5", ] @@ -4176,7 +4293,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -4251,7 +4368,7 @@ dependencies = [ "http-body 1.0.1", "hyper 1.4.1", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -4543,13 +4660,13 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "serde", ] @@ -4566,7 +4683,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.5.0", + "indexmap 2.6.0", "is-terminal", "itoa", "log", @@ -4656,7 +4773,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2", "widestring", "windows-sys 0.48.0", "winreg", @@ -4841,7 +4958,7 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "jsonrpsee-types", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", "rustc-hash 2.0.0", @@ -5113,7 +5230,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.6.0", "libc", - "redox_syscall 0.5.7", + "redox_syscall", ] [[package]] @@ -5177,6 +5294,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" dependencies = [ "linked-hash-map", + "serde", ] [[package]] @@ -5199,6 +5317,7 @@ checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", + "serde", ] [[package]] @@ -5294,9 +5413,9 @@ dependencies = [ [[package]] name = "metrics" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884adb57038347dfbaf2d5065887b6cf4312330dc8e94bc30a1a839bd79d3261" +checksum = "8ae428771d17306715c5091d446327d1cfdedc82185c65ba8423ab404e45bf10" dependencies = [ "ahash", "portable-atomic", @@ -5316,12 +5435,12 @@ dependencies = [ [[package]] name = "metrics-exporter-prometheus" -version = "0.15.3" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f0c8427b39666bf970460908b213ec09b3b350f20c0c2eabcbba51704a08e6" +checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", - "indexmap 2.5.0", + "indexmap 2.6.0", "metrics", "metrics-util", "quanta", @@ -5330,30 +5449,30 @@ dependencies = [ [[package]] name = "metrics-process" -version = "2.1.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb524e5438255eaa8aa74214d5a62713b77b2c3c6e3c0bbeee65cfd9a58948ba" +checksum = "57ca8ecd85575fbb143b2678cb123bb818779391ec0f745b1c4a9dbabadde407" dependencies = [ + "libc", "libproc", "mach2", "metrics", "once_cell", - "procfs", + "procfs 0.17.0", "rlimit", - "windows 0.57.0", + "windows 0.58.0", ] [[package]] name = "metrics-util" -version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4259040465c955f9f2f1a4a8a16dc46726169bca0f88e8fb2dbeced487c3e828" +checksum = "15b482df36c13dd1869d73d14d28cd4855fbd6cfc32294bee109908a9f4a4ed7" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "metrics", - "num_cpus", "quanta", "sketches-ddsketch", ] @@ -5589,6 +5708,7 @@ dependencies = [ "libc", "log", "mio 0.8.11", + "serde", "walkdir", "windows-sys 0.48.0", ] @@ -5794,6 +5914,7 @@ version = "1.20.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" dependencies = [ + "critical-section", "portable-atomic", ] @@ -5805,9 +5926,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ea7162170c6f3cad8f67f4dd7108e3f78349fd553da5b8bebff1e7ef8f38896" +checksum = "f26c3b35b7b3e36d15e0563eebffe13c1d9ca16b7aaffcb6a64354633547e16b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5823,9 +5944,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3d31dfbbd8dd898c7512f8ce7d30103980485416f668566100b0ed0994b958" +checksum = "ccacc2efed3d60d98ea581bddb885df1c6c62a592e55de049cfefd94116112cd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5837,9 +5958,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d113b325527ba7da271a8793f1c14bdf7f035ce9e0611e668c36fc6812568c7f" +checksum = "5ff6fc0f94702ea0f4d8466bffdc990067ae6df9213465df9b7957f74f1e5461" dependencies = [ "alloy-consensus", "alloy-network", @@ -5851,26 +5972,29 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310873e4fbfc41986716c4fb6000a8b49d025d932d2c261af58271c434b05288" +checksum = "f5f8e6ec6b91c6aaeb20860b455a52fd8e300acfe5d534e96e9073a24f853e74" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", + "async-trait", "derive_more 1.0.0", "op-alloy-consensus", "op-alloy-genesis", "serde", + "tracing", + "unsigned-varint 0.8.0", ] [[package]] name = "op-alloy-rpc-types" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "323c65880e2561aa87f74f8af260fd15b9cc930c448c88a60ae95af86c88c634" +checksum = "94bae9bf91b620e1e2c2291562e5998bc1247bd8ada011773e1997b31a95de99" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5878,6 +6002,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", + "arbitrary", "op-alloy-consensus", "serde", "serde_json", @@ -5885,21 +6010,23 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349e7b420f45d1a00216ec4c65fcf3f0057a841bc39732c405c85ae782b94121" +checksum = "4b52ee59c86537cff83e8c7f2a6aa287a94f3608bb40c06d442aafd0c2e807a4" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", "alloy-serde", "derive_more 1.0.0", + "ethereum_ssz", "op-alloy-protocol", "serde", + "snap", ] [[package]] name = "op-reth" -version = "1.0.7" +version = "1.1.0" dependencies = [ "clap", "reth-cli-util", @@ -5981,6 +6108,7 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ + "arbitrary", "arrayvec", "bitvec", "byte-slice-cast", @@ -6008,17 +6136,6 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - [[package]] name = "parking_lot" version = "0.12.3" @@ -6026,21 +6143,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core", ] [[package]] @@ -6051,7 +6154,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.7", + "redox_syscall", "smallvec", "windows-targets 0.52.6", ] @@ -6308,7 +6411,7 @@ dependencies = [ "log", "nix", "once_cell", - "parking_lot 0.12.3", + "parking_lot", "smallvec", "symbolic-demangle", "tempfile", @@ -6387,7 +6490,7 @@ checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", "impl-codec", - "uint", + "uint 0.9.5", ] [[package]] @@ -6399,30 +6502,6 @@ dependencies = [ "toml_edit", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro-error-attr2" version = "2.0.0" @@ -6465,7 +6544,19 @@ dependencies = [ "flate2", "hex 0.4.3", "lazy_static", - "procfs-core", + "procfs-core 0.16.0", + "rustix", +] + +[[package]] +name = "procfs" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc5b72d8145275d844d4b5f6d4e1eef00c8cd889edb6035c21675d1bb1f45c9f" +dependencies = [ + "bitflags 2.6.0", + "hex 0.4.3", + "procfs-core 0.17.0", "rustix", ] @@ -6480,6 +6571,16 @@ dependencies = [ "hex 0.4.3", ] +[[package]] +name = "procfs-core" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "239df02d8349b06fc07398a3a1697b06418223b1c7725085e801e7c0fc6a12ec" +dependencies = [ + "bitflags 2.6.0", + "hex 0.4.3", +] + [[package]] name = "proptest" version = "1.5.0" @@ -6658,7 +6759,7 @@ dependencies = [ "ahash", "equivalent", "hashbrown 0.14.5", - "parking_lot 0.12.3", + "parking_lot", ] [[package]] @@ -6673,7 +6774,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.0.0", "rustls 0.23.13", - "socket2 0.5.7", + "socket2", "thiserror", "tokio", "tracing", @@ -6704,7 +6805,7 @@ checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", "once_cell", - "socket2 0.5.7", + "socket2", "tracing", "windows-sys 0.59.0", ] @@ -6863,18 +6964,9 @@ checksum = 
"d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" [[package]] name = "redox_syscall" -version = "0.2.16" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] @@ -7042,7 +7134,7 @@ dependencies = [ [[package]] name = "reth" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7067,7 +7159,6 @@ dependencies = [ "reth-consensus-common", "reth-db", "reth-db-api", - "reth-discv4", "reth-downloaders", "reth-engine-util", "reth-errors", @@ -7115,8 +7206,9 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "futures-util", @@ -7145,14 +7237,17 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "futures-core", "futures-util", "metrics", "reth-chainspec", + "reth-evm", "reth-metrics", "reth-payload-builder", "reth-payload-primitives", @@ -7168,9 +7263,10 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.0.7" +version = "1.1.0" dependencies = [ - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-eips", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "alloy-rpc-types-engine", "assert_matches", @@ -7219,19 +7315,19 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-json-rpc", "alloy-primitives", "alloy-provider", - "alloy-pubsub 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-pubsub 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-rpc-client", "alloy-rpc-types-engine", "alloy-transport", "alloy-transport-http", "alloy-transport-ipc", - "alloy-transport-ws 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-transport-ws 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "async-trait", "clap", "csv", @@ -7254,18 +7350,18 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "aquamarine", "assert_matches", "dashmap 6.1.0", "linked_hash_set", "metrics", - "parking_lot 0.12.3", + "parking_lot", "reth-blockchain-tree-api", "reth-chainspec", "reth-consensus", @@ -7294,8 +7390,9 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "reth-consensus", "reth-execution-errors", @@ -7306,10 +7403,11 @@ dependencies = [ [[package]] name = "reth-bsc-chainspec" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-chains", - "alloy-genesis 0.4.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "alloy-consensus", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "derive_more 1.0.0", "once_cell", @@ -7324,7 +7422,7 @@ dependencies = [ [[package]] name = "reth-bsc-cli" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7367,10 +7465,11 @@ dependencies = [ [[package]] name = "reth-bsc-consensus" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", + "alloy-eips", "alloy-json-abi", "alloy-primitives", "alloy-rlp", @@ -7381,7 +7480,7 @@ dependencies = [ "lazy_static", "lru", "mockall 0.12.1", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "reth-bsc-chainspec", "reth-bsc-forks", @@ -7411,13 +7510,15 @@ dependencies = [ [[package]] name = "reth-bsc-engine" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-dyn-abi", + "alloy-eips", "alloy-json-abi", "alloy-primitives", "alloy-rlp", "alloy-rpc-types", + "alloy-rpc-types-engine", "bitset", "blst", "bytes 1.7.2", @@ -7425,7 +7526,7 @@ dependencies = [ "lazy_static", "lru", "mockall 0.12.1", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "reth-beacon-consensus", "reth-bsc-chainspec", @@ -7459,15 +7560,16 @@ dependencies = [ [[package]] name = "reth-bsc-evm" -version = "1.0.7" +version = "1.1.0" dependencies = [ - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-consensus", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "bitset", "blst", "lazy_static", "lru", - "parking_lot 0.12.3", + "parking_lot", "reth-bsc-chainspec", "reth-bsc-consensus", "reth-bsc-forks", @@ -7488,7 +7590,7 @@ dependencies = [ [[package]] name = "reth-bsc-forks" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7499,7 +7601,7 @@ dependencies = [ [[package]] name = "reth-bsc-node" -version = "1.0.7" +version = "1.1.0" dependencies = [ "eyre", "futures", @@ -7528,14 +7630,16 @@ dependencies = [ "reth-rpc", "reth-tracing", "reth-transaction-pool", + "reth-trie-db", "serde_json", "tokio", ] [[package]] name = "reth-bsc-payload-builder" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", @@ -7560,9 +7664,10 @@ dependencies = [ [[package]] name = "reth-bsc-primitives" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-chains", + "alloy-consensus", "alloy-primitives", "include_dir", "lazy_static", @@ -7578,7 +7683,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7589,7 +7694,7 @@ dependencies = [ "derive_more 1.0.0", "lazy_static", "metrics", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "quick_cache", "rand 0.8.5", @@ -7601,6 +7706,7 @@ dependencies = [ "reth-provider", "reth-revm", "reth-storage-api", + "reth-testing-utils", "reth-trie", "revm", "serial_test", @@ -7611,11 +7717,12 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-chains", + "alloy-consensus", "alloy-eips", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7633,23 +7740,25 @@ dependencies = [ [[package]] name = "reth-cli" 
-version = "1.0.7" +version = "1.1.0" dependencies = [ - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "clap", "eyre", "reth-cli-runner", + "reth-db", "serde_json", "shellexpand", ] [[package]] name = "reth-cli-commands" -version = "1.0.7" +version = "1.1.0" dependencies = [ "ahash", "alloy-eips", "alloy-primitives", + "alloy-rlp", "arbitrary", "backon", "clap", @@ -7668,6 +7777,7 @@ dependencies = [ "reth-cli", "reth-cli-runner", "reth-cli-util", + "reth-codecs", "reth-config", "reth-consensus", "reth-db", @@ -7691,10 +7801,13 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-prune", + "reth-prune-types", "reth-stages", + "reth-stages-types", "reth-static-file", "reth-static-file-types", "reth-trie", + "reth-trie-common", "reth-trie-db", "secp256k1", "serde", @@ -7706,7 +7819,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.0.7" +version = "1.1.0" dependencies = [ "reth-tasks", "tokio", @@ -7715,7 +7828,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7725,6 +7838,7 @@ dependencies = [ "rand 0.8.5", "reth-fs-util", "secp256k1", + "serde", "thiserror", "tikv-jemallocator", "tracy-client", @@ -7732,13 +7846,12 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", - "alloy-rlp", "alloy-trie", "arbitrary", "bytes 1.7.2", @@ -7746,16 +7859,16 @@ dependencies = [ "op-alloy-consensus", "proptest", "proptest-arbitrary-interop", - "rand 0.8.5", "reth-codecs-derive", "serde", "serde_json", "test-fuzz", + "visibility", ] [[package]] name = "reth-codecs-derive" -version = "1.0.7" +version = "1.1.0" dependencies = [ "convert_case", "proc-macro2", @@ -7766,7 +7879,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "eyre", @@ -7783,8 +7896,9 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "auto_impl", "derive_more 1.0.0", @@ -7793,9 +7907,10 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "mockall 0.13.0", "rand 0.8.5", @@ -7808,7 +7923,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7831,7 +7946,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -7843,11 +7958,10 @@ dependencies = [ "iai-callgrind", "metrics", "page_size", - "parking_lot 0.12.3", + "parking_lot", "paste", "pprof", "proptest", - "rand 0.8.5", "reth-db-api", "reth-fs-util", "reth-libmdbx", @@ -7872,9 +7986,9 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.0.7" +version = "1.1.0" dependencies = [ - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "arbitrary", "bytes 1.7.2", @@ 
-7900,9 +8014,10 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.0.7" +version = "1.1.0" dependencies = [ - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-consensus", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "boyer-moore-magiclen", "eyre", @@ -7928,7 +8043,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -7937,14 +8052,14 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "reth-codecs", - "reth-primitives", + "reth-primitives-traits", "serde", "test-fuzz", ] [[package]] name = "reth-discv4" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7952,7 +8067,8 @@ dependencies = [ "discv5", "enr", "generic-array 0.14.7", - "parking_lot 0.12.3", + "itertools 0.13.0", + "parking_lot", "rand 0.8.5", "reth-ethereum-forks", "reth-net-banlist", @@ -7970,7 +8086,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7994,7 +8110,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8002,7 +8118,7 @@ dependencies = [ "data-encoding", "enr", "linked_hash_set", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "reth-chainspec", "reth-ethereum-forks", @@ -8022,7 +8138,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8059,7 +8175,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8068,24 +8184,21 @@ dependencies = [ "alloy-rpc-types", "alloy-signer", "alloy-signer-local", + "derive_more 1.0.0", "eyre", "futures-util", "jsonrpsee", - "jsonrpsee-types", "op-alloy-rpc-types-engine", "reth", "reth-chainspec", "reth-db", + "reth-engine-local", "reth-network-peers", "reth-node-builder", - "reth-node-ethereum", "reth-payload-builder", "reth-payload-primitives", - "reth-primitives", "reth-provider", - "reth-rpc", "reth-rpc-layer", - "reth-rpc-types-compat", "reth-stages-types", "reth-tokio-util", "reth-tracing", @@ -8093,11 +8206,12 @@ dependencies = [ "tokio", "tokio-stream", "tracing", + "url", ] [[package]] name = "reth-ecies" -version = "1.0.7" +version = "1.1.0" dependencies = [ "aes", "alloy-primitives", @@ -8127,28 +8241,28 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", "eyre", "futures-util", + "op-alloy-rpc-types-engine", "reth-beacon-consensus", - "reth-chain-state", "reth-chainspec", - "reth-config", - "reth-db", + "reth-consensus", + "reth-engine-primitives", + "reth-engine-service", "reth-engine-tree", "reth-ethereum-engine-primitives", - "reth-exex-test-utils", - "reth-node-types", + "reth-evm", "reth-payload-builder", "reth-payload-primitives", - "reth-primitives", + "reth-payload-validator", "reth-provider", "reth-prune", + "reth-rpc-types-compat", "reth-stages-api", - "reth-tracing", "reth-transaction-pool", "tokio", "tokio-stream", @@ -8157,7 +8271,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "reth-execution-types", @@ 
-8169,7 +8283,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.0.7" +version = "1.1.0" dependencies = [ "futures", "pin-project", @@ -8197,7 +8311,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8207,7 +8321,6 @@ dependencies = [ "dashmap 6.1.0", "futures", "metrics", - "rand 0.8.5", "reth-beacon-consensus", "reth-blockchain-tree", "reth-blockchain-tree-api", @@ -8239,15 +8352,19 @@ dependencies = [ "reth-trie", "reth-trie-parallel", "reth-trie-prefetch", + "revm-primitives", "thiserror", "tokio", + "tokio-stream", "tracing", ] [[package]] name = "reth-engine-util" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "eyre", @@ -8276,7 +8393,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.0.7" +version = "1.1.0" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -8288,7 +8405,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8323,12 +8440,12 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-chains", "alloy-consensus", "alloy-eips", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "alloy-rlp", "arbitrary", @@ -8346,7 +8463,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.0.7" +version = "1.1.0" dependencies = [ "clap", "eyre", @@ -8357,8 +8474,10 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "reth-chainspec", "reth-consensus", @@ -8369,7 +8488,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8388,9 +8507,10 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-chains", + "alloy-consensus", "alloy-primitives", "alloy-rlp", "arbitrary", @@ -8407,8 +8527,10 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "reth-basic-payload-builder", "reth-chain-state", @@ -8431,7 +8553,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "rayon", @@ -8441,21 +8563,26 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "auto_impl", "futures-util", "metrics", - "parking_lot 0.12.3", + "parking_lot", "reth-chainspec", + "reth-consensus", + "reth-consensus-common", + "reth-ethereum-forks", "reth-execution-errors", "reth-execution-types", "reth-metrics", "reth-primitives", "reth-primitives-traits", "reth-prune-types", + "reth-revm", "reth-storage-errors", "revm", "revm-primitives", @@ -8464,20 +8591,20 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-genesis 0.4.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "alloy-sol-types", "reth-chainspec", + "reth-consensus", "reth-ethereum-consensus", "reth-ethereum-forks", "reth-evm", "reth-execution-types", "reth-primitives", - "reth-prune-types", "reth-revm", "reth-testing-utils", "revm-primitives", @@ -8489,7 +8616,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8504,7 +8631,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8521,23 +8648,22 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "eyre", "futures", "itertools 0.13.0", "metrics", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "reth-blockchain-tree", "reth-chain-state", "reth-chainspec", "reth-config", - "reth-db-api", "reth-db-common", "reth-evm", "reth-evm-ethereum", @@ -8566,8 +8692,9 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-eips", "eyre", "futures-util", "rand 0.8.5", @@ -8577,7 +8704,6 @@ dependencies = [ "reth-consensus", "reth-db", "reth-db-common", - "reth-ethereum-engine-primitives", "reth-evm", "reth-execution-types", "reth-exex", @@ -8591,6 +8717,7 @@ dependencies = [ "reth-provider", "reth-tasks", "reth-transaction-pool", + "reth-trie-db", "tempfile", "thiserror", "tokio", @@ -8598,7 +8725,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8614,7 +8741,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.0.7" +version = "1.1.0" dependencies = [ "serde", "serde_json", @@ -8623,7 +8750,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8647,7 +8774,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.0.7" +version = "1.1.0" dependencies = [ "async-trait", "bytes 1.7.2", @@ -8669,15 +8796,15 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.0.7" +version = "1.1.0" dependencies = [ "bitflags 2.6.0", "byteorder", "criterion", "dashmap 6.1.0", "derive_more 1.0.0", - "indexmap 2.5.0", - "parking_lot 0.12.3", + "indexmap 2.6.0", + "parking_lot", "pprof", "rand 0.8.5", "rand_xorshift", @@ -8690,7 +8817,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.0.7" +version = "1.1.0" dependencies = [ "bindgen 0.70.1", "cc", @@ -8698,7 +8825,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.0.7" +version = "1.1.0" dependencies = [ "futures", "metrics", @@ -8709,14 +8836,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.0.7" +version = "1.1.0" dependencies = [ "futures-util", "if-addrs", @@ -8730,7 +8857,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ 
-8747,7 +8874,7 @@ dependencies = [ "futures", "itertools 0.13.0", "metrics", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "pprof", "rand 0.8.5", @@ -8790,10 +8917,10 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", - "alloy-rpc-types-admin 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-rpc-types-admin 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "auto_impl", "derive_more 1.0.0", "enr", @@ -8812,14 +8939,14 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", "auto_impl", "derive_more 1.0.0", "futures", - "parking_lot 0.12.3", + "parking_lot", "reth-consensus", "reth-eth-wire-types", "reth-network-peers", @@ -8832,7 +8959,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8848,7 +8975,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.0.7" +version = "1.1.0" dependencies = [ "humantime-serde", "reth-ethereum-forks", @@ -8861,7 +8988,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.0.7" +version = "1.1.0" dependencies = [ "anyhow", "bincode", @@ -8879,24 +9006,29 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-rpc-types-engine", + "eyre", + "reth-beacon-consensus", + "reth-bsc-consensus", + "reth-consensus", "reth-engine-primitives", "reth-evm", "reth-network-api", + "reth-node-core", "reth-node-types", "reth-payload-builder", "reth-payload-primitives", "reth-primitives", "reth-provider", - "reth-rpc-eth-api", "reth-tasks", "reth-transaction-pool", ] [[package]] name = "reth-node-builder" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -8921,6 +9053,7 @@ dependencies = [ "reth-db-api", "reth-db-common", "reth-downloaders", + "reth-engine-local", "reth-engine-service", "reth-engine-tree", "reth-engine-util", @@ -8962,8 +9095,10 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "clap", @@ -8988,8 +9123,6 @@ dependencies = [ "reth-network-peers", "reth-primitives", "reth-prune-types", - "reth-rpc-api", - "reth-rpc-eth-api", "reth-rpc-eth-types", "reth-rpc-server-types", "reth-rpc-types-compat", @@ -9002,7 +9135,6 @@ dependencies = [ "serde", "shellexpand", "strum", - "tempfile", "thiserror", "tokio", "toml 0.8.19", @@ -9012,13 +9144,20 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.0.7" +version = "1.1.0" dependencies = [ - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-consensus", + "alloy-contract", + "alloy-eips", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-beacon", + "alloy-signer", + "alloy-sol-types", "eyre", "futures", - "futures-util", + "rand 0.8.5", "reth", "reth-auto-seal-consensus", "reth-basic-payload-builder", @@ -9030,27 +9169,32 @@ dependencies = [ "reth-e2e-test-utils", "reth-ethereum-engine-primitives", "reth-ethereum-payload-builder", + "reth-evm", "reth-evm-ethereum", "reth-exex", "reth-network", "reth-node-api", "reth-node-builder", - "reth-node-core", "reth-payload-builder", 
"reth-primitives", "reth-provider", + "reth-revm", "reth-rpc", "reth-tasks", "reth-tracing", "reth-transaction-pool", + "reth-trie-db", + "revm", "serde_json", "tokio", ] [[package]] name = "reth-node-events" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "futures", @@ -9059,7 +9203,6 @@ dependencies = [ "reth-beacon-consensus", "reth-network", "reth-network-api", - "reth-primitives", "reth-primitives-traits", "reth-provider", "reth-prune", @@ -9071,7 +9214,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.0.7" +version = "1.1.0" dependencies = [ "eyre", "http 1.1.0", @@ -9080,14 +9223,13 @@ dependencies = [ "metrics-exporter-prometheus", "metrics-process", "metrics-util", - "procfs", + "procfs 0.16.0", "reqwest 0.12.8", - "reth-chainspec", "reth-db-api", "reth-metrics", "reth-provider", "reth-tasks", - "socket2 0.5.7", + "socket2", "tikv-jemalloc-ctl", "tokio", "tower 0.4.13", @@ -9097,19 +9239,24 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.0.7" +version = "1.1.0" dependencies = [ "reth-chainspec", "reth-db-api", "reth-engine-primitives", + "reth-primitives", + "reth-primitives-traits", + "reth-trie-db", ] [[package]] name = "reth-optimism-chainspec" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-chains", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-consensus", + "alloy-eips", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "derive_more 1.0.0", "once_cell", @@ -9124,13 +9271,15 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "alloy-rlp", "clap", "eyre", "futures-util", + "op-alloy-consensus", + "proptest", "reth-chainspec", "reth-cli", "reth-cli-commands", @@ -9168,8 +9317,9 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "reth-chainspec", "reth-consensus", @@ -9183,14 +9333,16 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", + "derive_more 1.0.0", "op-alloy-consensus", "reth-chainspec", + "reth-consensus", "reth-ethereum-forks", "reth-evm", "reth-execution-errors", @@ -9203,14 +9355,13 @@ dependencies = [ "reth-revm", "revm", "revm-primitives", - "thiserror", "tokio", "tracing", ] [[package]] name = "reth-optimism-forks" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -9221,21 +9372,17 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "alloy-rpc-types-engine", - "async-trait", "clap", "eyre", - "jsonrpsee", - "jsonrpsee-types", "op-alloy-consensus", "op-alloy-rpc-types-engine", - "parking_lot 0.12.3", - "reqwest 0.12.8", + "parking_lot", "reth", "reth-auto-seal-consensus", "reth-basic-payload-builder", @@ -9244,8 +9391,8 @@ dependencies = [ "reth-chainspec", 
"reth-consensus", "reth-db", - "reth-discv5", "reth-e2e-test-utils", + "reth-engine-local", "reth-evm", "reth-network", "reth-node-api", @@ -9260,27 +9407,25 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-revm", - "reth-rpc", - "reth-rpc-eth-api", - "reth-rpc-eth-types", - "reth-rpc-types-compat", "reth-tracing", "reth-transaction-pool", + "reth-trie-db", + "revm", "serde", "serde_json", - "thiserror", "tokio", - "tracing", ] [[package]] name = "reth-optimism-payload-builder" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-engine", + "op-alloy-consensus", "op-alloy-rpc-types-engine", "reth-basic-payload-builder", "reth-chain-state", @@ -9300,7 +9445,6 @@ dependencies = [ "reth-transaction-pool", "reth-trie", "revm", - "revm-primitives", "sha2 0.10.8", "thiserror", "tracing", @@ -9308,17 +9452,18 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "reth-primitives", - "reth-primitives-traits", ] [[package]] name = "reth-optimism-rpc" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rpc-types", @@ -9328,7 +9473,7 @@ dependencies = [ "op-alloy-consensus", "op-alloy-network", "op-alloy-rpc-types", - "parking_lot 0.12.3", + "parking_lot", "reqwest 0.12.8", "reth-chainspec", "reth-evm", @@ -9356,7 +9501,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.0.7" +version = "1.1.0" dependencies = [ "reth-codecs", "reth-db-api", @@ -9367,7 +9512,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -9388,8 +9533,9 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types", "async-trait", @@ -9409,7 +9555,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-rpc-types", "reth-chainspec", @@ -9419,11 +9565,11 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "alloy-rlp", "alloy-rpc-types", @@ -9448,7 +9594,6 @@ dependencies = [ "reth-chainspec", "reth-codecs", "reth-ethereum-forks", - "reth-optimism-chainspec", "reth-primitives-traits", "reth-static-file-types", "reth-testing-utils", @@ -9465,11 +9610,11 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "alloy-rlp", "arbitrary", @@ -9493,7 +9638,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9506,8 +9651,7 @@ dependencies = [ "itertools 0.13.0", "metrics", "notify", - "once_cell", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "rayon", "reth-blockchain-tree-api", @@ -9543,7 +9687,7 @@ dependencies 
= [ [[package]] name = "reth-prune" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "assert_matches", @@ -9572,7 +9716,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -9592,11 +9736,11 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", - "reth-chainspec", - "reth-consensus-common", "reth-ethereum-forks", "reth-execution-errors", "reth-primitives", @@ -9609,17 +9753,18 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", "alloy-eips", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-network", "alloy-primitives", "alloy-rlp", "alloy-rpc-types", - "alloy-rpc-types-admin 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-rpc-types-admin 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-rpc-types-beacon", "alloy-rpc-types-debug", "alloy-rpc-types-eth", "alloy-rpc-types-mev", @@ -9637,20 +9782,22 @@ dependencies = [ "jsonrpsee", "jsonrpsee-types", "jsonwebtoken", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", "reth-bsc-consensus", "reth-bsc-primitives", "reth-chainspec", + "reth-consensus", "reth-consensus-common", "reth-errors", + "reth-ethereum-consensus", "reth-evm", "reth-evm-ethereum", "reth-network-api", "reth-network-peers", "reth-network-types", - "reth-node-api", + "reth-payload-validator", "reth-primitives", "reth-provider", "reth-revm", @@ -9679,13 +9826,13 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-json-rpc", "alloy-primitives", "alloy-rpc-types", - "alloy-rpc-types-admin 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-rpc-types-admin 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-rpc-types-anvil", "alloy-rpc-types-beacon", "alloy-rpc-types-debug", @@ -9698,15 +9845,16 @@ dependencies = [ "jsonrpsee", "reth-engine-primitives", "reth-network-peers", - "reth-primitives", "reth-rpc-eth-api", - "serde_json", + "serde", + "serde_with", ] [[package]] name = "reth-rpc-api-testing-util" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types", "alloy-rpc-types-eth", @@ -9724,15 +9872,13 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.0.7" +version = "1.1.0" dependencies = [ - "alloy-network", + "alloy-eips", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-rpc-types-trace", - "alloy-serde", "clap", "http 1.1.0", "jsonrpsee", @@ -9741,6 +9887,7 @@ dependencies = [ "reth-beacon-consensus", "reth-bsc-consensus", "reth-chainspec", + "reth-consensus", "reth-engine-primitives", "reth-ethereum-engine-primitives", "reth-evm", @@ -9749,7 +9896,6 @@ dependencies = [ "reth-metrics", "reth-network-api", "reth-network-peers", - "reth-node-api", "reth-node-core", "reth-payload-builder", "reth-primitives", @@ -9763,13 +9909,13 @@ dependencies = [ "reth-rpc-server-types", "reth-rpc-types-compat", "reth-tasks", - "reth-tokio-util", "reth-tracing", "reth-transaction-pool", "serde", "serde_json", "thiserror", "tokio", + "tokio-util", "tower 
0.4.13", "tower-http", "tracing", @@ -9777,7 +9923,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9813,8 +9959,9 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-dyn-abi", "alloy-eips", "alloy-json-rpc", @@ -9829,13 +9976,14 @@ dependencies = [ "futures", "jsonrpsee", "jsonrpsee-types", - "parking_lot 0.12.3", + "parking_lot", "reth-bsc-primitives", "reth-chainspec", "reth-errors", "reth-evm", "reth-execution-types", "reth-network-api", + "reth-node-api", "reth-primitives", "reth-provider", "reth-revm", @@ -9854,17 +10002,17 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rpc-types", "alloy-rpc-types-eth", - "alloy-serde", "alloy-sol-types", "derive_more 1.0.0", "futures", + "itertools 0.13.0", "jsonrpsee-core", "jsonrpsee-types", "metrics", @@ -9898,7 +10046,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-rpc-types-engine", "http 1.1.0", @@ -9913,38 +10061,39 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "jsonrpsee-core", "jsonrpsee-types", "reth-errors", "reth-network-api", - "reth-primitives", "serde", "strum", ] [[package]] name = "reth-rpc-types-compat" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-rpc-types", "alloy-rpc-types-engine", "alloy-rpc-types-eth", - "alloy-serde", "reth-primitives", "reth-trie-common", + "serde", "serde_json", ] [[package]] name = "reth-stages" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9986,7 +10135,6 @@ dependencies = [ "reth-testing-utils", "reth-trie", "reth-trie-db", - "serde_json", "tempfile", "thiserror", "tokio", @@ -9995,7 +10143,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "aquamarine", @@ -10023,7 +10171,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -10040,17 +10188,14 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "assert_matches", - "parking_lot 0.12.3", + "parking_lot", "rayon", - "reth-chainspec", "reth-db", "reth-db-api", - "reth-nippy-jar", - "reth-node-types", "reth-provider", "reth-prune-types", "reth-stages", @@ -10065,7 +10210,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "clap", @@ -10076,8 +10221,9 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "auto_impl", @@ -10094,7 +10240,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10106,7 +10252,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.0.7" +version = "1.1.0" dependencies = [ "auto_impl", "dyn-clone", @@ -10123,11 
+10269,11 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "rand 0.8.5", "reth-primitives", @@ -10136,7 +10282,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.0.7" +version = "1.1.0" dependencies = [ "tokio", "tokio-stream", @@ -10145,7 +10291,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.0.7" +version = "1.1.0" dependencies = [ "clap", "eyre", @@ -10159,7 +10305,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10172,7 +10318,7 @@ dependencies = [ "criterion", "futures-util", "metrics", - "parking_lot 0.12.3", + "parking_lot", "paste", "pprof", "proptest", @@ -10204,20 +10350,19 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rlp", "auto_impl", "bincode", "criterion", - "derive_more 1.0.0", "itertools 0.13.0", "metrics", "proptest", "proptest-arbitrary-interop", "rayon", - "reth-chainspec", "reth-execution-errors", "reth-metrics", "reth-primitives", @@ -10228,17 +10373,16 @@ dependencies = [ "serde", "serde_json", "serde_with", - "tokio", "tracing", "triehash", ] [[package]] name = "reth-trie-common" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-consensus", - "alloy-genesis 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -10259,26 +10403,22 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.0.7" +version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rlp", - "auto_impl", "derive_more 1.0.0", - "itertools 0.13.0", "metrics", "proptest", "proptest-arbitrary-interop", - "rayon", "reth-chainspec", "reth-db", "reth-db-api", "reth-execution-errors", "reth-metrics", - "reth-node-types", "reth-primitives", "reth-provider", - "reth-stages-types", "reth-storage-errors", "reth-trie", "reth-trie-common", @@ -10286,15 +10426,13 @@ dependencies = [ "serde", "serde_json", "similar-asserts", - "tokio", - "tokio-stream", "tracing", "triehash", ] [[package]] name = "reth-trie-parallel" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10308,7 +10446,6 @@ dependencies = [ "rand 0.8.5", "rayon", "reth-db", - "reth-db-api", "reth-execution-errors", "reth-metrics", "reth-primitives", @@ -10322,7 +10459,7 @@ dependencies = [ [[package]] name = "reth-trie-prefetch" -version = "1.0.7" +version = "1.1.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10347,10 +10484,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-trie-sparse" +version = "1.1.0" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "assert_matches", + "criterion", + "itertools 0.13.0", + "pretty_assertions", + "proptest", + "rand 0.8.5", + "reth-testing-utils", + "reth-tracing", + "reth-trie", + "reth-trie-common", + "smallvec", + "thiserror", +] + [[package]] name = "revm" -version = "14.0.3" -source = "git+https://github.com/bnb-chain/revm?rev=v1.0.5#a9752c97d2c742d25405a4a9a698f30f5007da6e" +version = "17.0.0" +source = 
"git+https://github.com/bnb-chain/revm?tag=v1.0.6#d66170e712460ae766fc26a063f106658ce33e9d" dependencies = [ "auto_impl", "cfg-if", @@ -10363,9 +10520,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.8.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43c44af0bf801f48d25f7baf25cf72aff4c02d610f83b428175228162fef0246" +checksum = "1e29c662f7887f3b659d4b0fd234673419a8fcbeaa1ecc29bf7034c0a75cc8ea" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -10382,8 +10539,8 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "10.0.3" -source = "git+https://github.com/bnb-chain/revm?rev=v1.0.5#a9752c97d2c742d25405a4a9a698f30f5007da6e" +version = "13.0.0" +source = "git+https://github.com/bnb-chain/revm?tag=v1.0.6#d66170e712460ae766fc26a063f106658ce33e9d" dependencies = [ "revm-primitives", "serde", @@ -10391,8 +10548,8 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "11.0.3" -source = "git+https://github.com/bnb-chain/revm?rev=v1.0.5#a9752c97d2c742d25405a4a9a698f30f5007da6e" +version = "14.0.0" +source = "git+https://github.com/bnb-chain/revm?tag=v1.0.6#d66170e712460ae766fc26a063f106658ce33e9d" dependencies = [ "alloy-rlp", "aurora-engine-modexp", @@ -10419,8 +10576,8 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "10.0.0" -source = "git+https://github.com/bnb-chain/revm?rev=v1.0.5#a9752c97d2c742d25405a4a9a698f30f5007da6e" +version = "13.0.0" +source = "git+https://github.com/bnb-chain/revm?tag=v1.0.6#d66170e712460ae766fc26a063f106658ce33e9d" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -11037,7 +11194,7 @@ version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "memchr", "ryu", @@ -11089,15 +11246,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.10.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9720086b3357bcb44fce40117d769a4d068c70ecfa190850a980a71755f66fcc" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" dependencies = [ "base64 0.22.1", "chrono", "hex 0.4.3", "indexmap 1.9.3", - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_derive", "serde_json", @@ -11107,9 +11264,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.10.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f1abbfe725f27678f4663bcacb75a83e829fd464c25d78dd038a3a29e307cec" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ "darling", "proc-macro2", @@ -11126,7 +11283,7 @@ dependencies = [ "futures", "log", "once_cell", - "parking_lot 0.12.3", + "parking_lot", "scc", "serial_test_derive", ] @@ -11297,6 +11454,10 @@ name = "similar" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" +dependencies = [ + "bstr 1.10.0", + "unicode-segmentation", +] [[package]] name = "similar-asserts" @@ -11305,6 +11466,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe85670573cd6f0fa97940f26e7e6601213c3b0555246c24234131f88c5709e" dependencies = [ "console", + "serde", "similar", ] @@ -11328,9 +11490,9 @@ checksum = 
"38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] name = "sketches-ddsketch" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c" +checksum = "c1e9a774a6c28142ac54bb25d25562e6bcf957493a184f15ad4eebccb23e410a" [[package]] name = "slab" @@ -11357,16 +11519,6 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -11545,9 +11697,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.5" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab661c8148c2261222a4d641ad5477fd4bea79406a99056096a0b41b35617a5" +checksum = "edf42e81491fb8871b74df3d222c64ae8cbc1269ea509fa768a3ed3e1b0ac8cb" dependencies = [ "paste", "proc-macro2", @@ -11653,7 +11805,7 @@ source = "git+https://github.com/bnb-chain/tendermint-rs-parlia?rev=8c21ccbd58a1 dependencies = [ "anomaly", "async-trait", - "bstr", + "bstr 0.2.17", "byteorder", "bytes 0.5.6", "chrono", @@ -11947,10 +12099,10 @@ dependencies = [ "bytes 1.7.2", "libc", "mio 1.0.2", - "parking_lot 0.12.3", + "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", "windows-sys 0.52.0", ] @@ -12066,7 +12218,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", @@ -12338,9 +12490,10 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "resolv-conf", + "serde", "smallvec", "thiserror", "tokio", @@ -12398,6 +12551,18 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "uint" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" +dependencies = [ + "byteorder", + "crunchy", + "hex 0.4.3", + "static_assertions", +] + [[package]] name = "unarray" version = "0.1.4" @@ -12562,6 +12727,17 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "visibility" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "wait-timeout" version = "0.2.0" @@ -12688,6 +12864,20 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmtimer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7ed9d8b15c7fb594d72bfb4b5a276f3d2029333cd93a932f376f5937f6f80ee" +dependencies = [ + "futures", + "js-sys", + "parking_lot", + "pin-utils", + "slab", + "wasm-bindgen", +] + [[package]] name = "web-sys" version = "0.3.70" diff --git a/Cargo.toml b/Cargo.toml index 36e689d7f0..b7f2415304 100644 
--- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [workspace.package] -version = "1.0.7" +version = "1.1.0" edition = "2021" -rust-version = "1.81" +rust-version = "1.82" license = "MIT OR Apache-2.0" homepage = "https://paradigmxyz.github.io/reth" repository = "https://github.com/paradigmxyz/reth" @@ -134,6 +134,7 @@ members = [ "crates/trie/db", "crates/trie/parallel/", "crates/trie/prefetch/", + "crates/trie/sparse", "crates/trie/trie", "examples/beacon-api-sidecar-fetcher/", "examples/beacon-api-sse/", @@ -155,6 +156,7 @@ members = [ "examples/rpc-db/", "examples/stateful-precompile/", "examples/txpool-tracing/", + "examples/custom-beacon-withdrawals", "testing/ef-tests/", "testing/testing-utils", ] @@ -194,7 +196,9 @@ equatable_if_let = "warn" explicit_into_iter_loop = "warn" explicit_iter_loop = "warn" flat_map_option = "warn" +from_iter_instead_of_collect = "warn" if_not_else = "warn" +if_then_some_else_none = "warn" implicit_clone = "warn" imprecise_flops = "warn" iter_on_empty_collections = "warn" @@ -295,6 +299,13 @@ codegen-units = 1 inherits = "release" lto = "fat" +[profile.reproducible] +inherits = "release" +debug = false +panic = "abort" +codegen-units = 1 +overflow-checks = true + [workspace.dependencies] # reth bsc-reth = { path = "crates/bsc/bin" } @@ -399,7 +410,7 @@ reth-primitives-traits = { path = "crates/primitives-traits", default-features = reth-provider = { path = "crates/storage/provider" } reth-prune = { path = "crates/prune/prune" } reth-prune-types = { path = "crates/prune/types" } -reth-revm = { path = "crates/revm" } +reth-revm = { path = "crates/revm", default-features = false } reth-rpc = { path = "crates/rpc/rpc" } reth-rpc-api = { path = "crates/rpc/rpc-api" } reth-rpc-api-testing-util = { path = "crates/rpc/rpc-testing-util" } @@ -429,13 +440,9 @@ reth-trie-parallel = { path = "crates/trie/parallel" } reth-trie-prefetch = { path = "crates/trie/prefetch" } # revm -revm = { version = "14.0.3", features = [ - "std", - "secp256k1", - "blst", -], default-features = false } -revm-inspectors = "0.8.1" -revm-primitives = { version = "10.0.0", features = [ +revm = { version = "17.0.0", features = ["std"], default-features = false } +revm-inspectors = "0.10.0" +revm-primitives = { version = "13.0.0", features = [ "std", ], default-features = false } @@ -443,53 +450,54 @@ revm-primitives = { version = "10.0.0", features = [ alloy-chains = "0.1.33" alloy-dyn-abi = "0.8.0" alloy-json-abi = "0.8.0" -alloy-primitives = { version = "0.8.7", default-features = false } +alloy-primitives = { version = "0.8.9", default-features = false } alloy-rlp = "0.3.4" alloy-sol-types = "0.8.0" -alloy-trie = { version = "0.6", default-features = false } +alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.4.2", default-features = false } -alloy-eips = { version = "0.4.2", default-features = false } -alloy-genesis = { version = "0.4.2", default-features = false } -alloy-json-rpc = { version = "0.4.2", default-features = false } -alloy-network = { version = "0.4.2", default-features = false } -alloy-network-primitives = { version = "0.4.2", default-features = false } -alloy-node-bindings = { version = "0.4.2", default-features = false } -alloy-provider = { version = "0.4.2", features = [ +alloy-consensus = { version = "0.5.4", default-features = false } +alloy-contract = { version = "0.5.4", default-features = false } +alloy-eips = { version = "0.5.4", default-features = false } +alloy-genesis = { version = "0.5.4", default-features = false } 
+alloy-json-rpc = { version = "0.5.4", default-features = false } +alloy-network = { version = "0.5.4", default-features = false } +alloy-network-primitives = { version = "0.5.4", default-features = false } +alloy-node-bindings = { version = "0.5.4", default-features = false } +alloy-provider = { version = "0.5.4", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.4.2", default-features = false } -alloy-rpc-client = { version = "0.4.2", default-features = false } -alloy-rpc-types = { version = "0.4.2", features = [ +alloy-pubsub = { version = "0.5.4", default-features = false } +alloy-rpc-client = { version = "0.5.4", default-features = false } +alloy-rpc-types = { version = "0.5.4", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.4.2", default-features = false } -alloy-rpc-types-anvil = { version = "0.4.2", default-features = false } -alloy-rpc-types-beacon = { version = "0.4.2", default-features = false } -alloy-rpc-types-debug = { version = "0.4.2", default-features = false } -alloy-rpc-types-engine = { version = "0.4.2", default-features = false } -alloy-rpc-types-eth = { version = "0.4.2", default-features = false } -alloy-rpc-types-mev = { version = "0.4.2", default-features = false } -alloy-rpc-types-trace = { version = "0.4.2", default-features = false } -alloy-rpc-types-txpool = { version = "0.4.2", default-features = false } -alloy-serde = { version = "0.4.2", default-features = false } -alloy-signer = { version = "0.4.2", default-features = false } -alloy-signer-local = { version = "0.4.2", default-features = false } -alloy-transport = { version = "0.4.2" } -alloy-transport-http = { version = "0.4.2", features = [ +alloy-rpc-types-admin = { version = "0.5.4", default-features = false } +alloy-rpc-types-anvil = { version = "0.5.4", default-features = false } +alloy-rpc-types-beacon = { version = "0.5.4", default-features = false } +alloy-rpc-types-debug = { version = "0.5.4", default-features = false } +alloy-rpc-types-engine = { version = "0.5.4", default-features = false } +alloy-rpc-types-eth = { version = "0.5.4", default-features = false } +alloy-rpc-types-mev = { version = "0.5.4", default-features = false } +alloy-rpc-types-trace = { version = "0.5.4", default-features = false } +alloy-rpc-types-txpool = { version = "0.5.4", default-features = false } +alloy-serde = { version = "0.5.4", default-features = false } +alloy-signer = { version = "0.5.4", default-features = false } +alloy-signer-local = { version = "0.5.4", default-features = false } +alloy-transport = { version = "0.5.4" } +alloy-transport-http = { version = "0.5.4", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.4.2", default-features = false } -alloy-transport-ws = { version = "0.4.2", default-features = false } +alloy-transport-ipc = { version = "0.5.4", default-features = false } +alloy-transport-ws = { version = "0.5.4", default-features = false } # op -op-alloy-rpc-types = "0.4" -op-alloy-rpc-types-engine = "0.4" -op-alloy-network = "0.4" -op-alloy-consensus = "0.4" +op-alloy-rpc-types = "0.5" +op-alloy-rpc-types-engine = "0.5" +op-alloy-network = "0.5" +op-alloy-consensus = "0.5" # misc -aquamarine = "0.5" +aquamarine = "0.6" auto_impl = "1" backon = { version = "1.2", default-features = false, features = [ "std-blocking-sleep", @@ -517,7 +525,9 @@ notify = { version = "6.1.1", default-features = false, features = [ "macos_fsevent", ] } nybbles = "0.2.1" -once_cell = "1.19" 
+once_cell = { version = "1.19", default-features = false, features = [ + "critical-section", +] } parking_lot = "0.12" paste = "1.0" rand = "0.8.5" @@ -540,11 +550,11 @@ url = "2.3" zstd = "0.13" # metrics -metrics = "0.23.0" +metrics = "0.24.0" metrics-derive = "0.1" -metrics-exporter-prometheus = { version = "0.15.0", default-features = false } -metrics-process = "2.1.0" -metrics-util = { default-features = false, version = "0.17.0" } +metrics-exporter-prometheus = { version = "0.16.0", default-features = false } +metrics-process = { version = "2.3.1"} +metrics-util = { default-features = false, version = "0.18.0" } # proc-macros proc-macro2 = "1.0" @@ -569,7 +579,7 @@ tower = "0.4" tower-http = "0.5" # p2p -discv5 = "0.7.0" +discv5 = "0.8.0" if-addrs = "0.13" # rpc @@ -607,7 +617,7 @@ pprof = "0.13" proptest = "1.4" proptest-derive = "0.5" serial_test = { default-features = false, version = "3" } -similar-asserts = { default-features = false, version = "1.5.0" } +similar-asserts = { version = "1.5.0", features = ["serde"] } tempfile = "3.8" test-fuzz = "6" tikv-jemalloc-ctl = "0.6" @@ -615,20 +625,20 @@ tikv-jemallocator = "0.6" tracy-client = "0.17.3" [patch.crates-io] -revm = { git = "https://github.com/bnb-chain/revm", rev = "v1.0.5" } -revm-interpreter = { git = "https://github.com/bnb-chain/revm", rev = "v1.0.5" } -revm-primitives = { git = "https://github.com/bnb-chain/revm", rev = "v1.0.5" } -alloy-rpc-types-eth = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-consensus = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-eips = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-network = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-network-primitives = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-serde = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-signer = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-signer-local = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-provider = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-transport = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-transport-http = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-json-rpc = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-rpc-client = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } -alloy-rpc-types-engine = { git = "https://github.com/bnb-chain/alloy", rev = "718060680134e6bb40d97d3c6fb56fd1950ced36" } +revm = { git = "https://github.com/bnb-chain/revm", tag = "v1.0.6" } +revm-interpreter = { git = "https://github.com/bnb-chain/revm", tag = "v1.0.6" } +revm-primitives = { git = "https://github.com/bnb-chain/revm", tag = "v1.0.6" } +alloy-rpc-types-eth = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } +alloy-consensus = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } +alloy-eips = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } 
+alloy-network = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } +alloy-network-primitives = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } +alloy-serde = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } +alloy-signer = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } +alloy-signer-local = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } +alloy-provider = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } +alloy-transport = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } +alloy-transport-http = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } +alloy-json-rpc = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } +alloy-rpc-client = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } +alloy-rpc-types-engine = { git = "https://github.com/bnb-chain/alloy", tag = "v1.0.3" } diff --git a/Dockerfile.reproducible b/Dockerfile.reproducible new file mode 100644 index 0000000000..12c12dd7c7 --- /dev/null +++ b/Dockerfile.reproducible @@ -0,0 +1,37 @@ +# Use the Rust 1.82 image based on Debian Bullseye +FROM rust:1.82-bullseye@sha256:c42c8ca762560c182ba30edda0e0d71a8604040af2672370559d7e854653c66d AS builder + +# Install specific version of libclang-dev +RUN apt-get update && apt-get install -y libclang-dev=1:11.0-51+nmu5 + +# Clone the repository at the specific branch +RUN git clone https://github.com/paradigmxyz/reth /app +WORKDIR /app + +# Checkout the reproducible-build branch +RUN git checkout reproducible-build + +# Get the latest commit timestamp and set SOURCE_DATE_EPOCH +RUN SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) && \ + echo "SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH" >> /etc/environment + +# Set environment variables for reproducibility +ARG RUSTFLAGS="-C target-feature=+crt-static -C link-arg=-Wl,--build-id=none -Clink-arg=-static-libgcc -C metadata='' --remap-path-prefix $(pwd)=." +ENV SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH \ + CARGO_INCREMENTAL=0 \ + LC_ALL=C \ + TZ=UTC \ + RUSTFLAGS="${RUSTFLAGS}" + +# Set the default features if not provided +ARG FEATURES="jemalloc asm-keccak" + +# Build the project with the reproducible settings +RUN . /etc/environment && \ + cargo build --bin reth --features "${FEATURES}" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu + +# Create a minimal final image with just the binary +FROM scratch AS binaries + +# Copy the compiled binary from the builder stage +COPY --from=builder /app/target/x86_64-unknown-linux-gnu/reproducible/reth /reth diff --git a/HARDFORK-CHECKLIST.md b/HARDFORK-CHECKLIST.md new file mode 100644 index 0000000000..80ebfc20c9 --- /dev/null +++ b/HARDFORK-CHECKLIST.md @@ -0,0 +1,21 @@ +# Non-exhaustive checklist for integrating new changes for an upcoming hard fork/devnet + +## Introducing new EIP types or changes to primitive types + +- Make required changes to primitive data structures on [alloy](https://github.com/alloy-rs/alloy) +- All new EIP data structures/constants/helpers etc. go into the `alloy-eips` crate at first. +- New transaction types go into `alloy-consensus` +- If there are changes to existing data structures, such as `Header` or `Block`, apply them to the types in `alloy-consensus` (e.g. new `request_hashes` field in Prague) + +## Engine API + +- If there are changes to the engine API (e.g. a new `engine_newPayloadVx` and `engine_getPayloadVx` pair) add the new types to the `alloy-rpc-types-engine` crate. 
+- If there are new parameters to the `engine_newPayloadVx` endpoint, add them to the `ExecutionPayloadSidecar` container type. This type contains all additional parameters that are required to convert an `ExecutionPayload` to an EL block. + +## Reth changes + +### Updates to the engine API + +- Add the new endpoints to the `EngineApi` trait and implement them. +- Update the `ExecutionPayload` + `ExecutionPayloadSidecar` to `Block` conversion if there are any additional parameters. +- Update version-specific validation checks in the `EngineValidator` trait. \ No newline at end of file diff --git a/Makefile b/Makefile index d6b0b7b33a..471a8ac490 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,7 @@ # Heavily inspired by Lighthouse: https://github.com/sigp/lighthouse/blob/693886b94176faa4cb450f024696cb69cda2fe58/Makefile .DEFAULT_GOAL := help +GIT_SHA ?= $(shell git rev-parse HEAD) GIT_TAG ?= $(shell git describe --tags --abbrev=0) BIN_DIR = "dist/bin" @@ -68,6 +69,16 @@ install-bsc: ## Build and install the bsc-reth binary under `~/.cargo/bin`. build: ## Build the reth binary into `target` directory. cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" +SOURCE_DATE_EPOCH := $(shell git log -1 --pretty=%ct) +.PHONY: reproducible +reproducible: ## Build the reth binary into `target` directory with reproducible builds. Only works for x86_64-unknown-linux-gnu currently. + SOURCE_DATE_EPOCH=$(SOURCE_DATE_EPOCH) \ + CARGO_INCREMENTAL=0 \ + LC_ALL=C \ + TZ=UTC \ + RUSTFLAGS="-C target-feature=+crt-static -C link-arg=-Wl,--build-id=none -Clink-arg=-static-libgcc -C metadata='' --remap-path-prefix $$(pwd)=." \ + cargo build --bin reth --features "$(FEATURES)" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu + .PHONY: build-debug build-debug: ## Build the reth binary into `target/debug` directory. cargo build --bin reth --features "$(FEATURES)" @@ -225,6 +236,14 @@ ef-tests: $(EF_TESTS_DIR) ## Runs Ethereum Foundation tests. docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag. $(call docker_build_push,$(GIT_TAG),$(GIT_TAG)) +# Note: This requires a buildx builder with emulation support. For example: +# +# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` +# `docker buildx create --use --driver docker-container --name cross-builder` +.PHONY: docker-build-push-git-sha +docker-build-push-git-sha: ## Build and push a cross-arch Docker image tagged with the latest git sha. + $(call docker_build_push,$(GIT_SHA),$(GIT_SHA)) + # Note: This requires a buildx builder with emulation support. For example: # # `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` @@ -269,6 +288,14 @@ endef op-docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag. $(call op_docker_build_push,$(GIT_TAG),$(GIT_TAG)) +# Note: This requires a buildx builder with emulation support. For example: +# +# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` +.PHONY: op-docker-build-push-git-sha +op-docker-build-push-git-sha: ## Build and push a cross-arch Docker image tagged with the latest git sha. + $(call op_docker_build_push,$(GIT_SHA),$(GIT_SHA)) + # Note: This requires a buildx builder with emulation support.
For example: # # `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` @@ -547,6 +574,7 @@ test: pr: make lint && \ make update-book-cli && \ + cargo docs --document-private-items && \ make test check-features: diff --git a/SECURITY.md b/SECURITY.md index 5260d529f5..bea27ad114 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,4 +2,4 @@ ## Reporting a Vulnerability -Contact georgios at paradigm.xyz. +Contact [security@ithaca.xyz](mailto:security@ithaca.xyz). diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index e4e40daeca..03844633a9 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -76,9 +76,16 @@ reth-tracing.workspace = true [features] default = ["jemalloc"] -asm-keccak = ["reth-primitives/asm-keccak"] - -jemalloc = ["reth-cli-util/jemalloc"] +asm-keccak = [ + "reth-primitives/asm-keccak", + "reth-node-core/asm-keccak", + "alloy-primitives/asm-keccak" +] + +jemalloc = [ + "reth-cli-util/jemalloc", + "reth-node-core/jemalloc" +] jemalloc-prof = ["reth-cli-util/jemalloc-prof"] tracy-allocator = ["reth-cli-util/tracy-allocator"] diff --git a/bin/reth-bench/src/authenticated_transport.rs b/bin/reth-bench/src/authenticated_transport.rs index c946d244de..72c4fd2988 100644 --- a/bin/reth-bench/src/authenticated_transport.rs +++ b/bin/reth-bench/src/authenticated_transport.rs @@ -84,7 +84,8 @@ impl InnerTransport { let (auth, claims) = build_auth(jwt).map_err(|e| AuthenticatedTransportError::InvalidJwt(e.to_string()))?; - let inner = WsConnect { url: url.to_string(), auth: Some(auth) } + let inner = WsConnect::new(url.clone()) + .with_auth(auth) .into_service() .await .map(Self::Ws) diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index a8c18b48a2..ca5359fb8c 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -37,9 +37,8 @@ pub struct Command { impl Command { /// Execute `benchmark new-payload-fcu` command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { - let cloned_args = self.benchmark.clone(); let BenchContext { benchmark_mode, block_provider, auth_provider, mut next_block } = - BenchContext::new(&cloned_args, self.rpc_url).await?; + BenchContext::new(&self.benchmark, self.rpc_url).await?; let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index e6392318a5..85342d1af7 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -35,11 +35,10 @@ pub struct Command { impl Command { /// Execute `benchmark new-payload-only` command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { - let cloned_args = self.benchmark.clone(); // TODO: this could be just a function I guess, but destructuring makes the code slightly // more readable than a 4 element tuple. 
let BenchContext { benchmark_mode, block_provider, auth_provider, mut next_block } = - BenchContext::new(&cloned_args, self.rpc_url).await?; + BenchContext::new(&self.benchmark, self.rpc_url).await?; let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { diff --git a/bin/reth-bench/src/valid_payload.rs b/bin/reth-bench/src/valid_payload.rs index 6353aea712..b00f4ddcd6 100644 --- a/bin/reth-bench/src/valid_payload.rs +++ b/bin/reth-bench/src/valid_payload.rs @@ -215,14 +215,6 @@ pub(crate) async fn call_new_payload>( versioned_hashes: Vec, ) -> TransportResult { match payload { - ExecutionPayload::V4(_payload) => { - todo!("V4 payloads not supported yet"); - // auth_provider - // .new_payload_v4_wait(payload, versioned_hashes, parent_beacon_block_root, ...) - // .await?; - // - // Ok(EngineApiMessageVersion::V4) - } ExecutionPayload::V3(payload) => { // We expect the caller let parent_beacon_block_root = parent_beacon_block_root diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 476f9cd5ce..ffd1998b24 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -96,22 +96,28 @@ backon.workspace = true similar-asserts.workspace = true [dev-dependencies] -reth-discv4.workspace = true tempfile.workspace = true [features] default = ["jemalloc"] -dev = ["reth-cli-commands/dev"] +dev = ["reth-cli-commands/arbitrary"] -asm-keccak = ["reth-node-core/asm-keccak", "reth-primitives/asm-keccak"] +asm-keccak = [ + "reth-node-core/asm-keccak", + "reth-primitives/asm-keccak", + "alloy-primitives/asm-keccak" +] jemalloc = [ "reth-cli-util/jemalloc", "reth-node-core/jemalloc", "reth-node-metrics/jemalloc", ] -jemalloc-prof = ["reth-cli-util/jemalloc"] +jemalloc-prof = [ + "reth-cli-util/jemalloc", + "reth-cli-util/jemalloc-prof" +] tracy-allocator = ["reth-cli-util/tracy-allocator"] min-error-logs = ["tracing/release_max_level_error"] diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index cca801da36..192ab67002 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -18,6 +18,7 @@ use reth_db::DatabaseEnv; use reth_ethereum_cli::chainspec::EthereumChainSpecParser; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_ethereum::{EthExecutorProvider, EthereumNode}; +use reth_node_metrics::recorder::install_prometheus_recorder; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; use tracing::info; @@ -38,7 +39,7 @@ pub struct Cli, + pub command: Commands, /// The chain this node is running. /// @@ -51,7 +52,7 @@ pub struct Cli, + pub chain: Arc, /// Add a new instance of a node. 
/// @@ -67,10 +68,11 @@ pub struct Cli, Ext: clap::Args + fmt::Debug> Cl let _guard = self.init_tracing()?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); + // Install the prometheus recorder to be sure to record all metrics + let _ = install_prometheus_recorder(); + let runner = CliRunner::default(); match self.command { Commands::Node(command) => { diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index f68be4cf65..326cc86ffe 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -22,18 +22,20 @@ use reth_errors::RethResult; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine, PayloadBuilderAttributes}; +use reth_node_api::{ + EngineApiMessageVersion, NodeTypesWithDB, NodeTypesWithEngine, PayloadBuilderAttributes, +}; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; -use reth_payload_builder::database::CachedReads; use reth_primitives::{ revm_primitives::KzgSettings, BlobTransaction, BlobTransactionSidecar, - PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, + PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, + TransactionSigned, }; use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_revm::{database::StateProviderDatabase, primitives::EnvKzgSettings}; +use reth_revm::{cached::CachedReads, database::StateProviderDatabase, primitives::EnvKzgSettings}; use reth_stages::StageId; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, @@ -222,11 +224,12 @@ impl> Command { withdrawals: None, }; let payload_config = PayloadConfig::new( - Arc::clone(&best_block), + Arc::new(SealedHeader::new(best_block.header().clone(), best_block.hash())), Bytes::default(), reth_payload_builder::EthPayloadBuilderAttributes::try_new( best_block.hash(), payload_attrs, + EngineApiMessageVersion::default() as u8, )?, ); diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index af0af96c56..020296379f 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -1,6 +1,7 @@ //! Command for debugging execution. 
use crate::{args::NetworkArgs, utils::get_single_header}; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; @@ -24,7 +25,6 @@ use reth_network_p2p::{headers::client::HeadersClient, BlockClient}; use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::BlockHashOrNumber; use reth_provider::{ BlockExecutionWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, }; diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index a95cdcabd7..68427134a8 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -4,6 +4,7 @@ use crate::{ args::NetworkArgs, utils::{get_single_body, get_single_header}, }; +use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::Parser; use reth_chainspec::ChainSpec; @@ -19,7 +20,6 @@ use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::BlockHashOrNumber; use reth_provider::{ writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index f3dd495051..782d05190a 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -1,5 +1,6 @@ //! Command for debugging merkle trie calculation. 
use crate::{args::NetworkArgs, utils::get_single_header}; +use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; @@ -18,7 +19,6 @@ use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::BlockHashOrNumber; use reth_provider::{ writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, @@ -152,12 +152,13 @@ impl> Command { provider_rw.insert_block(sealed_block.clone())?; td += sealed_block.difficulty; - let mut executor = executor_provider.batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new( + let mut executor = executor_provider.batch_executor( + StateProviderDatabase::new(LatestStateProviderRef::new( provider_rw.tx_ref(), provider_rw.static_file_provider().clone(), - ), - )); + )), + None, + ); executor.execute_and_verify_one((&sealed_block.clone().unseal(), td, None).into())?; let execution_outcome = executor.finalize(); diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 46a8dbfbef..1b1ef8bc48 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -18,7 +18,9 @@ use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage} use reth_fs_util as fs; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_node_api::{ + EngineApiMessageVersion, NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine, +}; use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthExecutorProvider}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::{ @@ -166,12 +168,17 @@ impl> Command { debug!(target: "reth::cli", filepath = %filepath.display(), ?message, "Forwarding Engine API message"); match message { StoredEngineApiMessage::ForkchoiceUpdated { state, payload_attrs } => { - let response = - beacon_engine_handle.fork_choice_updated(state, payload_attrs).await?; + let response = beacon_engine_handle + .fork_choice_updated( + state, + payload_attrs, + EngineApiMessageVersion::default(), + ) + .await?; debug!(target: "reth::cli", ?response, "Received for forkchoice updated"); } - StoredEngineApiMessage::NewPayload { payload, cancun_fields } => { - let response = beacon_engine_handle.new_payload(payload, cancun_fields).await?; + StoredEngineApiMessage::NewPayload { payload, sidecar } => { + let response = beacon_engine_handle.new_payload(payload, sidecar).await?; debug!(target: "reth::cli", ?response, "Received for new payload"); } }; diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs index f424163a24..e146912c06 100644 --- a/bin/reth/src/main.rs +++ b/bin/reth/src/main.rs @@ -33,11 +33,11 @@ pub struct EngineArgs { pub legacy: bool, /// Configure persistence threshold for engine experimental. - #[arg(long = "engine.persistence-threshold", requires = "experimental", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] + #[arg(long = "engine.persistence-threshold", conflicts_with = "legacy", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] pub persistence_threshold: u64, /// Configure the target number of blocks to keep in memory. 
- #[arg(long = "engine.memory-block-buffer-target", requires = "experimental", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] + #[arg(long = "engine.memory-block-buffer-target", conflicts_with = "legacy", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] pub memory_block_buffer_target: u64, } diff --git a/book/cli/help.rs b/book/cli/help.rs index e347e1ea5d..963f53deb0 100755 --- a/book/cli/help.rs +++ b/book/cli/help.rs @@ -320,7 +320,7 @@ fn preprocess_help(s: &str) -> Cow<'_, str> { (r"default: reth/.*/\w+", "default: reth//"), // Remove rpc.max-tracing-requests default value ( - r"(rpc.max-tracing-requests \n.*\n.*\n.*)\[default: \d+\]", + r"(rpc.max-tracing-requests \n.*\n.*\n.*\n.*\n.*)\[default: \d+\]", r"$1[default: ]", ), ]; diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index 9e3b32cc0b..17a6de4e60 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -81,6 +81,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/db/diff.md b/book/cli/reth/db/diff.md index ea4c29612f..efb9e7d32e 100644 --- a/book/cli/reth/db/diff.md +++ b/book/cli/reth/db/diff.md @@ -45,6 +45,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + --table The table name to diff. If not specified, all tables are diffed. diff --git a/book/cli/reth/debug/build-block.md b/book/cli/reth/debug/build-block.md index 76ddac306c..7bceb62b94 100644 --- a/book/cli/reth/debug/build-block.md +++ b/book/cli/reth/debug/build-block.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + --trusted-setup-file Overrides the KZG trusted setup by reading from the supplied file diff --git a/book/cli/reth/debug/execution.md b/book/cli/reth/debug/execution.md index 202e1452a8..b8e1ce05d1 100644 --- a/book/cli/reth/debug/execution.md +++ b/book/cli/reth/debug/execution.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/cli/reth/debug/in-memory-merkle.md b/book/cli/reth/debug/in-memory-merkle.md index 534e6d46c6..a183db997e 100644 --- a/book/cli/reth/debug/in-memory-merkle.md +++ b/book/cli/reth/debug/in-memory-merkle.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/cli/reth/debug/merkle.md b/book/cli/reth/debug/merkle.md index 19bc38acce..d9a72794ef 100644 --- 
a/book/cli/reth/debug/merkle.md +++ b/book/cli/reth/debug/merkle.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/cli/reth/debug/replay-engine.md b/book/cli/reth/debug/replay-engine.md index 7a14b9cf09..b7a1266d39 100644 --- a/book/cli/reth/debug/replay-engine.md +++ b/book/cli/reth/debug/replay-engine.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 7bd8a0079e..82a521ac0a 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + --no-state Disables stages that require state. diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index cb221634c4..533c0f8f88 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -69,6 +69,31 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --without-evm + Specifies whether to initialize the state without relying on EVM historical data. + + When enabled, and before inserting the state, it creates a dummy chain up to the last EVM block specified. It then appends the first provided block. + + - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be ignored. + + --header + Header file containing the header in RLP-encoded format. + + --total-difficulty + Total difficulty of the header. + + --header-hash + Hash of the header. + JSONL file with state dump.
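To make the `--without-evm` flags documented above concrete, a minimal sketch of an invocation follows; every file name and value here is a placeholder chosen for illustration, not a documented default:

```bash
# Hypothetical: initialize state at a post-genesis header without EVM history.
# header.rlp, state-dump.jsonl, HEADER_HASH, and TOTAL_DIFFICULTY are placeholders.
reth init-state \
  --without-evm \
  --header header.rlp \
  --header-hash "$HEADER_HASH" \
  --total-difficulty "$TOTAL_DIFFICULTY" \
  state-dump.jsonl
```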
diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index cc889e5e35..ebe2a8386c 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 73a8063a85..5f0090ef89 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -245,7 +245,7 @@ RPC: --http.api Rpc Modules to be configured for the HTTP server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots] --http.corsdomain Http Corsdomain to allow request from @@ -269,7 +269,7 @@ RPC: --ws.api Rpc Modules to be configured for the WS server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots] --ipcdisable Disable the IPC-RPC server @@ -331,7 +331,9 @@ RPC: [default: 500] --rpc.max-tracing-requests - Maximum number of concurrent tracing requests + Maximum number of concurrent tracing requests. + + By default this chooses a sensible value based on the number of available cores. Tracing requests are generally CPU bound. Choosing a value that is higher than the available CPU cores can have a negative impact on the performance of the node and affect the node's ability to maintain sync. [default: ] @@ -365,6 +367,9 @@ RPC: [default: 25] + --builder.disallow + Path to file containing disallowed addresses, json-encoded list of strings. 
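For illustration, such a file would hold a plain JSON array of hex-encoded addresses; a minimal sketch, where the file name and addresses are placeholders:

```bash
# Hypothetical disallow list (placeholder addresses).
cat > disallowed.json <<'EOF'
[
  "0x0000000000000000000000000000000000000001",
  "0x0000000000000000000000000000000000000002"
]
EOF

# Point the builder at it when starting the node.
reth node --builder.disallow disallowed.json
```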
Block validation API will reject blocks containing transactions from these addresses + RPC State Cache: --rpc-cache.max-blocks Max number of blocks in cache @@ -588,6 +593,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Dev testnet: --dev Start the node in dev mode @@ -614,8 +628,6 @@ Pruning: --block-interval Minimum pruning interval measured in blocks - [default: 0] - --prune.senderrecovery.full Prunes all sender recovery data diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 01253705b2..33639042a1 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -247,6 +247,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/prune.md b/book/cli/reth/prune.md index e0641256f1..41684ecd9e 100644 --- a/book/cli/reth/prune.md +++ b/book/cli/reth/prune.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index 1f639cb095..1afe94f55d 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index ae21a89183..c22d6be668 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Possible values: - headers: The headers stage within the pipeline diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md index 291d896902..e3df5bf2df 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -76,6 +76,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index bfe5ff9d6c..204efc9685 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + 
Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + --metrics Enable Prometheus metrics. diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index d181b3bcad..cb72b9313c 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -74,6 +74,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/developers/exex/remote.md b/book/developers/exex/remote.md index 4344e28b34..0ec704308f 100644 --- a/book/developers/exex/remote.md +++ b/book/developers/exex/remote.md @@ -25,41 +25,7 @@ We will also need a bunch of dependencies. Some of them you know from the [Hello but some are specific to what we need now. ```toml -[package] -name = "remote-exex" -version = "0.1.0" -edition = "2021" - -[dependencies] -# reth -reth = { git = "https://github.com/paradigmxyz/reth.git" } -reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = ["serde"] } -reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git"} -reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } - -# async -tokio = { version = "1", features = ["full"] } -tokio-stream = "0.1" -futures-util = "0.3" - -# grpc -tonic = "0.11" -prost = "0.12" -bincode = "1" - -# misc -eyre = "0.6" - -[build-dependencies] -tonic-build = "0.11" - -[[bin]] -name = "exex" -path = "src/exex.rs" - -[[bin]] -name = "consumer" -path = "src/consumer.rs" +{{#include ../../sources/exex/remote/Cargo.toml}} ``` We also added a build dependency for Tonic. We will use it to generate the Rust code for our @@ -87,26 +53,12 @@ For an example of a full schema, see the [Remote ExEx](https://github.com/paradi ```protobuf -syntax = "proto3"; - -package exex; - -service RemoteExEx { - rpc Subscribe(SubscribeRequest) returns (stream ExExNotification) {} -} - -message SubscribeRequest {} - -message ExExNotification { - bytes data = 1; -} +{{#include ../../sources/exex/remote/proto/exex.proto}} ``` To instruct Tonic to generate the Rust code using this `.proto`, add the following lines to your `lib.rs` file: ```rust,norun,noplayground,ignore -pub mod proto { - tonic::include_proto!("exex"); -} +{{#include ../../sources/exex/remote/src/lib.rs}} ``` ## ExEx and gRPC server @@ -119,52 +71,7 @@ Let's create a minimal gRPC server that listens on port `:10000`, and spawn it as a task in the [NodeBuilder](https://reth.rs/docs/reth/builder/struct.NodeBuilder.html)'s [task executor](https://reth.rs/docs/reth/tasks/struct.TaskExecutor.html).
```rust,norun,noplayground,ignore -use remote_exex::proto::{ - self, - remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, -}; -use reth_exex::ExExNotification; -use reth_node_ethereum::EthereumNode; -use reth_tracing::tracing::info; -use std::sync::Arc; -use tokio::sync::{broadcast, mpsc}; -use tokio_stream::wrappers::ReceiverStream; -use tonic::{transport::Server, Request, Response, Status}; - -struct ExExService {} - -#[tonic::async_trait] -impl RemoteExEx for ExExService { - type SubscribeStream = ReceiverStream>; - - async fn subscribe( - &self, - _request: Request, - ) -> Result, Status> { - let (_tx, rx) = mpsc::channel(1); - - Ok(Response::new(ReceiverStream::new(rx))) - } -} - -fn main() -> eyre::Result<()> { - reth::cli::Cli::parse_args().run(|builder, _| async move { - let server = Server::builder() - .add_service(RemoteExExServer::new(ExExService {})) - .serve("[::1]:10000".parse().unwrap()); - - let handle = builder.node(EthereumNode::default()).launch().await?; - - handle - .node - .task_executor - .spawn_critical("gRPC server", async move { - server.await.expect("failed to start gRPC server") - }); - - handle.wait_for_node_exit().await - }) -} +{{#include ../../sources/exex/remote/src/exex_1.rs}} ``` Currently, it does not send anything on the stream. @@ -175,40 +82,7 @@ Let's create this channel in the `main` function where we will have both gRPC se and save the sender part (that way we will be able to create new receivers) of this channel in our gRPC server. ```rust,norun,noplayground,ignore -// ... -use reth_exex::{ExExNotification}; - -struct ExExService { - notifications: Arc>, -} - -... - -fn main() -> eyre::Result<()> { - reth::cli::Cli::parse_args().run(|builder, _| async move { - let notifications = Arc::new(broadcast::channel(1).0); - - let server = Server::builder() - .add_service(RemoteExExServer::new(ExExService { - notifications: notifications.clone(), - })) - .serve("[::1]:10000".parse().unwrap()); - - let handle = builder - .node(EthereumNode::default()) - .launch() - .await?; - - handle - .node - .task_executor - .spawn_critical("gRPC server", async move { - server.await.expect("failed to start gRPC server") - }); - - handle.wait_for_node_exit().await - }) -} +{{#include ../../sources/exex/remote/src/exex_2.rs}} ``` And with that, we're ready to handle incoming notifications, serialize them with [bincode](https://docs.rs/bincode/) @@ -218,37 +92,7 @@ For each incoming request, we spawn a separate tokio task that will run in the b and then return the stream receiver to the client. ```rust,norun,noplayground,ignore -// ... - -#[tonic::async_trait] -impl RemoteExEx for ExExService { - type SubscribeStream = ReceiverStream>; - - async fn subscribe( - &self, - _request: Request, - ) -> Result, Status> { - let (tx, rx) = mpsc::channel(1); - - let mut notifications = self.notifications.subscribe(); - tokio::spawn(async move { - while let Ok(notification) = notifications.recv().await { - let proto_notification = proto::ExExNotification { - data: bincode::serialize(¬ification).expect("failed to serialize"), - }; - tx.send(Ok(proto_notification)) - .await - .expect("failed to send notification to client"); - - info!("Notification sent to the gRPC client"); - } - }); - - Ok(Response::new(ReceiverStream::new(rx))) - } -} - -// ... +{{#rustdoc_include ../../sources/exex/remote/src/exex_3.rs:snippet}} ``` That's it for the gRPC server part! 
It doesn't receive anything on the `notifications` channel yet, @@ -267,65 +111,14 @@ Don't forget to emit `ExExEvent::FinishedHeight` ```rust,norun,noplayground,ignore -// ... - -use futures_util::StreamExt; -use reth_exex::{ExExContext, ExExEvent}; - -async fn remote_exex( - mut ctx: ExExContext, - notifications: Arc>, -) -> eyre::Result<()> { - while let Some(notification) = ctx.notifications.next().await { - if let Some(committed_chain) = notification.committed_chain() { - ctx.events - .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; - } - - info!("Notification sent to the gRPC server"); - let _ = notifications.send(notification); - } - - Ok(()) -} - -// ... +{{#rustdoc_include ../../sources/exex/remote/src/exex_4.rs:snippet}} ``` All that's left is to connect all pieces together: install our ExEx in the node and pass the sender part of communication channel to it. ```rust,norun,noplayground,ignore -// ... - -fn main() -> eyre::Result<()> { - reth::cli::Cli::parse_args().run(|builder, _| async move { - let notifications = Arc::new(broadcast::channel(1).0); - - let server = Server::builder() - .add_service(RemoteExExServer::new(ExExService { - notifications: notifications.clone(), - })) - .serve("[::1]:10000".parse().unwrap()); - - let handle = builder - .node(EthereumNode::default()) - .install_exex("remote-exex", |ctx| async move { - Ok(remote_exex(ctx, notifications)) - }) - .launch() - .await?; - - handle - .node - .task_executor - .spawn_critical("gRPC server", async move { - server.await.expect("failed to start gRPC server") - }); - - handle.wait_for_node_exit().await - }) -} +{{#rustdoc_include ../../sources/exex/remote/src/exex.rs:snippet}} ``` ### Full `exex.rs` code @@ -334,98 +127,7 @@ fn main() -> eyre::Result<()> { Click to expand ```rust,norun,noplayground,ignore -use std::sync::Arc; - -use futures_util::StreamExt; -use remote_exex::proto::{ - self, - remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, -}; -use reth::api::FullNodeComponents; -use reth_exex::{ExExContext, ExExEvent, ExExNotification}; -use reth_node_ethereum::EthereumNode; -use reth_tracing::tracing::info; -use tokio::sync::{broadcast, mpsc}; -use tokio_stream::wrappers::ReceiverStream; -use tonic::{transport::Server, Request, Response, Status}; - -struct ExExService { - notifications: Arc>, -} - -#[tonic::async_trait] -impl RemoteExEx for ExExService { - type SubscribeStream = ReceiverStream>; - - async fn subscribe( - &self, - _request: Request, - ) -> Result, Status> { - let (tx, rx) = mpsc::channel(1); - - let mut notifications = self.notifications.subscribe(); - tokio::spawn(async move { - while let Ok(notification) = notifications.recv().await { - let proto_notification = proto::ExExNotification { - data: bincode::serialize(¬ification).expect("failed to serialize"), - }; - tx.send(Ok(proto_notification)) - .await - .expect("failed to send notification to client"); - - info!(?notification, "Notification sent to the gRPC client"); - } - }); - - Ok(Response::new(ReceiverStream::new(rx))) - } -} - -async fn remote_exex( - mut ctx: ExExContext, - notifications: Arc>, -) -> eyre::Result<()> { - while let Some(notification) = ctx.notifications.next().await { - if let Some(committed_chain) = notification.committed_chain() { - ctx.events - .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; - } - - info!(?notification, "Notification sent to the gRPC server"); - let _ = notifications.send(notification); - } - - Ok(()) -} - -fn main() -> eyre::Result<()> { - 
reth::cli::Cli::parse_args().run(|builder, _| async move { - let notifications = Arc::new(broadcast::channel(1).0); - - let server = Server::builder() - .add_service(RemoteExExServer::new(ExExService { - notifications: notifications.clone(), - })) - .serve("[::1]:10000".parse().unwrap()); - - let handle = builder - .node(EthereumNode::default()) - .install_exex("remote-exex", |ctx| async move { - Ok(remote_exex(ctx, notifications)) - }) - .launch() - .await?; - - handle - .node - .task_executor - .spawn_critical("gRPC server", async move { - server.await.expect("failed to start gRPC server") - }); - - handle.wait_for_node_exit().await - }) -} +{{#include ../../sources/exex/remote/src/exex.rs}} ``` @@ -442,38 +144,7 @@ because notifications can get very heavy ```rust,norun,noplayground,ignore -use remote_exex::proto::{remote_ex_ex_client::RemoteExExClient, SubscribeRequest}; -use reth_exex::ExExNotification; -use reth_tracing::{tracing::info, RethTracer, Tracer}; - -#[tokio::main] -async fn main() -> eyre::Result<()> { - let _ = RethTracer::new().init()?; - - let mut client = RemoteExExClient::connect("http://[::1]:10000") - .await? - .max_encoding_message_size(usize::MAX) - .max_decoding_message_size(usize::MAX); - - let mut stream = client.subscribe(SubscribeRequest {}).await?.into_inner(); - while let Some(notification) = stream.message().await? { - let notification: ExExNotification = bincode::deserialize(¬ification.data)?; - - match notification { - ExExNotification::ChainCommitted { new } => { - info!(committed_chain = ?new.range(), "Received commit"); - } - ExExNotification::ChainReorged { old, new } => { - info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); - } - ExExNotification::ChainReverted { old } => { - info!(reverted_chain = ?old.range(), "Received revert"); - } - }; - } - - Ok(()) -} +{{#include ../../sources/exex/remote/src/consumer.rs}} ``` ## Running diff --git a/book/developers/exex/tracking-state.md b/book/developers/exex/tracking-state.md index 52c73e6180..d2a9fe6ca3 100644 --- a/book/developers/exex/tracking-state.md +++ b/book/developers/exex/tracking-state.md @@ -19,63 +19,7 @@ because you can't access variables inside the function to assert the state of yo ```rust,norun,noplayground,ignore -use std::{ - future::Future, - pin::Pin, - task::{ready, Context, Poll}, -}; - -use futures_util::StreamExt; -use reth::api::FullNodeComponents; -use reth_exex::{ExExContext, ExExEvent, ExExNotification}; -use reth_node_ethereum::EthereumNode; -use reth_tracing::tracing::info; - -struct MyExEx { - ctx: ExExContext, -} - -impl Future for MyExEx { - type Output = eyre::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - - while let Some(notification) = ready!(this.ctx.notifications.poll_next_unpin(cx)) { - match ¬ification { - ExExNotification::ChainCommitted { new } => { - info!(committed_chain = ?new.range(), "Received commit"); - } - ExExNotification::ChainReorged { old, new } => { - info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); - } - ExExNotification::ChainReverted { old } => { - info!(reverted_chain = ?old.range(), "Received revert"); - } - }; - - if let Some(committed_chain) = notification.committed_chain() { - this.ctx - .events - .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; - } - } - - Poll::Ready(Ok(())) - } -} - -fn main() -> eyre::Result<()> { - reth::cli::Cli::parse_args().run(|builder, _| async move { - let handle = builder - 
.node(EthereumNode::default()) - .install_exex("my-exex", |ctx| async move { Ok(MyExEx { ctx }) }) - .launch() - .await?; - - handle.wait_for_node_exit().await - }) -} +{{#include ../../sources/exex/tracking-state/src/bin/1.rs}} ``` For those who are not familiar with how async Rust works on a lower level, that may seem scary, @@ -96,85 +40,7 @@ With all that done, we're now free to add more fields to our `MyExEx` struct, an Our ExEx will count the number of transactions in each block and log it to the console. ```rust,norun,noplayground,ignore -use std::{ - future::Future, - pin::Pin, - task::{ready, Context, Poll}, -}; - -use futures_util::StreamExt; -use reth::{api::FullNodeComponents, primitives::BlockNumber}; -use reth_exex::{ExExContext, ExExEvent}; -use reth_node_ethereum::EthereumNode; -use reth_tracing::tracing::info; - -struct MyExEx { - ctx: ExExContext, - /// First block that was committed since the start of the ExEx. - first_block: Option, - /// Total number of transactions committed. - transactions: u64, -} - -impl MyExEx { - fn new(ctx: ExExContext) -> Self { - Self { - ctx, - first_block: None, - transactions: 0, - } - } -} - -impl Future for MyExEx { - type Output = eyre::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - - while let Some(notification) = ready!(this.ctx.notifications.poll_next_unpin(cx)) { - if let Some(reverted_chain) = notification.reverted_chain() { - this.transactions = this.transactions.saturating_sub( - reverted_chain - .blocks_iter() - .map(|b| b.body.len() as u64) - .sum(), - ); - } - - if let Some(committed_chain) = notification.committed_chain() { - this.first_block.get_or_insert(committed_chain.first().number); - - this.transactions += committed_chain - .blocks_iter() - .map(|b| b.body.len() as u64) - .sum::(); - - this.ctx - .events - .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; - } - - if let Some(first_block) = this.first_block { - info!(%first_block, transactions = %this.transactions, "Total number of transactions"); - } - } - - Poll::Ready(Ok(())) - } -} - -fn main() -> eyre::Result<()> { - reth::cli::Cli::parse_args().run(|builder, _| async move { - let handle = builder - .node(EthereumNode::default()) - .install_exex("my-exex", |ctx| async move { Ok(MyExEx::new(ctx)) }) - .launch() - .await?; - - handle.wait_for_node_exit().await - }) -} +{{#include ../../sources/exex/tracking-state/src/bin/2.rs}} ``` As you can see, we added two fields to our ExEx struct: diff --git a/book/run/mainnet.md b/book/run/mainnet.md index 4412f51c7b..c4908971f6 100644 --- a/book/run/mainnet.md +++ b/book/run/mainnet.md @@ -83,4 +83,14 @@ In the meantime, consider setting up [observability](./observability.md) to moni ## Running without a Consensus Layer -We provide a method for running Reth without a Consensus Layer via the `--debug.tip ` parameter. If you provide that to your node, it will simulate sending a `engine_forkChoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip you'll need to run a CL alongside it. At the moment we have no plans of including a Consensus Layer implementation in Reth, and we are open to including light clients other methods of syncing like importing Lighthouse as a library. +We provide a method for running Reth without a Consensus Layer via the `--debug.tip ` parameter. 
If you provide that to your node, it will simulate sending an `engine_forkchoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip, you'll need to run a CL alongside it. At the moment we have no plans to include a Consensus Layer implementation in Reth, and we are open to including light clients or other methods of syncing, like importing Lighthouse as a library.
+
+## Running with Etherscan as Block Source
+
+You can use `--debug.etherscan` to run Reth with a fake consensus client that advances the chain using recent blocks on Etherscan. This requires an Etherscan API key (set via the `ETHERSCAN_API_KEY` environment variable). Optionally, specify a custom API URL with `--debug.etherscan <url>`.
+
+Example:
+```bash
+export ETHERSCAN_API_KEY=your_api_key_here
+reth node --debug.etherscan
+```
\ No newline at end of file
diff --git a/book/sources/Cargo.toml b/book/sources/Cargo.toml
index c04c8567f9..b374ad798b 100644
--- a/book/sources/Cargo.toml
+++ b/book/sources/Cargo.toml
@@ -1,9 +1,13 @@
 [workspace]
-members = [
-    "exex/hello-world",
-]
+members = ["exex/hello-world", "exex/remote", "exex/tracking-state"]
 
 # Explicitly set the resolver to version 2, which is the default for packages with edition >= 2021
 # https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html
 resolver = "2"
+
+[patch.'https://github.com/paradigmxyz/reth']
+reth = { path = "../../bin/reth" }
+reth-exex = { path = "../../crates/exex/exex" }
+reth-node-ethereum = { path = "../../crates/ethereum/node" }
+reth-tracing = { path = "../../crates/tracing" }
+reth-node-api = { path = "../../crates/node/api" }
diff --git a/book/sources/exex/hello-world/Cargo.toml b/book/sources/exex/hello-world/Cargo.toml
index e5d32a1405..c466018c66 100644
--- a/book/sources/exex/hello-world/Cargo.toml
+++ b/book/sources/exex/hello-world/Cargo.toml
@@ -4,10 +4,10 @@ version = "0.1.0"
 edition = "2021"
 
 [dependencies]
-reth = { git = "https://github.com/paradigmxyz/reth.git" } # Reth
-reth-exex = { git = "https://github.com/paradigmxyz/reth.git" } # Execution Extensions
+reth = { git = "https://github.com/paradigmxyz/reth.git" }               # Reth
+reth-exex = { git = "https://github.com/paradigmxyz/reth.git" }          # Execution Extensions
 reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } # Ethereum Node implementation
-reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } # Logging
+reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" }       # Logging
 
-eyre = "0.6" # Easy error handling
+eyre = "0.6"         # Easy error handling
 futures-util = "0.3" # Stream utilities for consuming notifications
diff --git a/book/sources/exex/remote/Cargo.toml b/book/sources/exex/remote/Cargo.toml
new file mode 100644
index 0000000000..6cca3a841f
--- /dev/null
+++ b/book/sources/exex/remote/Cargo.toml
@@ -0,0 +1,54 @@
+[package]
+name = "remote-exex"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+# reth
+reth = { git = "https://github.com/paradigmxyz/reth.git" }
+reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = [
+    "serde",
+] }
+reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" }
+reth-node-api = { git = "https://github.com/paradigmxyz/reth.git" }
+reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" }
+
+# async
+tokio = { version = "1", features = ["full"] }
+tokio-stream = "0.1"
+futures-util = "0.3"
+
+# grpc
+tonic = "0.11"
+prost =
"0.12" +bincode = "1" + +# misc +eyre = "0.6" + +[build-dependencies] +tonic-build = "0.11" + +[[bin]] +name = "exex_1" +path = "src/exex_1.rs" + +[[bin]] +name = "exex_2" +path = "src/exex_2.rs" + +[[bin]] +name = "exex_3" +path = "src/exex_3.rs" + +[[bin]] +name = "exex_4" +path = "src/exex_4.rs" + +[[bin]] +name = "exex" +path = "src/exex.rs" + +[[bin]] +name = "consumer" +path = "src/consumer.rs" diff --git a/book/sources/exex/remote/build.rs b/book/sources/exex/remote/build.rs new file mode 100644 index 0000000000..8e66f2a30a --- /dev/null +++ b/book/sources/exex/remote/build.rs @@ -0,0 +1,4 @@ +fn main() -> Result<(), Box> { + tonic_build::compile_protos("proto/exex.proto")?; + Ok(()) +} diff --git a/book/sources/exex/remote/proto/exex.proto b/book/sources/exex/remote/proto/exex.proto new file mode 100644 index 0000000000..9bb180b5f2 --- /dev/null +++ b/book/sources/exex/remote/proto/exex.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package exex; + +service RemoteExEx { + rpc Subscribe(SubscribeRequest) returns (stream ExExNotification) {} +} + +message SubscribeRequest {} + +message ExExNotification { + bytes data = 1; +} \ No newline at end of file diff --git a/book/sources/exex/remote/src/consumer.rs b/book/sources/exex/remote/src/consumer.rs new file mode 100644 index 0000000000..a0400f4bbf --- /dev/null +++ b/book/sources/exex/remote/src/consumer.rs @@ -0,0 +1,32 @@ +use remote_exex::proto::{remote_ex_ex_client::RemoteExExClient, SubscribeRequest}; +use reth_exex::ExExNotification; +use reth_tracing::{tracing::info, RethTracer, Tracer}; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + let _ = RethTracer::new().init()?; + + let mut client = RemoteExExClient::connect("http://[::1]:10000") + .await? + .max_encoding_message_size(usize::MAX) + .max_decoding_message_size(usize::MAX); + + let mut stream = client.subscribe(SubscribeRequest {}).await?.into_inner(); + while let Some(notification) = stream.message().await? 
{ + let notification: ExExNotification = bincode::deserialize(¬ification.data)?; + + match notification { + ExExNotification::ChainCommitted { new } => { + info!(committed_chain = ?new.range(), "Received commit"); + } + ExExNotification::ChainReorged { old, new } => { + info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); + } + ExExNotification::ChainReverted { old } => { + info!(reverted_chain = ?old.range(), "Received revert"); + } + }; + } + + Ok(()) +} diff --git a/book/sources/exex/remote/src/exex.rs b/book/sources/exex/remote/src/exex.rs new file mode 100644 index 0000000000..1ae4785db8 --- /dev/null +++ b/book/sources/exex/remote/src/exex.rs @@ -0,0 +1,87 @@ +use futures_util::TryStreamExt; +use remote_exex::proto::{ + self, + remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, +}; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_api::FullNodeComponents; +use reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; +use std::sync::Arc; +use tokio::sync::{broadcast, mpsc}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{transport::Server, Request, Response, Status}; + +struct ExExService { + notifications: Arc>, +} + +#[tonic::async_trait] +impl RemoteExEx for ExExService { + type SubscribeStream = ReceiverStream>; + + async fn subscribe( + &self, + _request: Request, + ) -> Result, Status> { + let (tx, rx) = mpsc::channel(1); + + let mut notifications = self.notifications.subscribe(); + tokio::spawn(async move { + while let Ok(notification) = notifications.recv().await { + let proto_notification = proto::ExExNotification { + data: bincode::serialize(¬ification).expect("failed to serialize"), + }; + tx.send(Ok(proto_notification)) + .await + .expect("failed to send notification to client"); + + info!("Notification sent to the gRPC client"); + } + }); + + Ok(Response::new(ReceiverStream::new(rx))) + } +} + +async fn remote_exex( + mut ctx: ExExContext, + notifications: Arc>, +) -> eyre::Result<()> { + while let Some(notification) = ctx.notifications.try_next().await? 
{ + if let Some(committed_chain) = notification.committed_chain() { + ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; + } + + info!("Notification sent to the gRPC server"); + let _ = notifications.send(notification); + } + + Ok(()) +} + +// ANCHOR: snippet +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let notifications = Arc::new(broadcast::channel(1).0); + + let server = Server::builder() + .add_service(RemoteExExServer::new(ExExService { + notifications: notifications.clone(), + })) + .serve("[::1]:10000".parse().unwrap()); + + let handle = builder + .node(EthereumNode::default()) + .install_exex("remote-exex", |ctx| async move { Ok(remote_exex(ctx, notifications)) }) + .launch() + .await?; + + handle.node.task_executor.spawn_critical("gRPC server", async move { + server.await.expect("failed to start gRPC server") + }); + + handle.wait_for_node_exit().await + }) +} +// ANCHOR_END: snippet diff --git a/book/sources/exex/remote/src/exex_1.rs b/book/sources/exex/remote/src/exex_1.rs new file mode 100644 index 0000000000..09a4bcc064 --- /dev/null +++ b/book/sources/exex/remote/src/exex_1.rs @@ -0,0 +1,40 @@ +use remote_exex::proto::{ + self, + remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, +}; +use reth_node_ethereum::EthereumNode; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{transport::Server, Request, Response, Status}; + +struct ExExService {} + +#[tonic::async_trait] +impl RemoteExEx for ExExService { + type SubscribeStream = ReceiverStream>; + + async fn subscribe( + &self, + _request: Request, + ) -> Result, Status> { + let (_tx, rx) = mpsc::channel(1); + + Ok(Response::new(ReceiverStream::new(rx))) + } +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let server = Server::builder() + .add_service(RemoteExExServer::new(ExExService {})) + .serve("[::1]:10000".parse().unwrap()); + + let handle = builder.node(EthereumNode::default()).launch().await?; + + handle.node.task_executor.spawn_critical("gRPC server", async move { + server.await.expect("failed to start gRPC server") + }); + + handle.wait_for_node_exit().await + }) +} diff --git a/book/sources/exex/remote/src/exex_2.rs b/book/sources/exex/remote/src/exex_2.rs new file mode 100644 index 0000000000..c4f51ddaea --- /dev/null +++ b/book/sources/exex/remote/src/exex_2.rs @@ -0,0 +1,49 @@ +use remote_exex::proto::{ + self, + remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, +}; +use reth_exex::ExExNotification; +use reth_node_ethereum::EthereumNode; +use std::sync::Arc; +use tokio::sync::{broadcast, mpsc}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{transport::Server, Request, Response, Status}; + +#[allow(dead_code)] +struct ExExService { + notifications: Arc>, +} + +#[tonic::async_trait] +impl RemoteExEx for ExExService { + type SubscribeStream = ReceiverStream>; + + async fn subscribe( + &self, + _request: Request, + ) -> Result, Status> { + let (_tx, rx) = mpsc::channel(1); + + Ok(Response::new(ReceiverStream::new(rx))) + } +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let notifications = Arc::new(broadcast::channel(1).0); + + let server = Server::builder() + .add_service(RemoteExExServer::new(ExExService { + notifications: notifications.clone(), + })) + .serve("[::1]:10000".parse().unwrap()); + + let handle = builder.node(EthereumNode::default()).launch().await?; + + 
handle.node.task_executor.spawn_critical("gRPC server", async move { + server.await.expect("failed to start gRPC server") + }); + + handle.wait_for_node_exit().await + }) +} diff --git a/book/sources/exex/remote/src/exex_3.rs b/book/sources/exex/remote/src/exex_3.rs new file mode 100644 index 0000000000..9f264cf345 --- /dev/null +++ b/book/sources/exex/remote/src/exex_3.rs @@ -0,0 +1,65 @@ +use remote_exex::proto::{ + self, + remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, +}; +use reth_exex::ExExNotification; +use reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; +use std::sync::Arc; +use tokio::sync::{broadcast, mpsc}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{transport::Server, Request, Response, Status}; + +struct ExExService { + notifications: Arc>, +} + +// ANCHOR: snippet +#[tonic::async_trait] +impl RemoteExEx for ExExService { + type SubscribeStream = ReceiverStream>; + + async fn subscribe( + &self, + _request: Request, + ) -> Result, Status> { + let (tx, rx) = mpsc::channel(1); + + let mut notifications = self.notifications.subscribe(); + tokio::spawn(async move { + while let Ok(notification) = notifications.recv().await { + let proto_notification = proto::ExExNotification { + data: bincode::serialize(¬ification).expect("failed to serialize"), + }; + tx.send(Ok(proto_notification)) + .await + .expect("failed to send notification to client"); + + info!("Notification sent to the gRPC client"); + } + }); + + Ok(Response::new(ReceiverStream::new(rx))) + } +} +// ANCHOR_END: snippet + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let notifications = Arc::new(broadcast::channel(1).0); + + let server = Server::builder() + .add_service(RemoteExExServer::new(ExExService { + notifications: notifications.clone(), + })) + .serve("[::1]:10000".parse().unwrap()); + + let handle = builder.node(EthereumNode::default()).launch().await?; + + handle.node.task_executor.spawn_critical("gRPC server", async move { + server.await.expect("failed to start gRPC server") + }); + + handle.wait_for_node_exit().await + }) +} diff --git a/book/sources/exex/remote/src/exex_4.rs b/book/sources/exex/remote/src/exex_4.rs new file mode 100644 index 0000000000..24c7bf2c2f --- /dev/null +++ b/book/sources/exex/remote/src/exex_4.rs @@ -0,0 +1,84 @@ +use futures_util::TryStreamExt; +use remote_exex::proto::{ + self, + remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, +}; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_api::FullNodeComponents; +use reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; +use std::sync::Arc; +use tokio::sync::{broadcast, mpsc}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{transport::Server, Request, Response, Status}; + +struct ExExService { + notifications: Arc>, +} + +#[tonic::async_trait] +impl RemoteExEx for ExExService { + type SubscribeStream = ReceiverStream>; + + async fn subscribe( + &self, + _request: Request, + ) -> Result, Status> { + let (tx, rx) = mpsc::channel(1); + + let mut notifications = self.notifications.subscribe(); + tokio::spawn(async move { + while let Ok(notification) = notifications.recv().await { + let proto_notification = proto::ExExNotification { + data: bincode::serialize(¬ification).expect("failed to serialize"), + }; + tx.send(Ok(proto_notification)) + .await + .expect("failed to send notification to client"); + + info!("Notification sent to the gRPC client"); + } + }); + + 
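+        // Hand the receiving half back to tonic as the subscriber's stream; the
+        // task spawned above keeps feeding it serialized notifications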
Ok(Response::new(ReceiverStream::new(rx))) + } +} + +// ANCHOR: snippet +#[allow(dead_code)] +async fn remote_exex( + mut ctx: ExExContext, + notifications: Arc>, +) -> eyre::Result<()> { + while let Some(notification) = ctx.notifications.try_next().await? { + if let Some(committed_chain) = notification.committed_chain() { + ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; + } + + info!("Notification sent to the gRPC server"); + let _ = notifications.send(notification); + } + + Ok(()) +} +// ANCHOR_END: snippet + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let notifications = Arc::new(broadcast::channel(1).0); + + let server = Server::builder() + .add_service(RemoteExExServer::new(ExExService { + notifications: notifications.clone(), + })) + .serve("[::1]:10000".parse().unwrap()); + + let handle = builder.node(EthereumNode::default()).launch().await?; + + handle.node.task_executor.spawn_critical("gRPC server", async move { + server.await.expect("failed to start gRPC server") + }); + + handle.wait_for_node_exit().await + }) +} diff --git a/book/sources/exex/remote/src/lib.rs b/book/sources/exex/remote/src/lib.rs new file mode 100644 index 0000000000..9abb458bd3 --- /dev/null +++ b/book/sources/exex/remote/src/lib.rs @@ -0,0 +1,3 @@ +pub mod proto { + tonic::include_proto!("exex"); +} diff --git a/book/sources/exex/tracking-state/Cargo.toml b/book/sources/exex/tracking-state/Cargo.toml new file mode 100644 index 0000000000..a8e862d0a7 --- /dev/null +++ b/book/sources/exex/tracking-state/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "tracking-state" +version = "0.1.0" +edition = "2021" + +[dependencies] +reth = { git = "https://github.com/paradigmxyz/reth.git" } +reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = [ + "serde", +] } +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } +reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } + +eyre = "0.6" # Easy error handling +futures-util = "0.3" # Stream utilities for consuming notifications +alloy-primitives = "0.8.7" diff --git a/book/sources/exex/tracking-state/src/bin/1.rs b/book/sources/exex/tracking-state/src/bin/1.rs new file mode 100644 index 0000000000..0d42e0791a --- /dev/null +++ b/book/sources/exex/tracking-state/src/bin/1.rs @@ -0,0 +1,57 @@ +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; + +use futures_util::{FutureExt, TryStreamExt}; +use reth::api::FullNodeComponents; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; + +struct MyExEx { + ctx: ExExContext, +} + +impl Future for MyExEx { + type Output = eyre::Result<()>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + while let Some(notification) = ready!(this.ctx.notifications.try_next().poll_unpin(cx))? 
{ + match ¬ification { + ExExNotification::ChainCommitted { new } => { + info!(committed_chain = ?new.range(), "Received commit"); + } + ExExNotification::ChainReorged { old, new } => { + info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); + } + ExExNotification::ChainReverted { old } => { + info!(reverted_chain = ?old.range(), "Received revert"); + } + }; + + if let Some(committed_chain) = notification.committed_chain() { + this.ctx + .events + .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; + } + } + + Poll::Ready(Ok(())) + } +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("my-exex", |ctx| async move { Ok(MyExEx { ctx }) }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} diff --git a/book/sources/exex/tracking-state/src/bin/2.rs b/book/sources/exex/tracking-state/src/bin/2.rs new file mode 100644 index 0000000000..9416810668 --- /dev/null +++ b/book/sources/exex/tracking-state/src/bin/2.rs @@ -0,0 +1,73 @@ +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; + +use alloy_primitives::BlockNumber; +use futures_util::{FutureExt, TryStreamExt}; +use reth::api::FullNodeComponents; +use reth_exex::{ExExContext, ExExEvent}; +use reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; + +struct MyExEx { + ctx: ExExContext, + /// First block that was committed since the start of the ExEx. + first_block: Option, + /// Total number of transactions committed. + transactions: u64, +} + +impl MyExEx { + fn new(ctx: ExExContext) -> Self { + Self { ctx, first_block: None, transactions: 0 } + } +} + +impl Future for MyExEx { + type Output = eyre::Result<()>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + while let Some(notification) = ready!(this.ctx.notifications.try_next().poll_unpin(cx))? 
{ + if let Some(reverted_chain) = notification.reverted_chain() { + this.transactions = this.transactions.saturating_sub( + reverted_chain.blocks_iter().map(|b| b.body.transactions.len() as u64).sum(), + ); + } + + if let Some(committed_chain) = notification.committed_chain() { + this.first_block.get_or_insert(committed_chain.first().number); + + this.transactions += committed_chain + .blocks_iter() + .map(|b| b.body.transactions.len() as u64) + .sum::(); + + this.ctx + .events + .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; + } + + if let Some(first_block) = this.first_block { + info!(%first_block, transactions = %this.transactions, "Total number of transactions"); + } + } + + Poll::Ready(Ok(())) + } +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("my-exex", |ctx| async move { Ok(MyExEx::new(ctx)) }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} diff --git a/bsc.Dockerfile b/bsc.Dockerfile index ff5e380b9e..659b143047 100644 --- a/bsc.Dockerfile +++ b/bsc.Dockerfile @@ -1,4 +1,4 @@ -FROM lukemathwalker/cargo-chef:latest-rust-1.81 AS chef +FROM lukemathwalker/cargo-chef:latest-rust-1.82 AS chef WORKDIR /app LABEL org.opencontainers.image.source=https://github.com/bnb-chain/reth diff --git a/clippy.toml b/clippy.toml index cdfa4bc93a..862c568634 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1,3 +1,17 @@ -msrv = "1.81" +msrv = "1.82" too-large-for-stack = 128 -doc-valid-idents = ["P2P", "ExEx", "ExExes", "IPv4", "IPv6", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "WAL", "MessagePack"] +doc-valid-idents = [ + "P2P", + "ExEx", + "ExExes", + "IPv4", + "IPv6", + "KiB", + "MiB", + "GiB", + "TiB", + "PiB", + "EiB", + "WAL", + "MessagePack", +] diff --git a/crates/blockchain-tree-api/Cargo.toml b/crates/blockchain-tree-api/Cargo.toml index 552b727671..b1c01f8593 100644 --- a/crates/blockchain-tree-api/Cargo.toml +++ b/crates/blockchain-tree-api/Cargo.toml @@ -18,6 +18,7 @@ reth-storage-errors.workspace = true # alloy alloy-primitives.workspace = true +alloy-eips.workspace = true # misc thiserror.workspace = true diff --git a/crates/blockchain-tree-api/src/lib.rs b/crates/blockchain-tree-api/src/lib.rs index 0a1bf6164e..7e1d0d714c 100644 --- a/crates/blockchain-tree-api/src/lib.rs +++ b/crates/blockchain-tree-api/src/lib.rs @@ -9,8 +9,9 @@ use self::error::CanonicalError; use crate::error::InsertBlockError; +use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; -use reth_primitives::{BlockNumHash, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::collections::BTreeMap; diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index bfe6969369..560c61664d 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -60,11 +60,32 @@ reth-consensus = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true reth-revm.workspace = true reth-evm-ethereum.workspace = true +reth-execution-types.workspace = true parking_lot.workspace = true assert_matches.workspace = true alloy-genesis.workspace = true alloy-consensus.workspace = true [features] -test-utils = [] -optimism = ["reth-primitives/optimism", "reth-provider/optimism"] +test-utils = [ + 
"reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-network/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-stages-api/test-utils", + "reth-db/test-utils", + "reth-db-api/test-utils", + "reth-provider/test-utils", + "reth-trie-db/test-utils", + "reth-trie/test-utils" +] +optimism = [ + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-execution-types/optimism", + "reth-db/optimism", + "reth-db-api/optimism", + "reth-chainspec/optimism" +] diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index e116463e4a..5d4ca2705c 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -2,7 +2,7 @@ use crate::metrics::BlockBufferMetrics; use alloy_primitives::{BlockHash, BlockNumber}; use reth_network::cache::LruCache; use reth_primitives::SealedBlockWithSenders; -use std::collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; /// Contains the tree of pending blocks that cannot be executed due to missing parent. /// It allows to store unconnected blocks for potential future inclusion. @@ -83,6 +83,7 @@ impl BlockBuffer { } self.metrics.blocks.set(self.blocks.len() as f64); } + /// Removes the given block from the buffer and also all the children of the block. /// /// This is used to get all the blocks that are dependent on the block that is included. @@ -93,10 +94,11 @@ impl BlockBuffer { &mut self, parent_hash: &BlockHash, ) -> Vec { - // remove parent block if present - let mut removed = self.remove_block(parent_hash).into_iter().collect::>(); - - removed.extend(self.remove_children(vec![*parent_hash])); + let removed = self + .remove_block(parent_hash) + .into_iter() + .chain(self.remove_children(vec![*parent_hash])) + .collect(); self.metrics.blocks.set(self.blocks.len() as f64); removed } @@ -126,10 +128,10 @@ impl BlockBuffer { /// Remove block entry fn remove_from_earliest_blocks(&mut self, number: BlockNumber, hash: &BlockHash) { - if let btree_map::Entry::Occupied(mut entry) = self.earliest_blocks.entry(number) { - entry.get_mut().remove(hash); - if entry.get().is_empty() { - entry.remove(); + if let Some(entry) = self.earliest_blocks.get_mut(&number) { + entry.remove(hash); + if entry.is_empty() { + self.earliest_blocks.remove(&number); } } } @@ -137,13 +139,13 @@ impl BlockBuffer { /// Remove from parent child connection. This method does not remove children. fn remove_from_parent(&mut self, parent_hash: BlockHash, hash: &BlockHash) { // remove from parent to child connection, but only for this block parent. - if let hash_map::Entry::Occupied(mut entry) = self.parent_to_child.entry(parent_hash) { - entry.get_mut().remove(hash); + if let Some(entry) = self.parent_to_child.get_mut(&parent_hash) { + entry.remove(hash); // if set is empty remove block entry. - if entry.get().is_empty() { - entry.remove(); + if entry.is_empty() { + self.parent_to_child.remove(&parent_hash); } - }; + } } /// Removes block from inner collections. diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index fea4517776..0014c71537 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -121,9 +121,6 @@ where /// is crucial for the correct execution of transactions. 
/// - `tree_config`: Configuration for the blockchain tree, including any parameters that affect /// its structure or performance. - /// - `prune_modes`: Configuration for pruning old blockchain data. This helps in managing the - /// storage space efficiently. It's important to validate this configuration to ensure it does - /// not lead to unintended data loss. pub fn new( externals: TreeExternals, config: BlockchainTreeConfig, @@ -938,6 +935,7 @@ where // check unconnected block buffer for children of the chains let mut all_chain_blocks = Vec::new(); for chain in self.state.chains.values() { + all_chain_blocks.reserve_exact(chain.blocks().len()); for (&number, block) in chain.blocks() { all_chain_blocks.push(BlockNumHash { number, hash: block.hash() }) } @@ -1416,9 +1414,10 @@ where #[cfg(test)] mod tests { use super::*; - use alloy_consensus::TxEip1559; + use alloy_consensus::{TxEip1559, EMPTY_ROOT_HASH}; + use alloy_eips::eip1559::INITIAL_BASE_FEE; use alloy_genesis::{Genesis, GenesisAccount}; - use alloy_primitives::{keccak256, Address, Sealable, B256}; + use alloy_primitives::{keccak256, Address, Sealable, Signature, B256}; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; use reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS}; @@ -1428,11 +1427,10 @@ mod tests { use reth_evm::test_utils::MockExecutorProvider; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::{ - constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_ROOT_HASH}, proofs::{calculate_receipt_root, calculate_transaction_root}, revm_primitives::AccountInfo, - Account, BlockBody, Header, Signature, Transaction, TransactionSigned, - TransactionSignedEcRecovered, Withdrawals, + Account, BlockBody, Header, Transaction, TransactionSigned, TransactionSignedEcRecovered, + Withdrawals, }; use reth_provider::{ test_utils::{ @@ -1602,7 +1600,7 @@ mod tests { provider_rw.commit().unwrap(); } - let single_tx_cost = U256::from(EIP1559_INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); + let single_tx_cost = U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); let mock_tx = |nonce: u64| -> TransactionSignedEcRecovered { TransactionSigned::from_transaction_and_signature( Transaction::Eip1559(TxEip1559 { @@ -1610,7 +1608,7 @@ mod tests { nonce, gas_limit: MIN_TRANSACTION_GAS, to: Address::ZERO.into(), - max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, + max_fee_per_gas: INITIAL_BASE_FEE as u128, ..Default::default() }), Signature::test_signature(), @@ -1647,7 +1645,7 @@ mod tests { gas_used: body.len() as u64 * MIN_TRANSACTION_GAS, gas_limit: chain_spec.max_gas_limit, mix_hash: B256::random(), - base_fee_per_gas: Some(EIP1559_INITIAL_BASE_FEE), + base_fee_per_gas: Some(INITIAL_BASE_FEE), transactions_root, receipts_root, state_root: state_root_unhashed(HashMap::from([( @@ -1675,7 +1673,6 @@ mod tests { ommers: Vec::new(), withdrawals: Some(Withdrawals::default()), sidecars: None, - requests: None, }, }, body.iter().map(|tx| tx.signer()).collect(), diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 149cdaaa43..6b017eb7da 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -326,7 +326,7 @@ impl AppendableChain { let parent_block = self.chain.tip(); let ancestor_blocks = - self.headers().map(|h| return (h.hash() as B256, h.header().clone())).collect(); + self.headers().map(|h| (h.hash() as B256, h.header().clone())).collect(); let bundle_state_data = BundleStateDataRef { execution_outcome: self.execution_outcome(), 
diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 719852c12a..4e22fcb78b 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -21,7 +21,6 @@ use std::{collections::BTreeMap, sync::Arc}; /// - A handle to the database /// - A handle to the consensus engine /// - The executor factory to execute blocks with -/// - The chain spec #[derive(Debug)] pub struct TreeExternals { /// The provider factory, used to commit the canonical chain, or unwind it. diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 925b8f03ad..862b02e760 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -60,6 +60,12 @@ impl BlockchainTreeEngine for NoopBlockchainTree { Ok(()) } + fn update_block_hashes_and_clear_buffered( + &self, + ) -> Result, CanonicalError> { + Ok(BTreeMap::new()) + } + fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { Ok(()) } @@ -67,12 +73,6 @@ impl BlockchainTreeEngine for NoopBlockchainTree { fn make_canonical(&self, block_hash: BlockHash) -> Result { Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }.into()) } - - fn update_block_hashes_and_clear_buffered( - &self, - ) -> Result, CanonicalError> { - Ok(BTreeMap::new()) - } } impl BlockchainTreeViewer for NoopBlockchainTree { diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index b76db9e6a9..ca8af6f9b5 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -61,6 +61,7 @@ impl TreeState { pub(crate) fn block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> { self.block_with_senders_by_hash(block_hash).map(|block| &block.block) } + /// Returns the block with matching hash from any side-chain. /// /// Caution: This will not return blocks from the canonical chain. 
@@ -128,3 +129,302 @@ impl From for SidechainId { Self(value) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::canonical_chain::CanonicalChain; + use alloy_primitives::B256; + use reth_execution_types::Chain; + use reth_provider::ExecutionOutcome; + + #[test] + fn test_tree_state_initialization() { + // Set up some dummy data for initialization + let last_finalized_block_number = 10u64; + let last_canonical_hashes = vec![(9u64, B256::random()), (10u64, B256::random())]; + let buffer_limit = 5; + + // Initialize the tree state + let tree_state = TreeState::new( + last_finalized_block_number, + last_canonical_hashes.clone(), + buffer_limit, + ); + + // Verify the tree state after initialization + assert_eq!(tree_state.block_chain_id_generator, 0); + assert_eq!(tree_state.block_indices().last_finalized_block(), last_finalized_block_number); + assert_eq!( + *tree_state.block_indices.canonical_chain().inner(), + *CanonicalChain::new(last_canonical_hashes.into_iter().collect()).inner() + ); + assert!(tree_state.chains.is_empty()); + assert!(tree_state.buffered_blocks.lru.is_empty()); + } + + #[test] + fn test_tree_state_next_id() { + // Initialize the tree state + let mut tree_state = TreeState::new(0, vec![], 5); + + // Generate a few sidechain IDs + let first_id = tree_state.next_id(); + let second_id = tree_state.next_id(); + + // Verify the generated sidechain IDs and the updated generator state + assert_eq!(first_id, SidechainId(0)); + assert_eq!(second_id, SidechainId(1)); + assert_eq!(tree_state.block_chain_id_generator, 2); + } + + #[test] + fn test_tree_state_insert_chain() { + // Initialize tree state + let mut tree_state = TreeState::new(0, vec![], 5); + + // Create a chain with two blocks + let block = SealedBlockWithSenders::default(); + let block1_hash = B256::random(); + let block2_hash = B256::random(); + + let mut block1 = block.clone(); + let mut block2 = block; + + block1.block.header.set_hash(block1_hash); + block1.block.header.set_block_number(9); + block2.block.header.set_hash(block2_hash); + block2.block.header.set_block_number(10); + + let chain = AppendableChain::new(Chain::new( + [block1, block2], + Default::default(), + Default::default(), + )); + + // Insert the chain into the TreeState + let chain_id = tree_state.insert_chain(chain).unwrap(); + + // Verify the chain ID and that it was added to the chains collection + assert_eq!(chain_id, SidechainId(0)); + assert!(tree_state.chains.contains_key(&chain_id)); + + // Ensure that the block indices are updated + assert_eq!( + tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(), + SidechainId(0) + ); + assert_eq!( + tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(), + SidechainId(0) + ); + + // Ensure that the block chain ID generator was updated + assert_eq!(tree_state.block_chain_id_generator, 1); + + // Create an empty chain + let chain_empty = AppendableChain::new(Chain::default()); + + // Insert the empty chain into the tree state + let chain_id = tree_state.insert_chain(chain_empty); + + // Ensure that the empty chain was not inserted + assert!(chain_id.is_none()); + + // Nothing should have changed and no new chain should have been added + assert!(tree_state.chains.contains_key(&SidechainId(0))); + assert!(!tree_state.chains.contains_key(&SidechainId(1))); + assert_eq!( + tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(), + SidechainId(0) + ); + assert_eq!( + tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(), + SidechainId(0) + ); + 
assert_eq!(tree_state.block_chain_id_generator, 1);
+    }
+
+    #[test]
+    fn test_block_by_hash_side_chain() {
+        // Initialize a tree state with some dummy data
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Create two side-chain blocks with random hashes
+        let block1_hash = B256::random();
+        let block2_hash = B256::random();
+
+        let mut block1 = SealedBlockWithSenders::default();
+        let mut block2 = SealedBlockWithSenders::default();
+
+        block1.block.header.set_hash(block1_hash);
+        block1.block.header.set_block_number(9);
+        block2.block.header.set_hash(block2_hash);
+        block2.block.header.set_block_number(10);
+
+        // Create a chain with these blocks
+        let chain = AppendableChain::new(Chain::new(
+            vec![block1.clone(), block2.clone()],
+            Default::default(),
+            Default::default(),
+        ));
+
+        // Insert the side chain into the TreeState
+        tree_state.insert_chain(chain).unwrap();
+
+        // Retrieve the blocks by their hashes
+        let retrieved_block1 = tree_state.block_by_hash(block1_hash);
+        assert_eq!(*retrieved_block1.unwrap(), block1.block);
+
+        let retrieved_block2 = tree_state.block_by_hash(block2_hash);
+        assert_eq!(*retrieved_block2.unwrap(), block2.block);
+
+        // Test block_by_hash with a random hash that doesn't exist
+        let non_existent_hash = B256::random();
+        let result = tree_state.block_by_hash(non_existent_hash);
+
+        // Ensure that no block is found
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn test_block_with_senders_by_hash() {
+        // Initialize a tree state with some dummy data
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Create two side-chain blocks with random hashes
+        let block1_hash = B256::random();
+        let block2_hash = B256::random();
+
+        let mut block1 = SealedBlockWithSenders::default();
+        let mut block2 = SealedBlockWithSenders::default();
+
+        block1.block.header.set_hash(block1_hash);
+        block1.block.header.set_block_number(9);
+        block2.block.header.set_hash(block2_hash);
+        block2.block.header.set_block_number(10);
+
+        // Create a chain with these blocks
+        let chain = AppendableChain::new(Chain::new(
+            vec![block1.clone(), block2.clone()],
+            Default::default(),
+            Default::default(),
+        ));
+
+        // Insert the side chain into the TreeState
+        tree_state.insert_chain(chain).unwrap();
+
+        // Test to retrieve the blocks with senders by their hashes
+        let retrieved_block1 = tree_state.block_with_senders_by_hash(block1_hash);
+        assert_eq!(*retrieved_block1.unwrap(), block1);
+
+        let retrieved_block2 = tree_state.block_with_senders_by_hash(block2_hash);
+        assert_eq!(*retrieved_block2.unwrap(), block2);
+
+        // Test block_with_senders_by_hash with a random hash that doesn't exist
+        let non_existent_hash = B256::random();
+        let result = tree_state.block_with_senders_by_hash(non_existent_hash);
+
+        // Ensure that no block is found
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn test_get_buffered_block() {
+        // Initialize a tree state with some dummy data
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Create a block with a random hash and add it to the buffer
+        let block_hash = B256::random();
+        let mut block = SealedBlockWithSenders::default();
+        block.block.header.set_hash(block_hash);
+
+        // Add the block to the buffered blocks in the TreeState
+        tree_state.buffered_blocks.insert_block(block.clone());
+
+        // Test get_buffered_block to retrieve the block by its hash
+        let retrieved_block = tree_state.get_buffered_block(&block_hash);
+        assert_eq!(*retrieved_block.unwrap(), block);
+
+        // Test get_buffered_block with a non-existent hash
+        let
non_existent_hash = B256::random(); + let result = tree_state.get_buffered_block(&non_existent_hash); + + // Ensure that no block is found + assert!(result.is_none()); + } + + #[test] + fn test_lowest_buffered_ancestor() { + // Initialize a tree state with some dummy data + let mut tree_state = TreeState::new(0, vec![], 5); + + // Create blocks with random hashes and set up parent-child relationships + let ancestor_hash = B256::random(); + let descendant_hash = B256::random(); + + let mut ancestor_block = SealedBlockWithSenders::default(); + let mut descendant_block = SealedBlockWithSenders::default(); + + ancestor_block.block.header.set_hash(ancestor_hash); + descendant_block.block.header.set_hash(descendant_hash); + descendant_block.block.header.set_parent_hash(ancestor_hash); + + // Insert the blocks into the buffer + tree_state.buffered_blocks.insert_block(ancestor_block.clone()); + tree_state.buffered_blocks.insert_block(descendant_block.clone()); + + // Test lowest_buffered_ancestor for the descendant block + let lowest_ancestor = tree_state.lowest_buffered_ancestor(&descendant_hash); + assert!(lowest_ancestor.is_some()); + assert_eq!(lowest_ancestor.unwrap().block.header.hash(), ancestor_hash); + + // Test lowest_buffered_ancestor with a non-existent hash + let non_existent_hash = B256::random(); + let result = tree_state.lowest_buffered_ancestor(&non_existent_hash); + + // Ensure that no ancestor is found + assert!(result.is_none()); + } + + #[test] + fn test_receipts_by_block_hash() { + // Initialize a tree state with some dummy data + let mut tree_state = TreeState::new(0, vec![], 5); + + // Create a block with a random hash and receipts + let block_hash = B256::random(); + let receipt1 = Receipt::default(); + let receipt2 = Receipt::default(); + + let mut block = SealedBlockWithSenders::default(); + block.block.header.set_hash(block_hash); + + let receipts = vec![receipt1, receipt2]; + + // Create a chain with the block and its receipts + let chain = AppendableChain::new(Chain::new( + vec![block.clone()], + ExecutionOutcome { receipts: receipts.clone().into(), ..Default::default() }, + Default::default(), + )); + + // Insert the chain into the TreeState + tree_state.insert_chain(chain).unwrap(); + + // Test receipts_by_block_hash for the inserted block + let retrieved_receipts = tree_state.receipts_by_block_hash(block_hash); + assert!(retrieved_receipts.is_some()); + + // Check if the correct receipts are returned + let receipts_ref: Vec<&Receipt> = receipts.iter().collect(); + assert_eq!(retrieved_receipts.unwrap(), receipts_ref); + + // Test receipts_by_block_hash with a non-existent block hash + let non_existent_hash = B256::random(); + let result = tree_state.receipts_by_block_hash(non_existent_hash); + + // Ensure that no receipts are found + assert!(result.is_none()); + } +} diff --git a/crates/bsc/bin/src/main.rs b/crates/bsc/bin/src/main.rs index 7ac657d042..99844d5bd5 100644 --- a/crates/bsc/bin/src/main.rs +++ b/crates/bsc/bin/src/main.rs @@ -7,7 +7,7 @@ static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::ne use clap::{Args, Parser}; use reth_bsc_cli::{BscChainSpecParser, Cli}; -use reth_bsc_node::{node::BSCAddOns, BscNode}; +use reth_bsc_node::{node::BscAddOns, BscNode}; use reth_node_builder::{ engine_tree_config::{ TreeConfig, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, @@ -76,7 +76,7 @@ fn main() { let handle = builder .with_types_and_provider::>() .with_components(BscNode::components()) - .with_add_ons(BSCAddOns) + 
.with_add_ons(BscAddOns::default()) .launch_with_fn(|builder| { let launcher = EngineNodeLauncher::new( builder.task_executor().clone(), diff --git a/crates/bsc/chainspec/Cargo.toml b/crates/bsc/chainspec/Cargo.toml index e267fb451d..d1c72ac4ac 100644 --- a/crates/bsc/chainspec/Cargo.toml +++ b/crates/bsc/chainspec/Cargo.toml @@ -23,6 +23,7 @@ reth-bsc-forks.workspace = true # ethereum alloy-chains.workspace = true +alloy-consensus.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true @@ -40,4 +41,10 @@ op-alloy-rpc-types.workspace = true [features] default = ["std"] -std = [] \ No newline at end of file +std = [ + "alloy-consensus/std", + "alloy-genesis/std", + "alloy-primitives/std", + "once_cell/std", + "reth-primitives-traits/std" +] diff --git a/crates/bsc/chainspec/src/dev.rs b/crates/bsc/chainspec/src/dev.rs index e8f2d5a2f7..e2da82f1ac 100644 --- a/crates/bsc/chainspec/src/dev.rs +++ b/crates/bsc/chainspec/src/dev.rs @@ -6,11 +6,11 @@ use alloc::sync::Arc; use std::sync::Arc; use alloy_chains::Chain; +use alloy_consensus::constants::DEV_GENESIS_HASH; use alloy_primitives::U256; use once_cell::sync::Lazy; use reth_bsc_forks::DEV_HARDFORKS; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; -use reth_primitives_traits::constants::DEV_GENESIS_HASH; use crate::BscChainSpec; diff --git a/crates/bsc/chainspec/src/lib.rs b/crates/bsc/chainspec/src/lib.rs index c2f9023297..3dd0ad0d30 100644 --- a/crates/bsc/chainspec/src/lib.rs +++ b/crates/bsc/chainspec/src/lib.rs @@ -65,8 +65,8 @@ impl EthChainSpec for BscChainSpec { self.inner.prune_delete_limit() } - fn display_hardforks(&self) -> impl Display { - self.inner.display_hardforks() + fn display_hardforks(&self) -> Box { + Box::new(self.inner.display_hardforks()) } fn genesis_header(&self) -> &Header { diff --git a/crates/bsc/cli/Cargo.toml b/crates/bsc/cli/Cargo.toml index 0bef763f12..7448fa96d7 100644 --- a/crates/bsc/cli/Cargo.toml +++ b/crates/bsc/cli/Cargo.toml @@ -84,6 +84,7 @@ asm-keccak = [ # Jemalloc feature for vergen to generate correct env vars jemalloc = [ - "reth-node-core/jemalloc", - "reth-node-metrics/jemalloc" + "reth-node-core/jemalloc", + "reth-node-metrics/jemalloc", + "reth-cli-util/jemalloc" ] diff --git a/crates/bsc/consensus/Cargo.toml b/crates/bsc/consensus/Cargo.toml index 0f35b25543..87530a0da4 100644 --- a/crates/bsc/consensus/Cargo.toml +++ b/crates/bsc/consensus/Cargo.toml @@ -31,6 +31,7 @@ reth-bsc-forks.workspace = true reth-bsc-primitives.workspace = true # eth +alloy-eips.workspace = true alloy-rlp.workspace = true alloy-consensus.workspace = true alloy-dyn-abi.workspace = true diff --git a/crates/bsc/consensus/src/constants.rs b/crates/bsc/consensus/src/constants.rs index 4fd2af7180..2a5b808a0e 100644 --- a/crates/bsc/consensus/src/constants.rs +++ b/crates/bsc/consensus/src/constants.rs @@ -1,5 +1,5 @@ +use alloy_consensus::constants::ETH_TO_WEI; use alloy_primitives::U256; -use reth_primitives::constants::ETH_TO_WEI; /// Fixed number of extra-data prefix bytes reserved for signer vanity pub const EXTRA_VANITY_LEN: usize = 32; diff --git a/crates/bsc/consensus/src/lib.rs b/crates/bsc/consensus/src/lib.rs index 184b29bf88..eb8492e18b 100644 --- a/crates/bsc/consensus/src/lib.rs +++ b/crates/bsc/consensus/src/lib.rs @@ -13,6 +13,7 @@ use std::{ time::SystemTime, }; +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_json_abi::JsonAbi; use alloy_primitives::{Address, B256, U256}; use alloy_rlp::Decodable; @@ -30,7 +31,7 @@ use 
reth_consensus_common::validation::{ use reth_primitives::{ constants::EMPTY_MIX_HASH, parlia::{ParliaConfig, Snapshot, VoteAddress, VoteAttestation}, - BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, + BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader, }; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, diff --git a/crates/bsc/consensus/src/trace_helper.rs b/crates/bsc/consensus/src/trace_helper.rs index 3411c8598a..d5e99be250 100644 --- a/crates/bsc/consensus/src/trace_helper.rs +++ b/crates/bsc/consensus/src/trace_helper.rs @@ -1,13 +1,14 @@ use std::sync::Arc; -use alloy_primitives::{address, Address, U256}; +use alloy_primitives::{address, map::HashMap, Address, U256}; use reth_bsc_forks::BscHardforks; use reth_bsc_primitives::system_contracts::get_upgrade_system_contracts; -use reth_primitives::revm_primitives::{db::DatabaseRef, BlockEnv}; -use reth_revm::db::{ - AccountState::{NotExisting, Touched}, - CacheDB, +use reth_primitives::revm_primitives::{ + db::{Database, DatabaseCommit}, + state::AccountStatus, + BlockEnv, }; +use reth_revm::primitives::Account; use crate::Parlia; @@ -24,9 +25,9 @@ impl BscTraceHelper { Self { parlia } } - pub fn upgrade_system_contracts( + pub fn upgrade_system_contracts( &self, - db: &mut CacheDB, + db: &mut DB, block_env: &BlockEnv, parent_timestamp: u64, before_tx: bool, @@ -43,38 +44,60 @@ impl BscTraceHelper { ) .map_err(|_| BscTraceHelperError::GetUpgradeSystemContractsFailed)?; + let mut changeset: HashMap<_, _> = Default::default(); for (k, v) in contracts { + let mut info = db + .basic(k) + .map_err(|_| BscTraceHelperError::LoadAccountFailed)? + .unwrap_or_default() + .clone(); + + info.code_hash = v.clone().unwrap().hash_slow(); + info.code = v; + let account = - db.load_account(k).map_err(|_| BscTraceHelperError::LoadAccountFailed).unwrap(); - if account.account_state == NotExisting { - account.account_state = Touched; - } - account.info.code_hash = v.clone().unwrap().hash_slow(); - account.info.code = v; + Account { info, status: AccountStatus::Touched, ..Default::default() }; + + changeset.insert(k, account); } + + db.commit(changeset); } Ok(()) } - pub fn add_block_reward( + pub fn add_block_reward( &self, - db: &mut CacheDB, + db: &mut DB, block_env: &BlockEnv, ) -> Result<(), BscTraceHelperError> { - let sys_acc = - db.load_account(SYSTEM_ADDRESS).map_err(|_| BscTraceHelperError::LoadAccountFailed)?; - let balance = sys_acc.info.balance; + let mut sys_info = db + .basic(SYSTEM_ADDRESS) + .map_err(|_| BscTraceHelperError::LoadAccountFailed)? + .unwrap_or_default(); + let balance = sys_info.balance; if balance > U256::ZERO { - sys_acc.info.balance = U256::ZERO; + let mut changeset: HashMap<_, _> = Default::default(); - let val_acc = db - .load_account(block_env.coinbase) - .map_err(|_| BscTraceHelperError::LoadAccountFailed)?; - if val_acc.account_state == NotExisting { - val_acc.account_state = Touched; - } - val_acc.info.balance += balance; + sys_info.balance = U256::ZERO; + + let sys_account = + Account { info: sys_info, status: AccountStatus::Touched, ..Default::default() }; + changeset.insert(SYSTEM_ADDRESS, sys_account); + + let mut val_info = db + .basic(block_env.coinbase) + .map_err(|_| BscTraceHelperError::LoadAccountFailed)? 
+ .unwrap_or_default(); + + val_info.balance += balance; + + let val_account = + Account { info: val_info, status: AccountStatus::Touched, ..Default::default() }; + changeset.insert(block_env.coinbase, val_account); + + db.commit(changeset); } Ok(()) diff --git a/crates/bsc/consensus/src/validation.rs b/crates/bsc/consensus/src/validation.rs index f2ecb12324..af2060a1c8 100644 --- a/crates/bsc/consensus/src/validation.rs +++ b/crates/bsc/consensus/src/validation.rs @@ -1,8 +1,8 @@ +use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}; use alloy_primitives::{Bloom, B256}; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::ConsensusError; use reth_primitives::{ - constants::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, gas_spent_by_transactions, BlockWithSenders, GotExpected, Header, Receipt, SealedHeader, }; diff --git a/crates/bsc/engine/Cargo.toml b/crates/bsc/engine/Cargo.toml index 0de980e56c..6ef389a930 100644 --- a/crates/bsc/engine/Cargo.toml +++ b/crates/bsc/engine/Cargo.toml @@ -35,11 +35,13 @@ reth-bsc-evm.workspace = true reth-bsc-payload-builder.workspace = true # eth +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-rpc-types.workspace = true alloy-dyn-abi.workspace = true alloy-json-abi.workspace = true +alloy-rpc-types-engine.workspace = true # crypto secp256k1.workspace = true diff --git a/crates/bsc/engine/src/lib.rs b/crates/bsc/engine/src/lib.rs index 1ae0ca174e..ccaef072a2 100644 --- a/crates/bsc/engine/src/lib.rs +++ b/crates/bsc/engine/src/lib.rs @@ -5,16 +5,9 @@ // The `bsc` feature must be enabled to use this crate. #![cfg(feature = "bsc")] -use std::{ - clone::Clone, - collections::{HashMap, VecDeque}, - fmt::Debug, - marker::PhantomData, - sync::Arc, -}; - +use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockHash, BlockNumber, B256}; -use alloy_rpc_types::engine::{ +pub use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV1, PayloadAttributes, }; @@ -29,8 +22,15 @@ use reth_engine_primitives::{ }; use reth_network_api::events::EngineMessage; use reth_network_p2p::BlockClient; -use reth_primitives::{BlockBody, BlockHashOrNumber, SealedHeader}; +use reth_primitives::{BlockBody, SealedHeader}; use reth_provider::{BlockReaderIdExt, CanonChainTracker, ParliaProvider}; +use std::{ + clone::Clone, + collections::{HashMap, VecDeque}, + fmt::Debug, + marker::PhantomData, + sync::Arc, +}; use tokio::sync::{ mpsc::{UnboundedReceiver, UnboundedSender}, Mutex, RwLockReadGuard, RwLockWriteGuard, @@ -65,10 +65,10 @@ where + TryInto + TryInto, { - type ExecutionPayloadV1 = ExecutionPayloadV1; - type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; - type ExecutionPayloadV3 = ExecutionPayloadEnvelopeV3; - type ExecutionPayloadV4 = ExecutionPayloadEnvelopeV4; + type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; + type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; + type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; + type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; } /// A default payload type for [`BscEngineTypes`] diff --git a/crates/bsc/engine/src/task.rs b/crates/bsc/engine/src/task.rs index babb46bcfa..83bb0537d1 100644 --- a/crates/bsc/engine/src/task.rs +++ b/crates/bsc/engine/src/task.rs @@ -5,6 +5,7 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Sealable, B256}; use 
alloy_rpc_types::{engine::ForkchoiceState, BlockId, RpcBlockHash}; use reth_beacon_consensus::{ @@ -13,13 +14,14 @@ use reth_beacon_consensus::{ use reth_bsc_consensus::Parlia; use reth_bsc_evm::SnapshotReader; use reth_chainspec::EthChainSpec; +use reth_engine_primitives::EngineApiMessageVersion; use reth_network_api::events::EngineMessage; use reth_network_p2p::{ headers::client::{HeadersClient, HeadersDirection, HeadersRequest}, priority::Priority, BlockClient, }; -use reth_primitives::{Block, BlockBody, BlockHashOrNumber, SealedHeader}; +use reth_primitives::{Block, BlockBody, SealedHeader}; use reth_provider::{BlockReaderIdExt, CanonChainTracker, ParliaProvider}; use tokio::{ signal, @@ -488,6 +490,7 @@ impl< state, payload_attrs: None, tx, + version: EngineApiMessageVersion::default(), }); debug!(target: "consensus::parlia", ?state, "Sent fork choice update"); diff --git a/crates/bsc/evm/Cargo.toml b/crates/bsc/evm/Cargo.toml index e9e357a935..3318c63434 100644 --- a/crates/bsc/evm/Cargo.toml +++ b/crates/bsc/evm/Cargo.toml @@ -27,11 +27,11 @@ reth-bsc-chainspec.workspace = true reth-bsc-forks.workspace = true reth-bsc-primitives.workspace = true -# Revm -revm-primitives.workspace = true - +# ethereum +alloy-consensus.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true +revm-primitives.workspace = true # misc thiserror.workspace = true diff --git a/crates/bsc/evm/src/execute.rs b/crates/bsc/evm/src/execute.rs index c5228a1ed7..6e2cf5d6a6 100644 --- a/crates/bsc/evm/src/execute.rs +++ b/crates/bsc/evm/src/execute.rs @@ -3,6 +3,7 @@ use core::fmt::Display; use std::{collections::HashMap, num::NonZeroUsize, sync::Arc}; +use alloy_consensus::Transaction as _; use alloy_primitives::{Address, BlockNumber, Bytes, B256, U256}; use lazy_static::lazy_static; use lru::LruCache; @@ -141,11 +142,15 @@ where self.bsc_executor(db, prefetch_tx) } - fn batch_executor(&self, db: DB) -> Self::BatchExecutor + fn batch_executor( + &self, + db: DB, + prefetch_tx: Option>, + ) -> Self::BatchExecutor where DB: Database + Display>, { - let executor = self.bsc_executor(db, None); + let executor = self.bsc_executor(db, prefetch_tx); BscBatchExecutor { executor, batch_record: BlockBatchRecord::default(), @@ -244,7 +249,6 @@ where })?; if let Some(tx) = tx.as_ref() { - // let post_state = HashedPostState::from_state(state.clone()); tx.send(state.clone()).unwrap_or_else(|err| { debug!(target: "evm_executor", ?err, "Failed to send post state to prefetch channel") }); @@ -791,7 +795,7 @@ where Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, - requests: Vec::default(), + requests: Default::default(), gas_used, snapshot, }) @@ -816,7 +820,7 @@ where Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, - requests: Vec::default(), + requests: Default::default(), gas_used, snapshot, }) @@ -845,7 +849,7 @@ where Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, - requests: Vec::default(), + requests: Default::default(), gas_used, snapshot, }) diff --git a/crates/bsc/evm/src/lib.rs b/crates/bsc/evm/src/lib.rs index ed23221099..0f81ec2ba0 100644 --- a/crates/bsc/evm/src/lib.rs +++ b/crates/bsc/evm/src/lib.rs @@ -6,7 +6,7 @@ // The `bsc` feature must be enabled to use this crate. 
#![cfg(feature = "bsc")] -use std::sync::Arc; +use std::{convert::Infallible, sync::Arc}; use alloy_primitives::{Address, Bytes, U256}; use reth_bsc_chainspec::BscChainSpec; @@ -54,6 +54,7 @@ impl BscEvmConfig { impl ConfigureEvmEnv for BscEvmConfig { type Header = Header; + type Error = Infallible; // TODO: error type fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { transaction.fill_tx_env(tx_env, sender); @@ -101,7 +102,7 @@ impl ConfigureEvmEnv for BscEvmConfig { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { // configure evm env based on parent block let cfg = CfgEnv::default().with_chain_id(self.chain_spec.chain().id()); @@ -112,14 +113,7 @@ impl ConfigureEvmEnv for BscEvmConfig { // cancun now, we need to set the excess blob gas to the default value let blob_excess_gas_and_price = parent .next_block_excess_blob_gas() - .or_else(|| { - if spec_id == SpecId::CANCUN { - // default excess blob gas is zero - Some(0) - } else { - None - } - }) + .or_else(|| (spec_id == SpecId::CANCUN).then_some(0)) .map(BlobExcessGasAndPrice::new); let mut basefee = parent.next_block_base_fee( @@ -156,7 +150,7 @@ impl ConfigureEvmEnv for BscEvmConfig { blob_excess_gas_and_price, }; - (CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env) + Ok((CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env)) } } diff --git a/crates/bsc/hardforks/Cargo.toml b/crates/bsc/hardforks/Cargo.toml index 5677e60b9a..59ddcfed3d 100644 --- a/crates/bsc/hardforks/Cargo.toml +++ b/crates/bsc/hardforks/Cargo.toml @@ -27,5 +27,13 @@ once_cell.workspace = true [features] default = ["std"] -std = [] -serde = ["dep:serde"] \ No newline at end of file +std = [ + "alloy-primitives/std", + "once_cell/std", + "serde?/std" +] +serde = [ + "dep:serde", + "alloy-chains/serde", + "alloy-primitives/serde" +] diff --git a/crates/bsc/node/Cargo.toml b/crates/bsc/node/Cargo.toml index ed883df01e..3a3369410d 100644 --- a/crates/bsc/node/Cargo.toml +++ b/crates/bsc/node/Cargo.toml @@ -26,6 +26,7 @@ reth-primitives.workspace = true reth-config.workspace = true reth-rpc.workspace = true reth-node-api.workspace = true +reth-trie-db.workspace = true # bsc-reth reth-bsc-chainspec.workspace = true @@ -62,4 +63,8 @@ bsc = [ "reth-bsc-payload-builder/bsc", "reth-bsc-engine/bsc", ] -asm-keccak = ["reth-primitives/asm-keccak"] \ No newline at end of file +asm-keccak = [ + "reth-primitives/asm-keccak", + "reth/asm-keccak", + "reth-node-core/asm-keccak" +] diff --git a/crates/bsc/node/src/node.rs b/crates/bsc/node/src/node.rs index d11cf346bb..cf057f0daa 100644 --- a/crates/bsc/node/src/node.rs +++ b/crates/bsc/node/src/node.rs @@ -8,18 +8,23 @@ use reth_bsc_consensus::Parlia; use reth_bsc_engine::{BscEngineTypes, BscEngineValidator}; use reth_bsc_evm::{BscEvmConfig, BscExecutorProvider}; use reth_bsc_payload_builder::{BscBuiltPayload, BscPayloadBuilderAttributes}; +use reth_ethereum_engine_primitives::EthPayloadAttributes; use reth_network::NetworkHandle; -use reth_node_api::{ConfigureEvm, EngineValidator, FullNodeComponents, NodeAddOns}; +use reth_node_api::{ + AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodePrimitives, + NodeTypesWithDB, +}; use reth_node_builder::{ components::{ - ComponentsBuilder, ConsensusBuilder, EngineValidatorBuilder, ExecutorBuilder, - NetworkBuilder, ParliaBuilder, PayloadServiceBuilder, PoolBuilder, + ComponentsBuilder, 
ConsensusBuilder, ExecutorBuilder, NetworkBuilder, ParliaBuilder, + PayloadServiceBuilder, PoolBuilder, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - BuilderContext, Node, PayloadBuilderConfig, PayloadTypes, + rpc::{EngineValidatorBuilder, RpcAddOns}, + BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::Header; +use reth_primitives::{Block, Header}; use reth_provider::CanonStateSubscriptions; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; @@ -27,6 +32,15 @@ use reth_transaction_pool::{ blobstore::DiskFileBlobStore, EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, }; +use reth_trie_db::MerklePatriciaTrie; + +/// Ethereum primitive types. +#[derive(Debug)] +pub struct BscPrimitives; + +impl NodePrimitives for BscPrimitives { + type Block = Block; +} /// Type configuration for a regular BSC node. #[derive(Debug, Default, Clone, Copy)] @@ -42,12 +56,14 @@ impl BscNode { BscNetworkBuilder, BscExecutorBuilder, BscConsensusBuilder, - BscEngineValidatorBuilder, BscParliaBuilder, > where - Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Node: FullNodeTypes>, + ::Engine: PayloadTypes< + BuiltPayload = BscBuiltPayload, + PayloadAttributes = EthPayloadAttributes, + PayloadBuilderAttributes = BscPayloadBuilderAttributes, >, { ComponentsBuilder::default() @@ -57,14 +73,14 @@ impl BscNode { .network(BscNetworkBuilder::default()) .executor(BscExecutorBuilder::default()) .consensus(BscConsensusBuilder::default()) - .engine_validator(BscEngineValidatorBuilder::default()) .parlia(BscParliaBuilder::default()) } } impl NodeTypes for BscNode { - type Primitives = (); + type Primitives = BscPrimitives; type ChainSpec = BscChainSpec; + type StateCommitment = MerklePatriciaTrie; } impl NodeTypesWithEngine for BscNode { @@ -72,16 +88,20 @@ impl NodeTypesWithEngine for BscNode { } /// Add-ons w.r.t. l1 bsc. 
-#[derive(Debug, Clone, Default)] -pub struct BSCAddOns; - -impl NodeAddOns for BSCAddOns { - type EthApi = EthApi; -} +pub type BscAddOns = RpcAddOns< + N, + EthApi< + ::Provider, + ::Pool, + NetworkHandle, + ::Evm, + >, + BscEngineValidatorBuilder, +>; impl Node for BscNode where - Types: NodeTypesWithEngine, + Types: NodeTypesWithDB + NodeTypesWithEngine, N: FullNodeTypes, { type ComponentsBuilder = ComponentsBuilder< @@ -91,18 +111,19 @@ where BscNetworkBuilder, BscExecutorBuilder, BscConsensusBuilder, - BscEngineValidatorBuilder, BscParliaBuilder, >; - type AddOns = BSCAddOns; + type AddOns = BscAddOns< + NodeAdapter>::Components>, + >; fn components_builder(&self) -> Self::ComponentsBuilder { Self::components() } fn add_ons(&self) -> Self::AddOns { - BSCAddOns + BscAddOns::default() } } @@ -321,16 +342,15 @@ where #[non_exhaustive] pub struct BscEngineValidatorBuilder; -impl EngineValidatorBuilder for BscEngineValidatorBuilder +impl EngineValidatorBuilder for BscEngineValidatorBuilder where - Node: FullNodeTypes< - Types: NodeTypesWithEngine, - >, - BscEngineValidator: EngineValidator<::Engine>, + Types: NodeTypesWithEngine, + Node: FullNodeComponents, + BscEngineValidator: EngineValidator, { type Validator = BscEngineValidator; - async fn build_validator(self, _ctx: &BuilderContext) -> eyre::Result { + async fn build(self, _ctx: &AddOnsContext<'_, Node>) -> eyre::Result { Ok(BscEngineValidator {}) } } diff --git a/crates/bsc/payload/Cargo.toml b/crates/bsc/payload/Cargo.toml index 0b1712588c..853a6724e0 100644 --- a/crates/bsc/payload/Cargo.toml +++ b/crates/bsc/payload/Cargo.toml @@ -31,11 +31,12 @@ reth-chain-state.workspace = true reth-bsc-chainspec.workspace = true # ethereum -revm.workspace = true +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true -revm-primitives.workspace = true alloy-rpc-types-engine.workspace = true +revm.workspace = true +revm-primitives.workspace = true # misc tracing.workspace = true diff --git a/crates/bsc/payload/src/builder.rs b/crates/bsc/payload/src/builder.rs index a1b364fb54..a037fd38a8 100644 --- a/crates/bsc/payload/src/builder.rs +++ b/crates/bsc/payload/src/builder.rs @@ -2,7 +2,8 @@ use std::sync::Arc; -use alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK; +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_eips::{eip4844::MAX_DATA_GAS_PER_BLOCK, merge::BEACON_NONCE}; use alloy_primitives::U256; use reth_basic_payload_builder::*; use reth_bsc_chainspec::BscChainSpec; @@ -12,10 +13,9 @@ use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv, NextBl use reth_execution_types::ExecutionOutcome; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ - constants::BEACON_NONCE, proofs, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, Header, Receipt, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, Header, Receipt, }; use reth_provider::StateProviderFactory; use reth_revm::database::StateProviderDatabase; @@ -56,7 +56,7 @@ where &self, config: &PayloadConfig, parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { let next_attributes = NextBlockEnvAttributes { timestamp: config.attributes.timestamp(), suggested_fee_recipient: config.attributes.suggested_fee_recipient(), @@ -80,7 +80,9 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); + let 
(cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; bsc_payload(self.evm_config.clone(), args, cfg_env, block_env) } @@ -100,7 +102,9 @@ where cancel: Default::default(), best_payload: None, }; - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; bsc_payload(self.evm_config.clone(), args, cfg_env, block_env)? .into_payload() .ok_or_else(|| PayloadBuilderError::MissingPayload) @@ -130,13 +134,13 @@ where let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let chain_spec = client.chain_spec(); - let state_provider = client.state_by_block_hash(config.parent_block.hash())?; + let state_provider = client.state_by_block_hash(config.parent_header.hash())?; let state = StateProviderDatabase::new(state_provider); let mut db = State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); - let PayloadConfig { parent_block, extra_data, attributes } = config; + let PayloadConfig { parent_header, extra_data, attributes } = config; - debug!(target: "payload_builder", id=%attributes.payload_attributes.id, parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); + debug!(target: "payload_builder", id=%attributes.payload_attributes.id, parent_hash = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; let block_gas_limit: u64 = initialized_block_env.gas_limit.to::<u64>(); @@ -154,7 +158,7 @@ where let block_number = initialized_block_env.number.to::<u64>(); - let mut system_caller = SystemCaller::new(&evm_config, chain_spec.clone()); + let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); // apply eip-4788 pre block contract call system_caller .pre_block_beacon_root_contract_call( &mut db, &initialized_cfg, &initialized_block_env, ) .map_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_hash=%parent_header.hash(), %err, "failed to apply beacon root contract call for payload" ); @@ -178,10 +182,10 @@ &mut db, &initialized_cfg, &initialized_block_env, - parent_block.hash(), + parent_header.hash(), ) .map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to update blockhashes for payload"); + warn!(target: "payload_builder", parent_hash=%parent_header.hash(), %err, "failed to update blockhashes for payload"); PayloadBuilderError::Internal(err.into()) })?; @@ -222,7 +226,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env( initialized_cfg.clone(), initialized_block_env.clone(), - evm_config.tx_env(&tx), + evm_config.tx_env(tx.as_signed(), tx.signer()), ); // Configure the environment for the block.
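Editor's note (not part of the diff): the hunks above move the BSC payload builder onto the fallible `ConfigureEvmEnv` API. A minimal sketch of the resulting call pattern, using only items that appear in this diff; the free function name `env_for_next_block` is illustrative:

fn env_for_next_block<EvmConfig>(
    evm_config: &EvmConfig,
    parent: &Header,
    attributes: NextBlockEnvAttributes,
) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), PayloadBuilderError>
where
    EvmConfig: ConfigureEvmEnv<Header = Header>,
    EvmConfig::Error: std::error::Error + Send + Sync + 'static,
{
    // The error is `Infallible` for `BscEvmConfig`, but mapping through
    // `PayloadBuilderError::other` keeps builders generic over EVM configs
    // whose error type can actually fail.
    evm_config.next_cfg_and_block_env(parent, attributes).map_err(PayloadBuilderError::other)
}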
@@ -301,7 +305,7 @@ where } // calculate the requests and the requests root - let (requests, requests_root) = (None, None); + let (requests, requests_hash) = (None, None); let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( &mut db, @@ -318,7 +322,7 @@ where db.take_bundle(), vec![receipts.clone()].into(), block_number, - vec![requests.clone().unwrap_or_default()], + vec![requests.unwrap_or_default()], ); let receipts_root = execution_outcome.receipts_root_slow(block_number).expect("Number is in range"); @@ -330,7 +334,7 @@ where let state_provider = db.database.0.inner.borrow_mut(); state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_hash=%parent_header.hash(), %err, "failed to calculate state root for payload" ); @@ -352,9 +356,9 @@ where executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(), )?; - excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { - let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); - let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); + excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_header.timestamp) { + let parent_excess_blob_gas = parent_header.excess_blob_gas.unwrap_or_default(); + let parent_blob_gas_used = parent_header.blob_gas_used.unwrap_or_default(); Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) } else { // for the first post-fork block, both parent.blob_gas_used and @@ -366,7 +370,7 @@ where } let header = Header { - parent_hash: parent_block.hash(), + parent_hash: parent_header.hash(), ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: initialized_block_env.coinbase, state_root, @@ -378,7 +382,7 @@ where mix_hash: attributes.payload_attributes.prev_randao, nonce: BEACON_NONCE.into(), base_fee_per_gas: Some(base_fee), - number: parent_block.number + 1, + number: parent_header.number + 1, gas_limit: block_gas_limit, difficulty: U256::ZERO, gas_used: cumulative_gas_used, @@ -386,19 +390,13 @@ where parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), - requests_root, + requests_hash, }; // seal the block let block = Block { header, - body: BlockBody { - transactions: executed_txs, - ommers: vec![], - withdrawals, - sidecars: None, - requests, - }, + body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals, sidecars: None }, }; let sealed_block = block.seal_slow(); diff --git a/crates/bsc/payload/src/payload.rs b/crates/bsc/payload/src/payload.rs index 457d67c4d6..103d7d9925 100644 --- a/crates/bsc/payload/src/payload.rs +++ b/crates/bsc/payload/src/payload.rs @@ -2,8 +2,7 @@ //! 
Bsc builder support -use std::convert::Infallible; - +use alloy_eips::eip7685::Requests; use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, @@ -14,9 +13,9 @@ use reth_payload_builder::EthPayloadBuilderAttributes; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives::{BlobTransactionSidecar, SealedBlock, Withdrawals}; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, - convert_block_to_payload_field_v2, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; +use std::{convert::Infallible, sync::Arc}; /// Bsc Payload Builder Attributes #[derive(Debug, Clone, PartialEq, Eq)] @@ -32,8 +31,14 @@ impl PayloadBuilderAttributes for BscPayloadBuilderAttributes { /// Creates a new payload builder for the given parent block and the attributes. /// /// Derives the unique [`PayloadId`] for the given parent and attributes - fn try_new(parent: B256, attributes: PayloadAttributes) -> Result<Self, Self::Error> { - Ok(Self { payload_attributes: EthPayloadBuilderAttributes::try_new(parent, attributes)? }) + fn try_new( + parent: B256, + attributes: PayloadAttributes, + version: u8, + ) -> Result<Self, Self::Error> { + Ok(Self { + payload_attributes: EthPayloadBuilderAttributes::try_new(parent, attributes, version)?, + }) } fn payload_id(&self) -> PayloadId { @@ -110,8 +115,8 @@ impl BscBuiltPayload { } /// Adds sidecars to the payload. - pub fn extend_sidecars(&mut self, sidecars: Vec<BlobTransactionSidecar>) { - self.sidecars.extend(sidecars) + pub fn extend_sidecars(&mut self, sidecars: Vec<Arc<BlobTransactionSidecar>>) { + self.sidecars.extend(sidecars.into_iter().map(|arc| (*arc).clone())); } } @@ -127,6 +132,10 @@ impl BuiltPayload for BscBuiltPayload { fn executed_block(&self) -> Option<ExecutedBlock> { self.executed_block.clone() } + + fn requests(&self) -> Option<Requests> { + None + } } impl BuiltPayload for &BscBuiltPayload { @@ -141,6 +150,10 @@ fn executed_block(&self) -> Option<ExecutedBlock> { self.executed_block.clone() } + + fn requests(&self) -> Option<Requests> { + None + } } // V1 engine_getPayloadV1 response @@ -185,7 +198,7 @@ impl From<BscBuiltPayload> for ExecutionPayloadEnvelopeV4 { let BscBuiltPayload { block, fees, sidecars, ..
} = value; Self { - execution_payload: block_to_payload_v4(block), + execution_payload: block_to_payload_v3(block), block_value: fees, // From the engine API spec: // // should_override_builder: false, blobs_bundle: sidecars.into_iter().map(Into::into).collect::<Vec<_>>().into(), + execution_requests: Default::default(), } } } diff --git a/crates/bsc/primitives/Cargo.toml b/crates/bsc/primitives/Cargo.toml index c486ecda9d..8b5ee00125 100644 --- a/crates/bsc/primitives/Cargo.toml +++ b/crates/bsc/primitives/Cargo.toml @@ -20,6 +20,7 @@ revm-primitives.workspace = true alloy-chains.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true reth-bsc-chainspec.workspace = true reth-bsc-forks.workspace = true diff --git a/crates/bsc/primitives/src/system_contracts/mod.rs b/crates/bsc/primitives/src/system_contracts/mod.rs index 8feb8f103e..570dabe4bd 100644 --- a/crates/bsc/primitives/src/system_contracts/mod.rs +++ b/crates/bsc/primitives/src/system_contracts/mod.rs @@ -3,6 +3,7 @@ use std::collections::HashMap; use alloy_chains::Chain; +use alloy_consensus::Transaction; use alloy_primitives::{hex, Address, BlockNumber}; use include_dir::{include_dir, Dir}; use lazy_static::lazy_static; diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 5327d5a53c..7b90e9a028 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -52,6 +52,7 @@ rand = { workspace = true, optional = true } revm = { workspace = true, optional = true } [dev-dependencies] +reth-testing-utils.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true alloy-consensus.workspace = true @@ -62,9 +63,15 @@ serial_test = "3.1.1" [features] test-utils = [ - "alloy-signer", - "alloy-signer-local", - "alloy-consensus", - "rand", - "revm" + "alloy-signer", + "alloy-signer-local", + "alloy-consensus", + "rand", + "revm", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-trie/test-utils", + "revm?/test-utils", + "reth-provider/test-utils", + "reth-revm/test-utils" ] diff --git a/crates/chain-state/src/chain_info.rs b/crates/chain-state/src/chain_info.rs index d36d9f47e4..3c75544ac4 100644 --- a/crates/chain-state/src/chain_info.rs +++ b/crates/chain-state/src/chain_info.rs @@ -95,14 +95,12 @@ impl ChainInfoTracker { /// Returns the safe header of the chain. pub fn get_safe_num_hash(&self) -> Option<BlockNumHash> { - let h = self.inner.safe_block.borrow(); - h.as_ref().map(|h| h.num_hash()) + self.inner.safe_block.borrow().as_ref().map(SealedHeader::num_hash) } /// Returns the finalized header of the chain. pub fn get_finalized_num_hash(&self) -> Option<BlockNumHash> { - let h = self.inner.finalized_block.borrow(); - h.as_ref().map(|h| h.num_hash()) + self.inner.finalized_block.borrow().as_ref().map(SealedHeader::num_hash) } /// Sets the canonical head of the chain. @@ -169,3 +167,200 @@ struct ChainInfoInner { /// The block that the beacon node considers finalized.
finalized_block: watch::Sender<Option<SealedHeader>>, } + +#[cfg(test)] +mod tests { + use super::*; + use reth_testing_utils::{generators, generators::random_header}; + + #[test] + fn test_chain_info() { + // Create a random header + let mut rng = generators::rng(); + let header = random_header(&mut rng, 10, None); + + // Create a new chain info tracker with the header + let tracker = ChainInfoTracker::new(header.clone(), None, None); + + // Fetch the chain information from the tracker + let chain_info = tracker.chain_info(); + + // Verify that the chain information matches the header + assert_eq!(chain_info.best_number, header.number); + assert_eq!(chain_info.best_hash, header.hash()); + } + + #[test] + fn test_on_forkchoice_update_received() { + // Create a random block header + let mut rng = generators::rng(); + let header = random_header(&mut rng, 10, None); + + // Create a new chain info tracker with the header + let tracker = ChainInfoTracker::new(header, None, None); + + // Assert that there has been no forkchoice update yet (the timestamp is None) + assert!(tracker.last_forkchoice_update_received_at().is_none()); + + // Call the method to record the receipt of a forkchoice update + tracker.on_forkchoice_update_received(); + + // Assert that there is now a timestamp indicating when the forkchoice update was received + assert!(tracker.last_forkchoice_update_received_at().is_some()); + } + + #[test] + fn test_on_transition_configuration_exchanged() { + // Create a random header + let mut rng = generators::rng(); + let header = random_header(&mut rng, 10, None); + + // Create a new chain info tracker with the header + let tracker = ChainInfoTracker::new(header, None, None); + + // Assert that there has been no transition configuration exchange yet (the timestamp is + // None) + assert!(tracker.last_transition_configuration_exchanged_at().is_none()); + + // Call the method to record the transition configuration exchange + tracker.on_transition_configuration_exchanged(); + + // Assert that there is now a timestamp indicating when the transition configuration + // exchange occurred + assert!(tracker.last_transition_configuration_exchanged_at().is_some()); + } + + #[test] + fn test_set_canonical_head() { + // Create a random number generator + let mut rng = generators::rng(); + // Generate two random headers for testing + let header1 = random_header(&mut rng, 10, None); + let header2 = random_header(&mut rng, 20, None); + + // Create a new chain info tracker with the first header + let tracker = ChainInfoTracker::new(header1, None, None); + + // Set the second header as the canonical head of the tracker + tracker.set_canonical_head(header2.clone()); + + // Assert that the tracker now uses the second header as its canonical head + let canonical_head = tracker.get_canonical_head(); + assert_eq!(canonical_head, header2); + } + + #[test] + fn test_set_safe() { + // Create a random number generator + let mut rng = generators::rng(); + + // Case 1: basic test + // Generate two random headers for the test + let header1 = random_header(&mut rng, 10, None); + let header2 = random_header(&mut rng, 20, None); + + // Create a new chain info tracker with the first header (header1) + let tracker = ChainInfoTracker::new(header1, None, None); + + // Call the set_safe method with the second header (header2) + tracker.set_safe(header2.clone()); + + // Verify that the tracker now has header2 as the safe block + let safe_header = tracker.get_safe_header(); + assert!(safe_header.is_some()); // Ensure a safe header is present +
let safe_header = safe_header.unwrap(); + assert_eq!(safe_header, header2); + + // Case 2: call with the same header as the current safe block + // Call set_safe again with the same header (header2) + tracker.set_safe(header2.clone()); + + // Verify that nothing changes and the safe header remains the same + let same_safe_header = tracker.get_safe_header(); + assert!(same_safe_header.is_some()); + let same_safe_header = same_safe_header.unwrap(); + assert_eq!(same_safe_header, header2); + + // Case 3: call with a different (new) header + // Generate a third header with a higher block number + let header3 = random_header(&mut rng, 30, None); + + // Call set_safe with this new header (header3) + tracker.set_safe(header3.clone()); + + // Verify that the safe header is updated with the new header + let updated_safe_header = tracker.get_safe_header(); + assert!(updated_safe_header.is_some()); + let updated_safe_header = updated_safe_header.unwrap(); + assert_eq!(updated_safe_header, header3); + } + + #[test] + fn test_set_finalized() { + // Create a random number generator + let mut rng = generators::rng(); + + // Generate random headers for testing + let header1 = random_header(&mut rng, 10, None); + let header2 = random_header(&mut rng, 20, None); + let header3 = random_header(&mut rng, 30, None); + + // Create a new chain info tracker with the first header + let tracker = ChainInfoTracker::new(header1, None, None); + + // Initial state: finalized header should be None + assert!(tracker.get_finalized_header().is_none()); + + // Set the second header as the finalized header + tracker.set_finalized(header2.clone()); + + // Assert that the tracker now uses the second header as its finalized block + let finalized_header = tracker.get_finalized_header(); + assert!(finalized_header.is_some()); + let finalized_header = finalized_header.unwrap(); + assert_eq!(finalized_header, header2); + + // Case 2: attempt to set the same finalized header again + tracker.set_finalized(header2.clone()); + + // The finalized header should remain unchanged + let unchanged_finalized_header = tracker.get_finalized_header(); + assert_eq!(unchanged_finalized_header.unwrap(), header2); // Should still be header2 + + // Case 3: set a higher block number as finalized + tracker.set_finalized(header3.clone()); + + // The finalized header should now be updated to header3 + let updated_finalized_header = tracker.get_finalized_header(); + assert!(updated_finalized_header.is_some()); + assert_eq!(updated_finalized_header.unwrap(), header3); + } + + #[test] + fn test_get_finalized_num_hash() { + // Create a random header + let mut rng = generators::rng(); + let finalized_header = random_header(&mut rng, 10, None); + + // Create a new chain info tracker with the finalized header + let tracker = + ChainInfoTracker::new(finalized_header.clone(), Some(finalized_header.clone()), None); + + // Assert that the BlockNumHash returned matches the finalized header + assert_eq!(tracker.get_finalized_num_hash(), Some(finalized_header.num_hash())); + } + + #[test] + fn test_get_safe_num_hash() { + // Create a random header + let mut rng = generators::rng(); + let safe_header = random_header(&mut rng, 10, None); + + // Create a new chain info tracker with the safe header + let tracker = ChainInfoTracker::new(safe_header.clone(), None, None); + tracker.set_safe(safe_header.clone()); + + // Assert that the BlockNumHash returned matches the safe header + assert_eq!(tracker.get_safe_num_hash(), Some(safe_header.num_hash())); + } +} diff --git
a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index fb67608ebd..6bef197bea 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -4,7 +4,7 @@ use crate::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, ChainInfoTracker, MemoryOverlayStateProvider, }; -use alloy_eips::BlockNumHash; +use alloy_eips::{BlockHashOrNumber, BlockNumHash}; use alloy_primitives::{map::HashMap, Address, TxHash, B256}; use parking_lot::RwLock; use reth_chainspec::ChainInfo; @@ -504,20 +504,6 @@ impl CanonicalInMemoryState { self.inner.canon_state_notification_sender.send(event).ok(); } - /// Return state provider with reference to in-memory blocks that overlay database state. - /// - /// This merges the state of all blocks that are part of the chain that the requested block is - /// the head of. This includes all blocks that connect back to the canonical block on disk. - pub fn state_provider_from_state( - &self, - state: &BlockState, - historical: StateProviderBox, - ) -> MemoryOverlayStateProvider { - let in_memory = state.chain().into_iter().map(|block_state| block_state.block()).collect(); - - MemoryOverlayStateProvider::new(historical, in_memory) - } - /// Return state provider with reference to in-memory blocks that overlay database state. /// /// This merges the state of all blocks that are part of the chain that the requested block is @@ -528,7 +514,7 @@ impl CanonicalInMemoryState { historical: StateProviderBox, ) -> MemoryOverlayStateProvider { let in_memory = if let Some(state) = self.state_by_hash(hash) { - state.chain().into_iter().map(|block_state| block_state.block()).collect() + state.chain().map(|block_state| block_state.block()).collect() } else { Vec::new() }; @@ -706,10 +692,8 @@ impl BlockState { /// Returns a vector of `BlockStates` representing the entire in memory chain. /// The block state order in the output vector is newest to oldest (highest to lowest), /// including self as the first element. - pub fn chain(&self) -> Vec<&Self> { - let mut chain = vec![self]; - self.append_parent_chain(&mut chain); - chain + pub fn chain(&self) -> impl Iterator<Item = &Self> { + std::iter::successors(Some(self), |state| state.parent.as_deref()) } /// Appends the parent chain of this [`BlockState`] to the given vector. @@ -723,6 +707,65 @@ impl BlockState { pub fn iter(self: Arc<Self>) -> impl Iterator<Item = Arc<Self>> { std::iter::successors(Some(self), |state| state.parent.clone()) } + + /// Return state provider with reference to in-memory blocks that overlay database state. + /// + /// This merges the state of all blocks that are part of the chain that this block is + /// the head of. This includes all blocks that connect back to the canonical block on disk. + pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider { + let in_memory = self.chain().map(|block_state| block_state.block()).collect(); + + MemoryOverlayStateProvider::new(historical, in_memory) + } + + /// Tries to find a block by [`BlockHashOrNumber`] in the chain ending at this block. + pub fn block_on_chain(&self, hash_or_num: BlockHashOrNumber) -> Option<&Self> { + self.chain().find(|block| match hash_or_num { + BlockHashOrNumber::Hash(hash) => block.hash() == hash, + BlockHashOrNumber::Number(number) => block.number() == number, + }) + } + + /// Tries to find a transaction by [`TxHash`] in the chain ending at this block.
+ pub fn transaction_on_chain(&self, hash: TxHash) -> Option<TransactionSigned> { + self.chain().find_map(|block_state| { + block_state + .block_ref() + .block() + .body + .transactions() + .find(|tx| tx.hash() == hash) + .cloned() + }) + } + + /// Tries to find a transaction with meta by [`TxHash`] in the chain ending at this block. + pub fn transaction_meta_on_chain( + &self, + tx_hash: TxHash, + ) -> Option<(TransactionSigned, TransactionMeta)> { + self.chain().find_map(|block_state| { + block_state + .block_ref() + .block() + .body + .transactions() + .enumerate() + .find(|(_, tx)| tx.hash() == tx_hash) + .map(|(index, tx)| { + let meta = TransactionMeta { + tx_hash, + index: index as u64, + block_hash: block_state.hash(), + block_number: block_state.block_ref().block.number, + base_fee: block_state.block_ref().block.header.base_fee_per_gas, + timestamp: block_state.block_ref().block.timestamp, + excess_blob_gas: block_state.block_ref().block.excess_blob_gas, + }; + (tx.clone(), meta) + }) + }) + } } /// Represents an executed block stored in-memory. @@ -734,7 +777,7 @@ pub struct ExecutedBlock { pub senders: Arc<Vec<Address>>, /// Block's execution outcome. pub execution_output: Arc<ExecutionOutcome>, - /// Block's hashedst state. + /// Block's hashed state. pub hashed_state: Arc<HashedPostState>, /// Trie updates that result of applying the block. pub trie: Arc<TrieUpdates>, @@ -869,10 +912,11 @@ impl NewCanonicalChain { mod tests { use super::*; use crate::test_utils::TestBlockBuilder; + use alloy_eips::eip7685::Requests; use alloy_primitives::{map::HashSet, BlockNumber, Bytes, StorageKey, StorageValue}; use rand::Rng; use reth_errors::ProviderResult; - use reth_primitives::{Account, Bytecode, Receipt, Requests}; + use reth_primitives::{Account, Bytecode, Receipt}; use reth_storage_api::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; @@ -1385,7 +1429,7 @@ let parents = single_block.parent_state_chain(); assert_eq!(parents.len(), 0); - let block_state_chain = single_block.chain(); + let block_state_chain = single_block.chain().collect::<Vec<_>>(); assert_eq!(block_state_chain.len(), 1); assert_eq!(block_state_chain[0].block().block.number, single_block_number); assert_eq!(block_state_chain[0].block().block.hash(), single_block_hash); @@ -1396,18 +1440,18 @@ let mut test_block_builder = TestBlockBuilder::default(); let chain = create_mock_state_chain(&mut test_block_builder, 3); - let block_state_chain = chain[2].chain(); + let block_state_chain = chain[2].chain().collect::<Vec<_>>(); assert_eq!(block_state_chain.len(), 3); assert_eq!(block_state_chain[0].block().block.number, 3); assert_eq!(block_state_chain[1].block().block.number, 2); assert_eq!(block_state_chain[2].block().block.number, 1); - let block_state_chain = chain[1].chain(); + let block_state_chain = chain[1].chain().collect::<Vec<_>>(); assert_eq!(block_state_chain.len(), 2); assert_eq!(block_state_chain[0].block().block.number, 2); assert_eq!(block_state_chain[1].block().block.number, 1); - let block_state_chain = chain[0].chain(); + let block_state_chain = chain[0].chain().collect::<Vec<_>>(); assert_eq!(block_state_chain.len(), 1); assert_eq!(block_state_chain[0].block().block.number, 1); } diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs index 0f05ef021d..d21ed2059c 100644 --- a/crates/chain-state/src/lib.rs +++ b/crates/chain-state/src/lib.rs @@ -22,7 +22,7 @@ pub use notifications::{ }; mod memory_overlay; -pub use memory_overlay::MemoryOverlayStateProvider; +pub use memory_overlay::{MemoryOverlayStateProvider,
MemoryOverlayStateProviderRef}; pub use cache::cached_provider::CachedStateProvider; diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index eb125dad11..ada0faee49 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -7,14 +7,26 @@ use alloy_primitives::{ use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode}; use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateProviderBox, - StateRootProvider, StorageRootProvider, + AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, + StorageRootProvider, }; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, }; use std::sync::OnceLock; +/// A state provider that stores references to in-memory blocks along with their state as well as a +/// reference of the historical state provider for fallback lookups. +#[allow(missing_debug_implementations)] +pub struct MemoryOverlayStateProviderRef<'a> { + /// Historical state provider for state lookups that are not found in in-memory blocks. + pub(crate) historical: Box<dyn StateProvider + 'a>, + /// The collection of executed parent blocks. Expected order is newest to oldest. + pub(crate) in_memory: Vec<ExecutedBlock>, + /// Lazy-loaded in-memory trie data. + pub(crate) trie_state: OnceLock<MemoryOverlayTrieState>, +} + /// A state provider that stores references to in-memory blocks along with their state as well as /// the historical state provider for fallback lookups. #[allow(missing_debug_implementations)] @@ -27,193 +39,200 @@ pub struct MemoryOverlayStateProvider { pub(crate) trie_state: OnceLock<MemoryOverlayTrieState>, } -impl MemoryOverlayStateProvider { - /// Create new memory overlay state provider. - /// - /// ## Arguments - /// - /// - `in_memory` - the collection of executed ancestor blocks in reverse. - /// - `historical` - a historical state provider for the latest ancestor block stored in the - /// database. - pub fn new(historical: Box<dyn StateProvider>, in_memory: Vec<ExecutedBlock>) -> Self { - Self { historical, in_memory, trie_state: OnceLock::new() } - } - - /// Turn this state provider into a [`StateProviderBox`] - pub fn boxed(self) -> StateProviderBox { - Box::new(self) - } - - /// Return lazy-loaded trie state aggregated from in-memory blocks. - fn trie_state(&self) -> &MemoryOverlayTrieState { - self.trie_state.get_or_init(|| { - let mut trie_state = MemoryOverlayTrieState::default(); - for block in self.in_memory.iter().rev() { - trie_state.state.extend_ref(block.hashed_state.as_ref()); - trie_state.nodes.extend_ref(block.trie.as_ref()); - } - trie_state - }) - } -} +macro_rules! impl_state_provider { + ([$($tokens:tt)*],$type:ty, $historical_type:ty) => { + impl $($tokens)* $type { + /// Create new memory overlay state provider. + /// + /// ## Arguments + /// + /// - `in_memory` - the collection of executed ancestor blocks in reverse. + /// - `historical` - a historical state provider for the latest ancestor block stored in the + /// database.
+ pub fn new(historical: $historical_type, in_memory: Vec<ExecutedBlock>) -> Self { + Self { historical, in_memory, trie_state: OnceLock::new() } + } + + /// Turn this state provider into a boxed state provider + pub fn boxed(self) -> $historical_type { + Box::new(self) + } -impl BlockHashReader for MemoryOverlayStateProvider { - fn block_hash(&self, number: BlockNumber) -> ProviderResult<Option<B256>> { - for block in &self.in_memory { - if block.block.number == number { - return Ok(Some(block.block.hash())) + /// Return lazy-loaded trie state aggregated from in-memory blocks. + fn trie_state(&self) -> &MemoryOverlayTrieState { + self.trie_state.get_or_init(|| { + let mut trie_state = MemoryOverlayTrieState::default(); + for block in self.in_memory.iter().rev() { + trie_state.state.extend_ref(block.hashed_state.as_ref()); + trie_state.nodes.extend_ref(block.trie.as_ref()); + } + trie_state + }) } } - self.historical.block_hash(number) - } - - fn canonical_hashes_range( - &self, - start: BlockNumber, - end: BlockNumber, - ) -> ProviderResult<Vec<B256>> { - let range = start..end; - let mut earliest_block_number = None; - let mut in_memory_hashes = Vec::new(); - for block in &self.in_memory { - if range.contains(&block.block.number) { - in_memory_hashes.insert(0, block.block.hash()); - earliest_block_number = Some(block.block.number); + impl $($tokens)* BlockHashReader for $type { + fn block_hash(&self, number: BlockNumber) -> ProviderResult<Option<B256>> { + for block in &self.in_memory { + if block.block.number == number { + return Ok(Some(block.block.hash())) + } + } + + self.historical.block_hash(number) + } + + fn canonical_hashes_range( + &self, + start: BlockNumber, + end: BlockNumber, + ) -> ProviderResult<Vec<B256>> { + let range = start..end; + let mut earliest_block_number = None; + let mut in_memory_hashes = Vec::new(); + for block in &self.in_memory { + if range.contains(&block.block.number) { + in_memory_hashes.insert(0, block.block.hash()); + earliest_block_number = Some(block.block.number); + } + } + + let mut hashes = + self.historical.canonical_hashes_range(start, earliest_block_number.unwrap_or(end))?; + hashes.append(&mut in_memory_hashes); + Ok(hashes) } } - let mut hashes = - self.historical.canonical_hashes_range(start, earliest_block_number.unwrap_or(end))?; - hashes.append(&mut in_memory_hashes); - Ok(hashes) - } -} + impl $($tokens)* AccountReader for $type { + fn basic_account(&self, address: Address) -> ProviderResult<Option<Account>> { + for block in &self.in_memory { + if let Some(account) = block.execution_output.account(&address) { + return Ok(account) + } + } -impl AccountReader for MemoryOverlayStateProvider { - fn basic_account(&self, address: Address) -> ProviderResult<Option<Account>> { - for block in &self.in_memory { - if let Some(account) = block.execution_output.account(&address) { - return Ok(account) + self.historical.basic_account(address) } } - self.historical.basic_account(address) - } -} + impl $($tokens)* StateRootProvider for $type { + fn state_root(&self, state: HashedPostState) -> ProviderResult<B256> { + self.state_root_from_nodes(TrieInput::from_state(state)) + } -impl StateRootProvider for MemoryOverlayStateProvider { - fn state_root(&self, state: HashedPostState) -> ProviderResult<B256> { - self.state_root_from_nodes(TrieInput::from_state(state)) - } - - fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult<B256> { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.state_root_from_nodes(input) - } - - fn state_root_with_updates( - &self, - state:
HashedPostState, - ) -> ProviderResult<(B256, TrieUpdates)> { - self.state_root_from_nodes_with_updates(TrieInput::from_state(state)) - } - - fn state_root_from_nodes_with_updates( - &self, - mut input: TrieInput, - ) -> ProviderResult<(B256, TrieUpdates)> { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.state_root_from_nodes_with_updates(input) - } -} + fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult<B256> { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.state_root_from_nodes(input) + } -impl StorageRootProvider for MemoryOverlayStateProvider { - // TODO: Currently this does not reuse available in-memory trie nodes. - fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult<B256> { - let state = &self.trie_state().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_root(address, hashed_storage) - } - - // TODO: Currently this does not reuse available in-memory trie nodes. - fn storage_proof( - &self, - address: Address, - slot: B256, - storage: HashedStorage, - ) -> ProviderResult<reth_trie::StorageProof> { - let state = &self.trie_state().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_proof(address, slot, hashed_storage) - } -} + fn state_root_with_updates( + &self, + state: HashedPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + self.state_root_from_nodes_with_updates(TrieInput::from_state(state)) + } -impl StateProofProvider for MemoryOverlayStateProvider { - fn proof( - &self, - mut input: TrieInput, - address: Address, - slots: &[B256], - ) -> ProviderResult<AccountProof> { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.proof(input, address, slots) - } - - fn multiproof( - &self, - mut input: TrieInput, - targets: HashMap<B256, HashSet<B256>>, - ) -> ProviderResult<MultiProof> { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.multiproof(input, targets) - } - - fn witness( - &self, - mut input: TrieInput, - target: HashedPostState, - ) -> ProviderResult<HashMap<B256, Bytes>> { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.witness(input, target) - } -} + fn state_root_from_nodes_with_updates( + &self, + mut input: TrieInput, + ) -> ProviderResult<(B256, TrieUpdates)> { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.state_root_from_nodes_with_updates(input) + } + } + + impl $($tokens)* StorageRootProvider for $type { + // TODO: Currently this does not reuse available in-memory trie nodes.
+ fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult<B256> { + let state = &self.trie_state().state; + let mut hashed_storage = + state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed_storage.extend(&storage); + self.historical.storage_root(address, hashed_storage) + } -impl StateProvider for MemoryOverlayStateProvider { - fn storage( - &self, - address: Address, - storage_key: StorageKey, - ) -> ProviderResult<Option<StorageValue>> { - for block in &self.in_memory { - if let Some(value) = block.execution_output.storage(&address, storage_key.into()) { - return Ok(Some(value)) + // TODO: Currently this does not reuse available in-memory trie nodes. + fn storage_proof( + &self, + address: Address, + slot: B256, + storage: HashedStorage, + ) -> ProviderResult<reth_trie::StorageProof> { + let state = &self.trie_state().state; + let mut hashed_storage = + state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed_storage.extend(&storage); + self.historical.storage_proof(address, slot, hashed_storage) + } } - self.historical.storage(address, storage_key) - } + impl $($tokens)* StateProofProvider for $type { + fn proof( + &self, + mut input: TrieInput, + address: Address, + slots: &[B256], + ) -> ProviderResult<AccountProof> { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.proof(input, address, slots) + } + + fn multiproof( + &self, + mut input: TrieInput, + targets: HashMap<B256, HashSet<B256>>, + ) -> ProviderResult<MultiProof> { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.multiproof(input, targets) + } - fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult<Option<Bytecode>> { - for block in &self.in_memory { - if let Some(contract) = block.execution_output.bytecode(&code_hash) { - return Ok(Some(contract)) + fn witness( + &self, + mut input: TrieInput, + target: HashedPostState, + ) -> ProviderResult<HashMap<B256, Bytes>> { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.witness(input, target) + } } - self.historical.bytecode_by_hash(code_hash) - } + impl $($tokens)* StateProvider for $type { + fn storage( + &self, + address: Address, + storage_key: StorageKey, + ) -> ProviderResult<Option<StorageValue>> { + for block in &self.in_memory { + if let Some(value) = block.execution_output.storage(&address, storage_key.into()) { + return Ok(Some(value)) + } + } + + self.historical.storage(address, storage_key) + } + + fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult<Option<Bytecode>> { + for block in &self.in_memory { + if let Some(contract) = block.execution_output.bytecode(&code_hash) { + return Ok(Some(contract)) + } + } + + self.historical.bytecode_by_hash(code_hash) + } + } + }; } +impl_state_provider!([], MemoryOverlayStateProvider, Box<dyn StateProvider>); +impl_state_provider!([<'a>], MemoryOverlayStateProviderRef<'a>, Box<dyn StateProvider + 'a>); + /// The collection of data necessary for trie-related operations for [`MemoryOverlayStateProvider`].
#[derive(Clone, Default, Debug)] pub(crate) struct MemoryOverlayTrieState { diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index fc717314b3..582e1d2a05 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -190,3 +190,232 @@ impl Stream for ForkChoiceStream { } } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + use reth_execution_types::ExecutionOutcome; + use reth_primitives::{Receipt, Receipts, TransactionSigned, TxType}; + + #[test] + fn test_commit_notification() { + let block = SealedBlockWithSenders::default(); + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + + let mut block1 = block.clone(); + block1.set_block_number(1); + block1.set_hash(block1_hash); + + let mut block2 = block; + block2.set_block_number(2); + block2.set_hash(block2_hash); + + let chain = Arc::new(Chain::new( + vec![block1.clone(), block2.clone()], + ExecutionOutcome::default(), + None, + )); + + // Create a commit notification + let notification = CanonStateNotification::Commit { new: chain.clone() }; + + // Test that `committed` returns the correct chain + assert_eq!(notification.committed(), chain); + + // Test that `reverted` returns None for `Commit` + assert!(notification.reverted().is_none()); + + // Test that `tip` returns the correct block + assert_eq!(*notification.tip(), block2); + } + + #[test] + fn test_reorg_notification() { + let block = SealedBlockWithSenders::default(); + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + let block3_hash = B256::new([0x03; 32]); + + let mut block1 = block.clone(); + block1.set_block_number(1); + block1.set_hash(block1_hash); + + let mut block2 = block.clone(); + block2.set_block_number(2); + block2.set_hash(block2_hash); + + let mut block3 = block; + block3.set_block_number(3); + block3.set_hash(block3_hash); + + let old_chain = + Arc::new(Chain::new(vec![block1.clone()], ExecutionOutcome::default(), None)); + let new_chain = Arc::new(Chain::new( + vec![block2.clone(), block3.clone()], + ExecutionOutcome::default(), + None, + )); + + // Create a reorg notification + let notification = + CanonStateNotification::Reorg { old: old_chain.clone(), new: new_chain.clone() }; + + // Test that `reverted` returns the old chain + assert_eq!(notification.reverted(), Some(old_chain)); + + // Test that `committed` returns the new chain + assert_eq!(notification.committed(), new_chain); + + // Test that `tip` returns the tip of the new chain (last block in the new chain) + assert_eq!(*notification.tip(), block3); + } + + #[test] + fn test_block_receipts_commit() { + // Create a default block instance for use in block definitions. + let block = SealedBlockWithSenders::default(); + + // Define unique hashes for two blocks to differentiate them in the chain. + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + + // Create a default transaction to include in block1's transactions. + let tx = TransactionSigned::default(); + + // Create a clone of the default block and customize it to act as block1. + let mut block1 = block.clone(); + block1.set_block_number(1); + block1.set_hash(block1_hash); + // Add the transaction to block1's transactions. + block1.block.body.transactions.push(tx); + + // Clone the default block and customize it to act as block2. 
+ let mut block2 = block; + block2.set_block_number(2); + block2.set_hash(block2_hash); + + // Create a receipt for the transaction in block1. + #[allow(clippy::needless_update)] + let receipt1 = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 12345, + logs: vec![], + success: true, + ..Default::default() + }; + + // Wrap the receipt in a `Receipts` structure, as expected in the `ExecutionOutcome`. + let receipts = Receipts { receipt_vec: vec![vec![Some(receipt1.clone())]] }; + + // Define an `ExecutionOutcome` with the created receipts. + let execution_outcome = ExecutionOutcome { receipts, ..Default::default() }; + + // Create a new chain segment with `block1` and `block2` and the execution outcome. + let new_chain = + Arc::new(Chain::new(vec![block1.clone(), block2.clone()], execution_outcome, None)); + + // Create a commit notification containing the new chain segment. + let notification = CanonStateNotification::Commit { new: new_chain }; + + // Call `block_receipts` on the commit notification to retrieve block receipts. + let block_receipts = notification.block_receipts(); + + // Assert that only one receipt entry exists in the `block_receipts` list. + assert_eq!(block_receipts.len(), 1); + + // Verify that the first entry matches block1's hash and transaction receipt. + assert_eq!( + block_receipts[0].0, + BlockReceipts { + block: block1.num_hash(), + tx_receipts: vec![(B256::default(), receipt1)] + } + ); + + // Assert that the receipt is from the committed segment (not reverted). + assert!(!block_receipts[0].1); + } + + #[test] + fn test_block_receipts_reorg() { + // Define block1 for the old chain segment, which will be reverted. + let mut old_block1 = SealedBlockWithSenders::default(); + old_block1.set_block_number(1); + old_block1.set_hash(B256::new([0x01; 32])); + old_block1.block.body.transactions.push(TransactionSigned::default()); + + // Create a receipt for a transaction in the reverted block. + #[allow(clippy::needless_update)] + let old_receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 54321, + logs: vec![], + success: false, + ..Default::default() + }; + let old_receipts = Receipts { receipt_vec: vec![vec![Some(old_receipt.clone())]] }; + + let old_execution_outcome = + ExecutionOutcome { receipts: old_receipts, ..Default::default() }; + + // Create an old chain segment to be reverted, containing `old_block1`. + let old_chain = Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, None)); + + // Define block2 for the new chain segment, which will be committed. + let mut new_block1 = SealedBlockWithSenders::default(); + new_block1.set_block_number(2); + new_block1.set_hash(B256::new([0x02; 32])); + new_block1.block.body.transactions.push(TransactionSigned::default()); + + // Create a receipt for a transaction in the new committed block. + #[allow(clippy::needless_update)] + let new_receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 12345, + logs: vec![], + success: true, + ..Default::default() + }; + let new_receipts = Receipts { receipt_vec: vec![vec![Some(new_receipt.clone())]] }; + + let new_execution_outcome = + ExecutionOutcome { receipts: new_receipts, ..Default::default() }; + + // Create a new chain segment to be committed, containing `new_block1`. + let new_chain = Arc::new(Chain::new(vec![new_block1.clone()], new_execution_outcome, None)); + + // Create a reorg notification with both reverted (old) and committed (new) chain segments. 
+ let notification = CanonStateNotification::Reorg { old: old_chain, new: new_chain }; + + // Retrieve receipts from both old (reverted) and new (committed) segments. + let block_receipts = notification.block_receipts(); + + // Assert there are two receipt entries, one from each chain segment. + assert_eq!(block_receipts.len(), 2); + + // Verify that the first entry matches old_block1 and its receipt from the reverted segment. + assert_eq!( + block_receipts[0].0, + BlockReceipts { + block: old_block1.num_hash(), + tx_receipts: vec![(B256::default(), old_receipt)] + } + ); + // Confirm this is from the reverted segment. + assert!(block_receipts[0].1); + + // Verify that the second entry matches new_block1 and its receipt from the committed + // segment. + assert_eq!( + block_receipts[1].0, + BlockReceipts { + block: new_block1.num_hash(), + tx_receipts: vec![(B256::default(), new_receipt)] + } + ); + // Confirm this is from the committed segment. + assert!(!block_receipts[1].1); + } +} diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index e0065374de..1c33e43955 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -2,7 +2,8 @@ use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, }; -use alloy_consensus::TxEip1559; +use alloy_consensus::{Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; +use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip7685::Requests}; use alloy_primitives::{Address, BlockNumber, Sealable, B256, U256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; @@ -10,10 +11,9 @@ use rand::{thread_rng, Rng}; use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ - constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_ROOT_HASH}, proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, Header, Receipt, Receipts, Requests, SealedBlock, SealedBlockWithSenders, - SealedHeader, Transaction, TransactionSigned, TransactionSignedEcRecovered, + BlockBody, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, + Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm::{db::BundleState, primitives::AccountInfo}; @@ -74,7 +74,7 @@ impl TestBlockBuilder { /// Gas cost of a single transaction generated by the block builder. pub fn single_tx_cost() -> U256 { - U256::from(EIP1559_INITIAL_BASE_FEE * MIN_TRANSACTION_GAS) + U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS) } /// Generates a random [`SealedBlockWithSenders`]. 
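Editor's note (not part of the diff): the tests above pin down the observable contract of `CanonStateNotification`. A consumer-side sketch using only methods exercised by those tests; `handle_notification` is an illustrative name:

fn handle_notification(notification: CanonStateNotification) {
    // `reverted()` is `Some` only for the `Reorg` variant; receipts from that
    // segment are flagged with `true` by `block_receipts()`.
    if let Some(old) = notification.reverted() {
        println!("reverted {} block(s)", old.blocks().len());
    }
    // `committed()` is present for both `Commit` and `Reorg`.
    let new = notification.committed();
    println!("committed {} block(s), tip #{}", new.blocks().len(), notification.tip().number);
}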
@@ -91,7 +91,7 @@ impl TestBlockBuilder { nonce, gas_limit: MIN_TRANSACTION_GAS, to: Address::random().into(), - max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, + max_fee_per_gas: INITIAL_BASE_FEE as u128, max_priority_fee_per_gas: 1, ..Default::default() }); @@ -135,7 +135,7 @@ impl TestBlockBuilder { gas_used: transactions.len() as u64 * MIN_TRANSACTION_GAS, gas_limit: self.chain_spec.max_gas_limit, mix_hash: B256::random(), - base_fee_per_gas: Some(EIP1559_INITIAL_BASE_FEE), + base_fee_per_gas: Some(INITIAL_BASE_FEE), transactions_root: calculate_transaction_root(&transactions), receipts_root: calculate_receipt_root(&receipts), beneficiary: Address::random(), @@ -170,7 +170,6 @@ impl TestBlockBuilder { ommers: Vec::new(), withdrawals: Some(vec![].into()), sidecars: None, - requests: None, }, }; diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index 7a08e88b02..60ffffdca9 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -17,7 +17,7 @@ reth-network-peers.workspace = true reth-trie-common.workspace = true reth-primitives-traits.workspace = true -#bsc +# bsc reth-bsc-forks = { workspace = true, optional = true } # op-reth @@ -28,7 +28,7 @@ alloy-chains = { workspace = true, features = ["serde", "rlp"] } alloy-eips = { workspace = true, features = ["serde"] } alloy-genesis.workspace = true alloy-primitives = { workspace = true, features = ["rand", "rlp"] } -alloy-trie.workspace = true +alloy-consensus.workspace = true # misc auto_impl.workspace = true @@ -50,11 +50,26 @@ bsc = [ ] optimism = ["reth-optimism-forks"] std = [ - "alloy-chains/std", - "alloy-eips/std", - "alloy-genesis/std", - "alloy-primitives/std", - "alloy-trie/std", + "alloy-chains/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "alloy-trie/std", + "reth-primitives-traits/std", + "alloy-consensus/std", + "once_cell/std" ] +arbitrary = [ + "alloy-chains/arbitrary", + "reth-ethereum-forks/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "alloy-trie/arbitrary" ] +test-utils = [ + "reth-primitives-traits/test-utils", + "reth-trie-common/test-utils" ] -arbitrary = ["alloy-chains/arbitrary"] -test-utils = [] diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index 08d727c694..9de6ace497 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,5 +1,5 @@ use crate::{ChainSpec, DepositContract}; -use alloc::vec::Vec; +use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; use alloy_eips::eip1559::BaseFeeParams; use alloy_genesis::Genesis; @@ -14,9 +14,14 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { // todo: make chain spec type generic over hardfork //type Hardfork: Clone + Copy + 'static; - /// Chain id. + /// Returns the [`Chain`] object this spec targets. fn chain(&self) -> Chain; + /// Returns the chain id number + fn chain_id(&self) -> u64 { + self.chain().id() + } + /// Get the [`BaseFeeParams`] for the chain at the given block. fn base_fee_params_at_block(&self, block_number: u64) -> BaseFeeParams; @@ -33,7 +38,7 @@ fn prune_delete_limit(&self) -> usize; /// Returns a string representation of the hardforks. - fn display_hardforks(&self) -> impl Display; + fn display_hardforks(&self) -> Box<dyn Display>; /// The genesis header.
fn genesis_header(&self) -> &Header; @@ -52,6 +57,11 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { self.chain().is_optimism() } + /// Returns `true` if this chain contains Ethereum configuration. + fn is_ethereum(&self) -> bool { + self.chain().is_ethereum() + } + /// Returns `true` if this chain contains Binance Smart Chain configuration. fn is_bsc(&self) -> bool; } @@ -81,8 +91,8 @@ impl EthChainSpec for ChainSpec { self.prune_delete_limit } - fn display_hardforks(&self) -> impl Display { - self.display_hardforks() + fn display_hardforks(&self) -> Box<dyn Display> { + Box::new(Self::display_hardforks(self)) } fn genesis_header(&self) -> &Header { @@ -102,7 +112,7 @@ impl EthChainSpec for ChainSpec { } fn is_optimism(&self) -> bool { - Self::is_optimism(self) + self.chain.is_optimism() } fn is_bsc(&self) -> bool { diff --git a/crates/chainspec/src/constants.rs b/crates/chainspec/src/constants.rs index 2e22b2299a..3f46fb6b74 100644 --- a/crates/chainspec/src/constants.rs +++ b/crates/chainspec/src/constants.rs @@ -1,11 +1,12 @@ use crate::spec::DepositContract; -use alloy_primitives::{address, b256}; +use alloy_eips::eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS; +use alloy_primitives::b256; /// Gas per transaction not creating a contract. pub const MIN_TRANSACTION_GAS: u64 = 21_000u64; /// Deposit contract address: `0x00000000219ab540356cbb839cbe05303d7705fa` pub(crate) const MAINNET_DEPOSIT_CONTRACT: DepositContract = DepositContract::new( - address!("00000000219ab540356cbb839cbe05303d7705fa"), + MAINNET_DEPOSIT_CONTRACT_ADDRESS, 11052984, b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), ); diff --git a/crates/chainspec/src/lib.rs b/crates/chainspec/src/lib.rs index 424b2b77c2..2e97caba07 100644 --- a/crates/chainspec/src/lib.rs +++ b/crates/chainspec/src/lib.rs @@ -11,6 +11,12 @@ extern crate alloc; +use once_cell as _; +#[cfg(not(feature = "std"))] +pub(crate) use once_cell::sync::{Lazy as LazyLock, OnceCell as OnceLock}; +#[cfg(feature = "std")] +pub(crate) use std::sync::{LazyLock, OnceLock}; + /// Chain specific constants pub(crate) mod constants; pub use constants::MIN_TRANSACTION_GAS; @@ -27,16 +33,16 @@ pub use reth_ethereum_forks::*; pub use api::EthChainSpec; pub use info::ChainInfo; -#[cfg(feature = "test-utils")] +#[cfg(any(test, feature = "test-utils"))] pub use spec::test_fork_ids; pub use spec::{ BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, ChainSpecProvider, DepositContract, ForkBaseFeeParams, DEV, HOLESKY, MAINNET, SEPOLIA, }; -/// Simple utility to create a `OnceCell` with a value set. -pub fn once_cell_set<T>(value: T) -> once_cell::sync::OnceCell<T> { - let once = once_cell::sync::OnceCell::new(); +/// Simple utility to create a thread-safe sync cell with a value set.
+pub fn once_cell_set<T>(value: T) -> OnceLock<T> { + let once = OnceLock::new(); + let _ = once.set(value); + once } diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 82939c1115..8f88f73ec0 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -2,11 +2,17 @@ pub use alloy_eips::eip1559::BaseFeeParams; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; +use alloy_consensus::constants::EMPTY_WITHDRAWALS; +use alloy_eips::{ eip1559::INITIAL_BASE_FEE, eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS, eip7685::EMPTY_REQUESTS_HASH, }; use alloy_genesis::Genesis; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; -use alloy_trie::EMPTY_ROOT_HASH; use derive_more::From; -use once_cell::sync::{Lazy, OnceCell}; + +use alloy_consensus::constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use reth_ethereum_forks::{ ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Hardforks, Head, DEV_HARDFORKS, @@ -16,19 +22,13 @@ use reth_network_peers::{ mainnet_nodes, op_nodes, op_testnet_nodes, opbnb_mainnet_nodes, opbnb_testnet_nodes, sepolia_nodes, NodeRecord, }; -use reth_primitives_traits::{ - constants::{ - DEV_GENESIS_HASH, EIP1559_INITIAL_BASE_FEE, EMPTY_WITHDRAWALS, ETHEREUM_BLOCK_GAS_LIMIT, - HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, - }, - Header, SealedHeader, -}; +use reth_primitives_traits::{constants::HOLESKY_GENESIS_HASH, Header, SealedHeader}; use reth_trie_common::root::state_root_ref_unhashed; -use crate::{constants::MAINNET_DEPOSIT_CONTRACT, once_cell_set, EthChainSpec}; +use crate::{constants::MAINNET_DEPOSIT_CONTRACT, once_cell_set, EthChainSpec, LazyLock, OnceLock}; /// The Ethereum mainnet spec -pub static MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| { +pub static MAINNET: LazyLock<Arc<ChainSpec>> = LazyLock::new(|| { let mut spec = ChainSpec { chain: Chain::mainnet(), genesis: serde_json::from_str(include_str!("../res/genesis/mainnet.json")) @@ -43,7 +43,7 @@ pub static MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| { hardforks: EthereumHardfork::mainnet().into(), // https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0 deposit_contract: Some(DepositContract::new( - address!("00000000219ab540356cbb839cbe05303d7705fa"), + MAINNET_DEPOSIT_CONTRACT_ADDRESS, 11052984, b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), )), @@ -56,7 +56,7 @@ pub static MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| { }); /// The Sepolia spec -pub static SEPOLIA: Lazy<Arc<ChainSpec>> = Lazy::new(|| { +pub static SEPOLIA: LazyLock<Arc<ChainSpec>> = LazyLock::new(|| { let mut spec = ChainSpec { chain: Chain::sepolia(), genesis: serde_json::from_str(include_str!("../res/genesis/sepolia.json")) @@ -81,7 +81,7 @@ pub static SEPOLIA: Lazy<Arc<ChainSpec>> = Lazy::new(|| { }); /// The Holesky spec -pub static HOLESKY: Lazy<Arc<ChainSpec>> = Lazy::new(|| { +pub static HOLESKY: LazyLock<Arc<ChainSpec>> = LazyLock::new(|| { let mut spec = ChainSpec { chain: Chain::holesky(), genesis: serde_json::from_str(include_str!("../res/genesis/holesky.json")) @@ -107,7 +107,7 @@ /// /// Includes 20 prefunded accounts with `10_000` ETH each derived from mnemonic "test test test test /// test test test test test test test junk".
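// Editor's usage sketch (not from this diff) for the `LazyLock`/`OnceLock`
// aliases and the `once_cell_set` helper above: the alias pair lets the same
// field type compile with and without `std`, and the cell is either pre-filled
// at construction or computed on first access. Assumes `OnceLock` and
// `once_cell_set` are in scope from this crate.
use alloy_primitives::B256;

fn genesis_hash_cell(known: Option<B256>) -> OnceLock<B256> {
    match known {
        // Hash known up front (e.g. MAINNET): store it immediately.
        Some(hash) => once_cell_set(hash),
        // Unknown: leave empty; filled via `get_or_init` on first access.
        None => OnceLock::new(),
    }
}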
-pub static DEV: Lazy<Arc<ChainSpec>> = Lazy::new(|| { +pub static DEV: LazyLock<Arc<ChainSpec>> = LazyLock::new(|| { ChainSpec { chain: Chain::dev(), genesis: serde_json::from_str(include_str!("../res/genesis/dev.json")) @@ -183,13 +183,13 @@ pub struct ChainSpec { /// /// This is either stored at construction time if it is known using [`once_cell_set`], or /// computed once on the first access. - pub genesis_hash: OnceCell<B256>, + pub genesis_hash: OnceLock<B256>, /// The header corresponding to the genesis block. /// /// This is either stored at construction time if it is known using [`once_cell_set`], or /// computed once on the first access. - pub genesis_header: OnceCell<Header>
, + pub genesis_header: OnceLock<Header>
, /// The block at which [`EthereumHardfork::Paris`] was activated and the final difficulty at /// this block. @@ -312,11 +312,9 @@ impl ChainSpec { }; - // If Prague is activated at genesis we set requests root to an empty trie root. - let requests_root = if self.is_prague_active_at_timestamp(self.genesis.timestamp) { - Some(EMPTY_ROOT_HASH) - } else { - None - }; + // If Prague is activated at genesis we set the requests hash to the empty requests hash. + let requests_hash = self + .is_prague_active_at_timestamp(self.genesis.timestamp) + .then_some(EMPTY_REQUESTS_HASH); Header { gas_limit: self.genesis.gas_limit, @@ -332,7 +330,7 @@ impl ChainSpec { parent_beacon_block_root, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), - requests_root, + requests_hash, ..Default::default() } } @@ -346,7 +344,7 @@ impl ChainSpec { pub fn initial_base_fee(&self) -> Option<u64> { // If the base fee is set in the genesis block, we use that instead of the default. let genesis_base_fee = - self.genesis.base_fee_per_gas.map(|fee| fee as u64).unwrap_or(EIP1559_INITIAL_BASE_FEE); + self.genesis.base_fee_per_gas.map(|fee| fee as u64).unwrap_or(INITIAL_BASE_FEE); // If London is activated at genesis, we set the initial base fee as per EIP-1559. self.hardforks.fork(EthereumHardfork::London).active_at_block(0).then_some(genesis_base_fee) @@ -549,34 +547,36 @@ impl ChainSpec { } } - /// An internal helper function that returns the block number of the last block-based - /// fork that occurs before any existing TTD (merge)/timestamp based forks. + /// This internal helper function retrieves the block number of the last block-based fork + /// that occurs before: + /// - Any existing Terminal Total Difficulty (TTD) or + /// - Timestamp-based forks in the current [`ChainSpec`]. + /// + /// The function operates by examining the configured hard forks in the chain. It iterates + /// through the fork conditions and identifies the most recent block-based fork that + /// precedes any TTD or timestamp-based conditions. /// - /// Note: this returns None if the `ChainSpec` is not configured with a TTD/Timestamp fork. + /// If there are no block-based forks found before these conditions, or if the [`ChainSpec`] + /// is not configured with a TTD or timestamp fork, this function will return `None`. pub(crate) fn last_block_fork_before_merge_or_timestamp(&self) -> Option<u64> { let mut hardforks_iter = self.hardforks.forks_iter().peekable(); while let Some((_, curr_cond)) = hardforks_iter.next() { if let Some((_, next_cond)) = hardforks_iter.peek() { - // peek and find the first occurrence of ForkCondition::TTD (merge) , or in - // custom ChainSpecs, the first occurrence of - // ForkCondition::Timestamp. If curr_cond is ForkCondition::Block at - // this point, which it should be in most "normal" ChainSpecs, - // return its block_num + // Match against the `next_cond` to see if it represents: + // - A TTD (merge) + // - A timestamp-based fork match next_cond { - ForkCondition::TTD { fork_block, .. } => { - // handle Sepolia merge netsplit case - if fork_block.is_some() { - return *fork_block - } - // ensure curr_cond is indeed ForkCondition::Block and return block_num - if let ForkCondition::Block(block_num) = curr_cond { - return Some(block_num) - } - } - ForkCondition::Timestamp(_) => { - // ensure curr_cond is indeed ForkCondition::Block and return block_num + // If the next fork is TTD and specifies a specific block, return that block + // number + ForkCondition::TTD { fork_block: Some(block), ..
} => return Some(*block), + + // If the next fork is TTD without a specific block or is timestamp-based, + // return the block number of the current condition if it is block-based. + ForkCondition::TTD { .. } | ForkCondition::Timestamp(_) => { + // Check if `curr_cond` is a block-based fork and return its block number if + // true. if let ForkCondition::Block(block_num) = curr_cond { - return Some(block_num) + return Some(block_num); } } ForkCondition::Block(_) | ForkCondition::Never => continue, @@ -656,6 +656,7 @@ impl From<Genesis> for ChainSpec { (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), + (EthereumHardfork::Osaka.boxed(), genesis.config.osaka_time), ]; let mut time_hardforks = time_hardfork_opts @@ -692,7 +693,7 @@ impl From<Genesis> for ChainSpec { Self { chain: genesis.config.chain_id.into(), genesis, - genesis_hash: OnceCell::new(), + genesis_hash: OnceLock::new(), hardforks: ChainHardforks::new(ordered_hardforks), paris_block_and_final_difficulty, deposit_contract, @@ -782,13 +783,19 @@ impl ChainSpecBuilder { } /// Add the given fork with the given activation condition to the spec. - pub fn with_fork(mut self, fork: EthereumHardfork, condition: ForkCondition) -> Self { + pub fn with_fork<H: Hardfork>(mut self, fork: H, condition: ForkCondition) -> Self { self.hardforks.insert(fork, condition); self } + /// Add the given chain hardforks to the spec. + pub fn with_forks(mut self, forks: ChainHardforks) -> Self { + self.hardforks = forks; + self + } + /// Remove the given fork from the spec. - pub fn without_fork(mut self, fork: EthereumHardfork) -> Self { + pub fn without_fork<H: Hardfork>(mut self, fork: H) -> Self { self.hardforks.remove(fork); self } @@ -903,60 +910,10 @@ impl ChainSpecBuilder { self } - /// Enable Bedrock at genesis - #[cfg(feature = "optimism")] - pub fn bedrock_activated(mut self) -> Self { - self = self.paris_activated(); - self.hardforks - .insert(reth_optimism_forks::OptimismHardfork::Bedrock, ForkCondition::Block(0)); - self - } - - /// Enable Regolith at genesis - #[cfg(feature = "optimism")] - pub fn regolith_activated(mut self) -> Self { - self = self.bedrock_activated(); - self.hardforks - .insert(reth_optimism_forks::OptimismHardfork::Regolith, ForkCondition::Timestamp(0)); - self - } - - /// Enable Canyon at genesis - #[cfg(feature = "optimism")] - pub fn canyon_activated(mut self) -> Self { - self = self.regolith_activated(); - // Canyon also activates changes from L1's Shanghai hardfork - self.hardforks.insert(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0)); - self.hardforks - .insert(reth_optimism_forks::OptimismHardfork::Canyon, ForkCondition::Timestamp(0)); - self - } - - /// Enable Ecotone at genesis - #[cfg(feature = "optimism")] - pub fn ecotone_activated(mut self) -> Self { - self = self.canyon_activated(); - self.hardforks.insert(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)); - self.hardforks - .insert(reth_optimism_forks::OptimismHardfork::Ecotone, ForkCondition::Timestamp(0)); - self - } - - /// Enable Fjord at genesis - #[cfg(feature = "optimism")] - pub fn fjord_activated(mut self) -> Self { - self = self.ecotone_activated(); - self.hardforks - .insert(reth_optimism_forks::OptimismHardfork::Fjord, ForkCondition::Timestamp(0)); - self - } - - /// Enable Granite at genesis - #[cfg(feature = "optimism")] - pub fn granite_activated(mut self) -> Self { - self = self.fjord_activated(); - self.hardforks -
.insert(reth_optimism_forks::OptimismHardfork::Granite, ForkCondition::Timestamp(0)); + /// Enable Osaka at genesis. + pub fn osaka_activated(mut self) -> Self { + self = self.prague_activated(); + self.hardforks.insert(EthereumHardfork::Osaka, ForkCondition::Timestamp(0)); self } @@ -979,7 +936,7 @@ impl ChainSpecBuilder { ChainSpec { chain: self.chain.expect("The chain is required"), genesis: self.genesis.expect("The genesis is required"), - genesis_hash: OnceCell::new(), + genesis_hash: OnceLock::new(), hardforks: self.hardforks, paris_block_and_final_difficulty, deposit_contract: None, @@ -1037,6 +994,7 @@ mod tests { use alloy_chains::Chain; use alloy_genesis::{ChainConfig, GenesisAccount}; use alloy_primitives::hex; + use alloy_trie::EMPTY_ROOT_HASH; use reth_ethereum_forks::{ForkCondition, ForkHash, ForkId, Head}; use reth_trie_common::TrieAccount; @@ -1913,9 +1871,7 @@ Post-merge hard forks (timestamp based): hex!("078dc6061b1d8eaa8493384b59c9c65ceb917201221d08b80c4de6770b6ec7e7").into(); assert_eq!(chainspec.genesis_header().state_root, expected_state_root); - let expected_withdrawals_hash: B256 = - hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(); - assert_eq!(chainspec.genesis_header().withdrawals_root, Some(expected_withdrawals_hash)); + assert_eq!(chainspec.genesis_header().withdrawals_root, Some(EMPTY_ROOT_HASH)); let expected_hash: B256 = hex!("1fc027d65f820d3eef441ebeec139ebe09e471cf98516dce7b5643ccb27f418c").into(); @@ -2302,7 +2258,7 @@ Post-merge hard forks (timestamp based): let spec = ChainSpec { chain: Chain::mainnet(), genesis: Genesis::default(), - genesis_hash: OnceCell::new(), + genesis_hash: OnceLock::new(), hardforks: ChainHardforks::new(vec![( EthereumHardfork::Frontier.boxed(), ForkCondition::Never, @@ -2320,7 +2276,7 @@ Post-merge hard forks (timestamp based): let spec = ChainSpec { chain: Chain::mainnet(), genesis: Genesis::default(), - genesis_hash: OnceCell::new(), + genesis_hash: OnceLock::new(), hardforks: ChainHardforks::new(vec![( EthereumHardfork::Shanghai.boxed(), ForkCondition::Never, diff --git a/crates/cli/cli/Cargo.toml b/crates/cli/cli/Cargo.toml index 7eb1f43b1e..5da51a1b2f 100644 --- a/crates/cli/cli/Cargo.toml +++ b/crates/cli/cli/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-cli-runner.workspace = true - +reth-db.workspace = true alloy-genesis.workspace = true # misc diff --git a/crates/cli/cli/src/lib.rs b/crates/cli/cli/src/lib.rs index 1db5ebf86b..e2c55057a4 100644 --- a/crates/cli/cli/src/lib.rs +++ b/crates/cli/cli/src/lib.rs @@ -10,10 +10,12 @@ use clap::{Error, Parser}; use reth_cli_runner::CliRunner; +use reth_db::ClientVersion; use std::{borrow::Cow, ffi::OsString}; /// The chainspec module defines the different chainspecs that can be used by the node. pub mod chainspec; +use crate::chainspec::ChainSpecParser; /// Reth based node cli. /// @@ -22,6 +24,9 @@ pub mod chainspec; /// It provides commonly used functionality for running commands and information about the CL, such /// as the name and version. pub trait RethCli: Sized { + /// The associated `ChainSpecParser` type + type ChainSpecParser: ChainSpecParser; + /// The name of the implementation, eg. `reth`, `op-reth`, etc. fn name(&self) -> Cow<'static, str>; @@ -66,4 +71,7 @@ pub trait RethCli: Sized { Ok(cli.with_runner(f)) } + + /// The client version of the node. 
+ fn client_version() -> ClientVersion; } diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index e307859dfd..a0bc514770 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -17,6 +17,7 @@ reth-cli.workspace = true reth-ethereum-cli.workspace = true reth-cli-runner.workspace = true reth-cli-util.workspace = true +reth-codecs = { workspace = true, optional = true } reth-config.workspace = true reth-consensus.workspace = true reth-db = { workspace = true, features = ["mdbx"] } @@ -38,15 +39,19 @@ reth-node-metrics.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true +reth-prune-types = { workspace = true, optional = true } reth-stages.workspace = true +reth-stages-types = { workspace = true, optional = true } reth-static-file-types = { workspace = true, features = ["clap"] } reth-static-file.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } +reth-trie-common = { workspace = true, optional = true } # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-rlp.workspace = true itertools.workspace = true futures.workspace = true @@ -88,10 +93,23 @@ reth-discv4.workspace = true [features] default = [] -dev = [ - "dep:proptest", - "dep:arbitrary", - "dep:proptest-arbitrary-interop", - "reth-primitives/arbitrary", - "reth-db-api/arbitrary", +arbitrary = [ + "dep:proptest", + "dep:arbitrary", + "dep:proptest-arbitrary-interop", + "reth-primitives/arbitrary", + "reth-db-api/arbitrary", + "reth-eth-wire/arbitrary", + "reth-db/arbitrary", + "reth-chainspec/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-codecs/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", + "reth-trie-common/test-utils", + "reth-codecs?/arbitrary", + "reth-prune-types?/arbitrary", + "reth-stages-types?/arbitrary", + "reth-trie-common?/arbitrary" ] diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index e7b90962ef..8b9c593215 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -130,7 +130,7 @@ impl> Environmen .static_file_provider() .check_consistency(&factory.provider()?, has_receipt_pruning)? { - if factory.db_ref().is_read_only() { + if factory.db_ref().is_read_only()? { warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. Restart node to heal."); return Ok(factory) } diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 476a9ee362..8d31ba6efa 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -144,9 +144,8 @@ pub(crate) fn table_key(key: &str) -> Result { } /// Get an instance of subkey for given dupsort table -fn table_subkey(subkey: &Option) -> Result { - serde_json::from_str::(&subkey.clone().unwrap_or_default()) - .map_err(|e| eyre::eyre!(e)) +fn table_subkey(subkey: Option<&str>) -> Result { + serde_json::from_str::(subkey.unwrap_or_default()).map_err(|e| eyre::eyre!(e)) } struct GetValueViewer<'a, N: NodeTypesWithDB> { @@ -187,7 +186,7 @@ impl TableViewer<()> for GetValueViewer<'_, N> { let key = table_key::(&self.key)?; // process dupsort table - let subkey = table_subkey::(&self.subkey)?; + let subkey = table_subkey::(self.subkey.as_deref())?; match self.tool.get_dup::(key, subkey)? 
{ Some(content) => { diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index 6d73c1bf0a..e3afed831d 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -210,6 +210,7 @@ where .with_tip_sender(tip_tx) // we want to sync all blocks the file client provides or 0 if empty .with_max_block(max_block) + .with_fail_on_unwind(true) .add_stages( DefaultStages::new( provider_factory.clone(), diff --git a/crates/cli/commands/src/init_state.rs b/crates/cli/commands/src/init_state.rs deleted file mode 100644 index 16e99f8fe9..0000000000 --- a/crates/cli/commands/src/init_state.rs +++ /dev/null @@ -1,79 +0,0 @@ -//! Command that initializes the node from a genesis file. - -use crate::common::{AccessRights, Environment, EnvironmentArgs}; -use alloy_primitives::B256; -use clap::Parser; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_cli::chainspec::ChainSpecParser; -use reth_config::config::EtlConfig; -use reth_db_common::init::init_from_state_dump; -use reth_node_builder::NodeTypesWithEngine; -use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; - -use std::{fs::File, io::BufReader, path::PathBuf}; -use tracing::info; - -/// Initializes the database with the genesis block. -#[derive(Debug, Parser)] -pub struct InitStateCommand { - #[command(flatten)] - pub env: EnvironmentArgs, - - /// JSONL file with state dump. - /// - /// Must contain accounts in following format, additional account fields are ignored. Must - /// also contain { "root": \ } as first line. - /// { - /// "balance": "\", - /// "nonce": \, - /// "code": "\", - /// "storage": { - /// "\": "\", - /// .. - /// }, - /// "address": "\", - /// } - /// - /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until - /// and including the non-genesis block to init chain at. See 'import' command. - #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)] - pub state: PathBuf, -} - -impl> InitStateCommand { - /// Execute the `init` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { - info!(target: "reth::cli", "Reth init-state starting"); - - let Environment { config, provider_factory, .. } = self.env.init::(AccessRights::RW)?; - - info!(target: "reth::cli", "Initiating state dump"); - - let hash = init_at_state(self.state, provider_factory, config.stages.etl)?; - - info!(target: "reth::cli", hash = ?hash, "Genesis block written"); - Ok(()) - } -} - -/// Initialize chain with state at specific block, from a file with state dump. -pub fn init_at_state( - state_dump_path: PathBuf, - factory: ProviderFactory, - etl_config: EtlConfig, -) -> eyre::Result { - info!(target: "reth::cli", - path=?state_dump_path, - "Opening state dump"); - - let file = File::open(state_dump_path)?; - let reader = BufReader::new(file); - - let provider_rw = factory.provider_rw()?; - let hash = init_from_state_dump(reader, &provider_rw.0, etl_config)?; - provider_rw.commit()?; - - Ok(hash) -} diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs new file mode 100644 index 0000000000..adaec3e8be --- /dev/null +++ b/crates/cli/commands/src/init_state/mod.rs @@ -0,0 +1,132 @@ +//! Command that initializes the node from a genesis file. 
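// Editor's sketch (not from this diff): the `--header` flag added below expects
// a file containing an RLP-encoded header, so producing one is the inverse of
// the `read_header_from_file` helper further down. Assumes `Header` implements
// `alloy_rlp::Encodable`, as it does in `reth_primitives`; `write_header_to_file`
// is a hypothetical helper for illustration.
use alloy_rlp::Encodable;
use reth_primitives::Header;
use std::{fs::File, io::Write, path::Path};

fn write_header_to_file(header: &Header, path: &Path) -> std::io::Result<()> {
    let mut buf = Vec::new();
    header.encode(&mut buf); // RLP-encode the header into the buffer
    File::create(path)?.write_all(&buf)
}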
+ +use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use alloy_primitives::{B256, U256}; +use clap::Parser; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_cli::chainspec::ChainSpecParser; +use reth_db_common::init::init_from_state_dump; +use reth_node_builder::NodeTypesWithEngine; +use reth_primitives::SealedHeader; +use reth_provider::{ + BlockNumReader, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, +}; + +use std::{fs::File, io::BufReader, path::PathBuf, str::FromStr}; +use tracing::info; + +pub mod without_evm; + +/// Initializes the database with the genesis block. +#[derive(Debug, Parser)] +pub struct InitStateCommand<C: ChainSpecParser> { + #[command(flatten)] + pub env: EnvironmentArgs<C>, + + /// JSONL file with state dump. + /// + /// Must contain accounts in following format, additional account fields are ignored. Must + /// also contain { "root": \<state-root\> } as first line. + /// { + /// "balance": "\<balance\>", + /// "nonce": \<nonce\>, + /// "code": "\<bytecode\>", + /// "storage": { + /// "\<key\>": "\<value\>", + /// .. + /// }, + /// "address": "\<address\>", + /// } + /// + /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until + /// and including the non-genesis block to init chain at. See 'import' command. + #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)] + pub state: PathBuf, + + /// Specifies whether to initialize the state without relying on EVM historical data. + /// + /// When enabled, and before inserting the state, it creates a dummy chain up to the last EVM + /// block specified. It then appends the first provided block. + /// + /// - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be + /// ignored. + #[arg(long, default_value = "false")] + pub without_evm: bool, + + /// Header file containing the header in an RLP encoded format. + #[arg(long, value_name = "HEADER_FILE", verbatim_doc_comment)] + pub header: Option<PathBuf>, + + /// Total difficulty of the header. + #[arg(long, value_name = "TOTAL_DIFFICULTY", verbatim_doc_comment)] + pub total_difficulty: Option<String>, + + /// Hash of the header. + #[arg(long, value_name = "HEADER_HASH", verbatim_doc_comment)] + pub header_hash: Option<String>, +} + +impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> InitStateCommand<C> { + /// Execute the `init` command + pub async fn execute<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "Reth init-state starting"); + + let Environment { config, provider_factory, ..
} = self.env.init::<N>(AccessRights::RW)?; + + let static_file_provider = provider_factory.static_file_provider(); + let provider_rw = provider_factory.database_provider_rw()?; + + if self.without_evm { + // ensure header, total difficulty and header hash are provided + let header = self.header.ok_or_else(|| eyre::eyre!("Header file must be provided"))?; + let header = without_evm::read_header_from_file(header)?; + + let header_hash = + self.header_hash.ok_or_else(|| eyre::eyre!("Header hash must be provided"))?; + let header_hash = B256::from_str(&header_hash)?; + + let total_difficulty = self + .total_difficulty + .ok_or_else(|| eyre::eyre!("Total difficulty must be provided"))?; + let total_difficulty = U256::from_str(&total_difficulty)?; + + let last_block_number = provider_rw.last_block_number()?; + + if last_block_number == 0 { + without_evm::setup_without_evm( + &provider_rw, + &static_file_provider, + SealedHeader::new(header, header_hash), + total_difficulty, + )?; + + // SAFETY: it's safe to commit static files, since in the event of a crash, they + // will be unwound according to database checkpoints. + // + // Necessary to commit, so the header is accessible to provider_rw and + // init_from_state_dump + static_file_provider.commit()?; + } else if last_block_number > 0 && last_block_number < header.number { + return Err(eyre::eyre!( + "Data directory should be empty when calling init-state with --without-evm." + )); + } + } + + info!(target: "reth::cli", "Initiating state dump"); + + let file = File::open(self.state)?; + let reader = BufReader::new(file); + + let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?; + + provider_rw.commit()?; + + info!(target: "reth::cli", hash = ?hash, "Genesis block written"); + Ok(()) + } +} diff --git a/crates/optimism/cli/src/commands/init_state/bedrock.rs b/crates/cli/commands/src/init_state/without_evm.rs similarity index 70% rename from crates/optimism/cli/src/commands/init_state/bedrock.rs rename to crates/cli/commands/src/init_state/without_evm.rs index efff065e50..187996653c 100644 --- a/crates/optimism/cli/src/commands/init_state/bedrock.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,5 +1,6 @@ use alloy_primitives::{BlockNumber, B256, U256}; -use reth_optimism_primitives::bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}; +use alloy_rlp::Decodable; + use reth_primitives::{ BlockBody, Header, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, }; @@ -7,28 +8,42 @@ use reth_provider::{ providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileWriter, }; use reth_stages::{StageCheckpoint, StageId}; + +use std::{fs::File, io::Read, path::PathBuf}; use tracing::info; -/// Creates a dummy chain (with no transactions) up to the last OVM block and appends the -/// first valid Bedrock block. -pub(crate) fn setup_op_mainnet_without_ovm<Provider>( +/// Reads the header RLP from a file and returns the Header. +pub(crate) fn read_header_from_file(path: PathBuf) -> Result<Header, eyre::Error> { + let mut file = File::open(path)?; + let mut buf = Vec::new(); + file.read_to_end(&mut buf)?; + + let header = Header::decode(&mut &buf[..])?; + Ok(header) +} + +/// Creates a dummy chain (with no transactions) up to the last EVM block and appends the +/// first valid block.
+pub fn setup_without_evm<Provider>( provider_rw: &Provider, static_file_provider: &StaticFileProvider, + header: SealedHeader, + total_difficulty: U256, ) -> Result<(), eyre::Error> where Provider: StageCheckpointWriter + BlockWriter, { - info!(target: "reth::cli", "Setting up dummy OVM chain before importing state."); + info!(target: "reth::cli", "Setting up dummy EVM chain before importing state."); - // Write OVM dummy data up to `BEDROCK_HEADER - 1` block - append_dummy_chain(static_file_provider, BEDROCK_HEADER.number - 1)?; + // Write EVM dummy data up to `header - 1` block + append_dummy_chain(static_file_provider, header.number - 1)?; - info!(target: "reth::cli", "Appending Bedrock block."); + info!(target: "reth::cli", "Appending first valid block."); - append_bedrock_block(provider_rw, static_file_provider)?; + append_first_block(provider_rw, static_file_provider, &header, total_difficulty)?; for stage in StageId::ALL { - provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(BEDROCK_HEADER.number))?; + provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number))?; } info!(target: "reth::cli", "Set up finished."); @@ -36,38 +51,30 @@ where Ok(()) } -/// Appends the first bedrock block. +/// Appends the first block. /// /// By appending it, static file writer also verifies that all segments are at the same /// height. -fn append_bedrock_block( +fn append_first_block( provider_rw: impl BlockWriter, sf_provider: &StaticFileProvider, + header: &SealedHeader, + total_difficulty: U256, ) -> Result<(), eyre::Error> { provider_rw.insert_block( - SealedBlockWithSenders::new( - SealedBlock::new( - SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), - BlockBody::default(), - ), - vec![], - ) - .expect("no senders or txes"), + SealedBlockWithSenders::new(SealedBlock::new(header.clone(), BlockBody::default()), vec![]) + .expect("no senders or txes"), )?; sf_provider.latest_writer(StaticFileSegment::Headers)?.append_header( - &BEDROCK_HEADER, - BEDROCK_HEADER_TTD, - &BEDROCK_HEADER_HASH, + header, + total_difficulty, + &header.hash(), )?; - sf_provider - .latest_writer(StaticFileSegment::Receipts)? - .increment_block(BEDROCK_HEADER.number)?; + sf_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(header.number)?; - sf_provider - .latest_writer(StaticFileSegment::Transactions)? - .increment_block(BEDROCK_HEADER.number)?; + sf_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(header.number)?; Ok(()) } diff --git a/crates/cli/commands/src/lib.rs b/crates/cli/commands/src/lib.rs index 33a38ddbc0..166ea438fb 100644 --- a/crates/cli/commands/src/lib.rs +++ b/crates/cli/commands/src/lib.rs @@ -20,7 +20,7 @@ pub mod p2p; pub mod prune; pub mod recover; pub mod stage; -#[cfg(feature = "dev")] +#[cfg(feature = "arbitrary")] pub mod test_vectors; pub use node::NodeCommand; diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index 645338b62f..03c48ad9c4 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -16,7 +16,6 @@ use reth_node_core::{ node_config::NodeConfig, version, }; -use reth_node_metrics::recorder::install_prometheus_recorder; use std::{ffi::OsString, fmt, future::Future, net::SocketAddr, path::PathBuf, sync::Arc}; /// Start the node @@ -192,10 +191,6 @@ impl< enable_execution_cache: performance_optimization.enable_execution_cache, }; - // Register the prometheus recorder before creating the database, - // because database init needs it to register metrics.
- let _ = install_prometheus_recorder(); - let data_dir = node_config.datadir(); let db_path = data_dir.db(); diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index e2306f12ca..c14ad4a996 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -4,7 +4,7 @@ use clap::Parser; use itertools::Itertools; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_db::{static_file::iter_static_files, tables}; +use reth_db::{mdbx::tx::Tx, static_file::iter_static_files, tables, DatabaseError}; use reth_db_api::transaction::{DbTx, DbTxMut}; use reth_db_common::{ init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}, @@ -69,43 +69,28 @@ impl> Command tx.clear::()?; tx.clear::()?; tx.clear::()?; - tx.put::( - StageId::Headers.to_string(), - Default::default(), - )?; + reset_stage_checkpoint(tx, StageId::Headers)?; + insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; } StageEnum::Bodies => { tx.clear::()?; tx.clear::()?; + reset_prune_checkpoint(tx, PruneSegment::Transactions)?; + tx.clear::()?; tx.clear::()?; tx.clear::()?; - tx.clear::()?; tx.clear::()?; - tx.put::( - StageId::Bodies.to_string(), - Default::default(), - )?; + reset_stage_checkpoint(tx, StageId::Bodies)?; + insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; } StageEnum::Senders => { tx.clear::()?; // Reset pruned numbers to not count them in the next rerun's stage progress - if let Some(mut prune_checkpoint) = - tx.get::(PruneSegment::SenderRecovery)? - { - prune_checkpoint.block_number = None; - prune_checkpoint.tx_number = None; - tx.put::( - PruneSegment::SenderRecovery, - prune_checkpoint, - )?; - } - tx.put::( - StageId::SenderRecovery.to_string(), - Default::default(), - )?; + reset_prune_checkpoint(tx, PruneSegment::SenderRecovery)?; + reset_stage_checkpoint(tx, StageId::SenderRecovery)?; } StageEnum::Execution => { tx.clear::()?; @@ -115,53 +100,38 @@ impl> Command tx.clear::()?; tx.clear::()?; tx.clear::()?; - tx.put::( - StageId::Execution.to_string(), - Default::default(), - )?; + + reset_prune_checkpoint(tx, PruneSegment::Receipts)?; + reset_prune_checkpoint(tx, PruneSegment::ContractLogs)?; + reset_stage_checkpoint(tx, StageId::Execution)?; + let alloc = &self.env.chain.genesis().alloc; insert_genesis_state(&provider_rw.0, alloc.iter())?; } StageEnum::AccountHashing => { tx.clear::()?; - tx.put::( - StageId::AccountHashing.to_string(), - Default::default(), - )?; + reset_stage_checkpoint(tx, StageId::AccountHashing)?; } StageEnum::StorageHashing => { tx.clear::()?; - tx.put::( - StageId::StorageHashing.to_string(), - Default::default(), - )?; + reset_stage_checkpoint(tx, StageId::StorageHashing)?; } StageEnum::Hashing => { // Clear hashed accounts tx.clear::()?; - tx.put::( - StageId::AccountHashing.to_string(), - Default::default(), - )?; + reset_stage_checkpoint(tx, StageId::AccountHashing)?; // Clear hashed storages tx.clear::()?; - tx.put::( - StageId::StorageHashing.to_string(), - Default::default(), - )?; + reset_stage_checkpoint(tx, StageId::StorageHashing)?; } StageEnum::Merkle => { tx.clear::()?; tx.clear::()?; - tx.put::( - StageId::MerkleExecute.to_string(), - Default::default(), - )?; - tx.put::( - StageId::MerkleUnwind.to_string(), - Default::default(), - )?; + + reset_stage_checkpoint(tx, StageId::MerkleExecute)?; + reset_stage_checkpoint(tx, StageId::MerkleUnwind)?; + tx.delete::( 
StageId::MerkleExecute.to_string(), None, @@ -170,22 +140,17 @@ impl> Command StageEnum::AccountHistory | StageEnum::StorageHistory => { tx.clear::()?; tx.clear::()?; - tx.put::( - StageId::IndexAccountHistory.to_string(), - Default::default(), - )?; - tx.put::( - StageId::IndexStorageHistory.to_string(), - Default::default(), - )?; + + reset_stage_checkpoint(tx, StageId::IndexAccountHistory)?; + reset_stage_checkpoint(tx, StageId::IndexStorageHistory)?; + insert_genesis_history(&provider_rw.0, self.env.chain.genesis().alloc.iter())?; } StageEnum::TxLookup => { tx.clear::()?; - tx.put::( - StageId::TransactionLookup.to_string(), - Default::default(), - )?; + reset_prune_checkpoint(tx, PruneSegment::TransactionLookup)?; + + reset_stage_checkpoint(tx, StageId::TransactionLookup)?; insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; } } @@ -197,3 +162,25 @@ impl> Command Ok(()) } } + +fn reset_prune_checkpoint( + tx: &Tx, + prune_segment: PruneSegment, +) -> Result<(), DatabaseError> { + if let Some(mut prune_checkpoint) = tx.get::(prune_segment)? { + prune_checkpoint.block_number = None; + prune_checkpoint.tx_number = None; + tx.put::(prune_segment, prune_checkpoint)?; + } + + Ok(()) +} + +fn reset_stage_checkpoint( + tx: &Tx, + stage_id: StageId, +) -> Result<(), DatabaseError> { + tx.put::(stage_id.to_string(), Default::default())?; + + Ok(()) +} diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs new file mode 100644 index 0000000000..8def25fa39 --- /dev/null +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -0,0 +1,283 @@ +use alloy_primitives::{hex, private::getrandom::getrandom, TxKind}; +use arbitrary::Arbitrary; +use eyre::{Context, Result}; +use proptest::{ + prelude::{ProptestConfig, RngCore}, + test_runner::{TestRng, TestRunner}, +}; +use reth_codecs::alloy::{ + authorization_list::Authorization, + genesis_account::GenesisAccount, + header::{Header, HeaderExt}, + transaction::{ + eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702, + legacy::TxLegacy, + }, + withdrawal::Withdrawal, +}; +use reth_db::{ + models::{AccountBeforeTx, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals}, + ClientVersion, +}; +use reth_fs_util as fs; +use reth_primitives::{ + Account, Log, LogData, Receipt, ReceiptWithBloom, StorageEntry, Transaction, + TransactionSignedNoHash, TxType, Withdrawals, +}; +use reth_prune_types::{PruneCheckpoint, PruneMode}; +use reth_stages_types::{ + AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, + HeadersCheckpoint, IndexHistoryCheckpoint, StageCheckpoint, StageUnitCheckpoint, + StorageHashingCheckpoint, +}; +use reth_trie::{hash_builder::HashBuilderValue, TrieMask}; +use reth_trie_common::{hash_builder::HashBuilderState, StoredNibbles, StoredNibblesSubKey}; +use std::{fs::File, io::BufReader}; + +pub const VECTORS_FOLDER: &str = "testdata/micro/compact"; +pub const VECTOR_SIZE: usize = 100; + +#[macro_export] +macro_rules! 
compact_types { (regular: [$($regular_ty:ident),*], identifier: [$($id_ty:ident),*]) => { pub const GENERATE_VECTORS: &[fn(&mut TestRunner) -> eyre::Result<()>] = &[ $( generate_vector::<$regular_ty> as fn(&mut TestRunner) -> eyre::Result<()>, )* $( generate_vector::<$id_ty> as fn(&mut TestRunner) -> eyre::Result<()>, )* ]; pub const READ_VECTORS: &[fn() -> eyre::Result<()>] = &[ $( read_vector::<$regular_ty> as fn() -> eyre::Result<()>, )* $( read_vector::<$id_ty> as fn() -> eyre::Result<()>, )* ]; pub static IDENTIFIER_TYPE: std::sync::LazyLock<std::collections::HashSet<String>> = std::sync::LazyLock::new(|| { let mut map = std::collections::HashSet::new(); $( map.insert(type_name::<$id_ty>()); )* map }); }; } // The type that **actually** implements `Compact` should go here. If it's an alloy type, import the // auxiliary type from reth_codecs::alloy instead. compact_types!( regular: [ // reth-primitives Account, Receipt, Withdrawals, ReceiptWithBloom, // reth_codecs::alloy Authorization, GenesisAccount, Header, HeaderExt, Withdrawal, TxEip2930, TxEip1559, TxEip4844, TxEip7702, TxLegacy, HashBuilderValue, LogData, Log, // BranchNodeCompact, // todo requires arbitrary TrieMask, // TxDeposit, TODO(joshie): optimism // reth_prune_types PruneCheckpoint, PruneMode, // reth_stages_types AccountHashingCheckpoint, StorageHashingCheckpoint, ExecutionCheckpoint, HeadersCheckpoint, IndexHistoryCheckpoint, EntitiesCheckpoint, CheckpointBlockRange, StageCheckpoint, StageUnitCheckpoint, // reth_db_api StoredBlockOmmers, StoredBlockBodyIndices, StoredBlockWithdrawals, // Manual implementations TransactionSignedNoHash, // Bytecode, // todo revm arbitrary StorageEntry, // MerkleCheckpoint, // todo storedsubnode -> branchnodecompact arbitrary AccountBeforeTx, ClientVersion, StoredNibbles, StoredNibblesSubKey, // StorageTrieEntry, // todo branchnodecompact arbitrary // StoredSubNode, // todo branchnodecompact arbitrary HashBuilderState ], // These types require an extra identifier which is usually stored elsewhere (eg. parent type). identifier: [ // Signature: todo. For `v` we only store parity (true || false), while `v` can take more values. Transaction, TxType, TxKind ] ); /// Generates test vectors for all registered `Compact` types and writes them to files. pub fn generate_vectors() -> Result<()> { generate_vectors_with(GENERATE_VECTORS) } pub fn read_vectors() -> Result<()> { read_vectors_with(READ_VECTORS) } /// Generates test vectors using the given generator functions and writes them to files. pub fn generate_vectors_with(gen: &[fn(&mut TestRunner) -> eyre::Result<()>]) -> Result<()> { // Prepare random seed for test (same method as used by proptest) let mut seed = [0u8; 32]; getrandom(&mut seed)?; println!("Seed for compact test vectors: {:?}", hex::encode_prefixed(seed)); // Start the runner with the seed let config = ProptestConfig::default(); let rng = TestRng::from_seed(config.rng_algorithm, &seed); let mut runner = TestRunner::new_with_rng(config, rng); fs::create_dir_all(VECTORS_FOLDER)?; for generate_fn in gen { generate_fn(&mut runner)?; } Ok(()) } /// Reads multiple vectors of different types ensuring their correctness by decoding and /// re-encoding.
+pub fn read_vectors_with(read: &[fn() -> eyre::Result<()>]) -> Result<()> { + fs::create_dir_all(VECTORS_FOLDER)?; + let mut errors = None; + + for read_fn in read { + if let Err(err) = read_fn() { + errors.get_or_insert_with(Vec::new).push(err); + } + } + + if let Some(err_list) = errors { + for error in err_list { + eprintln!("{:?}", error); + } + return Err(eyre::eyre!( + "If there are missing types, make sure to run `reth test-vectors compact --write` first.\n + If it happened during CI, ignore IF it's a new proposed type that `main` branch does not have." + )); + } + + Ok(()) +} + +/// Generates test vectors for a specific type `T`. +pub fn generate_vector<T>(runner: &mut TestRunner) -> Result<()> +where + T: for<'a> Arbitrary<'a> + reth_codecs::Compact, +{ + let type_name = type_name::<T>(); + print!("{}", &type_name); + + let mut bytes = std::iter::repeat(0u8).take(256).collect::<Vec<u8>>(); + let mut compact_buffer = vec![]; + + let mut values = Vec::with_capacity(VECTOR_SIZE); + for _ in 0..VECTOR_SIZE { + runner.rng().fill_bytes(&mut bytes); + compact_buffer.clear(); + + // Sometimes type `T` might require extra arbitrary data, so we retry a few times. + let mut tries = 0; + let obj = loop { + match T::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) { + Ok(obj) => break obj, + Err(err) => { + if tries < 5 && matches!(err, arbitrary::Error::NotEnoughData) { + tries += 1; + bytes.extend(std::iter::repeat(0u8).take(256)); + } else { + return Err(err)? + } + } + } + }; + let res = obj.to_compact(&mut compact_buffer); + + if IDENTIFIER_TYPE.contains(&type_name) { + compact_buffer.push(res as u8); + } + + values.push(hex::encode(&compact_buffer)); + } + + serde_json::to_writer( + std::io::BufWriter::new( + std::fs::File::create(format!("{VECTORS_FOLDER}/{}.json", &type_name)).unwrap(), + ), + &values, + )?; + + println!(" ✅"); + + Ok(()) +} + +/// Reads a vector of type `T` from a file and compares each item with its reconstructed version +/// using `T::from_compact`. +pub fn read_vector<T>() -> Result<()> +where + T: reth_codecs::Compact, +{ + let type_name = type_name::<T>(); + print!("{}", &type_name); + + // Read the file where the vectors are stored + let file_path = format!("{VECTORS_FOLDER}/{}.json", &type_name); + let file = + File::open(&file_path).wrap_err_with(|| format!("Failed to open vector {type_name}."))?; + let reader = BufReader::new(file); + + let stored_values: Vec<String> = serde_json::from_reader(reader)?; + let mut buffer = vec![]; + + for hex_str in stored_values { + let mut compact_bytes = hex::decode(hex_str)?; + let mut identifier = None; + buffer.clear(); + + if IDENTIFIER_TYPE.contains(&type_name) { + identifier = compact_bytes.pop().map(|b| b as usize); + } + let len_or_identifier = identifier.unwrap_or(compact_bytes.len()); + + let (reconstructed, _) = T::from_compact(&compact_bytes, len_or_identifier); + reconstructed.to_compact(&mut buffer); + assert_eq!(buffer, compact_bytes); + } + + println!(" ✅"); + + Ok(()) +} + +pub fn type_name<T>() -> String { + std::any::type_name::<T>().split("::").last().unwrap_or(std::any::type_name::<T>()).to_string() } diff --git a/crates/cli/commands/src/test_vectors/mod.rs b/crates/cli/commands/src/test_vectors/mod.rs index 999c0bc913..001d0c2e86 100644 --- a/crates/cli/commands/src/test_vectors/mod.rs +++ b/crates/cli/commands/src/test_vectors/mod.rs @@ -2,7 +2,8 @@ use clap::{Parser, Subcommand}; -mod tables; +pub mod compact; +pub mod tables; /// Generate test-vectors for different data types.
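// Editor's sketch (not from this diff): the invariant the compact vectors above
// lock in, shown as a single round-trip. `Account` stands for any non-identifier
// `Compact` type; it derives `PartialEq`/`Debug` in `reth_primitives`, so the
// assert is valid.
use reth_codecs::Compact;
use reth_primitives::Account;

fn compact_roundtrip(acc: Account) {
    let mut buf = Vec::new();
    let len = acc.to_compact(&mut buf); // encode; returns the byte length
    let (decoded, _) = Account::from_compact(&buf, len); // decode the same bytes
    assert_eq!(decoded, acc); // the value must survive the round-trip
}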
#[derive(Debug, Parser)] @@ -19,6 +20,22 @@ pub enum Subcommands { /// List of table names. Case-sensitive. names: Vec, }, + /// Randomly generate test vectors for each `Compact` type using the `--write` flag. + /// + /// The generated vectors are serialized in both `json` and `Compact` formats and saved to a + /// file. + /// + /// Use the `--read` flag to read and validate the previously generated vectors from file. + #[group(multiple = false, required = true)] + Compact { + /// Write test vectors to a file. + #[arg(long)] + write: bool, + + /// Read test vectors from a file. + #[arg(long)] + read: bool, + }, } impl Command { @@ -28,6 +45,13 @@ impl Command { Subcommands::Tables { names } => { tables::generate_vectors(names)?; } + Subcommands::Compact { write, .. } => { + if write { + compact::generate_vectors()?; + } else { + compact::read_vectors()?; + } + } } Ok(()) } diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index 112685251d..29ba50c8d8 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -1,4 +1,4 @@ -use alloy_primitives::private::getrandom::getrandom; +use alloy_primitives::{hex, private::getrandom::getrandom}; use arbitrary::Arbitrary; use eyre::Result; use proptest::{ @@ -17,11 +17,11 @@ const VECTORS_FOLDER: &str = "testdata/micro/db"; const PER_TABLE: usize = 1000; /// Generates test vectors for specified `tables`. If list is empty, then generate for all tables. -pub(crate) fn generate_vectors(mut tables: Vec) -> Result<()> { +pub fn generate_vectors(mut tables: Vec) -> Result<()> { // Prepare random seed for test (same method as used by proptest) let mut seed = [0u8; 32]; getrandom(&mut seed)?; - println!("Seed for test vectors: {:?}", seed); + println!("Seed for table test vectors: {:?}", hex::encode_prefixed(seed)); // Start the runner with the seed let config = ProptestConfig::default(); diff --git a/crates/cli/util/Cargo.toml b/crates/cli/util/Cargo.toml index d96a882a67..70515f83b4 100644 --- a/crates/cli/util/Cargo.toml +++ b/crates/cli/util/Cargo.toml @@ -24,6 +24,7 @@ eyre.workspace = true rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } thiserror.workspace = true +serde.workspace = true tracy-client = { workspace = true, optional = true, features = ["demangle"] } diff --git a/crates/cli/util/src/parsers.rs b/crates/cli/util/src/parsers.rs index 202744a4bb..9bb803bcca 100644 --- a/crates/cli/util/src/parsers.rs +++ b/crates/cli/util/src/parsers.rs @@ -1,7 +1,9 @@ use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; +use reth_fs_util::FsPathError; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs}, + path::Path, str::FromStr, time::Duration, }; @@ -82,6 +84,11 @@ pub fn parse_socket_address(value: &str) -> eyre::Result(path: &str) -> Result { + reth_fs_util::read_json_file(Path::new(path)) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 92b091f314..c53ad6bb95 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -16,6 +16,9 @@ use std::{ const EXTENSION: &str = "toml"; +/// The default prune block interval +pub const DEFAULT_BLOCK_INTERVAL: usize = 5; + /// Configuration for the reth node. 
#[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq, Serialize)] #[serde(default)] @@ -390,7 +393,11 @@ pub struct PruneConfig { impl Default for PruneConfig { fn default() -> Self { - Self { block_interval: 5, recent_sidecars_kept_blocks: 0, segments: PruneModes::none() } + Self { + block_interval: DEFAULT_BLOCK_INTERVAL, + recent_sidecars_kept_blocks: 0, + segments: PruneModes::none(), + } } } @@ -404,27 +411,39 @@ impl PruneConfig { /// if the corresponding value in this config is not set. pub fn merge(&mut self, other: Option<Self>) { let Some(other) = other else { return }; + let Self { + block_interval, + recent_sidecars_kept_blocks, + segments: + PruneModes { + sender_recovery, + transaction_lookup, + receipts, + account_history, + storage_history, + receipts_log_filter, + }, + } = other; + + // Merge block_interval, only update if it's the default interval + if self.block_interval == DEFAULT_BLOCK_INTERVAL { + self.block_interval = block_interval; + } - // Merge block_interval - if self.block_interval == 0 { - self.block_interval = other.block_interval; + // Merge recent_sidecars_kept_blocks, only update if it's the default number of blocks + if self.recent_sidecars_kept_blocks == 0 { + self.recent_sidecars_kept_blocks = recent_sidecars_kept_blocks; } // Merge the various segment prune modes - self.segments.sender_recovery = - self.segments.sender_recovery.or(other.segments.sender_recovery); - self.segments.transaction_lookup = - self.segments.transaction_lookup.or(other.segments.transaction_lookup); - self.segments.receipts = self.segments.receipts.or(other.segments.receipts); - self.segments.account_history = - self.segments.account_history.or(other.segments.account_history); - self.segments.storage_history = - self.segments.storage_history.or(other.segments.storage_history); - - if self.segments.receipts_log_filter.0.is_empty() && - !other.segments.receipts_log_filter.0.is_empty() - { - self.segments.receipts_log_filter = other.segments.receipts_log_filter; + self.segments.sender_recovery = self.segments.sender_recovery.or(sender_recovery); + self.segments.transaction_lookup = self.segments.transaction_lookup.or(transaction_lookup); + self.segments.receipts = self.segments.receipts.or(receipts); + self.segments.account_history = self.segments.account_history.or(account_history); + self.segments.storage_history = self.segments.storage_history.or(storage_history); + + if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() { + self.segments.receipts_log_filter = receipts_log_filter; } } } @@ -970,7 +989,7 @@ receipts = 'full' // Check that the configuration has been merged.
Any configuration present in config1 // should not be overwritten by config2 - assert_eq!(config1.block_interval, 5); + assert_eq!(config1.block_interval, 10); assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full)); assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full)); assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000))); @@ -1010,9 +1029,9 @@ connect_trusted_nodes_only = true assert_eq!(conf.peers.trusted_nodes.len(), 2); let expected_enodes = vec![ - "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303", - "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303", - ]; + "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303", + "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303", + ]; for enode in expected_enodes { let node = TrustedPeer::from_str(enode).unwrap(); diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index b4b2812303..0227f93652 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -31,6 +31,7 @@ reth-tokio-util.workspace = true reth-trie.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true revm-primitives.workspace = true alloy-rpc-types-engine.workspace = true @@ -45,4 +46,13 @@ tokio-stream.workspace = true tracing.workspace = true [features] -optimism = ["reth-provider/optimism", "reth-optimism-consensus"] +optimism = [ + "reth-provider/optimism", + "reth-optimism-consensus", + "reth-beacon-consensus/optimism", + "reth-execution-types/optimism", + "reth-optimism-consensus?/optimism", + "reth-primitives/optimism", + "revm-primitives/optimism", + "reth-chainspec/optimism" +] diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs index f9b80f10bb..0083192d7d 100644 --- a/crates/consensus/auto-seal/src/client.rs +++ b/crates/consensus/auto-seal/src/client.rs @@ -1,6 +1,7 @@ //! This includes download client implementations for auto sealing miners. 
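// Editor's sketch (not from this diff): the Cancun blob-gas accounting used
// below in `StorageInner`, isolated as a standalone helper. Only EIP-4844
// transactions carry blob gas, so all other transaction types are filtered out
// before summing. `total_blob_gas` is a hypothetical name for illustration.
use reth_primitives::TransactionSigned;

fn total_blob_gas(transactions: &[TransactionSigned]) -> u64 {
    transactions
        .iter()
        .filter_map(|tx| tx.transaction.as_eip4844()) // keep EIP-4844 txs only
        .map(|blob_tx| blob_tx.blob_gas()) // gas per blob times blob count
        .sum()
}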
use crate::Storage; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, @@ -9,7 +10,7 @@ use reth_network_p2p::{ priority::Priority, }; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header}; +use reth_primitives::{BlockBody, Header}; use std::fmt::Debug; use tracing::{trace, warn}; diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 3424b68b34..ce9204565f 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -15,6 +15,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use alloy_eips::{eip1898::BlockHashOrNumber, eip7685::Requests}; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_beacon_consensus::BeaconEngineMessage; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -25,8 +26,8 @@ use reth_execution_errors::{ }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - proofs, Block, BlockBody, BlockHashOrNumber, BlockWithSenders, Header, Requests, SealedBlock, - SealedHeader, TransactionSigned, Withdrawals, + proofs, Block, BlockBody, BlockWithSenders, Header, SealedBlock, SealedHeader, + TransactionSigned, Withdrawals, }; use reth_provider::{BlockReaderIdExt, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; @@ -282,17 +283,13 @@ impl StorageInner { parent.next_block_base_fee(chain_spec.base_fee_params_at_timestamp(timestamp)) }); - let blob_gas_used = if chain_spec.is_cancun_active_at_timestamp(timestamp) { - let mut sum_blob_gas_used = 0; - for tx in transactions { - if let Some(blob_tx) = tx.transaction.as_eip4844() { - sum_blob_gas_used += blob_tx.blob_gas(); - } - } - Some(sum_blob_gas_used) - } else { - None - }; + let blob_gas_used = chain_spec.is_cancun_active_at_timestamp(timestamp).then(|| { + transactions + .iter() + .filter_map(|tx| tx.transaction.as_eip4844()) + .map(|blob_tx| blob_tx.blob_gas()) + .sum::<u64>() + }); let mut header = Header { parent_hash: self.best_hash, @@ -304,8 +301,8 @@ impl StorageInner { gas_limit: chain_spec.max_gas_limit(), timestamp, base_fee_per_gas, - blob_gas_used: blob_gas_used.map(Into::into), - requests_root: requests.map(|r| proofs::calculate_requests_root(&r.0)), + blob_gas_used, + requests_hash: requests.map(|r| r.requests_hash()), ..Default::default() }; @@ -316,14 +313,10 @@ impl StorageInner { header.blob_gas_used = Some(0); let (parent_excess_blob_gas, parent_blob_gas_used) = match parent { - Some(parent_block) - if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) => - { - ( - parent_block.excess_blob_gas.unwrap_or_default(), - parent_block.blob_gas_used.unwrap_or_default(), - ) - } + Some(parent) if chain_spec.is_cancun_active_at_timestamp(parent.timestamp) => ( + parent.excess_blob_gas.unwrap_or_default(), + parent.blob_gas_used.unwrap_or_default(), + ), _ => (0, 0), }; header.excess_blob_gas = @@ -375,7 +368,6 @@ impl StorageInner { ommers: ommers.clone(), withdrawals: withdrawals.clone(), sidecars: None, - requests: requests.clone(), }, } .with_recovered_senders() @@ -399,13 +391,8 @@ impl StorageInner { // root here let Block { mut header, body, ..
} = block.block; - let body = BlockBody { - transactions: body.transactions, - ommers, - withdrawals, - sidecars: None, - requests, - }; + let body = + BlockBody { transactions: body.transactions, ommers, withdrawals, sidecars: None }; trace!(target: "consensus::auto", ?execution_outcome, ?header, ?body, "executed block, calculating state root and completing header"); // now we need to update certain header fields with the results of the execution @@ -696,7 +683,7 @@ mod tests { timestamp, base_fee_per_gas: None, blob_gas_used: Some(0), - requests_root: None, + requests_hash: None, excess_blob_gas: Some(0), ..Default::default() } diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index e4873615f1..75ddda9086 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -3,7 +3,7 @@ use alloy_rpc_types_engine::ForkchoiceState; use futures_util::{future::BoxFuture, FutureExt}; use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_evm::execute::BlockExecutorProvider; use reth_provider::{CanonChainTracker, StateProviderFactory}; use reth_stages_api::PipelineEvent; @@ -113,7 +113,6 @@ where let to_engine = this.to_engine.clone(); let client = this.client.clone(); let chain_spec = Arc::clone(&this.chain_spec); - let pool = this.pool.clone(); let events = this.pipe_line_events.take(); let executor = this.block_executor.clone(); @@ -139,11 +138,6 @@ where &executor, ) { Ok((new_header, _bundle_state)) => { - // clear all transactions from pool - pool.remove_transactions( - transactions.iter().map(|tx| tx.hash()).collect(), - ); - let state = ForkchoiceState { head_block_hash: new_header.hash(), finalized_block_hash: new_header.hash(), @@ -161,6 +155,7 @@ where state, payload_attrs: None, tx, + version: EngineApiMessageVersion::default(), }); debug!(target: "consensus::auto", ?state, "Sent fork choice update"); diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index c1a83e9252..54e801006d 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -28,10 +28,12 @@ reth-tokio-util.workspace = true reth-engine-primitives.workspace = true reth-network-p2p.workspace = true reth-node-types.workspace = true +reth-chainspec = { workspace = true, optional = true } # ethereum alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-eips.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -48,8 +50,6 @@ thiserror.workspace = true schnellru.workspace = true itertools.workspace = true -reth-chainspec = { workspace = true, optional = true } - [dev-dependencies] # reth reth-payload-builder = { workspace = true, features = ["test-utils"] } @@ -78,9 +78,12 @@ assert_matches.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-blockchain-tree/optimism", - "reth-chainspec" + "reth-chainspec", + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-blockchain-tree/optimism", + "reth-db/optimism", + "reth-db-api/optimism", + "reth-chainspec?/optimism" ] -bsc = [] \ No newline at end of file +bsc = [] diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs index 975c2ee3bc..a9d9301738 100644 --- 
a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/consensus/beacon/src/engine/forkchoice.rs @@ -8,7 +8,6 @@ pub struct ForkchoiceStateTracker { /// /// Caution: this can be invalid. latest: Option, - /// Tracks the latest forkchoice state that we received to which we need to sync. last_syncing: Option, /// The latest valid forkchoice state that we received and processed as valid. @@ -48,19 +47,19 @@ impl ForkchoiceStateTracker { /// Returns whether the latest received FCU is valid: [`ForkchoiceStatus::Valid`] #[allow(dead_code)] pub(crate) fn is_latest_valid(&self) -> bool { - self.latest_status().map(|s| s.is_valid()).unwrap_or(false) + self.latest_status().map_or(false, |s| s.is_valid()) } /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Syncing`] #[allow(dead_code)] pub(crate) fn is_latest_syncing(&self) -> bool { - self.latest_status().map(|s| s.is_syncing()).unwrap_or(false) + self.latest_status().map_or(false, |s| s.is_syncing()) } /// Returns whether the latest received FCU is invalid: [`ForkchoiceStatus::Invalid`] #[allow(dead_code)] pub(crate) fn is_latest_invalid(&self) -> bool { - self.latest_status().map(|s| s.is_invalid()).unwrap_or(false) + self.latest_status().map_or(false, |s| s.is_invalid()) } /// Returns the last valid head hash. @@ -75,32 +74,28 @@ impl ForkchoiceStateTracker { self.last_syncing.as_ref().map(|s| s.head_block_hash) } - /// Returns the latest received `ForkchoiceState`. + /// Returns the latest received [`ForkchoiceState`]. /// /// Caution: this can be invalid. pub const fn latest_state(&self) -> Option { self.last_valid } - /// Returns the last valid `ForkchoiceState`. + /// Returns the last valid [`ForkchoiceState`]. pub const fn last_valid_state(&self) -> Option { self.last_valid } /// Returns the last valid finalized hash. /// - /// This will return [`None`], if either there is no valid finalized forkchoice state, or the - /// finalized hash for the latest valid forkchoice state is zero. + /// This will return [`None`] if: + /// - there is no valid finalized forkchoice state, or + /// - the finalized hash for the latest valid forkchoice state is zero. #[inline] pub fn last_valid_finalized(&self) -> Option { - self.last_valid.and_then(|state| { - // if the hash is zero then we should act like there is no finalized hash - if state.finalized_block_hash.is_zero() { - None - } else { - Some(state.finalized_block_hash) - } - }) + self.last_valid + .filter(|state| !state.finalized_block_hash.is_zero()) + .map(|state| state.finalized_block_hash) } /// Returns the last received `ForkchoiceState` to which we need to sync. @@ -110,18 +105,14 @@ impl ForkchoiceStateTracker { /// Returns the sync target finalized hash. /// - /// This will return [`None`], if either there is no sync target forkchoice state, or the - /// finalized hash for the sync target forkchoice state is zero. + /// This will return [`None`] if: + /// - there is no sync target forkchoice state, or + /// - the finalized hash for the sync target forkchoice state is zero. #[inline] pub fn sync_target_finalized(&self) -> Option { - self.last_syncing.and_then(|state| { - // if the hash is zero then we should act like there is no finalized hash - if state.finalized_block_hash.is_zero() { - None - } else { - Some(state.finalized_block_hash) - } - }) + self.last_syncing + .filter(|state| !state.finalized_block_hash.is_zero()) + .map(|state| state.finalized_block_hash) } /// Returns true if no forkchoice state has been received yet.
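The `last_valid_finalized`/`sync_target_finalized` rewrites above swap a manual `and_then` plus zero-check for `Option::filter` + `map`. A self-contained sketch of the equivalence, using a local stand-in for `alloy_primitives::B256` so it runs without dependencies:

```rust
/// Stand-in for a 32-byte hash; the real code uses alloy_primitives::B256.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Hash([u8; 32]);

impl Hash {
    const ZERO: Self = Self([0; 32]);
    fn is_zero(&self) -> bool {
        *self == Self::ZERO
    }
}

/// Treat an all-zero finalized hash as "no finalized block", mirroring the
/// filter-then-map chain introduced above.
fn finalized(hash: Option<Hash>) -> Option<Hash> {
    hash.filter(|h| !h.is_zero())
}

fn main() {
    // A zero hash behaves the same as no hash at all.
    assert_eq!(finalized(Some(Hash::ZERO)), None);
    assert_eq!(finalized(None), None);
    // A non-zero hash passes through unchanged.
    assert!(finalized(Some(Hash([1; 32]))).is_some());
}
```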
@@ -150,15 +141,18 @@ pub enum ForkchoiceStatus { } impl ForkchoiceStatus { - pub(crate) const fn is_valid(&self) -> bool { + /// Returns `true` if the forkchoice state is [`ForkchoiceStatus::Valid`]. + pub const fn is_valid(&self) -> bool { matches!(self, Self::Valid) } - pub(crate) const fn is_invalid(&self) -> bool { + /// Returns `true` if the forkchoice state is [`ForkchoiceStatus::Invalid`]. + pub const fn is_invalid(&self) -> bool { matches!(self, Self::Invalid) } - pub(crate) const fn is_syncing(&self) -> bool { + /// Returns `true` if the forkchoice state is [`ForkchoiceStatus::Syncing`]. + pub const fn is_syncing(&self) -> bool { matches!(self, Self::Syncing) } @@ -219,3 +213,229 @@ impl AsRef for ForkchoiceStateHash { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_forkchoice_state_tracker_set_latest_valid() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Latest state is None + assert!(tracker.latest_status().is_none()); + + // Create a valid ForkchoiceState + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[3; 32]), + }; + let status = ForkchoiceStatus::Valid; + + tracker.set_latest(state, status); + + // Assert that the latest state is set + assert!(tracker.latest.is_some()); + assert_eq!(tracker.latest.as_ref().unwrap().state, state); + + // Assert that last valid state is updated + assert!(tracker.last_valid.is_some()); + assert_eq!(tracker.last_valid.as_ref().unwrap(), &state); + + // Assert that last syncing state is None + assert!(tracker.last_syncing.is_none()); + + // Test when there is a latest status and it is valid + assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Valid)); + } + + #[test] + fn test_forkchoice_state_tracker_set_latest_syncing() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Create a syncing ForkchoiceState + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[0; 32]), // Zero to simulate not finalized + }; + let status = ForkchoiceStatus::Syncing; + + tracker.set_latest(state, status); + + // Assert that the latest state is set + assert!(tracker.latest.is_some()); + assert_eq!(tracker.latest.as_ref().unwrap().state, state); + + // Assert that last valid state is None since the status is syncing + assert!(tracker.last_valid.is_none()); + + // Assert that last syncing state is updated + assert!(tracker.last_syncing.is_some()); + assert_eq!(tracker.last_syncing.as_ref().unwrap(), &state); + + // Test when there is a latest status and it is syncing + assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Syncing)); + } + + #[test] + fn test_forkchoice_state_tracker_set_latest_invalid() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Create an invalid ForkchoiceState + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[3; 32]), + }; + let status = ForkchoiceStatus::Invalid; + + tracker.set_latest(state, status); + + // Assert that the latest state is set + assert!(tracker.latest.is_some()); + assert_eq!(tracker.latest.as_ref().unwrap().state, state); + + // Assert that last valid state is None since the status is invalid + assert!(tracker.last_valid.is_none()); + + // Assert that last syncing state is None since the status is 
invalid + assert!(tracker.last_syncing.is_none()); + + // Test when there is a latest status and it is invalid + assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Invalid)); + } + + #[test] + fn test_forkchoice_state_tracker_sync_target() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Test when there is no last syncing state (should return None) + assert!(tracker.sync_target().is_none()); + + // Set a last syncing forkchoice state + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[3; 32]), + }; + tracker.last_syncing = Some(state); + + // Test when the last syncing state is set (should return the head block hash) + assert_eq!(tracker.sync_target(), Some(B256::from_slice(&[1; 32]))); + } + + #[test] + fn test_forkchoice_state_tracker_last_valid_finalized() { + let mut tracker = ForkchoiceStateTracker::default(); + + // No valid finalized state (should return None) + assert!(tracker.last_valid_finalized().is_none()); + + // Valid finalized state, but finalized hash is zero (should return None) + let zero_finalized_state = ForkchoiceState { + head_block_hash: B256::ZERO, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, // Zero finalized hash + }; + tracker.last_valid = Some(zero_finalized_state); + assert!(tracker.last_valid_finalized().is_none()); + + // Valid finalized state with non-zero finalized hash (should return finalized hash) + let valid_finalized_state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[123; 32]), // Non-zero finalized hash + }; + tracker.last_valid = Some(valid_finalized_state); + assert_eq!(tracker.last_valid_finalized(), Some(B256::from_slice(&[123; 32]))); + + // Reset the last valid state to None + tracker.last_valid = None; + assert!(tracker.last_valid_finalized().is_none()); + } + + #[test] + fn test_forkchoice_state_tracker_sync_target_finalized() { + let mut tracker = ForkchoiceStateTracker::default(); + + // No sync target state (should return None) + assert!(tracker.sync_target_finalized().is_none()); + + // Sync target state with finalized hash as zero (should return None) + let zero_finalized_sync_target = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::ZERO, // Zero finalized hash + }; + tracker.last_syncing = Some(zero_finalized_sync_target); + assert!(tracker.sync_target_finalized().is_none()); + + // Sync target state with non-zero finalized hash (should return the hash) + let valid_sync_target = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[22; 32]), // Non-zero finalized hash + }; + tracker.last_syncing = Some(valid_sync_target); + assert_eq!(tracker.sync_target_finalized(), Some(B256::from_slice(&[22; 32]))); + + // Reset the last sync target state to None + tracker.last_syncing = None; + assert!(tracker.sync_target_finalized().is_none()); + } + + #[test] + fn test_forkchoice_state_tracker_is_empty() { + let mut forkchoice = ForkchoiceStateTracker::default(); + + // Initially, no forkchoice state has been received, so it should be empty. + assert!(forkchoice.is_empty()); + + // After setting a forkchoice state, it should no longer be empty. 
+ forkchoice.set_latest(ForkchoiceState::default(), ForkchoiceStatus::Valid); + assert!(!forkchoice.is_empty()); + + // Reset the forkchoice latest, it should be empty again. + forkchoice.latest = None; + assert!(forkchoice.is_empty()); + } + + #[test] + fn test_forkchoice_state_hash_find() { + // Define example hashes + let head_hash = B256::random(); + let safe_hash = B256::random(); + let finalized_hash = B256::random(); + let non_matching_hash = B256::random(); + + // Create a ForkchoiceState with specific hashes + let state = ForkchoiceState { + head_block_hash: head_hash, + safe_block_hash: safe_hash, + finalized_block_hash: finalized_hash, + }; + + // Test finding the head hash + assert_eq!( + ForkchoiceStateHash::find(&state, head_hash), + Some(ForkchoiceStateHash::Head(head_hash)) + ); + + // Test finding the safe hash + assert_eq!( + ForkchoiceStateHash::find(&state, safe_hash), + Some(ForkchoiceStateHash::Safe(safe_hash)) + ); + + // Test finding the finalized hash + assert_eq!( + ForkchoiceStateHash::find(&state, finalized_hash), + Some(ForkchoiceStateHash::Finalized(finalized_hash)) + ); + + // Test with a hash that doesn't match any of the hashes in ForkchoiceState + assert_eq!(ForkchoiceStateHash::find(&state, non_matching_hash), None); + } +} diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 65b7c38df9..f8840cf78a 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -5,10 +5,10 @@ use crate::{ BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, }; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; use futures::TryFutureExt; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_errors::RethResult; use reth_tokio_util::{EventSender, EventStream}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; @@ -46,10 +46,10 @@ where pub async fn new_payload( &self, payload: ExecutionPayload, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result { let (tx, rx) = oneshot::channel(); - let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }); + let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, sidecar, tx }); rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)? } @@ -60,9 +60,10 @@ where &self, state: ForkchoiceState, payload_attrs: Option, + version: EngineApiMessageVersion, ) -> Result { Ok(self - .send_fork_choice_updated(state, payload_attrs) + .send_fork_choice_updated(state, payload_attrs, version) .map_err(|_| BeaconForkChoiceUpdateError::EngineUnavailable) .await?? .await?) 
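`new_payload` now threads an `ExecutionPayloadSidecar` through the engine handle instead of `Option<CancunPayloadFields>`, so one value carries all version-specific extras. A hedged sketch of how a caller might construct one; the `v3`/`none` constructor names follow the `alloy-rpc-types-engine` API this diff imports, but treat the exact signatures as an assumption:

```rust
use alloy_primitives::B256;
use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar};

/// Build the sidecar for a payload submission: Cancun payloads carry the v3
/// fields, pre-Cancun payloads carry none (matching the
/// `ExecutionPayloadSidecar::none()` calls in the tests above).
fn sidecar_for(
    parent_beacon_block_root: Option<B256>,
    versioned_hashes: Vec<B256>,
) -> ExecutionPayloadSidecar {
    match parent_beacon_block_root {
        Some(root) => ExecutionPayloadSidecar::v3(CancunPayloadFields {
            parent_beacon_block_root: root,
            versioned_hashes,
        }),
        None => ExecutionPayloadSidecar::none(),
    }
}
```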
@@ -74,12 +75,14 @@ where &self, state: ForkchoiceState, payload_attrs: Option, + version: EngineApiMessageVersion, ) -> oneshot::Receiver> { let (tx, rx) = oneshot::channel(); let _ = self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx, + version, }); rx } diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index c6b61d3462..2a3465aaf1 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -9,7 +9,8 @@ use futures::FutureExt; use reth_errors::RethResult; use reth_primitives::static_file::HighestStaticFiles; use reth_provider::{ - BlockReader, DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, + BlockReader, ChainStateBlockReader, DatabaseProviderFactory, StageCheckpointReader, + StaticFileProviderFactory, }; use reth_static_file::{StaticFileProducer, StaticFileProducerWithResult}; use reth_tasks::TaskSpawner; @@ -31,8 +32,9 @@ pub struct StaticFileHook { impl StaticFileHook where Provider: StaticFileProviderFactory - + DatabaseProviderFactory - + 'static, + + DatabaseProviderFactory< + Provider: StageCheckpointReader + BlockReader + ChainStateBlockReader, + > + 'static, { /// Create a new instance pub fn new( @@ -104,6 +106,11 @@ where return Ok(None) }; + let finalized_block_number = locked_static_file_producer + .last_finalized_block()? + .map(|on_disk| finalized_block_number.min(on_disk)) + .unwrap_or(finalized_block_number); + let targets = locked_static_file_producer.get_static_file_targets(HighestStaticFiles { headers: Some(finalized_block_number), @@ -138,8 +145,9 @@ where impl EngineHook for StaticFileHook where Provider: StaticFileProviderFactory - + DatabaseProviderFactory - + 'static, + + DatabaseProviderFactory< + Provider: StageCheckpointReader + BlockReader + ChainStateBlockReader, + > + 'static, { fn name(&self) -> &'static str { "StaticFile" diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index fdaad0cc4b..fa7457c122 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -1,10 +1,10 @@ use crate::engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceState, + ExecutionPayload, ExecutionPayloadSidecar, ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, }; use futures::{future::Either, FutureExt}; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_errors::RethResult; use reth_payload_primitives::PayloadBuilderError; use std::{ @@ -144,8 +144,9 @@ pub enum BeaconEngineMessage { NewPayload { /// The execution payload received by Engine API. payload: ExecutionPayload, - /// The cancun-related newPayload fields, if any. - cancun_fields: Option, + /// The execution payload sidecar with additional version-specific fields received by + /// engine API. + sidecar: ExecutionPayloadSidecar, /// The sender for returning payload status result. tx: oneshot::Sender>, }, @@ -155,6 +156,8 @@ pub enum BeaconEngineMessage { state: ForkchoiceState, /// The payload attributes for block building. payload_attrs: Option, + /// The Engine API Version. 
+ version: EngineApiMessageVersion, /// The sender for returning forkchoice updated result. tx: oneshot::Sender>, }, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index f720d52793..27d663301a 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,6 +1,7 @@ +use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash}; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; use futures::{stream::BoxStream, Future, StreamExt}; @@ -9,7 +10,7 @@ use reth_blockchain_tree_api::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }; -use reth_engine_primitives::{EngineTypes, PayloadTypes}; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes, PayloadTypes}; use reth_errors::{BlockValidationError, ProviderResult, RethError, RethResult}; use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, @@ -19,9 +20,7 @@ use reth_node_types::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{ - constants::EPOCH_SLOTS, BlockNumHash, Head, Header, SealedBlock, SealedHeader, -}; +use reth_primitives::{Head, Header, SealedBlock, SealedHeader}; use reth_provider::{ providers::ProviderNodeTypes, BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, StageCheckpointReader, @@ -429,7 +428,12 @@ where } else if let Some(attrs) = attrs { // the CL requested to build a new payload on top of this new VALID head let head = outcome.into_header().unseal(); - self.process_payload_attributes(attrs, head, state) + self.process_payload_attributes( + attrs, + head, + state, + EngineApiMessageVersion::default(), + ) } else { OnForkChoiceUpdated::valid(PayloadStatus::new( PayloadStatusEnum::Valid, @@ -462,8 +466,7 @@ where ) -> bool { // On Optimism, the proposers are allowed to reorg their own chain at will. #[cfg(feature = "optimism")] - if reth_chainspec::EthChainSpec::chain(self.blockchain.chain_spec().as_ref()).is_optimism() - { + if reth_chainspec::EthChainSpec::is_optimism(&self.blockchain.chain_spec()) { debug!( target: "consensus::engine", fcu_head_num=?header.number, @@ -1081,11 +1084,11 @@ where /// /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and /// returns an error if an internal error occurred. 
- #[instrument(level = "trace", skip(self, payload, cancun_fields), fields(block_hash = ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] + #[instrument(level = "trace", skip(self, payload, sidecar), fields(block_hash = ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] fn on_new_payload( &mut self, payload: ExecutionPayload, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result, BeaconOnNewPayloadError> { self.metrics.new_payload_messages.increment(1); @@ -1115,10 +1118,7 @@ where // // This validation **MUST** be instantly run in all cases even during active sync process. let parent_hash = payload.parent_hash(); - let block = match self - .payload_validator - .ensure_well_formed_payload(payload, cancun_fields.into()) - { + let block = match self.payload_validator.ensure_well_formed_payload(payload, sidecar) { Ok(block) => block, Err(error) => { error!(target: "consensus::engine", %error, "Invalid payload"); @@ -1165,6 +1165,7 @@ where attrs: ::PayloadAttributes, head: Header, state: ForkchoiceState, + version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held @@ -1182,6 +1183,7 @@ where match <::PayloadBuilderAttributes as PayloadBuilderAttributes>::try_new( state.head_block_hash, attrs, + version as u8 ) { Ok(attributes) => { // send the payload to the builder and return the receiver for the pending payload @@ -1869,11 +1871,16 @@ where // sensitive, hence they are polled first. 
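`process_payload_attributes` above now receives an `EngineApiMessageVersion` and forwards it to the attributes check as a raw `u8` via `version as u8`. An illustrative sketch with a hypothetical version enum — the variant set and the `V3` default are assumptions for illustration, not reth's actual definition:

```rust
/// Hypothetical stand-in for reth's EngineApiMessageVersion; discriminants
/// are chosen so that `as u8` yields the engine API version number.
#[derive(Clone, Copy, Debug, Default)]
#[allow(dead_code)]
enum MessageVersion {
    V1 = 1,
    V2 = 2,
    #[default]
    V3 = 3,
    V4 = 4,
}

fn main() {
    // Internally generated FCUs use the default version; the cast mirrors
    // `try_new(state.head_block_hash, attrs, version as u8)` above.
    let version = MessageVersion::default();
    assert_eq!(version as u8, 3);
}
```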
if let Poll::Ready(Some(msg)) = this.engine_message_stream.poll_next_unpin(cx) { match msg { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { + BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version: _version, + } => { this.on_forkchoice_updated(state, payload_attrs, tx); } - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { - match this.on_new_payload(payload, cancun_fields) { + BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { + match this.on_new_payload(payload, sidecar) { Ok(Either::Right(block)) => { this.set_blockchain_tree_action( BlockchainTreeAction::InsertNewPayload { block, tx }, @@ -2061,7 +2068,12 @@ mod tests { assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because no FCUs were received - let _ = env.send_new_payload(block_to_payload_v1(SealedBlock::default()), None).await; + let _ = env + .send_new_payload( + block_to_payload_v1(SealedBlock::default()), + ExecutionPayloadSidecar::none(), + ) + .await; assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); @@ -2626,7 +2638,7 @@ mod tests { 0, BlockParams { ommers_count: Some(0), ..Default::default() }, )), - None, + ExecutionPayloadSidecar::none(), ) .await; @@ -2641,7 +2653,7 @@ mod tests { 1, BlockParams { ommers_count: Some(0), ..Default::default() }, )), - None, + ExecutionPayloadSidecar::none(), ) .await; @@ -2719,7 +2731,10 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing( + block_to_payload_v1(block2.clone()), + ExecutionPayloadSidecar::none(), + ) .await .unwrap(); @@ -2854,7 +2869,9 @@ mod tests { 2, BlockParams { parent: Some(parent), ommers_count: Some(0), ..Default::default() }, ); - let res = env.send_new_payload(block_to_payload_v1(block), None).await; + let res = env + .send_new_payload(block_to_payload_v1(block), ExecutionPayloadSidecar::none()) + .await; let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2924,7 +2941,10 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing( + block_to_payload_v1(block2.clone()), + ExecutionPayloadSidecar::none(), + ) .await .unwrap(); diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 9f095b0c03..0e201dfcc0 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -6,7 +6,7 @@ use crate::{ }; use alloy_primitives::{BlockNumber, Sealable, B256}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, @@ -19,6 +19,7 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_engine_primitives::EngineApiMessageVersion; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::{either::Either, test_utils::MockExecutorProvider}; use reth_evm_ethereum::execute::EthExecutorProvider; @@ -68,9 +69,9 @@ impl TestEnv { pub 
async fn send_new_payload>( &self, payload: T, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result { - self.engine_handle.new_payload(payload.into(), cancun_fields).await + self.engine_handle.new_payload(payload.into(), sidecar).await } /// Sends the `ExecutionPayload` message to the consensus engine and retries if the engine @@ -78,11 +79,11 @@ impl TestEnv { pub async fn send_new_payload_retry_on_syncing>( &self, payload: T, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result { let payload: ExecutionPayload = payload.into(); loop { - let result = self.send_new_payload(payload.clone(), cancun_fields.clone()).await?; + let result = self.send_new_payload(payload.clone(), sidecar.clone()).await?; if !result.is_syncing() { return Ok(result) } @@ -93,7 +94,9 @@ impl TestEnv { &self, state: ForkchoiceState, ) -> Result { - self.engine_handle.fork_choice_updated(state, None).await + self.engine_handle + .fork_choice_updated(state, None, EngineApiMessageVersion::default()) + .await } /// Sends the `ForkchoiceUpdated` message to the consensus engine and retries if the engine @@ -103,7 +106,10 @@ impl TestEnv { state: ForkchoiceState, ) -> Result { loop { - let result = self.engine_handle.fork_choice_updated(state, None).await?; + let result = self + .engine_handle + .fork_choice_updated(state, None, EngineApiMessageVersion::default()) + .await?; if !result.is_syncing() { return Ok(result) } diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 0c8e434e5e..cc888c098f 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -19,6 +19,8 @@ reth-consensus.workspace = true # ethereum alloy-primitives.workspace = true revm-primitives.workspace = true +alloy-consensus.workspace = true +alloy-eips.workspace = true [dev-dependencies] reth-storage-api.workspace = true diff --git a/crates/consensus/common/src/calc.rs b/crates/consensus/common/src/calc.rs index 3f519332fe..e30c5b715f 100644 --- a/crates/consensus/common/src/calc.rs +++ b/crates/consensus/common/src/calc.rs @@ -1,6 +1,6 @@ +use alloy_consensus::constants::ETH_TO_WEI; use alloy_primitives::{BlockNumber, U256}; use reth_chainspec::{EthereumHardfork, Hardforks}; -use reth_primitives::constants::ETH_TO_WEI; /// Calculates the base block reward. /// @@ -57,7 +57,7 @@ pub fn base_block_reward_pre_merge(chain_spec: impl Hardforks, block_number: Blo /// ``` /// # use reth_chainspec::MAINNET; /// # use reth_consensus_common::calc::{base_block_reward, block_reward}; -/// # use reth_primitives::constants::ETH_TO_WEI; +/// # use alloy_consensus::constants::ETH_TO_WEI; /// # use alloy_primitives::U256; /// # /// // This is block 126 on mainnet. diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index f813da3cad..bba40df7e6 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,14 +1,10 @@ //! Collection of methods for block validation. 
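The validation module below now sources `DATA_GAS_PER_BLOB` and `MAX_DATA_GAS_PER_BLOCK` from `alloy-eips`. A small sketch of the kind of Cancun blob-gas check these constants back; reth's actual `validate_cancun_gas` sums blob gas across the block's transactions, while this reduced version only checks a header total:

```rust
use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK};

/// EIP-4844-style sanity check: blob gas must be a whole number of blobs
/// and must not exceed the per-block maximum (6 blobs at Cancun).
fn check_blob_gas(blob_gas_used: u64) -> Result<(), String> {
    if blob_gas_used > MAX_DATA_GAS_PER_BLOCK {
        return Err(format!("blob gas used {blob_gas_used} exceeds max {MAX_DATA_GAS_PER_BLOCK}"));
    }
    if blob_gas_used % DATA_GAS_PER_BLOB != 0 {
        return Err(format!("blob gas used {blob_gas_used} is not a multiple of {DATA_GAS_PER_BLOB}"));
    }
    Ok(())
}

fn main() {
    assert!(check_blob_gas(2 * DATA_GAS_PER_BLOB).is_ok());
    assert!(check_blob_gas(7 * DATA_GAS_PER_BLOB).is_err());
}
```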
+use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; +use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{ - constants::{ - eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, - MAXIMUM_EXTRA_DATA_SIZE, - }, - EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader, -}; +use reth_primitives::{EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader}; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. @@ -77,24 +73,6 @@ pub fn validate_cancun_gas(block: &SealedBlock) -> Result<(), ConsensusError> { Ok(()) } -/// Validate that requests root is present if Prague is active. -/// -/// See [EIP-7685]: General purpose execution layer requests -/// -/// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-7685 -#[inline] -pub fn validate_prague_request(block: &SealedBlock) -> Result<(), ConsensusError> { - let requests_root = - block.body.calculate_requests_root().ok_or(ConsensusError::BodyRequestsMissing)?; - let header_requests_root = block.requests_root.ok_or(ConsensusError::RequestsRootMissing)?; - if requests_root != *header_requests_root { - return Err(ConsensusError::BodyRequestsRootDiff( - GotExpected { got: requests_root, expected: header_requests_root }.into(), - )); - } - Ok(()) -} - /// Validate a block without regard for state: /// /// - Compares the ommer hash in the block header to the block body @@ -127,10 +105,6 @@ pub fn validate_block_pre_execution( validate_cancun_gas(block)?; } - if chain_spec.is_prague_active_at_timestamp(block.timestamp) { - validate_prague_request(block)?; - } - Ok(()) } @@ -228,7 +202,7 @@ pub fn validate_against_parent_eip1559_base_fee { /// Receipts of the block. pub receipts: &'a [Receipt], /// EIP-7685 requests of the block. - pub requests: &'a [Request], + pub requests: &'a Requests, } impl<'a> PostExecutionInput<'a> { /// Creates a new instance of `PostExecutionInput`. - pub const fn new(receipts: &'a [Receipt], requests: &'a [Request]) -> Self { + pub const fn new(receipts: &'a [Receipt], requests: &'a Requests) -> Self { Self { receipts, requests } } } @@ -170,10 +171,10 @@ pub enum ConsensusError { #[display("mismatched block withdrawals root: {_0}")] BodyWithdrawalsRootDiff(GotExpectedBoxed), - /// Error when the requests root in the block is different from the expected requests - /// root. - #[display("mismatched block requests root: {_0}")] - BodyRequestsRootDiff(GotExpectedBoxed), + /// Error when the requests hash in the block is different from the expected requests + /// hash. + #[display("mismatched block requests hash: {_0}")] + BodyRequestsHashDiff(GotExpectedBoxed), /// Error when a block with a specific hash and number is already known. #[display("block with [hash={hash}, number={number}] is already known")] @@ -257,17 +258,17 @@ pub enum ConsensusError { #[display("missing withdrawals root")] WithdrawalsRootMissing, - /// Error when the requests root is missing. - #[display("missing requests root")] - RequestsRootMissing, + /// Error when the requests hash is missing. + #[display("missing requests hash")] + RequestsHashMissing, /// Error when an unexpected withdrawals root is encountered. #[display("unexpected withdrawals root")] WithdrawalsRootUnexpected, - /// Error when an unexpected requests root is encountered. 
- #[display("unexpected requests root")] - RequestsRootUnexpected, + /// Error when an unexpected requests hash is encountered. + #[display("unexpected requests hash")] + RequestsHashUnexpected, /// Error when withdrawals are missing. #[display("missing withdrawals")] diff --git a/crates/consensus/debug-client/Cargo.toml b/crates/consensus/debug-client/Cargo.toml index c37beef107..e73125a80b 100644 --- a/crates/consensus/debug-client/Cargo.toml +++ b/crates/consensus/debug-client/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth reth-node-api.workspace = true -reth-rpc-api.workspace = true +reth-rpc-api = { workspace = true, features = ["client"] } reth-rpc-builder.workspace = true reth-tracing.workspace = true diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index c32d0b09f2..9010c79907 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -13,10 +13,8 @@ workspace = true [dependencies] reth.workspace = true reth-chainspec.workspace = true -reth-primitives.workspace = true reth-tracing.workspace = true reth-db = { workspace = true, features = ["test-utils"] } -reth-rpc.workspace = true reth-rpc-layer.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-payload-primitives.workspace = true @@ -25,12 +23,11 @@ reth-node-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-stages-types.workspace = true reth-network-peers.workspace = true -reth-node-ethereum.workspace = true -reth-rpc-types-compat.workspace = true +reth-engine-local.workspace = true # rpc -jsonrpsee-types.workspace = true jsonrpsee.workspace = true +url.workspace = true # ethereum alloy-primitives.workspace = true @@ -48,6 +45,8 @@ alloy-rpc-types.workspace = true alloy-network.workspace = true alloy-consensus = { workspace = true, features = ["kzg"] } tracing.workspace = true +derive_more.workspace = true -[features] -bsc = ["reth-rpc/bsc"] \ No newline at end of file +# TODO: fix this +#[features] +#bsc = ["reth-rpc/bsc"] diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index 1b0ff9b54e..cfa245e1de 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -12,24 +12,27 @@ use reth::{ types::engine::{ForkchoiceState, PayloadStatusEnum}, }, }; +use reth_chainspec::EthereumHardforks; +use reth_node_builder::BuiltPayload; use reth_payload_builder::PayloadId; use reth_rpc_layer::AuthClientService; -use std::marker::PhantomData; +use std::{marker::PhantomData, sync::Arc}; /// Helper for engine api operations #[derive(Debug)] -pub struct EngineApiTestContext { +pub struct EngineApiTestContext { + pub chain_spec: Arc, pub canonical_stream: CanonStateNotificationStream, pub engine_api_client: HttpClient>, pub _marker: PhantomData, } -impl EngineApiTestContext { +impl EngineApiTestContext { /// Retrieves a v3 payload from the engine api pub async fn get_payload_v3( &self, payload_id: PayloadId, - ) -> eyre::Result { + ) -> eyre::Result { Ok(EngineApiClient::::get_payload_v3(&self.engine_api_client, payload_id).await?) 
} @@ -47,22 +50,38 @@ impl EngineApiTestContext { payload: E::BuiltPayload, payload_builder_attributes: E::PayloadBuilderAttributes, expected_status: PayloadStatusEnum, - versioned_hashes: Vec, ) -> eyre::Result where - E::ExecutionPayloadV3: From + PayloadEnvelopeExt, + E::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, + E::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, { - // setup payload for submission - let envelope_v3: ::ExecutionPayloadV3 = payload.into(); - + let versioned_hashes = + payload.block().blob_versioned_hashes_iter().copied().collect::>(); // submit payload to engine api - let submission = EngineApiClient::::new_payload_v3( - &self.engine_api_client, - envelope_v3.execution_payload(), - versioned_hashes, - payload_builder_attributes.parent_beacon_block_root().unwrap(), - ) - .await?; + let submission = if self + .chain_spec + .is_prague_active_at_timestamp(payload_builder_attributes.timestamp()) + { + let requests = payload.requests().unwrap(); + let envelope: ::ExecutionPayloadEnvelopeV4 = payload.into(); + EngineApiClient::::new_payload_v4( + &self.engine_api_client, + envelope.execution_payload(), + versioned_hashes, + payload_builder_attributes.parent_beacon_block_root().unwrap(), + requests, + ) + .await? + } else { + let envelope: ::ExecutionPayloadEnvelopeV3 = payload.into(); + EngineApiClient::::new_payload_v3( + &self.engine_api_client, + envelope.execution_payload(), + versioned_hashes, + payload_builder_attributes.parent_beacon_block_root().unwrap(), + ) + .await? + }; assert_eq!(submission.status, expected_status); diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 998b48e704..1e9b39058e 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -7,17 +7,18 @@ use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, builder::{NodeBuilder, NodeConfig, NodeHandle}, network::PeersHandleProvider, - rpc::api::eth::{helpers::AddDevSigners, FullEthApiServer}, + rpc::server_types::RpcModuleSelection, tasks::TaskManager, }; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; +use reth_engine_local::LocalPayloadAttributesBuilder; use reth_node_builder::{ - components::NodeComponentsBuilder, rpc::EthApiBuilderProvider, FullNodeTypesAdapter, Node, - NodeAdapter, NodeAddOns, NodeComponents, NodeTypesWithDBAdapter, NodeTypesWithEngine, - RethFullAdapter, + components::NodeComponentsBuilder, rpc::RethRpcAddOns, EngineNodeLauncher, + FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, NodeTypesWithDBAdapter, + NodeTypesWithEngine, PayloadAttributesBuilder, PayloadTypes, }; -use reth_provider::providers::BlockchainProvider; +use reth_provider::providers::{BlockchainProvider, BlockchainProvider2}; use tracing::{span, Level}; use wallet::Wallet; @@ -49,6 +50,7 @@ pub async fn setup( num_nodes: usize, chain_spec: Arc, is_dev: bool, + attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where N: Default + Node> + NodeTypesWithEngine, @@ -56,10 +58,7 @@ where TmpNodeAdapter, Components: NodeComponents, Network: PeersHandleProvider>, >, - N::AddOns: NodeAddOns< - Adapter, - EthApi: FullEthApiServer + AddDevSigners + EthApiBuilderProvider>, - >, + N::AddOns: RethRpcAddOns>, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -87,7 +86,95 @@ where .launch() .await?; - let mut node = NodeTestContext::new(node).await?; + 
let mut node = NodeTestContext::new(node, attributes_generator).await?; + + // Connect each node in a chain. + if let Some(previous_node) = nodes.last_mut() { + previous_node.connect(&mut node).await; + } + + // Connect last node with the first if there are more than two + if idx + 1 == num_nodes && num_nodes > 2 { + if let Some(first_node) = nodes.first_mut() { + node.connect(first_node).await; + } + } + + nodes.push(node); + } + + Ok((nodes, tasks, Wallet::default().with_chain_id(chain_spec.chain().into()))) +} + +/// Creates the initial setup with `num_nodes` started and interconnected. +pub async fn setup_engine( + num_nodes: usize, + chain_spec: Arc, + is_dev: bool, + attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, +) -> eyre::Result<( + Vec>>>, + TaskManager, + Wallet, +)> +where + N: Default + + Node>>> + + NodeTypesWithEngine, + N::ComponentsBuilder: NodeComponentsBuilder< + TmpNodeAdapter>>, + Components: NodeComponents< + TmpNodeAdapter>>, + Network: PeersHandleProvider, + >, + >, + N::AddOns: RethRpcAddOns>>>, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< + <::Engine as PayloadTypes>::PayloadAttributes, + >, +{ + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let network_config = NetworkArgs { + discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + ..NetworkArgs::default() + }; + + // Create nodes and peer them + let mut nodes: Vec> = Vec::with_capacity(num_nodes); + + for idx in 0..num_nodes { + let node_config = NodeConfig::new(chain_spec.clone()) + .with_network(network_config.clone()) + .with_unused_ports() + .with_rpc( + RpcServerArgs::default() + .with_unused_ports() + .with_http() + .with_http_api(RpcModuleSelection::All), + ) + .set_dev(is_dev); + + let span = span!(Level::INFO, "node", idx); + let _enter = span.enter(); + let node = N::default(); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .with_types_and_provider::>() + .with_components(node.components_builder()) + .with_add_ons(node.add_ons()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + Default::default(), + ); + builder.launch_with(launcher) + }) + .await?; + + let mut node = NodeTestContext::new(node, attributes_generator).await?; // Connect each node in a chain. 
if let Some(previous_node) = nodes.last_mut() { @@ -110,17 +197,17 @@ where // Type aliases type TmpDB = Arc>; -type TmpNodeAdapter = FullNodeTypesAdapter< - NodeTypesWithDBAdapter, - BlockchainProvider>, ->; - -type Adapter = NodeAdapter< - RethFullAdapter, - <>>::ComponentsBuilder as NodeComponentsBuilder< - RethFullAdapter, +type TmpNodeAdapter>> = + FullNodeTypesAdapter, Provider>; + +/// Type alias for a `NodeAdapter` +pub type Adapter>> = NodeAdapter< + TmpNodeAdapter, + <>>::ComponentsBuilder as NodeComponentsBuilder< + TmpNodeAdapter, >>::Components, >; /// Type alias for a type of `NodeHelper` -pub type NodeHelperType = NodeTestContext, AO>; +pub type NodeHelperType>> = + NodeTestContext, AO>; diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 065c85fd01..acabc2bf62 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -14,24 +14,22 @@ use reth::{ network::PeersHandleProvider, providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, rpc::{ - api::eth::{ - helpers::{EthApiSpec, EthTransactions, TraceExt}, - FullEthApiTypes, - }, + api::eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, types::engine::PayloadStatusEnum, }, }; use reth_chainspec::EthereumHardforks; -use reth_node_builder::{NodeAddOns, NodeTypesWithEngine}; +use reth_node_builder::{rpc::RethRpcAddOns, NodeTypes, NodeTypesWithEngine}; use reth_stages_types::StageId; use tokio_stream::StreamExt; +use url::Url; /// A helper struct to handle node actions #[allow(missing_debug_implementations)] pub struct NodeTestContext where Node: FullNodeComponents, - AddOns: NodeAddOns, + AddOns: RethRpcAddOns, { /// The core structure representing the full node. pub inner: FullNode, @@ -40,7 +38,10 @@ where /// Context for testing network functionalities. pub network: NetworkTestContext, /// Context for testing the Engine API. - pub engine_api: EngineApiTestContext<::Engine>, + pub engine_api: EngineApiTestContext< + ::Engine, + ::ChainSpec, + >, /// Context for testing RPC features.
pub rpc: RpcTestContext, } @@ -51,22 +52,26 @@ where Node: FullNodeComponents, Node::Types: NodeTypesWithEngine, Node::Network: PeersHandleProvider, - AddOns: NodeAddOns, + AddOns: RethRpcAddOns, { /// Creates a new test node - pub async fn new(node: FullNode) -> eyre::Result { + pub async fn new( + node: FullNode, + attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes + 'static, + ) -> eyre::Result { let builder = node.payload_builder.clone(); Ok(Self { inner: node.clone(), - payload: PayloadTestContext::new(builder).await?, + payload: PayloadTestContext::new(builder, attributes_generator).await?, network: NetworkTestContext::new(node.network.clone()), engine_api: EngineApiTestContext { + chain_spec: node.chain_spec(), engine_api_client: node.auth_server_handle().http_client(), canonical_stream: node.provider.canonical_state_stream(), _marker: PhantomData::, }, - rpc: RpcTestContext { inner: node.rpc_registry }, + rpc: RpcTestContext { inner: node.add_ons_handle.rpc_registry }, }) } @@ -84,17 +89,17 @@ where &mut self, length: u64, tx_generator: impl Fn(u64) -> Pin>>, - attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes + Copy, ) -> eyre::Result> where - Engine::ExecutionPayloadV3: From + PayloadEnvelopeExt, - AddOns::EthApi: EthApiSpec + EthTransactions + TraceExt + FullEthApiTypes, + Engine::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, + Engine::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, + AddOns::EthApi: EthApiSpec + EthTransactions + TraceExt, { let mut chain = Vec::with_capacity(length as usize); for i in 0..length { let raw_tx = tx_generator(i).await; let tx_hash = self.rpc.inject_tx(raw_tx).await?; - let (payload, eth_attr) = self.advance_block(vec![], attributes_generator).await?; + let (payload, eth_attr) = self.advance_block().await?; let block_hash = payload.block().hash(); let block_number = payload.block().number; self.assert_new_block(tx_hash, block_hash, block_number).await?; @@ -109,14 +114,13 @@ where /// It triggers the resolve payload via engine api and expects the built payload event. 
pub async fn new_payload( &mut self, - attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes, ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> where - ::ExecutionPayloadV3: + ::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, { // trigger new payload building draining the pool - let eth_attr = self.payload.new_payload(attributes_generator).await.unwrap(); + let eth_attr = self.payload.new_payload().await.unwrap(); // first event is the payload attributes self.payload.expect_attr_event(eth_attr.clone()).await?; // wait for the payload builder to have finished building @@ -130,23 +134,18 @@ where /// Advances the node forward one block pub async fn advance_block( &mut self, - versioned_hashes: Vec, - attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes, ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> where - ::ExecutionPayloadV3: + ::ExecutionPayloadEnvelopeV3: + From + PayloadEnvelopeExt, + ::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, { - let (payload, eth_attr) = self.new_payload(attributes_generator).await?; + let (payload, eth_attr) = self.new_payload().await?; let block_hash = self .engine_api - .submit_payload( - payload.clone(), - eth_attr.clone(), - PayloadStatusEnum::Valid, - versioned_hashes, - ) + .submit_payload(payload.clone(), eth_attr.clone(), PayloadStatusEnum::Valid) .await?; // trigger forkchoice update via engine api to commit the block to the blockchain @@ -235,4 +234,10 @@ where } Ok(()) } + + /// Returns the RPC URL. + pub fn rpc_url(&self) -> Url { + let addr = self.inner.rpc_server_handle().http_local_addr().unwrap(); + format!("http://{}", addr).parse().unwrap() + } } diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 1f9a89307b..29aa11895b 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -1,44 +1,51 @@ use futures_util::StreamExt; -use reth::api::{BuiltPayload, EngineTypes, PayloadBuilderAttributes}; +use reth::api::{BuiltPayload, PayloadBuilderAttributes}; use reth_payload_builder::{PayloadBuilderHandle, PayloadId}; -use reth_payload_primitives::{Events, PayloadBuilder}; +use reth_payload_primitives::{Events, PayloadBuilder, PayloadTypes}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations -#[derive(Debug)] -pub struct PayloadTestContext { - pub payload_event_stream: BroadcastStream>, - payload_builder: PayloadBuilderHandle, +#[derive(derive_more::Debug)] +pub struct PayloadTestContext { + pub payload_event_stream: BroadcastStream>, + payload_builder: PayloadBuilderHandle, pub timestamp: u64, + #[debug(skip)] + attributes_generator: Box T::PayloadBuilderAttributes>, } -impl PayloadTestContext { +impl PayloadTestContext { /// Creates a new payload helper - pub async fn new(payload_builder: PayloadBuilderHandle) -> eyre::Result { + pub async fn new( + payload_builder: PayloadBuilderHandle, + attributes_generator: impl Fn(u64) -> T::PayloadBuilderAttributes + 'static, + ) -> eyre::Result { let payload_events = payload_builder.subscribe().await?; let payload_event_stream = payload_events.into_stream(); // Cancun timestamp - Ok(Self { payload_event_stream, payload_builder, timestamp: 1710338135 }) + Ok(Self { + payload_event_stream, + payload_builder, + timestamp: 1710338135, + attributes_generator: Box::new(attributes_generator), + }) } /// Creates a new payload job from static attributes - pub async fn new_payload( - &mut self, - 
attributes_generator: impl Fn(u64) -> E::PayloadBuilderAttributes, - ) -> eyre::Result { + pub async fn new_payload(&mut self) -> eyre::Result { self.timestamp += 1; - let attributes: E::PayloadBuilderAttributes = attributes_generator(self.timestamp); - self.payload_builder.new_payload(attributes.clone()).await.unwrap(); + let attributes = (self.attributes_generator)(self.timestamp); + self.payload_builder.send_new_payload(attributes.clone()).await.unwrap()?; Ok(attributes) } /// Asserts that the next event is a payload attributes event pub async fn expect_attr_event( &mut self, - attrs: E::PayloadBuilderAttributes, + attrs: T::PayloadBuilderAttributes, ) -> eyre::Result<()> { let first_event = self.payload_event_stream.next().await.unwrap()?; - if let reth::payload::Events::Attributes(attr) = first_event { + if let Events::Attributes(attr) = first_event { assert_eq!(attrs.timestamp(), attr.timestamp()); } else { panic!("Expect first event as payload attributes.") @@ -59,9 +66,9 @@ impl PayloadTestContext { } /// Expects the next event to be a built payload event or panics - pub async fn expect_built_payload(&mut self) -> eyre::Result { + pub async fn expect_built_payload(&mut self) -> eyre::Result { let second_event = self.payload_event_stream.next().await.unwrap()?; - if let reth::payload::Events::BuiltPayload(payload) = second_event { + if let Events::BuiltPayload(payload) = second_event { Ok(payload) } else { panic!("Expect a built payload event."); diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index b8cbe4d77a..7b7dabdf24 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -4,12 +4,15 @@ use alloy_primitives::{Bytes, B256}; use reth::{ builder::{rpc::RpcRegistry, FullNodeComponents}, rpc::api::{ - eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, + eth::{ + helpers::{EthApiSpec, EthTransactions, TraceExt}, + EthApiTypes, + }, DebugApiServer, }, }; use reth_chainspec::EthereumHardforks; -use reth_node_builder::{EthApiTypes, NodeTypes}; +use reth_node_builder::NodeTypes; #[allow(missing_debug_implementations)] pub struct RpcTestContext { diff --git a/crates/e2e-test-utils/src/traits.rs b/crates/e2e-test-utils/src/traits.rs index 6786492140..a70bbf7afb 100644 --- a/crates/e2e-test-utils/src/traits.rs +++ b/crates/e2e-test-utils/src/traits.rs @@ -1,4 +1,5 @@ -use op_alloy_rpc_types_engine::OpExecutionPayloadEnvelopeV3; +use alloy_rpc_types::engine::ExecutionPayloadEnvelopeV4; +use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; use reth::rpc::types::engine::{ExecutionPayloadEnvelopeV3, ExecutionPayloadV3}; /// The execution payload envelope type. 
@@ -13,8 +14,20 @@ impl PayloadEnvelopeExt for OpExecutionPayloadEnvelopeV3 { } } +impl PayloadEnvelopeExt for OpExecutionPayloadEnvelopeV4 { + fn execution_payload(&self) -> ExecutionPayloadV3 { + self.execution_payload.clone() + } +} + impl PayloadEnvelopeExt for ExecutionPayloadEnvelopeV3 { fn execution_payload(&self) -> ExecutionPayloadV3 { self.execution_payload.clone() } } + +impl PayloadEnvelopeExt for ExecutionPayloadEnvelopeV4 { + fn execution_payload(&self) -> ExecutionPayloadV3 { + self.execution_payload.clone() + } +} diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index 0496030444..58a25dc125 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -56,8 +56,7 @@ impl TransactionTestContext { delegate_to: Address, wallet: PrivateKeySigner, ) -> TxEnvelope { - let authorization = - Authorization { chain_id: U256::from(chain_id), address: delegate_to, nonce: 0 }; + let authorization = Authorization { chain_id, address: delegate_to, nonce: 0 }; let signature = wallet .sign_hash_sync(&authorization.signature_hash()) .expect("could not sign authorization"); diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 51978311fa..416c4adb40 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -6,14 +6,15 @@ use eyre::OptionExt; use pretty_assertions::Comparison; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_engine_primitives::InvalidBlockHook; -use reth_evm::{system_calls::SystemCaller, ConfigureEvm}; +use reth_evm::{ + state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, +}; use reth_primitives::{Header, Receipt, SealedBlockWithSenders, SealedHeader}; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, db::states::bundle_state::BundleRetention, primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg}, - state_change::post_block_balance_increments, DatabaseCommit, StateBuilder, }; use reth_rpc_api::DebugApiClient; @@ -84,7 +85,8 @@ where EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()), ); - let mut system_caller = SystemCaller::new(&self.evm_config, self.provider.chain_spec()); + let mut system_caller = + SystemCaller::new(self.evm_config.clone(), self.provider.chain_spec()); // Apply pre-block system contract calls. 
system_caller.apply_pre_execution_changes(&block.clone().unseal(), &mut evm)?; @@ -161,7 +163,7 @@ where let response = ExecutionWitness { state: HashMap::from_iter(state), codes: Default::default(), - keys: Some(state_preimages), + keys: state_preimages, }; let re_executed_witness_path = self.save_file( format!("{}_{}.witness.re_executed.json", block.number, block.hash()), diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index d7a5d05091..fcfeabac1f 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -11,15 +11,19 @@ exclude.workspace = true [dependencies] # reth reth-beacon-consensus.workspace = true -reth-chain-state.workspace = true +reth-chainspec.workspace = true +reth-consensus.workspace = true +reth-engine-primitives.workspace = true +reth-engine-service.workspace = true reth-engine-tree.workspace = true +reth-evm.workspace = true reth-ethereum-engine-primitives.workspace = true -reth-node-types.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true -reth-primitives.workspace = true +reth-payload-validator.workspace = true reth-provider.workspace = true reth-prune.workspace = true +reth-rpc-types-compat.workspace = true reth-transaction-pool.workspace = true reth-stages-api.workspace = true @@ -36,16 +40,15 @@ futures-util.workspace = true eyre.workspace = true tracing.workspace = true -[dev-dependencies] -reth-chainspec.workspace = true -reth-chain-state.workspace = true -reth-config.workspace = true -reth-db = { workspace = true, features = ["test-utils"] } -reth-ethereum-engine-primitives.workspace = true -reth-exex-test-utils.workspace = true -reth-payload-builder = { workspace = true, features = ["test-utils"] } -reth-provider = { workspace = true, features = ["test-utils"] } -reth-tracing.workspace = true +op-alloy-rpc-types-engine = { workspace = true, optional = true } [lints] workspace = true + +[features] +optimism = [ + "op-alloy-rpc-types-engine", + "reth-beacon-consensus/optimism", + "reth-provider/optimism", + "reth-chainspec/optimism" +] diff --git a/crates/engine/local/src/lib.rs b/crates/engine/local/src/lib.rs index 1b84c8a113..26c84d50c8 100644 --- a/crates/engine/local/src/lib.rs +++ b/crates/engine/local/src/lib.rs @@ -1,4 +1,17 @@ //! A local engine service that can be used to drive a dev chain. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + pub mod miner; pub mod payload; pub mod service; + +pub use miner::MiningMode; +pub use payload::LocalPayloadAttributesBuilder; +pub use service::LocalEngineService; diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index de3d8cb8d0..7cebd30630 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -1,16 +1,31 @@ //! Contains the implementation of the mining mode for the local engine. 
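A brief usage sketch for the two mining modes this module provides; it assumes the `MiningMode::instant`/`MiningMode::interval` constructors exercised by the (removed) service tests further below, with `pool` being any `TransactionPool` implementation.

    use std::time::Duration;

    // Trigger block building as soon as a transaction reaches the pool...
    let on_tx = MiningMode::instant(pool.clone());
    // ...or on a fixed cadence, regardless of pool contents.
    let on_tick = MiningMode::interval(Duration::from_secs(1));
    // Either value is a Future that resolves when the next block should be built.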
-use alloy_primitives::TxHash;
+use alloy_primitives::{TxHash, B256};
+use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar, ForkchoiceState};
+use eyre::OptionExt;
 use futures_util::{stream::Fuse, StreamExt};
+use reth_beacon_consensus::BeaconEngineMessage;
+use reth_chainspec::EthereumHardforks;
+use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes};
+use reth_payload_builder::PayloadBuilderHandle;
+use reth_payload_primitives::{
+    BuiltPayload, PayloadAttributesBuilder, PayloadBuilder, PayloadKind, PayloadTypes,
+};
+use reth_provider::{BlockReader, ChainSpecProvider};
+use reth_rpc_types_compat::engine::payload::block_to_payload;
 use reth_transaction_pool::TransactionPool;
 use std::{
     future::Future,
     pin::Pin,
     task::{Context, Poll},
-    time::Duration,
+    time::{Duration, UNIX_EPOCH},
+};
+use tokio::{
+    sync::{mpsc::UnboundedSender, oneshot},
+    time::Interval,
 };
-use tokio::time::Interval;
 use tokio_stream::wrappers::ReceiverStream;
+use tracing::error;

 /// A mining mode for the local dev engine.
 #[derive(Debug)]
@@ -58,3 +73,177 @@ impl Future for MiningMode {
         }
     }
 }
+
+/// Local miner advancing the chain.
+#[derive(Debug)]
+pub struct LocalMiner<EngineT: EngineTypes, Provider, B> {
+    /// Provider to read the current tip of the chain.
+    provider: Provider,
+    /// The payload attribute builder for the engine
+    payload_attributes_builder: B,
+    /// Sender for events to engine.
+    to_engine: UnboundedSender<BeaconEngineMessage<EngineT>>,
+    /// The mining mode for the engine
+    mode: MiningMode,
+    /// The payload builder for the engine
+    payload_builder: PayloadBuilderHandle<EngineT>,
+    /// Timestamp for the next block.
+    last_timestamp: u64,
+    /// Stores latest mined blocks.
+    last_block_hashes: Vec<B256>,
+}
+
+impl<EngineT, Provider, B> LocalMiner<EngineT, Provider, B>
+where
+    EngineT: EngineTypes,
+    Provider: BlockReader + ChainSpecProvider<ChainSpec: EthereumHardforks> + 'static,
+    B: PayloadAttributesBuilder<<EngineT as PayloadTypes>::PayloadAttributes>,
+{
+    /// Spawns a new [`LocalMiner`] with the given parameters.
+    pub fn spawn_new(
+        provider: Provider,
+        payload_attributes_builder: B,
+        to_engine: UnboundedSender<BeaconEngineMessage<EngineT>>,
+        mode: MiningMode,
+        payload_builder: PayloadBuilderHandle<EngineT>,
+    ) {
+        let latest_header =
+            provider.sealed_header(provider.best_block_number().unwrap()).unwrap().unwrap();
+
+        let miner = Self {
+            provider,
+            payload_attributes_builder,
+            to_engine,
+            mode,
+            payload_builder,
+            last_timestamp: latest_header.timestamp,
+            last_block_hashes: vec![latest_header.hash()],
+        };
+
+        // Spawn the miner
+        tokio::spawn(miner.run());
+    }
+
+    /// Runs the [`LocalMiner`] in a loop, polling the miner and building payloads.
+    async fn run(mut self) {
+        let mut fcu_interval = tokio::time::interval(Duration::from_secs(1));
+        loop {
+            tokio::select! {
+                // Wait for the interval or the pool to receive a transaction
+                _ = &mut self.mode => {
+                    if let Err(e) = self.advance().await {
+                        error!(target: "engine::local", "Error advancing the chain: {:?}", e);
+                    }
+                }
+                // send FCU once in a while
+                _ = fcu_interval.tick() => {
+                    if let Err(e) = self.update_forkchoice_state().await {
+                        error!(target: "engine::local", "Error updating fork choice: {:?}", e);
+                    }
+                }
+            }
+        }
+    }
+
+    /// Returns current forkchoice state.
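+    ///
+    /// The head hash is the most recently mined block; the safe and finalized hashes
+    /// trail it by up to 32 and 64 blocks respectively, taken from `last_block_hashes`.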
+ fn forkchoice_state(&self) -> ForkchoiceState { + ForkchoiceState { + head_block_hash: *self.last_block_hashes.last().expect("at least 1 block exists"), + safe_block_hash: *self + .last_block_hashes + .get(self.last_block_hashes.len().saturating_sub(32)) + .expect("at least 1 block exists"), + finalized_block_hash: *self + .last_block_hashes + .get(self.last_block_hashes.len().saturating_sub(64)) + .expect("at least 1 block exists"), + } + } + + /// Sends a FCU to the engine. + async fn update_forkchoice_state(&self) -> eyre::Result<()> { + let (tx, rx) = oneshot::channel(); + self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { + state: self.forkchoice_state(), + payload_attrs: None, + tx, + version: EngineApiMessageVersion::default(), + })?; + + let res = rx.await??; + if !res.forkchoice_status().is_valid() { + eyre::bail!("Invalid fork choice update") + } + + Ok(()) + } + + /// Generates payload attributes for a new block, passes them to FCU and inserts built payload + /// through newPayload. + async fn advance(&mut self) -> eyre::Result<()> { + let timestamp = std::cmp::max( + self.last_timestamp + 1, + std::time::SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("cannot be earlier than UNIX_EPOCH") + .as_secs(), + ); + + let (tx, rx) = oneshot::channel(); + self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { + state: self.forkchoice_state(), + payload_attrs: Some(self.payload_attributes_builder.build(timestamp)), + tx, + version: EngineApiMessageVersion::default(), + })?; + + let res = rx.await??.await?; + if !res.payload_status.is_valid() { + eyre::bail!("Invalid payload status") + } + + let payload_id = res.payload_id.ok_or_eyre("No payload id")?; + + let Some(Ok(payload)) = + self.payload_builder.resolve_kind(payload_id, PayloadKind::WaitForPending).await + else { + eyre::bail!("No payload") + }; + + let block = payload.block(); + + let cancun_fields = + self.provider.chain_spec().is_cancun_active_at_timestamp(block.timestamp).then(|| { + CancunPayloadFields { + parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), + versioned_hashes: block.blob_versioned_hashes().into_iter().copied().collect(), + } + }); + + let (tx, rx) = oneshot::channel(); + self.to_engine.send(BeaconEngineMessage::NewPayload { + payload: block_to_payload(payload.block().clone()), + // todo: prague support + sidecar: cancun_fields + .map(ExecutionPayloadSidecar::v3) + .unwrap_or_else(ExecutionPayloadSidecar::none), + tx, + })?; + + let res = rx.await??; + + if !res.is_valid() { + eyre::bail!("Invalid payload") + } + + self.last_timestamp = timestamp; + self.last_block_hashes.push(block.hash()); + // ensure we keep at most 64 blocks + if self.last_block_hashes.len() > 64 { + self.last_block_hashes = + self.last_block_hashes.split_off(self.last_block_hashes.len() - 64); + } + + Ok(()) + } +} diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index 4fd49f53fb..5111360d5b 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -2,29 +2,60 @@ //! [`LocalEngineService`](super::service::LocalEngineService). use alloy_primitives::{Address, B256}; +use reth_chainspec::EthereumHardforks; use reth_ethereum_engine_primitives::EthPayloadAttributes; use reth_payload_primitives::PayloadAttributesBuilder; -use std::{convert::Infallible, time::UNIX_EPOCH}; +use std::sync::Arc; /// The attributes builder for local Ethereum payload. 
 #[derive(Debug)]
-pub struct EthLocalPayloadAttributesBuilder;
-
-impl PayloadAttributesBuilder for EthLocalPayloadAttributesBuilder {
-    type PayloadAttributes = EthPayloadAttributes;
-    type Error = Infallible;
+#[non_exhaustive]
+pub struct LocalPayloadAttributesBuilder<ChainSpec> {
+    chain_spec: Arc<ChainSpec>,
+}

-    fn build(&self) -> Result<Self::PayloadAttributes, Self::Error> {
-        let ts = std::time::SystemTime::now()
-            .duration_since(UNIX_EPOCH)
-            .expect("cannot be earlier than UNIX_EPOCH");
+impl<ChainSpec> LocalPayloadAttributesBuilder<ChainSpec> {
+    /// Creates a new instance of the builder.
+    pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
+        Self { chain_spec }
+    }
+}

-        Ok(EthPayloadAttributes {
-            timestamp: ts.as_secs(),
+impl<ChainSpec> PayloadAttributesBuilder<EthPayloadAttributes>
+    for LocalPayloadAttributesBuilder<ChainSpec>
+where
+    ChainSpec: Send + Sync + EthereumHardforks + 'static,
+{
+    fn build(&self, timestamp: u64) -> EthPayloadAttributes {
+        EthPayloadAttributes {
+            timestamp,
             prev_randao: B256::random(),
             suggested_fee_recipient: Address::random(),
-            withdrawals: None,
-            parent_beacon_block_root: None,
-        })
+            withdrawals: self
+                .chain_spec
+                .is_shanghai_active_at_timestamp(timestamp)
+                .then(Default::default),
+            parent_beacon_block_root: self
+                .chain_spec
+                .is_cancun_active_at_timestamp(timestamp)
+                .then(B256::random),
+        }
+    }
+}
+
+#[cfg(feature = "optimism")]
+impl<ChainSpec> PayloadAttributesBuilder<op_alloy_rpc_types_engine::OpPayloadAttributes>
+    for LocalPayloadAttributesBuilder<ChainSpec>
+where
+    ChainSpec: Send + Sync + EthereumHardforks + 'static,
+{
+    fn build(&self, timestamp: u64) -> op_alloy_rpc_types_engine::OpPayloadAttributes {
+        op_alloy_rpc_types_engine::OpPayloadAttributes {
+            payload_attributes: self.build(timestamp),
+            transactions: None,
+            no_tx_pool: None,
+            gas_limit: None,
+            eip_1559_params: None,
+        }
+    }
+}
diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs
index 340a76a1c3..02599e227c 100644
--- a/crates/engine/local/src/service.rs
+++ b/crates/engine/local/src/service.rs
@@ -6,357 +6,160 @@
 //! with a single transaction. The `Interval` mode will initiate block
 //! building at a fixed interval.
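For orientation before the refactored service below: per produced block, the local miner now drives the same two engine API calls a consensus client would. A condensed sketch of that round-trip, mirroring `LocalMiner::advance` above; error handling is elided, and `attrs`, `forkchoice_state`, `to_engine` and `payload_builder` are the miner's own bindings.

    // 1. A forkchoice update carrying payload attributes kicks off a build job.
    let (tx, rx) = oneshot::channel();
    to_engine.send(BeaconEngineMessage::ForkchoiceUpdated {
        state: forkchoice_state,
        payload_attrs: Some(attrs),
        tx,
        version: EngineApiMessageVersion::default(),
    })?;
    let payload_id = rx.await??.await?.payload_id.ok_or_eyre("No payload id")?;

    // 2. Resolve the built payload, then submit it back through newPayload.
    let payload = payload_builder.resolve_kind(payload_id, PayloadKind::WaitForPending).await;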
-use crate::miner::MiningMode; -use eyre::eyre; -use reth_beacon_consensus::EngineNodeTypes; -use reth_chain_state::{CanonicalInMemoryState, ExecutedBlock, NewCanonicalChain}; -use reth_engine_tree::persistence::PersistenceHandle; -use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{ - BuiltPayload, PayloadAttributesBuilder, PayloadBuilder, PayloadBuilderAttributes, PayloadTypes, +use core::fmt; +use std::{ + fmt::{Debug, Formatter}, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +use crate::miner::{LocalMiner, MiningMode}; +use futures_util::{Stream, StreamExt}; +use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineNodeTypes}; +use reth_chainspec::EthChainSpec; +use reth_consensus::Consensus; +use reth_engine_service::service::EngineMessageStream; +use reth_engine_tree::{ + chain::{ChainEvent, HandlerEvent}, + engine::{ + EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineRequestHandler, FromEngine, + RequestHandlerEvent, + }, + persistence::PersistenceHandle, + tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; -use reth_provider::ProviderFactory; +use reth_evm::execute::BlockExecutorProvider; +use reth_payload_builder::PayloadBuilderHandle; +use reth_payload_primitives::{PayloadAttributesBuilder, PayloadTypes}; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_provider::{providers::BlockchainProvider2, ChainSpecProvider, ProviderFactory}; use reth_prune::PrunerWithFactory; use reth_stages_api::MetricEventsSender; -use tokio::sync::oneshot; -use tracing::debug; +use tokio::sync::mpsc::UnboundedSender; +use tracing::error; /// Provides a local dev service engine that can be used to drive the /// chain forward. -#[derive(Debug)] -pub struct LocalEngineService +/// +/// This service both produces and consumes [`BeaconEngineMessage`]s. This is done to allow +/// modifications of the stream +pub struct LocalEngineService where N: EngineNodeTypes, - B: PayloadAttributesBuilder::PayloadAttributes>, { - /// The payload builder for the engine - payload_builder: PayloadBuilderHandle, - /// The payload attribute builder for the engine - payload_attributes_builder: B, - /// Keep track of the Canonical chain state that isn't persisted on disk yet - canonical_in_memory_state: CanonicalInMemoryState, - /// A handle to the persistence layer - persistence_handle: PersistenceHandle, - /// The mining mode for the engine - mode: MiningMode, + /// Processes requests. + /// + /// This type is responsible for processing incoming requests. + handler: EngineApiRequestHandler>, + /// Receiver for incoming requests (from the engine API endpoint) that need to be processed. + incoming_requests: EngineMessageStream, } -impl LocalEngineService +impl LocalEngineService where N: EngineNodeTypes, - B: PayloadAttributesBuilder::PayloadAttributes>, { /// Constructor for [`LocalEngineService`]. 
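+    ///
+    /// Spawns the persistence service, the engine API tree handler and the [`LocalMiner`],
+    /// and wires them to the incoming engine message stream.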
- pub fn new( - payload_builder: PayloadBuilderHandle, - payload_attributes_builder: B, + #[allow(clippy::too_many_arguments)] + pub fn new( + consensus: Arc, + executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, + blockchain_db: BlockchainProvider2, pruner: PrunerWithFactory>, - canonical_in_memory_state: CanonicalInMemoryState, + payload_builder: PayloadBuilderHandle, + tree_config: TreeConfig, + invalid_block_hook: Box, sync_metrics_tx: MetricEventsSender, + to_engine: UnboundedSender>, + from_engine: EngineMessageStream, mode: MiningMode, - ) -> Self { + payload_attributes_builder: B, + skip_state_root_validation: bool, + enable_prefetch: bool, + enable_execution_cache: bool, + ) -> Self + where + B: PayloadAttributesBuilder<::PayloadAttributes>, + { + let chain_spec = provider.chain_spec(); + let engine_kind = + if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum }; + let persistence_handle = PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx, false); + let payload_validator = ExecutionPayloadValidator::new(chain_spec); - Self { - payload_builder, - payload_attributes_builder, - canonical_in_memory_state, + let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); + + let (to_tree_tx, from_tree) = EngineApiTreeHandler::spawn_new( + blockchain_db.clone(), + executor_factory, + consensus, + payload_validator, persistence_handle, - mode, - } - } + payload_builder.clone(), + canonical_in_memory_state, + tree_config, + invalid_block_hook, + engine_kind, + skip_state_root_validation, + enable_prefetch, + enable_execution_cache, + ); - /// Spawn the [`LocalEngineService`] on a tokio green thread. The service will poll the payload - /// builder with two varying modes, [`MiningMode::Instant`] or [`MiningMode::Interval`] - /// which will respectively either execute the block as soon as it finds a - /// transaction in the pool or build the block based on an interval. - pub fn spawn_new( - payload_builder: PayloadBuilderHandle, - payload_attributes_builder: B, - provider: ProviderFactory, - pruner: PrunerWithFactory>, - canonical_in_memory_state: CanonicalInMemoryState, - sync_metrics_tx: MetricEventsSender, - mode: MiningMode, - ) { - let engine = Self::new( - payload_builder, + let handler = EngineApiRequestHandler::new(to_tree_tx, from_tree); + + LocalMiner::spawn_new( + blockchain_db, payload_attributes_builder, - provider, - pruner, - canonical_in_memory_state, - sync_metrics_tx, + to_engine, mode, + payload_builder, ); - // Spawn the engine - tokio::spawn(engine.run()); + Self { handler, incoming_requests: from_engine } } +} - /// Runs the [`LocalEngineService`] in a loop, polling the miner and building - /// payloads. 
- async fn run(mut self) { - loop { - // Wait for the interval or the pool to receive a transaction - (&mut self.mode).await; - - // Start a new payload building job - let executed_block = self.build_and_save_payload().await; - - if executed_block.is_err() { - debug!(target: "local_engine", err = ?executed_block.unwrap_err(), "failed payload building"); - continue - } - let block = executed_block.expect("not error"); - - let res = self.update_canonical_in_memory_state(block); - if res.is_err() { - debug!(target: "local_engine", err = ?res.unwrap_err(), "failed canonical state update"); +impl Stream for LocalEngineService +where + N: EngineNodeTypes, +{ + type Item = ChainEvent; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + + if let Poll::Ready(ev) = this.handler.poll(cx) { + return match ev { + RequestHandlerEvent::HandlerEvent(ev) => match ev { + HandlerEvent::BackfillAction(_) => { + error!(target: "engine::local", "received backfill request in local engine"); + Poll::Ready(Some(ChainEvent::FatalError)) + } + HandlerEvent::Event(ev) => Poll::Ready(Some(ChainEvent::Handler(ev))), + HandlerEvent::FatalError => Poll::Ready(Some(ChainEvent::FatalError)), + }, + RequestHandlerEvent::Download(_) => { + error!(target: "engine::local", "received download request in local engine"); + Poll::Ready(Some(ChainEvent::FatalError)) + } } } - } - - /// Builds a payload by initiating a new payload job via the [`PayloadBuilderHandle`], - /// saving the execution outcome to persistence and returning the executed block. - async fn build_and_save_payload(&self) -> eyre::Result { - let payload_attributes = self.payload_attributes_builder.build()?; - let parent = self.canonical_in_memory_state.get_canonical_head().hash(); - let payload_builder_attributes = - ::PayloadBuilderAttributes::try_new( - parent, - payload_attributes, - ) - .map_err(|_| eyre::eyre!("failed to fetch payload attributes"))?; - - let payload = self - .payload_builder - .send_and_resolve_payload(payload_builder_attributes) - .await? - .await?; - - let executed_block = - payload.executed_block().ok_or_else(|| eyre!("missing executed block"))?; - let (tx, rx) = oneshot::channel(); - - let _ = self.persistence_handle.save_blocks(vec![executed_block.clone()], tx); - - // Wait for the persistence_handle to complete - let _ = rx.await?.ok_or_else(|| eyre!("missing new head"))?; - Ok(executed_block) - } - - /// Update the canonical in memory state and send notification for a new canon state to - /// all the listeners. 
- fn update_canonical_in_memory_state(&self, executed_block: ExecutedBlock) -> eyre::Result<()> { - let chain = NewCanonicalChain::Commit { new: vec![executed_block] }; - let tip = chain.tip().header.clone(); - let notification = chain.to_chain_notification(); - - // Update the tracked in-memory state with the new chain - self.canonical_in_memory_state.update_chain(chain); - self.canonical_in_memory_state.set_canonical_head(tip); - - // Sends an event to all active listeners about the new canonical chain - self.canonical_in_memory_state.notify_canon_state(notification); - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_chainspec::MAINNET; - use reth_config::PruneConfig; - use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; - use reth_ethereum_engine_primitives::EthEngineTypes; - use reth_exex_test_utils::TestNode; - use reth_node_types::NodeTypesWithDBAdapter; - use reth_payload_builder::test_utils::spawn_test_payload_service; - use reth_provider::{providers::StaticFileProvider, BlockReader, ProviderFactory}; - use reth_prune::PrunerBuilder; - use reth_transaction_pool::{ - test_utils::{testing_pool, MockTransaction}, - TransactionPool, - }; - use std::{convert::Infallible, time::Duration}; - use tokio::sync::mpsc::unbounded_channel; - - #[derive(Debug)] - struct TestPayloadAttributesBuilder; - - impl PayloadAttributesBuilder for TestPayloadAttributesBuilder { - type PayloadAttributes = alloy_rpc_types_engine::PayloadAttributes; - type Error = Infallible; - - fn build(&self) -> Result { - Ok(alloy_rpc_types_engine::PayloadAttributes { - timestamp: 0, - prev_randao: Default::default(), - suggested_fee_recipient: Default::default(), - withdrawals: None, - parent_beacon_block_root: None, - }) + // forward incoming requests to the handler + while let Poll::Ready(Some(req)) = this.incoming_requests.poll_next_unpin(cx) { + this.handler.on_event(FromEngine::Request(req.into())); } - } - - #[tokio::test] - async fn test_local_engine_service_interval() -> eyre::Result<()> { - reth_tracing::init_test_tracing(); - - // Start the provider and the pruner - let (_, static_dir_path) = create_test_static_files_dir(); - let provider = ProviderFactory::>::new( - create_test_rw_db(), - MAINNET.clone(), - StaticFileProvider::read_write(static_dir_path)?, - ); - let pruner = PrunerBuilder::new(PruneConfig::default()) - .build_with_provider_factory(provider.clone()); - - // Create an empty canonical in memory state - let canonical_in_memory_state = CanonicalInMemoryState::empty(); - - // Start the payload builder service - let payload_handle = spawn_test_payload_service::(); - - // Sync metric channel - let (sync_metrics_tx, _) = unbounded_channel(); - - // Launch the LocalEngineService in interval mode - let period = Duration::from_secs(1); - LocalEngineService::spawn_new( - payload_handle, - TestPayloadAttributesBuilder, - provider.clone(), - pruner, - canonical_in_memory_state, - sync_metrics_tx, - MiningMode::interval(period), - ); - - // Check that we have no block for now - let block = provider.block_by_number(0)?; - assert!(block.is_none()); - // Wait 4 intervals - tokio::time::sleep(2 * period).await; - - // Assert a block has been build - let block = provider.block_by_number(0)?; - assert!(block.is_some()); - - Ok(()) + Poll::Pending } +} - #[tokio::test] - async fn test_local_engine_service_instant() -> eyre::Result<()> { - reth_tracing::init_test_tracing(); - - // Start the provider and the pruner - let (_, static_dir_path) = 
create_test_static_files_dir(); - let provider = ProviderFactory::>::new( - create_test_rw_db(), - MAINNET.clone(), - StaticFileProvider::read_write(static_dir_path)?, - ); - let pruner = PrunerBuilder::new(PruneConfig::default()) - .build_with_provider_factory(provider.clone()); - - // Create an empty canonical in memory state - let canonical_in_memory_state = CanonicalInMemoryState::empty(); - - // Start the payload builder service - let payload_handle = spawn_test_payload_service::(); - - // Start a transaction pool - let pool = testing_pool(); - - // Sync metric channel - let (sync_metrics_tx, _) = unbounded_channel(); - - // Launch the LocalEngineService in instant mode - LocalEngineService::spawn_new( - payload_handle, - TestPayloadAttributesBuilder, - provider.clone(), - pruner, - canonical_in_memory_state, - sync_metrics_tx, - MiningMode::instant(pool.clone()), - ); - - // Wait for a small period to assert block building is - // triggered by adding a transaction to the pool - let period = Duration::from_millis(500); - tokio::time::sleep(period).await; - let block = provider.block_by_number(0)?; - assert!(block.is_none()); - - // Add a transaction to the pool - let transaction = MockTransaction::legacy().with_gas_price(10); - pool.add_transaction(Default::default(), transaction).await?; - - // Wait for block building - let period = Duration::from_secs(2); - tokio::time::sleep(period).await; - - // Assert a block has been build - let block = provider.block_by_number(0)?; - assert!(block.is_some()); - - Ok(()) - } - - #[tokio::test] - async fn test_canonical_chain_subscription() -> eyre::Result<()> { - reth_tracing::init_test_tracing(); - - // Start the provider and the pruner - let (_, static_dir_path) = create_test_static_files_dir(); - let provider = ProviderFactory::>::new( - create_test_rw_db(), - MAINNET.clone(), - StaticFileProvider::read_write(static_dir_path)?, - ); - let pruner = PrunerBuilder::new(PruneConfig::default()) - .build_with_provider_factory(provider.clone()); - - // Create an empty canonical in memory state - let canonical_in_memory_state = CanonicalInMemoryState::empty(); - let mut notifications = canonical_in_memory_state.subscribe_canon_state(); - - // Start the payload builder service - let payload_handle = spawn_test_payload_service::(); - - // Start a transaction pool - let pool = testing_pool(); - - // Sync metric channel - let (sync_metrics_tx, _) = unbounded_channel(); - - // Launch the LocalEngineService in instant mode - LocalEngineService::spawn_new( - payload_handle, - TestPayloadAttributesBuilder, - provider.clone(), - pruner, - canonical_in_memory_state, - sync_metrics_tx, - MiningMode::instant(pool.clone()), - ); - - // Add a transaction to the pool - let transaction = MockTransaction::legacy().with_gas_price(10); - pool.add_transaction(Default::default(), transaction).await?; - - // Check a notification is received for block 0 - let res = notifications.recv().await?; - - assert_eq!(res.tip().number, 0); - - Ok(()) +impl Debug for LocalEngineService { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("LocalEngineService").finish_non_exhaustive() } } diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 2cf1366eb0..949ebf0155 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -23,22 +23,46 @@ use serde::{de::DeserializeOwned, ser::Serialize}; /// payload job. Hence this trait is also [`PayloadTypes`]. 
 pub trait EngineTypes:
     PayloadTypes<
-        BuiltPayload: TryInto<Self::ExecutionPayloadV1>
-                          + TryInto<Self::ExecutionPayloadV2>
-                          + TryInto<Self::ExecutionPayloadV3>
-                          + TryInto<Self::ExecutionPayloadV4>,
+        BuiltPayload: TryInto<Self::ExecutionPayloadEnvelopeV1>
+                          + TryInto<Self::ExecutionPayloadEnvelopeV2>
+                          + TryInto<Self::ExecutionPayloadEnvelopeV3>
+                          + TryInto<Self::ExecutionPayloadEnvelopeV4>,
     > + DeserializeOwned
     + Serialize
     + 'static
 {
-    /// Execution Payload V1 type.
-    type ExecutionPayloadV1: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static;
-    /// Execution Payload V2 type.
-    type ExecutionPayloadV2: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static;
-    /// Execution Payload V3 type.
-    type ExecutionPayloadV3: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static;
-    /// Execution Payload V4 type.
-    type ExecutionPayloadV4: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static;
+    /// Execution Payload V1 envelope type.
+    type ExecutionPayloadEnvelopeV1: DeserializeOwned
+        + Serialize
+        + Clone
+        + Unpin
+        + Send
+        + Sync
+        + 'static;
+    /// Execution Payload V2 envelope type.
+    type ExecutionPayloadEnvelopeV2: DeserializeOwned
+        + Serialize
+        + Clone
+        + Unpin
+        + Send
+        + Sync
+        + 'static;
+    /// Execution Payload V3 envelope type.
+    type ExecutionPayloadEnvelopeV3: DeserializeOwned
+        + Serialize
+        + Clone
+        + Unpin
+        + Send
+        + Sync
+        + 'static;
+    /// Execution Payload V4 envelope type.
+    type ExecutionPayloadEnvelopeV4: DeserializeOwned
+        + Serialize
+        + Clone
+        + Unpin
+        + Send
+        + Sync
+        + 'static;
 }

 /// Type that validates the payloads sent to the engine.
diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs
index f0a371dce7..0d5c1848d5 100644
--- a/crates/engine/service/src/service.rs
+++ b/crates/engine/service/src/service.rs
@@ -31,7 +31,7 @@ use std::{
 };

 /// Alias for consensus engine stream.
-type EngineMessageStream<T> = Pin<Box<dyn Stream<Item = BeaconEngineMessage<T>> + Send + Sync>>;
+pub type EngineMessageStream<T> = Pin<Box<dyn Stream<Item = BeaconEngineMessage<T>> + Send + Sync>>;

 /// Alias for chain orchestrator.
type EngineServiceType = ChainOrchestrator< @@ -162,7 +162,8 @@ mod tests { use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::SealedHeader; use reth_provider::{ - test_utils::create_test_provider_factory_with_chain_spec, StaticFileProviderFactory, + providers::BlockchainProvider2, test_utils::create_test_provider_factory_with_chain_spec, + StaticFileProviderFactory, }; use reth_prune::Pruner; use reth_tasks::TokioTaskExecutor; diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index d5215d1d89..8569e97860 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -40,9 +40,12 @@ alloy-primitives.workspace = true alloy-eips.workspace = true alloy-rpc-types-engine.workspace = true +revm-primitives.workspace = true + # common futures.workspace = true tokio = { workspace = true, features = ["macros", "sync"] } +tokio-stream.workspace = true thiserror.workspace = true # metrics @@ -78,17 +81,27 @@ reth-chainspec.workspace = true alloy-rlp.workspace = true assert_matches.workspace = true -rand.workspace = true [features] test-utils = [ - "reth-db/test-utils", - "reth-chain-state/test-utils", - "reth-network-p2p/test-utils", - "reth-prune-types", - "reth-stages/test-utils", - "reth-static-file", - "reth-tracing" + "reth-db/test-utils", + "reth-chain-state/test-utils", + "reth-network-p2p/test-utils", + "reth-prune-types", + "reth-stages/test-utils", + "reth-static-file", + "reth-tracing", + "reth-blockchain-tree/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-stages-api/test-utils", + "reth-provider/test-utils", + "reth-trie/test-utils", + "reth-prune-types?/test-utils" ] bsc = [] \ No newline at end of file diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index c1571ed821..914121adce 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -113,9 +113,11 @@ where } // advance the downloader - if let Poll::Ready(DownloadOutcome::Blocks(blocks)) = self.downloader.poll(cx) { - // delegate the downloaded blocks to the handler - self.handler.on_event(FromEngine::DownloadedBlocks(blocks)); + if let Poll::Ready(outcome) = self.downloader.poll(cx) { + if let DownloadOutcome::Blocks(blocks) = outcome { + // delegate the downloaded blocks to the handler + self.handler.on_event(FromEngine::DownloadedBlocks(blocks)); + } continue } diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 7aaf8f72b8..991d5f196a 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -87,20 +87,22 @@ impl PersistenceService { } PersistenceAction::SaveBlocks(blocks, sender) => { let result = self.on_save_blocks(blocks)?; - if let Some(ref num_hash) = result { + let result_number = result.map(|r| r.number); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(result); + + if let Some(block_number) = result_number { // send new sync metrics based on saved blocks let _ = self .sync_metrics_tx - .send(MetricEvent::SyncHeight { height: num_hash.number }); - } - // we ignore the error because the caller may or may not care about the result - let _ = sender.send(result); - } - PersistenceAction::PruneBefore(block_num, sender) => { - let res = self.prune_before(block_num)?; + .send(MetricEvent::SyncHeight { height: 
block_number }); - // we ignore the error because the caller may or may not care about the result - let _ = sender.send(res); + if self.pruner.is_pruning_needed(block_number) { + // We log `PrunerOutput` inside the `Pruner` + let _ = self.prune_before(block_number)?; + } + } } PersistenceAction::SaveFinalizedBlock(finalized_block) => { let provider = self.provider.database_provider_rw()?; @@ -196,10 +198,6 @@ pub enum PersistenceAction { /// static files. RemoveBlocksAbove(u64, oneshot::Sender>), - /// Prune associated block data before the given block number, according to already-configured - /// prune modes. - PruneBefore(u64, oneshot::Sender), - /// Update the persisted finalized block on disk SaveFinalizedBlock(u64), @@ -306,18 +304,6 @@ impl PersistenceHandle { ) -> Result<(), SendError> { self.send_action(PersistenceAction::RemoveBlocksAbove(block_num, tx)) } - - /// Tells the persistence service to remove block data before the given hash, according to the - /// configured prune config. - /// - /// The resulting [`PrunerOutput`] is returned in the receiver end of the sender argument. - pub fn prune_before( - &self, - block_num: u64, - tx: oneshot::Sender, - ) -> Result<(), SendError> { - self.send_action(PersistenceAction::PruneBefore(block_num, tx)) - } } #[cfg(test)] diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 0d090ace99..d099cccc02 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -10,7 +10,7 @@ use alloy_primitives::{ BlockNumber, B256, U256, }; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; use dashmap::DashMap; @@ -28,7 +28,7 @@ use reth_chain_state::{ }; use reth_chainspec::EthereumHardforks; use reth_consensus::{Consensus, PostExecutionInput}; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::execute::BlockExecutorProvider; use reth_payload_builder::PayloadBuilderHandle; @@ -82,6 +82,8 @@ pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::InvalidBlockHook; +mod root; + /// Keeps track of the state of the tree. /// /// ## Invariants @@ -265,6 +267,7 @@ impl TreeState { } } } + debug!(target: "engine::tree", ?upper_bound, ?last_persisted_hash, "Removed canonical blocks from the tree"); } /// Removes all blocks that are below the finalized block, as well as removing non-canonical @@ -757,7 +760,7 @@ where fn on_new_payload( &mut self, payload: ExecutionPayload, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result, InsertBlockFatalError> { trace!(target: "engine::tree", "invoked new payload"); self.metrics.engine.new_payload_messages.increment(1); @@ -788,10 +791,7 @@ where // // This validation **MUST** be instantly run in all cases even during active sync process. 
let parent_hash = payload.parent_hash(); - let block = match self - .payload_validator - .ensure_well_formed_payload(payload, cancun_fields.into()) - { + let block = match self.payload_validator.ensure_well_formed_payload(payload, sidecar) { Ok(block) => block, Err(error) => { error!(target: "engine::tree", %error, "Invalid payload"); @@ -1008,6 +1008,7 @@ where &mut self, state: ForkchoiceState, attrs: Option, + version: EngineApiMessageVersion, ) -> ProviderResult> { trace!(target: "engine::tree", ?attrs, "invoked forkchoice update"); self.metrics.engine.forkchoice_updated_messages.increment(1); @@ -1057,7 +1058,7 @@ where // to return an error ProviderError::HeaderNotFound(state.head_block_hash.into()) })?; - let updated = self.process_payload_attributes(attr, &tip, state); + let updated = self.process_payload_attributes(attr, &tip, state, version); return Ok(TreeOutcome::new(updated)) } @@ -1077,7 +1078,7 @@ where } if let Some(attr) = attrs { - let updated = self.process_payload_attributes(attr, &tip, state); + let updated = self.process_payload_attributes(attr, &tip, state, version); return Ok(TreeOutcome::new(updated)) } @@ -1093,7 +1094,8 @@ where if self.engine_kind.is_opstack() { if let Some(attr) = attrs { debug!(target: "engine::tree", head = canonical_header.number, "handling payload attributes for canonical head"); - let updated = self.process_payload_attributes(attr, &canonical_header, state); + let updated = + self.process_payload_attributes(attr, &canonical_header, state, version); return Ok(TreeOutcome::new(updated)) } } @@ -1180,6 +1182,7 @@ where if blocks_to_persist.is_empty() { debug!(target: "engine::tree", "Returned empty set of blocks to persist"); } else { + debug!(target: "engine::tree", blocks = ?blocks_to_persist.iter().map(|block| block.block.num_hash()).collect::>(), "Persisting blocks"); let (tx, rx) = oneshot::channel(); let _ = self.persistence.save_blocks(blocks_to_persist, tx); self.persistence_state.start(rx); @@ -1208,7 +1211,7 @@ where return Ok(()) }; - trace!(target: "engine::tree", ?last_persisted_block_hash, ?last_persisted_block_number, "Finished persisting, calling finish"); + debug!(target: "engine::tree", ?last_persisted_block_hash, ?last_persisted_block_number, "Finished persisting, calling finish"); self.persistence_state .finish(last_persisted_block_hash, last_persisted_block_number); self.on_new_persisted_block()?; @@ -1244,8 +1247,14 @@ where } EngineApiRequest::Beacon(request) => { match request { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { - let mut output = self.on_forkchoice_updated(state, payload_attrs); + BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + } => { + let mut output = + self.on_forkchoice_updated(state, payload_attrs, version); if let Ok(res) = &mut output { // track last received forkchoice state @@ -1273,8 +1282,8 @@ where error!(target: "engine::tree", "Failed to send event: {err:?}"); } } - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { - let output = self.on_new_payload(payload, cancun_fields); + BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { + let output = self.on_new_payload(payload, sidecar); if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| { reth_beacon_consensus::BeaconOnNewPayloadError::Internal( Box::new(e), @@ -1625,7 +1634,7 @@ where /// Returns an error if we failed to fetch the state from the database. 
fn state_provider(&self, hash: B256) -> ProviderResult> { if let Some((historical, blocks)) = self.state.tree_state.blocks_by_hash(hash) { - trace!(target: "engine::tree", %hash, "found canonical state for block in memory"); + debug!(target: "engine::tree", %hash, %historical, "found canonical state for block in memory"); // the block leads back to the canonical chain let historical = self.provider.state_by_block_hash(historical)?; if self.enable_execution_cache { @@ -1639,13 +1648,13 @@ where // the hash could belong to an unknown block or a persisted block if let Some(header) = self.provider.header(&hash)? { - trace!(target: "engine::tree", %hash, number = %header.number, "found canonical state for block in database"); + debug!(target: "engine::tree", %hash, number = %header.number, "found canonical state for block in database"); // the block is known and persisted let historical = self.provider.state_by_block_hash(hash)?; return Ok(Some(historical)) } - trace!(target: "engine::tree", %hash, "no canonical state found for block"); + debug!(target: "engine::tree", %hash, "no canonical state found for block"); Ok(None) } @@ -2175,7 +2184,8 @@ where &mut self, block: SealedBlockWithSenders, ) -> Result { - debug!(target: "engine::tree", block=?block.num_hash(), "Inserting new block into tree"); + debug!(target: "engine::tree", block=?block.num_hash(), parent = ?block.parent_hash, state_root = ?block.state_root, "Inserting new block into tree"); + if self.block_by_hash(block.hash())?.is_some() { return Ok(InsertPayloadOk2::AlreadySeen(BlockStatus2::Valid)) } @@ -2265,7 +2275,7 @@ where let hashed_state = HashedPostState::from_bundle_state(&output.state.state); let mut trie_output: TrieUpdates = TrieUpdates::default(); - trace!(target: "engine::tree", block=?BlockNumHash::new(block_number, block_hash), "Calculating block state root"); + trace!(target: "engine::tree", block=?sealed_block.num_hash(), "Calculating block state root"); if !self.skip_state_root_validation { let root_time = Instant::now(); let mut state_root_result = None; @@ -2322,7 +2332,7 @@ where self.metrics .block_validation .record_state_root(&trie_output, root_elapsed.as_secs_f64()); - debug!(target: "engine::tree", ?root_elapsed, ?block_number, "Calculated state root"); + debug!(target: "engine::tree", ?root_elapsed, block=?sealed_block.num_hash(), "Calculated state root"); } let executed = ExecutedBlock { @@ -2373,6 +2383,7 @@ where let mut input = TrieInput::default(); if let Some((historical, blocks)) = self.state.tree_state.blocks_by_hash(parent_hash) { + debug!(target: "engine::tree", %parent_hash, %historical, "Calculating state root in parallel, parent found in memory"); // Retrieve revert state for historical block. let revert_state = consistent_view.revert_state(historical)?; input.append(revert_state); @@ -2383,6 +2394,7 @@ where } } else { // The block attaches to canonical persisted parent. + debug!(target: "engine::tree", %parent_hash, "Calculating state root in parallel, parent found in disk"); let revert_state = consistent_view.revert_state(parent_hash)?; input.append(revert_state); } @@ -2565,6 +2577,7 @@ where attrs: T::PayloadAttributes, head: &Header, state: ForkchoiceState, + version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp // of a block referenced by forkchoiceState.headBlockHash. 
If this condition isn't held
@@ -2582,6 +2595,7 @@ where
             match <T::PayloadBuilderAttributes as PayloadBuilderAttributes>::try_new(
                 state.head_block_hash,
                 attrs,
+                version as u8,
             ) {
                 Ok(attributes) => {
                     // send the payload to the builder and return the receiver for the pending payload
@@ -2681,6 +2695,7 @@ mod tests {
     use crate::persistence::PersistenceAction;
     use alloy_primitives::{Bytes, Sealable};
     use alloy_rlp::Decodable;
+    use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar};
     use assert_matches::assert_matches;
     use reth_beacon_consensus::{EthBeaconConsensus, ForkchoiceStatus};
     use reth_chain_state::{test_utils::TestBlockBuilder, BlockState};
@@ -2918,6 +2933,7 @@ mod tests {
                     state: fcu_state,
                     payload_attrs: None,
                     tx,
+                    version: EngineApiMessageVersion::default(),
                 }
                 .into(),
             ))
@@ -2961,7 +2977,7 @@ mod tests {
             self.tree
                 .on_new_payload(
                     payload.into(),
-                    Some(CancunPayloadFields {
+                    ExecutionPayloadSidecar::v3(CancunPayloadFields {
                         parent_beacon_block_root: block.parent_beacon_block_root.unwrap(),
                         versioned_hashes: vec![],
                     }),
@@ -3207,6 +3223,7 @@ mod tests {
                 },
                 payload_attrs: None,
                 tx,
+                version: EngineApiMessageVersion::default(),
             }
             .into(),
         ))
@@ -3227,7 +3244,10 @@ mod tests {
         let mut test_harness = TestHarness::new(HOLESKY.clone());

-        let outcome = test_harness.tree.on_new_payload(payload.into(), None).unwrap();
+        let outcome = test_harness
+            .tree
+            .on_new_payload(payload.into(), ExecutionPayloadSidecar::none())
+            .unwrap();
         assert!(outcome.outcome.is_syncing());

         // ensure block is buffered
@@ -3271,7 +3291,7 @@ mod tests {
             .on_engine_message(FromEngine::Request(
                 BeaconEngineMessage::NewPayload {
                     payload: payload.clone().into(),
-                    cancun_fields: None,
+                    sidecar: ExecutionPayloadSidecar::none(),
                     tx,
                 }
                 .into(),
diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs
new file mode 100644
index 0000000000..48b2eccdf1
--- /dev/null
+++ b/crates/engine/tree/src/tree/root.rs
@@ -0,0 +1,60 @@
+//! State root task related functionality.
+
+use reth_provider::providers::ConsistentDbView;
+use reth_trie::{updates::TrieUpdates, TrieInput};
+use reth_trie_parallel::parallel_root::ParallelStateRootError;
+use revm_primitives::{EvmState, B256};
+use std::{
+    future::Future,
+    pin::Pin,
+    sync::Arc,
+    task::{Context, Poll},
+};
+use tokio_stream::wrappers::UnboundedReceiverStream;
+
+/// Standalone task that receives a transaction state stream and updates relevant
+/// data structures to calculate state root.
+///
+/// It is responsible for initializing a blinded sparse trie and subscribing to the
+/// transaction state stream. As it receives transaction execution results, it
+/// fetches the proofs for relevant accounts from the database and reveals them
+/// to the tree.
+/// Then it updates relevant leaves according to the result of the transaction.
+#[allow(dead_code)]
+pub(crate) struct StateRootTask<Factory> {
+    /// View over the state in the database.
+    consistent_view: ConsistentDbView<Factory>,
+    /// Incoming state updates.
+    state_stream: UnboundedReceiverStream<EvmState>,
+    /// Latest trie input.
+    input: Arc<TrieInput>,
+}
+
+#[allow(dead_code)]
+impl<Factory> StateRootTask<Factory> {
+    /// Creates a new `StateRootTask`.
+    pub(crate) const fn new(
+        consistent_view: ConsistentDbView<Factory>,
+        input: Arc<TrieInput>,
+        state_stream: UnboundedReceiverStream<EvmState>,
+    ) -> Self {
+        Self { consistent_view, state_stream, input }
+    }
+
+    /// Handles state updates.
+    pub(crate) fn on_state_update(&self, _update: EvmState) {
+        // TODO: calculate hashed state update and dispatch proof gathering for it.
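+        // Sketch of the intended flow (not yet implemented): hash the changed accounts
+        // and storage slots, fetch proofs for them through `consistent_view`, reveal the
+        // proof nodes to the sparse trie, then apply the updated leaf values.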
+ } +} + +impl Future for StateRootTask { + type Output = Result<(B256, TrieUpdates), ParallelStateRootError>; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + // TODO: + // * poll incoming state updates stream + // * keep track of proof calculation + // * keep track of intermediate root computation + Poll::Pending + } +} diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index 20a0acb8d4..07aa40165e 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -27,8 +27,10 @@ revm-primitives.workspace = true reth-trie.workspace = true # alloy +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true # async tokio = { workspace = true, default-features = false } @@ -49,5 +51,8 @@ tracing.workspace = true [features] optimism = [ - "reth-beacon-consensus/optimism", + "reth-beacon-consensus/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "revm-primitives/optimism" ] diff --git a/crates/engine/util/src/engine_store.rs b/crates/engine/util/src/engine_store.rs index 1f34451996..6b584f0c1f 100644 --- a/crates/engine/util/src/engine_store.rs +++ b/crates/engine/util/src/engine_store.rs @@ -1,6 +1,6 @@ //! Stores engine API messages to disk for later inspection and replay. -use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayload, ForkchoiceState}; +use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState}; use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconEngineMessage; use reth_engine_primitives::EngineTypes; @@ -30,8 +30,9 @@ pub enum StoredEngineApiMessage { NewPayload { /// The [`ExecutionPayload`] sent in the persisted call. payload: ExecutionPayload, - /// The Cancun-specific fields sent in the persisted call, if any. - cancun_fields: Option, + /// The execution payload sidecar with additional version-specific fields received by + /// engine API. + sidecar: ExecutionPayloadSidecar, }, } @@ -63,7 +64,12 @@ impl EngineMessageStore { fs::create_dir_all(&self.path)?; // ensure that store path had been created let timestamp = received_at.duration_since(SystemTime::UNIX_EPOCH).unwrap().as_millis(); match msg { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx: _tx } => { + BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx: _tx, + version: _version, + } => { let filename = format!("{}-fcu-{}.json", timestamp, state.head_block_hash); fs::write( self.path.join(filename), @@ -73,14 +79,14 @@ impl EngineMessageStore { })?, )?; } - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx: _tx } => { + BeaconEngineMessage::NewPayload { payload, sidecar, tx: _tx } => { let filename = format!("{}-new_payload-{}.json", timestamp, payload.block_hash()); fs::write( self.path.join(filename), serde_json::to_vec( &StoredEngineApiMessage::::NewPayload { payload: payload.clone(), - cancun_fields: cancun_fields.clone(), + sidecar: sidecar.clone(), }, )?, )?; diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index dd6e1e595b..61bb8d70f5 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -1,23 +1,26 @@ //! Stream wrapper that simulates reorgs. 
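Context for the sidecar plumbing below: callers now hand the engine an `ExecutionPayloadSidecar` instead of optional Cancun fields. A sketch of constructing one, mirroring the miner's `advance` above; `chain_spec` and `block` are assumed bindings.

    let sidecar = if chain_spec.is_cancun_active_at_timestamp(block.timestamp) {
        ExecutionPayloadSidecar::v3(CancunPayloadFields {
            parent_beacon_block_root: block.parent_beacon_block_root.unwrap(),
            versioned_hashes: block.blob_versioned_hashes().into_iter().copied().collect(),
        })
    } else {
        // Pre-Cancun payloads carry no sidecar data.
        ExecutionPayloadSidecar::none()
    };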
+use alloy_consensus::Transaction; use alloy_primitives::U256; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, + CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, }; use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; use itertools::Either; use reth_beacon_consensus::{BeaconEngineMessage, BeaconOnNewPayloadError, OnForkChoiceUpdated}; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_errors::{BlockExecutionError, BlockValidationError, RethError, RethResult}; use reth_ethereum_forks::EthereumHardforks; -use reth_evm::{system_calls::SystemCaller, ConfigureEvm}; +use reth_evm::{ + state_change::post_block_withdrawals_balance_increments, system_calls::SystemCaller, + ConfigureEvm, +}; use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{proofs, Block, BlockBody, Header, Receipt, Receipts}; use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, db::{states::bundle_state::BundleRetention, State}, - state_change::post_block_withdrawals_balance_increments, DatabaseCommit, }; use reth_rpc_types_compat::engine::payload::block_to_payload; @@ -146,7 +149,7 @@ where let next = ready!(this.stream.poll_next_unpin(cx)); let item = match (next, &this.last_forkchoice_state) { ( - Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }), + Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }), Some(last_forkchoice_state), ) if this.forkchoice_states_forwarded > this.frequency && // Only enter reorg state if new payload attaches to current head. @@ -161,13 +164,13 @@ where // forkchoice state. We will rely on CL to reorg us back to canonical chain. // TODO: This is an expensive blocking operation, ideally it's spawned as a task // so that the stream could yield the control back. - let (reorg_payload, reorg_cancun_fields) = match create_reorg_head( + let (reorg_payload, reorg_sidecar) = match create_reorg_head( this.provider, this.evm_config, this.payload_validator, *this.depth, payload.clone(), - cancun_fields.clone(), + sidecar.clone(), ) { Ok(result) => result, Err(error) => { @@ -176,7 +179,7 @@ where // the next one return Poll::Ready(Some(BeaconEngineMessage::NewPayload { payload, - cancun_fields, + sidecar, tx, })) } @@ -196,11 +199,11 @@ where let queue = VecDeque::from([ // Current payload - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }, + BeaconEngineMessage::NewPayload { payload, sidecar, tx }, // Reorg payload BeaconEngineMessage::NewPayload { payload: reorg_payload, - cancun_fields: reorg_cancun_fields, + sidecar: reorg_sidecar, tx: reorg_payload_tx, }, // Reorg forkchoice state @@ -208,18 +211,32 @@ where state: reorg_forkchoice_state, payload_attrs: None, tx: reorg_fcu_tx, + version: EngineApiMessageVersion::default(), }, ]); *this.state = EngineReorgState::Reorg { queue }; continue } - (Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }), _) => { + ( + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }), + _, + ) => { // Record last forkchoice state forwarded to the engine. // We do not care if it's valid since engine should be able to handle // reorgs that rely on invalid forkchoice state. 
*this.last_forkchoice_state = Some(state); *this.forkchoice_states_forwarded += 1; - Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }) } (item, _) => item, }; @@ -234,8 +251,8 @@ fn create_reorg_head( payload_validator: &ExecutionPayloadValidator, mut depth: usize, next_payload: ExecutionPayload, - next_cancun_fields: Option, -) -> RethResult<(ExecutionPayload, Option)> + next_sidecar: ExecutionPayloadSidecar, +) -> RethResult<(ExecutionPayload, ExecutionPayloadSidecar)> where Provider: BlockReader + StateProviderFactory, Evm: ConfigureEvm
, @@ -245,7 +262,7 @@ where // Ensure next payload is valid. let next_block = payload_validator - .ensure_well_formed_payload(next_payload, next_cancun_fields.into()) + .ensure_well_formed_payload(next_payload, next_sidecar) .map_err(RethError::msg)?; // Fetch reorg target block depending on its depth and its parent. @@ -286,7 +303,7 @@ where let mut evm = evm_config.evm_with_env(&mut state, env); // apply eip-4788 pre block contract call - let mut system_caller = SystemCaller::new(evm_config, chain_spec); + let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); system_caller.apply_beacon_root_contract_call( reorg_target.timestamp, @@ -400,7 +417,7 @@ where transactions_root: proofs::calculate_transaction_root(&transactions), receipts_root: outcome.receipts_root_slow(reorg_target.header.number).unwrap(), logs_bloom: outcome.block_logs_bloom(reorg_target.header.number).unwrap(), - requests_root: None, // TODO(prague) + requests_hash: None, // TODO(prague) gas_used: cumulative_gas_used, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), @@ -411,16 +428,22 @@ where ommers: reorg_target.body.ommers, withdrawals: reorg_target.body.withdrawals, sidecars: reorg_target.body.sidecars, - requests: None, // TODO(prague) }, } .seal_slow(); Ok(( block_to_payload(reorg_block), + // todo(onbjerg): how do we support execution requests? reorg_target .header .parent_beacon_block_root - .map(|root| CancunPayloadFields { parent_beacon_block_root: root, versioned_hashes }), + .map(|root| { + ExecutionPayloadSidecar::v3(CancunPayloadFields { + parent_beacon_block_root: root, + versioned_hashes, + }) + }) + .unwrap_or_else(ExecutionPayloadSidecar::none), )) } diff --git a/crates/engine/util/src/skip_fcu.rs b/crates/engine/util/src/skip_fcu.rs index e110cecedc..adadfb595f 100644 --- a/crates/engine/util/src/skip_fcu.rs +++ b/crates/engine/util/src/skip_fcu.rs @@ -45,7 +45,12 @@ where loop { let next = ready!(this.stream.poll_next_unpin(cx)); let item = match next { - Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) => { + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }) => { if this.skipped < this.threshold { *this.skipped += 1; tracing::warn!(target: "engine::stream::skip_fcu", ?state, ?payload_attrs, threshold=this.threshold, skipped=this.skipped, "Skipping FCU"); @@ -53,7 +58,12 @@ where continue } *this.skipped = 0; - Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }) } next => next, }; diff --git a/crates/engine/util/src/skip_new_payload.rs b/crates/engine/util/src/skip_new_payload.rs index d2450711ec..16f2e98197 100644 --- a/crates/engine/util/src/skip_new_payload.rs +++ b/crates/engine/util/src/skip_new_payload.rs @@ -41,14 +41,14 @@ where loop { let next = ready!(this.stream.poll_next_unpin(cx)); let item = match next { - Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) => { + Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }) => { if this.skipped < this.threshold { *this.skipped += 1; tracing::warn!( target: "engine::stream::skip_new_payload", block_number = payload.block_number(), block_hash = %payload.block_hash(), - ?cancun_fields, + ?sidecar, threshold=this.threshold, skipped=this.skipped, "Skipping new payload" ); @@ -56,7 +56,7 @@ where continue } *this.skipped = 0; - Some(BeaconEngineMessage::NewPayload { 
payload, cancun_fields, tx }) + Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }) } next => next, }; diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index 7eb58b15fe..9f7ce7ee8f 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -35,11 +35,31 @@ auto_impl.workspace = true [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } -proptest.workspace = true +alloy-consensus.workspace = true [features] default = ["std", "serde", "rustc-hash"] -arbitrary = ["dep:arbitrary", "dep:proptest", "dep:proptest-derive"] -serde = ["dep:serde"] -std = ["thiserror-no-std/std", "rustc-hash/std"] +arbitrary = [ + "dep:arbitrary", + "dep:proptest", + "dep:proptest-derive", + "alloy-chains/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary" +] +serde = [ + "dep:serde", + "alloy-chains/serde", + "alloy-consensus/serde", + "alloy-primitives/serde" +] +std = [ + "alloy-chains/std", + "alloy-primitives/std", + "thiserror-no-std/std", + "rustc-hash/std", + "alloy-consensus/std", + "once_cell/std", + "serde?/std" +] rustc-hash = ["dep:rustc-hash"] diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index 0faea9e280..0de319e26e 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -262,32 +262,24 @@ impl ForkFilter { } fn set_head_priv(&mut self, head: Head) -> Option { - let recompute_cache = { - let head_in_past = match self.cache.epoch_start { - ForkFilterKey::Block(epoch_start_block) => head.number < epoch_start_block, - ForkFilterKey::Time(epoch_start_time) => head.timestamp < epoch_start_time, - }; - let head_in_future = match self.cache.epoch_end { - Some(ForkFilterKey::Block(epoch_end_block)) => head.number >= epoch_end_block, - Some(ForkFilterKey::Time(epoch_end_time)) => head.timestamp >= epoch_end_time, - None => false, - }; - - head_in_past || head_in_future + let head_in_past = match self.cache.epoch_start { + ForkFilterKey::Block(epoch_start_block) => head.number < epoch_start_block, + ForkFilterKey::Time(epoch_start_time) => head.timestamp < epoch_start_time, }; - - // recompute the cache - let transition = if recompute_cache { - let past = self.current(); - self.cache = Cache::compute_cache(&self.forks, head); - Some(ForkTransition { current: self.current(), past }) - } else { - None + let head_in_future = match self.cache.epoch_end { + Some(ForkFilterKey::Block(epoch_end_block)) => head.number >= epoch_end_block, + Some(ForkFilterKey::Time(epoch_end_time)) => head.timestamp >= epoch_end_time, + None => false, }; self.head = head; - transition + // Recompute the cache if the head is in the past or future epoch. + (head_in_past || head_in_future).then(|| { + let past = self.current(); + self.cache = Cache::compute_cache(&self.forks, head); + ForkTransition { current: self.current(), past } + }) } /// Set the current head. @@ -458,15 +450,12 @@ impl Cache { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::b256; - - const GENESIS_HASH: B256 = - b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); + use alloy_consensus::constants::MAINNET_GENESIS_HASH; // EIP test vectors. 
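As an aside, the `set_head_priv` refactor above keeps the externally visible contract: a `ForkTransition` is only surfaced when the new head leaves the cached epoch. A minimal sketch of that behavior, assuming the public `set_head` wrapper that forwards to `set_head_priv`:

    let mut filter = ForkFilter::new(
        Head { number: 0, ..Default::default() },
        MAINNET_GENESIS_HASH,
        0,
        vec![ForkFilterKey::Block(1_150_000)],
    );
    // A head inside the current epoch leaves the cache untouched: no transition.
    assert!(filter.set_head(Head { number: 42, ..Default::default() }).is_none());
    // Crossing the epoch boundary recomputes the cache and reports the transition.
    assert!(filter.set_head(Head { number: 1_150_000, ..Default::default() }).is_some());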
#[test] fn forkhash() { - let mut fork_hash = ForkHash::from(GENESIS_HASH); + let mut fork_hash = ForkHash::from(MAINNET_GENESIS_HASH); assert_eq!(fork_hash.0, hex!("fc64ec04")); fork_hash += 1_150_000u64; @@ -480,7 +469,7 @@ mod tests { fn compatibility_check() { let mut filter = ForkFilter::new( Head { number: 0, ..Default::default() }, - GENESIS_HASH, + MAINNET_GENESIS_HASH, 0, vec![ ForkFilterKey::Block(1_150_000), @@ -739,7 +728,7 @@ mod tests { let mut fork_filter = ForkFilter::new( Head { number: 0, ..Default::default() }, - GENESIS_HASH, + MAINNET_GENESIS_HASH, 0, vec![ForkFilterKey::Block(b1), ForkFilterKey::Block(b2)], ); diff --git a/crates/ethereum-forks/src/hardfork/dev.rs b/crates/ethereum-forks/src/hardfork/dev.rs index 87abd68f23..068e290709 100644 --- a/crates/ethereum-forks/src/hardfork/dev.rs +++ b/crates/ethereum-forks/src/hardfork/dev.rs @@ -1,12 +1,17 @@ use alloc::vec; use alloy_primitives::U256; -use once_cell::sync::Lazy; + +use once_cell as _; +#[cfg(not(feature = "std"))] +use once_cell::sync::Lazy as LazyLock; +#[cfg(feature = "std")] +use std::sync::LazyLock; use crate::{ChainHardforks, EthereumHardfork, ForkCondition}; /// Dev hardforks -pub static DEV_HARDFORKS: Lazy<ChainHardforks> = Lazy::new(|| { +pub static DEV_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| { ChainHardforks::new(vec![ (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), diff --git a/crates/ethereum-forks/src/hardfork/ethereum.rs b/crates/ethereum-forks/src/hardfork/ethereum.rs index 3d85b54a96..4e13b00178 100644 --- a/crates/ethereum-forks/src/hardfork/ethereum.rs +++ b/crates/ethereum-forks/src/hardfork/ethereum.rs @@ -49,6 +49,8 @@ hardfork!( Cancun, /// Prague: Prague, + /// Osaka: + Osaka, } ); diff --git a/crates/ethereum-forks/src/hardforks/ethereum.rs b/crates/ethereum-forks/src/hardforks/ethereum.rs index 3069367158..086d2d3b46 100644 --- a/crates/ethereum-forks/src/hardforks/ethereum.rs +++ b/crates/ethereum-forks/src/hardforks/ethereum.rs @@ -21,6 +21,11 @@ pub trait EthereumHardforks: Hardforks { self.is_fork_active_at_timestamp(EthereumHardfork::Prague, timestamp) } + /// Convenience method to check if [`EthereumHardfork::Osaka`] is active at a given timestamp. + fn is_osaka_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(EthereumHardfork::Osaka, timestamp) + } + /// Convenience method to check if [`EthereumHardfork::Byzantium`] is active at a given block /// number.
fn is_byzantium_active_at_block(&self, block_number: u64) -> bool { diff --git a/crates/ethereum/cli/src/chainspec.rs b/crates/ethereum/cli/src/chainspec.rs index cbcce9f69f..a60d701794 100644 --- a/crates/ethereum/cli/src/chainspec.rs +++ b/crates/ethereum/cli/src/chainspec.rs @@ -89,7 +89,8 @@ mod tests { "terminalTotalDifficulty": 0, "shanghaiTime": 0, "cancunTime": 0, - "pragueTime": 0 + "pragueTime": 0, + "osakaTime": 0 } }"#; @@ -97,5 +98,6 @@ mod tests { assert!(spec.is_shanghai_active_at_timestamp(0)); assert!(spec.is_cancun_active_at_timestamp(0)); assert!(spec.is_prague_active_at_timestamp(0)); + assert!(spec.is_osaka_active_at_timestamp(0)); } } diff --git a/crates/ethereum/consensus/Cargo.toml b/crates/ethereum/consensus/Cargo.toml index 02d217b63b..bace4195ca 100644 --- a/crates/ethereum/consensus/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml @@ -18,6 +18,8 @@ reth-primitives.workspace = true reth-consensus.workspace = true # alloy +alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true tracing.workspace = true diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index e74f3498fa..07c2a71e8c 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,6 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; @@ -19,12 +20,11 @@ use reth_consensus_common::validation::{ }; use reth_primitives::{ constants::MINIMUM_GAS_LIMIT, BlockWithSenders, Header, SealedBlock, SealedHeader, - EMPTY_OMMER_ROOT_HASH, }; use std::{fmt::Debug, sync::Arc, time::SystemTime}; /// The bound divisor of the gas limit, used in update calculations. -const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024; +pub const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024; mod validation; pub use validation::validate_block_post_execution; @@ -32,7 +32,7 @@ pub use validation::validate_block_post_execution; /// Ethereum beacon consensus /// /// This consensus engine does basic checks as outlined in the execution specs. 
-#[derive(Debug)] +#[derive(Debug, Clone)] pub struct EthBeaconConsensus<ChainSpec> { /// Configuration chain_spec: Arc<ChainSpec>, } @@ -121,11 +121,11 @@ impl Consensu } if self.chain_spec.is_prague_active_at_timestamp(header.timestamp) { - if header.requests_root.is_none() { - return Err(ConsensusError::RequestsRootMissing) + if header.requests_hash.is_none() { + return Err(ConsensusError::RequestsHashMissing) } - } else if header.requests_root.is_some() { - return Err(ConsensusError::RequestsRootUnexpected) + } else if header.requests_hash.is_some() { + return Err(ConsensusError::RequestsHashUnexpected) } Ok(()) diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index e510a91ab9..f990ecc57d 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -1,7 +1,8 @@ +use alloy_eips::eip7685::Requests; use alloy_primitives::{Bloom, B256}; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; -use reth_primitives::{gas_spent_by_transactions, BlockWithSenders, GotExpected, Receipt, Request}; +use reth_primitives::{gas_spent_by_transactions, BlockWithSenders, GotExpected, Receipt}; /// Validate a block with regard to execution results: /// @@ -11,7 +12,7 @@ pub fn validate_block_post_execution<ChainSpec: EthereumHardforks>( block: &BlockWithSenders, chain_spec: &ChainSpec, receipts: &[Receipt], - requests: &[Request], + requests: &Requests, ) -> Result<(), ConsensusError> { // Check if gas used matches the value set in header. let cumulative_gas_used = @@ -36,15 +37,15 @@ pub fn validate_block_post_execution<ChainSpec: EthereumHardforks>( } } - // Validate that the header requests root matches the calculated requests root + // Validate that the header requests hash matches the calculated requests hash if chain_spec.is_prague_active_at_timestamp(block.timestamp) { - let Some(header_requests_root) = block.header.requests_root else { - return Err(ConsensusError::RequestsRootMissing) + let Some(header_requests_hash) = block.header.requests_hash else { + return Err(ConsensusError::RequestsHashMissing) }; - let requests_root = reth_primitives::proofs::calculate_requests_root(requests); - if requests_root != header_requests_root { - return Err(ConsensusError::BodyRequestsRootDiff( - GotExpected::new(requests_root, header_requests_root).into(), + let requests_hash = requests.requests_hash(); + if requests_hash != header_requests_hash { + return Err(ConsensusError::BodyRequestsHashDiff( + GotExpected::new(requests_hash, header_requests_hash).into(), )) } } diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 034a8c6bff..5addf2a18c 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -43,10 +43,10 @@ where + TryInto<ExecutionPayloadEnvelopeV3> + TryInto<ExecutionPayloadEnvelopeV4>, { - type ExecutionPayloadV1 = ExecutionPayloadV1; - type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; - type ExecutionPayloadV3 = ExecutionPayloadEnvelopeV3; - type ExecutionPayloadV4 = ExecutionPayloadEnvelopeV4; + type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; + type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; + type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; + type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; } /// A default payload type for [`EthEngineTypes`] diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index ae370fdb9d..ed377d003d 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ 
b/crates/ethereum/engine-primitives/src/payload.rs @@ -1,6 +1,6 @@ //! Contains types required for building a payload. -use alloy_eips::eip4844::BlobTransactionSidecar; +use alloy_eips::{eip4844::BlobTransactionSidecar, eip7685::Requests}; use alloy_primitives::{Address, B256, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ @@ -11,8 +11,7 @@ use reth_chain_state::ExecutedBlock; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives::{SealedBlock, Withdrawals}; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, - convert_block_to_payload_field_v2, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; use std::convert::Infallible; @@ -34,19 +33,24 @@ pub struct EthBuiltPayload { /// The blobs, proofs, and commitments in the block. If the block is pre-cancun, this will be /// empty. pub(crate) sidecars: Vec<BlobTransactionSidecar>, + /// The requests of the payload + pub(crate) requests: Option<Requests>, } // === impl BuiltPayload === impl EthBuiltPayload { - /// Initializes the payload with the given initial block. + /// Initializes the payload with the given initial block + /// + /// Caution: This does not set any [`BlobTransactionSidecar`]. pub const fn new( id: PayloadId, block: SealedBlock, fees: U256, executed_block: Option<ExecutedBlock>, + requests: Option<Requests>, ) -> Self { - Self { id, block, executed_block, fees, sidecars: Vec::new() } + Self { id, block, executed_block, fees, sidecars: Vec::new(), requests } } /// Returns the identifier of the payload. @@ -70,9 +74,18 @@ impl EthBuiltPayload { } /// Adds sidecars to the payload. - pub fn extend_sidecars(&mut self, sidecars: Vec<BlobTransactionSidecar>) { + pub fn extend_sidecars(&mut self, sidecars: impl IntoIterator<Item = BlobTransactionSidecar>) { self.sidecars.extend(sidecars) } + + /// Same as [`Self::extend_sidecars`] but returns the type again. + pub fn with_sidecars( + mut self, + sidecars: impl IntoIterator<Item = BlobTransactionSidecar>, + ) -> Self { + self.extend_sidecars(sidecars); + self + } } impl BuiltPayload for EthBuiltPayload { @@ -87,6 +100,10 @@ impl BuiltPayload for EthBuiltPayload { fn executed_block(&self) -> Option<ExecutedBlock> { self.executed_block.clone() } + + fn requests(&self) -> Option<Requests> { + self.requests.clone() + } } impl BuiltPayload for &EthBuiltPayload { @@ -101,6 +118,10 @@ impl BuiltPayload for &EthBuiltPayload { fn executed_block(&self) -> Option<ExecutedBlock> { self.executed_block.clone() } + + fn requests(&self) -> Option<Requests> { + self.requests.clone() + } } // V1 engine_getPayloadV1 response @@ -135,17 +156,17 @@ impl From<EthBuiltPayload> for ExecutionPayloadEnvelopeV3 { // Spec: // should_override_builder: false, - blobs_bundle: sidecars.into_iter().map(Into::into).collect::<Vec<_>>().into(), + blobs_bundle: sidecars.into(), } } } impl From<EthBuiltPayload> for ExecutionPayloadEnvelopeV4 { fn from(value: EthBuiltPayload) -> Self { - let EthBuiltPayload { block, fees, sidecars, .. } = value; + let EthBuiltPayload { block, fees, sidecars, requests, .. } = value; Self { - execution_payload: block_to_payload_v4(block), + execution_payload: block_to_payload_v3(block), block_value: fees, // From the engine API spec: // @@ -157,12 +178,13 @@ impl From<EthBuiltPayload> for ExecutionPayloadEnvelopeV4 { // should_override_builder: false, blobs_bundle: sidecars.into_iter().map(Into::into).collect::<Vec<_>>().into(), + execution_requests: requests.unwrap_or_default().take(), } } } /// Container type for all components required to build a payload.
-#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct EthPayloadBuilderAttributes { /// Id of the payload pub id: PayloadId, @@ -215,7 +237,11 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { /// Creates a new payload builder for the given parent block and the attributes. /// /// Derives the unique [`PayloadId`] for the given parent and attributes - fn try_new(parent: B256, attributes: PayloadAttributes) -> Result { + fn try_new( + parent: B256, + attributes: PayloadAttributes, + _version: u8, + ) -> Result { Ok(Self::new(parent, attributes)) } diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index 4b06c682f0..2533bd4a47 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -18,8 +18,7 @@ reth-evm.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } reth-revm.workspace = true reth-ethereum-consensus.workspace = true -reth-prune-types.workspace = true -reth-execution-types.workspace = true +reth-consensus.workspace = true # Ethereum revm-primitives.workspace = true @@ -28,6 +27,7 @@ revm-primitives.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true alloy-sol-types.workspace = true +alloy-consensus.workspace = true # misc tracing.workspace = true @@ -37,13 +37,24 @@ tokio = { workspace = true, features = ["sync", "time"] } [dev-dependencies] reth-testing-utils.workspace = true +reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-execution-types.workspace = true secp256k1.workspace = true serde_json.workspace = true alloy-genesis.workspace = true -alloy-consensus.workspace = true [features] default = ["std"] -std = [] +std = [ + "reth-consensus/std", + "reth-primitives/std", + "reth-revm/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "revm-primitives/std", + "secp256k1/std" +] diff --git a/crates/ethereum/evm/src/config.rs b/crates/ethereum/evm/src/config.rs index e5253307b3..9d6b6d8796 100644 --- a/crates/ethereum/evm/src/config.rs +++ b/crates/ethereum/evm/src/config.rs @@ -11,7 +11,9 @@ pub fn revm_spec_by_timestamp_after_merge( chain_spec: &ChainSpec, timestamp: u64, ) -> revm_primitives::SpecId { - if chain_spec.is_prague_active_at_timestamp(timestamp) { + if chain_spec.is_osaka_active_at_timestamp(timestamp) { + revm_primitives::OSAKA + } else if chain_spec.is_prague_active_at_timestamp(timestamp) { revm_primitives::PRAGUE } else if chain_spec.is_cancun_active_at_timestamp(timestamp) { revm_primitives::CANCUN diff --git a/crates/ethereum/evm/src/eip6110.rs b/crates/ethereum/evm/src/eip6110.rs index e78becd960..d570020819 100644 --- a/crates/ethereum/evm/src/eip6110.rs +++ b/crates/ethereum/evm/src/eip6110.rs @@ -1,11 +1,17 @@ //! EIP-6110 deposit requests parsing use alloc::{string::ToString, vec::Vec}; -use alloy_eips::eip6110::{DepositRequest, MAINNET_DEPOSIT_CONTRACT_ADDRESS}; -use alloy_primitives::Log; +use alloy_eips::eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS; +use alloy_primitives::{Address, Bytes, Log}; use alloy_sol_types::{sol, SolEvent}; -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthChainSpec}; use reth_evm::execute::BlockValidationError; -use reth_primitives::{Receipt, Request}; +use reth_primitives::Receipt; + +/// The size of a deposit request in bytes. 
While the event fields emit +/// bytestrings, those bytestrings are fixed size. The fields are: 48-byte +/// pubkey, 32-byte withdrawal credentials, 8-byte amount, 96-byte signature, +/// and 8-byte index. +const DEPOSIT_BYTES_SIZE: usize = 48 + 32 + 8 + 96 + 8; sol! { #[allow(missing_docs)] @@ -18,75 +24,85 @@ sol! { ); } -/// Parse [deposit contract](https://etherscan.io/address/0x00000000219ab540356cbb839cbe05303d7705fa) -/// (address is from the passed [`ChainSpec`]) deposits from receipts, and return them as a -/// [vector](Vec) of (requests)[Request]. -pub fn parse_deposits_from_receipts<'a, I>( - chain_spec: &ChainSpec, +/// Accumulate a deposit request from a log containing a [`DepositEvent`]. +pub fn accumulate_deposit_from_log(log: &Log<DepositEvent>, out: &mut Vec<u8>) { + out.reserve(DEPOSIT_BYTES_SIZE); + out.extend_from_slice(log.pubkey.as_ref()); + out.extend_from_slice(log.withdrawal_credentials.as_ref()); + out.extend_from_slice(log.amount.as_ref()); + out.extend_from_slice(log.signature.as_ref()); + out.extend_from_slice(log.index.as_ref()); +} + +/// Accumulate deposits from an iterator of logs. +pub fn accumulate_deposits_from_logs<'a>( + address: Address, + logs: impl IntoIterator<Item = &'a Log>, + out: &mut Vec<u8>, +) -> Result<(), BlockValidationError> { + logs.into_iter().filter(|log| log.address == address).try_for_each(|log| { + // We assume that the log is valid because it was emitted by the + // deposit contract. + let decoded_log = + DepositEvent::decode_log(log, false).map_err(|err: alloy_sol_types::Error| { + BlockValidationError::DepositRequestDecode(err.to_string()) + })?; + accumulate_deposit_from_log(&decoded_log, out); + Ok(()) + }) +} + +/// Accumulate deposits from a receipt. Iterates over the logs in the receipt +/// and accumulates the deposit request bytestrings. +pub fn accumulate_deposits_from_receipt( + address: Address, + receipt: &Receipt, + out: &mut Vec<u8>, +) -> Result<(), BlockValidationError> { + accumulate_deposits_from_logs(address, &receipt.logs, out) +} + +/// Accumulate deposits from a list of receipts. Iterates over the logs in the +/// receipts and accumulates the deposit request bytestrings. +pub fn accumulate_deposits_from_receipts<'a, I>( + address: Address, receipts: I, -) -> Result<Vec<Request>, BlockValidationError> + out: &mut Vec<u8>, +) -> Result<(), BlockValidationError> where I: IntoIterator<Item = &'a Receipt>, { - let deposit_contract_address = chain_spec - .deposit_contract - .as_ref() - .map_or(MAINNET_DEPOSIT_CONTRACT_ADDRESS, |contract| contract.address); receipts .into_iter() - .flat_map(|receipt| receipt.logs.iter()) - // No need to filter for topic because there's only one event and that's the Deposit event - // in the deposit contract. - .filter(|log| log.address == deposit_contract_address) - .map(|log| { - let decoded_log = DepositEvent::decode_log(log, false)?; - let deposit = parse_deposit_from_log(&decoded_log); - Ok(Request::DepositRequest(deposit)) - }) - .collect::<Result<Vec<_>, _>>() - .map_err(|err: alloy_sol_types::Error| { - BlockValidationError::DepositRequestDecode(err.to_string()) - }) + .try_for_each(|receipt| accumulate_deposits_from_receipt(address, receipt, out)) } -fn parse_deposit_from_log(log: &Log<DepositEvent>) -> DepositRequest { - // SAFETY: These `expect` https://github.com/ethereum/consensus-specs/blob/5f48840f4d768bf0e0a8156a3ed06ec333589007/solidity_deposit_contract/deposit_contract.sol#L107-L110 - // are safe because the `DepositEvent` is the only event in the deposit contract and the length - // checks are done there.
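Because each flattened deposit record is exactly `DEPOSIT_BYTES_SIZE` (192) bytes, field offsets into the accumulated request data are static. An illustrative sketch (the range constants are hypothetical, not part of this diff):

    const PUBKEY: core::ops::Range<usize> = 0..48;
    const WITHDRAWAL_CREDENTIALS: core::ops::Range<usize> = 48..80;
    const AMOUNT: core::ops::Range<usize> = 80..88;
    const SIGNATURE: core::ops::Range<usize> = 88..184;
    const INDEX: core::ops::Range<usize> = 184..192;
    // Compile-time check against the constant defined above.
    const _: () = assert!(INDEX.end == DEPOSIT_BYTES_SIZE);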
- DepositRequest { - pubkey: log - .pubkey - .as_ref() - .try_into() - .expect("pubkey length should be enforced in deposit contract"), - withdrawal_credentials: log - .withdrawal_credentials - .as_ref() - .try_into() - .expect("withdrawal_credentials length should be enforced in deposit contract"), - amount: u64::from_le_bytes( - log.amount - .as_ref() - .try_into() - .expect("amount length should be enforced in deposit contract"), - ), - signature: log - .signature - .as_ref() - .try_into() - .expect("signature length should be enforced in deposit contract"), - index: u64::from_le_bytes( - log.index - .as_ref() - .try_into() - .expect("deposit index length should be enforced in deposit contract"), - ), - } +/// Find deposit logs in a list of receipts, and return the concatenated +/// deposit request bytestring. +/// +/// The address of the deposit contract is taken from the chain spec, and +/// defaults to [`MAINNET_DEPOSIT_CONTRACT_ADDRESS`] if not specified in +/// the chain spec. +pub fn parse_deposits_from_receipts<'a, I>( + chainspec: &ChainSpec, + receipts: I, +) -> Result +where + I: IntoIterator, +{ + let mut out = Vec::new(); + accumulate_deposits_from_receipts( + chainspec.deposit_contract().map(|c| c.address).unwrap_or(MAINNET_DEPOSIT_CONTRACT_ADDRESS), + receipts, + &mut out, + )?; + Ok(out.into()) } #[cfg(test)] mod tests { use super::*; + use alloy_primitives::bytes; use reth_chainspec::MAINNET; use reth_primitives::TxType; @@ -119,9 +135,12 @@ mod tests { }, ]; - let requests = parse_deposits_from_receipts(&MAINNET, &receipts).unwrap(); - assert_eq!(requests.len(), 2); - assert_eq!(requests[0].as_deposit_request().unwrap().amount, 32e9 as u64); - assert_eq!(requests[1].as_deposit_request().unwrap().amount, 32e9 as u64); + let request_data = parse_deposits_from_receipts(&MAINNET, &receipts).unwrap(); + assert_eq!( + request_data, + bytes!( + "998c8086669bf65e24581cda47d8537966e9f5066fc6ffdcba910a1bfb91eae7a4873fcce166a1c4ea217e6b1afd396201000000000000000000000001c340fb72ed14d4eaa71f7633ee9e33b88d4f39004059730700000098ddbffd700c1aac324cfdf0492ff289223661eb26718ce3651ba2469b22f480d56efab432ed91af05a006bde0c1ea68134e0acd8cacca0c13ad1f716db874b44abfcc966368019753174753bca3af2ea84bc569c46f76592a91e97f311eddece474160000000000a1a2ba870a90e889aa594a0cc1c6feffb94c2d8f65646c937f1f456a315ef649533e25a4614d8f4f66ebdb06481b90af0100000000000000000000000a0f04a231efbc29e1db7d086300ff550211c2f60040597307000000ad416d590e1a7f52baff770a12835b68904efad22cc9f8ba531e50cbbd26f32b9c7373cf6538a0577f501e4d3e3e63e208767bcccaae94e1e3720bfb734a286f9c017d17af46536545ccb7ca94d71f295e71f6d25bf978c09ada6f8d3f7ba039e374160000000000" + ) + ); } } diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 1357f1b6fd..bf7793537c 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -1,182 +1,164 @@ -//! Ethereum block executor. +//! Ethereum block execution strategy. 
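For orientation, a strategy produced by the factory below is driven by the generic executor roughly in this order (a sketch using the trait methods introduced in this file; the surrounding driver loop is assumed):

    let mut strategy = factory.create_strategy(db);
    strategy.apply_pre_execution_changes(block, total_difficulty)?;
    let ExecuteOutput { receipts, gas_used } =
        strategy.execute_transactions(block, total_difficulty, None)?;
    let requests =
        strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?;
    strategy.validate_block_post_execution(block, &receipts, &requests)?;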
use crate::{ dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, EthEvmConfig, }; use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; -use alloy_primitives::{BlockNumber, U256}; +use alloy_consensus::Transaction as _; +use alloy_eips::eip7685::Requests; use core::fmt::Display; -use reth_chainspec::{ChainSpec, EthereumHardforks, MAINNET}; +use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks, MAINNET}; +use reth_consensus::ConsensusError; use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ execute::{ - BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, - BlockExecutorProvider, BlockValidationError, Executor, ProviderError, + BasicBlockExecutorProvider, BlockExecutionError, BlockExecutionStrategy, + BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, ProviderError, }, - system_calls::{NoopHook, OnStateHook, SystemCaller}, - ConfigureEvm, -}; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockWithSenders, EthereumHardfork, Header, Receipt, Request}; -use reth_prune_types::PruneModes; -use reth_revm::{ - batch::BlockBatchRecord, - db::{states::bundle_state::BundleRetention, State}, state_change::post_block_balance_increments, - Evm, + system_calls::{OnStateHook, SystemCaller}, + ConfigureEvm, }; +use reth_primitives::{BlockWithSenders, Receipt}; +use reth_revm::db::State; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, ResultAndState, + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, ResultAndState, U256, }; use tokio::sync::mpsc::UnboundedSender; use tracing::debug; -/// Provides executors to execute regular ethereum blocks +/// Factory for [`EthExecutionStrategy`]. #[derive(Debug, Clone)] -pub struct EthExecutorProvider { +pub struct EthExecutionStrategyFactory { + /// The chainspec chain_spec: Arc, + /// How to create an EVM. evm_config: EvmConfig, } -impl EthExecutorProvider { - /// Creates a new default ethereum executor provider. +impl EthExecutionStrategyFactory { + /// Creates a new default ethereum executor strategy factory. pub fn ethereum(chain_spec: Arc) -> Self { Self::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)) } - /// Returns a new provider for the mainnet. + /// Returns a new factory for the mainnet. pub fn mainnet() -> Self { Self::ethereum(MAINNET.clone()) } } -impl EthExecutorProvider { - /// Creates a new executor provider. +impl EthExecutionStrategyFactory { + /// Creates a new executor strategy factory. pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { Self { chain_spec, evm_config } } } -impl EthExecutorProvider +impl BlockExecutionStrategyFactory for EthExecutionStrategyFactory where - EvmConfig: ConfigureEvm
, + EvmConfig: + Clone + Unpin + Sync + Send + 'static + ConfigureEvm
, { - fn eth_executor( - &self, - db: DB, - prefetch_tx: Option>, - ) -> EthBlockExecutor - where - DB: Database>, - { - if let Some(tx) = prefetch_tx { - EthBlockExecutor::new_with_prefetch_tx( - self.chain_spec.clone(), - self.evm_config.clone(), - State::builder() - .with_database(db) - .with_bundle_update() - .without_state_clear() - .build(), - tx, - ) - } else { - EthBlockExecutor::new( - self.chain_spec.clone(), - self.evm_config.clone(), - State::builder() - .with_database(db) - .with_bundle_update() - .without_state_clear() - .build(), - ) - } - } -} - -impl BlockExecutorProvider for EthExecutorProvider -where - EvmConfig: ConfigureEvm
, -{ - type Executor + Display>> = - EthBlockExecutor; - - type BatchExecutor + Display>> = - EthBatchExecutor; - - fn executor( - &self, - db: DB, - prefetch_tx: Option>, - ) -> Self::Executor - where - DB: Database + Display>, - { - self.eth_executor(db, prefetch_tx) - } + type Strategy + Display>> = + EthExecutionStrategy; - fn batch_executor(&self, db: DB) -> Self::BatchExecutor + fn create_strategy(&self, db: DB) -> Self::Strategy where DB: Database + Display>, { - let executor = self.eth_executor(db, None); - EthBatchExecutor { executor, batch_record: BlockBatchRecord::default() } + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + EthExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) } } -/// Helper type for the output of executing a block. -#[derive(Debug, Clone)] -struct EthExecuteOutput { - receipts: Vec, - requests: Vec, - gas_used: u64, -} - -/// Helper container type for EVM with chain spec. -#[derive(Debug, Clone)] -struct EthEvmExecutor { +/// Block execution strategy for Ethereum. +#[allow(missing_debug_implementations)] +pub struct EthExecutionStrategy +where + EvmConfig: Clone, +{ /// The chainspec chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, + /// Current state for block execution. + state: State, + /// Utility to call system smart contracts. + system_caller: SystemCaller, } -impl EthEvmExecutor +impl EthExecutionStrategy where - EvmConfig: ConfigureEvm
, + EvmConfig: Clone, { - /// Executes the transactions in the block and returns the receipts of the transactions in the - /// block, the total gas used and the list of EIP-7685 [requests](Request). - /// - /// This applies the pre-execution and post-execution changes that require an [EVM](Evm), and - /// executes the transactions. - /// - /// The optional `state_hook` will be executed with the state changes if present. + /// Creates a new [`EthExecutionStrategy`] + pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { + let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); + Self { state, chain_spec, evm_config, system_caller } + } +} + +impl EthExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ + /// Configures a new evm configuration and block environment for the given block. /// - /// # Note + /// # Caution /// - /// It does __not__ apply post-execution changes that do not require an [EVM](Evm), for that see - /// [`EthBlockExecutor::post_execution`]. - fn execute_state_transitions( + /// This does not initialize the tx environment. + fn evm_env_for_block( &self, + header: &alloy_consensus::Header, + total_difficulty: U256, + ) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl BlockExecutionStrategy for EthExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ + type Error = BlockExecutionError; + + fn apply_pre_execution_changes( + &mut self, block: &BlockWithSenders, - mut evm: Evm<'_, Ext, &mut State>, - state_hook: Option, - tx: Option>, - ) -> Result - where - DB: Database, - DB::Error: Into + Display, - F: OnStateHook, - { - let mut system_caller = - SystemCaller::new(&self.evm_config, &self.chain_spec).with_state_hook(state_hook); + total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); - system_caller.apply_pre_execution_changes(block, &mut evm)?; + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + self.system_caller.apply_pre_execution_changes(block, &mut evm)?; + + Ok(()) + } + + fn execute_transactions( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + tx: Option>, + ) -> Result { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - // execute transactions let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.transactions.len()); for (sender, transaction) in block.transactions_with_sender() { @@ -202,12 +184,14 @@ where error: Box::new(new_err), } })?; - system_caller.on_state(&result_and_state); + self.system_caller.on_state(&result_and_state); let ResultAndState { result, state } = result_and_state; + // Send post state to prefetch channel. if let Some(tx) = tx.as_ref() { tx.send(state.clone()).unwrap_or_else(|err| { - debug!(target: "evm_executor", ?err, "Failed to send post state to prefetch channel") + debug!(target: "evm_executor", ?err, "Failed to send post state to prefetch + channel") }); } @@ -231,154 +215,36 @@ where }, ); } - - let requests = if self.chain_spec.is_prague_active_at_timestamp(block.timestamp) { - // Collect all EIP-6110 deposits - let deposit_requests = - crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, &receipts)?; - - let post_execution_requests = system_caller.apply_post_execution_changes(&mut evm)?; - - [deposit_requests, post_execution_requests].concat() - } else { - vec![] - }; - - Ok(EthExecuteOutput { receipts, requests, gas_used: cumulative_gas_used }) + Ok(ExecuteOutput { receipts, gas_used: cumulative_gas_used }) } -} - -/// A basic Ethereum block executor. -/// -/// Expected usage: -/// - Create a new instance of the executor. -/// - Execute the block. -#[derive(Debug)] -pub struct EthBlockExecutor { - /// Chain specific evm config that's used to execute a block. - executor: EthEvmExecutor, - /// The state to use for execution - state: State, - /// Prefetch channel - prefetch_tx: Option>, -} - -impl EthBlockExecutor { - /// Creates a new Ethereum block executor. - pub const fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { - Self { executor: EthEvmExecutor { chain_spec, evm_config }, state, prefetch_tx: None } - } - - /// Creates a new Ethereum block executor with a prefetch channel. 
- pub const fn new_with_prefetch_tx( - chain_spec: Arc, - evm_config: EvmConfig, - state: State, - tx: UnboundedSender, - ) -> Self { - Self { executor: EthEvmExecutor { chain_spec, evm_config }, state, prefetch_tx: Some(tx) } - } - - #[inline] - fn chain_spec(&self) -> &ChainSpec { - &self.executor.chain_spec - } - - /// Returns mutable reference to the state that wraps the underlying database. - #[allow(unused)] - fn state_mut(&mut self) -> &mut State { - &mut self.state - } -} - -impl EthBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - /// Configures a new evm configuration and block environment for the given block. - /// - /// # Caution - /// - /// This does not initialize the tx environment. - fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.executor.evm_config.fill_cfg_and_block_env( - &mut cfg, - &mut block_env, - header, - total_difficulty, - ); - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } - - /// Convenience method to invoke `execute_without_verification_with_state_hook` setting the - /// state hook as `None`. - fn execute_without_verification( + fn apply_post_execution_changes( &mut self, block: &BlockWithSenders, total_difficulty: U256, - ) -> Result { - self.execute_without_verification_with_state_hook(block, total_difficulty, None::) - } - - /// Execute a single block and apply the state changes to the internal state. - /// - /// Returns the receipts of the transactions in the block, the total gas used and the list of - /// EIP-7685 [requests](Request). - /// - /// Returns an error if execution fails. - fn execute_without_verification_with_state_hook( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - state_hook: Option, - ) -> Result - where - F: OnStateHook, - { - // 1. prepare state on new block - self.on_new_block(&block.header); - - // 2. configure the evm and execute + receipts: &[Receipt], + ) -> Result { let env = self.evm_env_for_block(&block.header, total_difficulty); - let output = { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - self.executor.execute_state_transitions( - block, - evm, - state_hook, - self.prefetch_tx.clone(), - ) - }?; - - // 3. apply post execution changes - self.post_execution(block, total_difficulty)?; + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - Ok(output) - } + let requests = if self.chain_spec.is_prague_active_at_timestamp(block.timestamp) { + // Collect all EIP-6110 deposits + let deposit_requests = + crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, receipts)?; - /// Apply settings before a new block is executed. - pub(crate) fn on_new_block(&mut self, header: &Header) { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); - self.state.set_state_clear_flag(state_clear_flag); - } + let mut requests = Requests::new(vec![deposit_requests]); + requests.extend(self.system_caller.apply_post_execution_changes(&mut evm)?); + requests + } else { + Requests::default() + }; + drop(evm); - /// Apply post execution state changes that do not require an [EVM](Evm), such as: block - /// rewards, withdrawals, and irregular DAO hardfork state change - pub fn post_execution( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { let mut balance_increments = - post_block_balance_increments(self.chain_spec(), block, total_difficulty); + post_block_balance_increments(&self.chain_spec, block, total_difficulty); // Irregular state change at Ethereum DAO hardfork - if self.chain_spec().fork(EthereumHardfork::Dao).transitions_at_block(block.number) { + if self.chain_spec.fork(EthereumHardfork::Dao).transitions_at_block(block.number) { // drain balances from hardcoded addresses. 
let drained_balance: u128 = self .state @@ -395,183 +261,67 @@ where .increment_balances(balance_increments) .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - Ok(()) + Ok(requests) } -} -impl Executor for EthBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders, Header>; - type Output = BlockExecutionOutput; - type Error = BlockExecutionError; - - /// Executes the block and commits the changes to the internal state. - /// - /// Returns the receipts of the transactions in the block. - /// - /// Returns an error if the block could not be executed or failed verification. - fn execute(mut self, input: Self::Input<'_>) -> Result { - let BlockExecutionInput { block, total_difficulty, .. } = input; - let EthExecuteOutput { receipts, requests, gas_used } = - self.execute_without_verification(block, total_difficulty)?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests, - gas_used, - snapshot: None, - }) - } - - fn execute_with_state_closure( - mut self, - input: Self::Input<'_>, - mut witness: F, - ) -> Result - where - F: FnMut(&State), - { - let BlockExecutionInput { block, total_difficulty, .. } = input; - let EthExecuteOutput { receipts, requests, gas_used } = - self.execute_without_verification(block, total_difficulty)?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - witness(&self.state); - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests, - gas_used, - snapshot: None, - }) - } - - fn execute_with_state_hook( - mut self, - input: Self::Input<'_>, - state_hook: F, - ) -> Result - where - F: OnStateHook, - { - let BlockExecutionInput { block, total_difficulty, .. } = input; - let EthExecuteOutput { receipts, requests, gas_used } = self - .execute_without_verification_with_state_hook( - block, - total_difficulty, - Some(state_hook), - )?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests, - gas_used, - snapshot: None, - }) + fn state_ref(&self) -> &State { + &self.state } -} -/// An executor for a batch of blocks. -/// -/// State changes are tracked until the executor is finalized. -#[derive(Debug)] -pub struct EthBatchExecutor { - /// The executor used to execute single blocks - /// - /// All state changes are committed to the [State]. - executor: EthBlockExecutor, - /// Keeps track of the batch and records receipts based on the configured prune mode - batch_record: BlockBatchRecord, -} -impl EthBatchExecutor { - /// Returns mutable reference to the state that wraps the underlying database. - #[allow(unused)] fn state_mut(&mut self) -> &mut State { - self.executor.state_mut() + &mut self.state } -} -impl BatchExecutor for EthBatchExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders, Header>; - type Output = ExecutionOutcome; - type Error = BlockExecutionError; - - fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { - let BlockExecutionInput { block, total_difficulty, .. } = input; - - if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); - } - let EthExecuteOutput { receipts, requests, gas_used: _ } = - self.executor.execute_without_verification(block, total_difficulty)?; - - validate_block_post_execution(block, self.executor.chain_spec(), &receipts, &requests)?; - - // prepare the state according to the prune mode - let retention = self.batch_record.bundle_retention(block.number); - self.executor.state.merge_transitions(retention); - - // store receipts in the set - self.batch_record.save_receipts(receipts)?; - - // store requests in the set - self.batch_record.save_requests(requests); - - Ok(()) + fn with_state_hook(&mut self, hook: Option>) { + self.system_caller.with_state_hook(hook); } - fn finalize(mut self) -> Self::Output { - ExecutionOutcome::new( - self.executor.state.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - self.batch_record.take_requests(), - ) + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + requests: &Requests, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec.clone(), receipts, requests) } +} - fn set_tip(&mut self, tip: BlockNumber) { - self.batch_record.set_tip(tip); - } +/// Helper type with backwards compatible methods to obtain Ethereum executor +/// providers. +#[derive(Debug)] +pub struct EthExecutorProvider; - fn set_prune_modes(&mut self, prune_modes: PruneModes) { - self.batch_record.set_prune_modes(prune_modes); +impl EthExecutorProvider { + /// Creates a new default ethereum executor provider. + pub fn ethereum( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::ethereum(chain_spec)) } - fn size_hint(&self) -> Option { - Some(self.executor.state.bundle_state.size_hint()) + /// Returns a new provider for the mainnet. 
+ pub fn mainnet() -> BasicBlockExecutorProvider { + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::mainnet()) } } #[cfg(test)] mod tests { use super::*; - use alloy_consensus::TxLegacy; + use alloy_consensus::{constants::ETH_TO_WEI, Header, TxLegacy}; use alloy_eips::{ eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE}, + eip7685::EMPTY_REQUESTS_HASH, }; use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256}; use reth_chainspec::{ChainSpecBuilder, ForkCondition}; - use reth_primitives::{ - constants::{EMPTY_ROOT_HASH, ETH_TO_WEI}, - public_key_to_address, Account, Block, BlockBody, Transaction, + use reth_evm::execute::{ + BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider, Executor, }; + use reth_execution_types::BlockExecutionOutput; + use reth_primitives::{public_key_to_address, Account, Block, BlockBody, Transaction}; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, }; @@ -618,8 +368,13 @@ mod tests { db } - fn executor_provider(chain_spec: Arc) -> EthExecutorProvider { - EthExecutorProvider { evm_config: EthEvmConfig::new(chain_spec.clone()), chain_spec } + fn executor_provider( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + let strategy_factory = + EthExecutionStrategyFactory::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)); + + BasicBlockExecutorProvider::new(strategy_factory) } #[test] @@ -638,10 +393,11 @@ mod tests { let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db), None); + // attempt to execute a block without parent beacon block root, expect err - let err = provider - .executor(StateProviderDatabase::new(&db), None) - .execute( + let err = executor + .execute_and_verify_one( ( &BlockWithSenders { block: Block { @@ -651,7 +407,6 @@ mod tests { ommers: vec![], withdrawals: None, sidecars: None, - requests: None, }, }, senders: vec![], @@ -673,25 +428,26 @@ mod tests { // fix header, set a gas limit header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - let mut executor = provider.executor(StateProviderDatabase::new(&db), None); - // Now execute a block with the fixed header, ensure that it does not fail executor - .execute_without_verification( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: None, - sidecars: None, - requests: None, + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { + header: header.clone(), + body: BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: None, + sidecars: None, + }, }, + senders: vec![], }, - senders: vec![], - }, - U256::ZERO, + U256::ZERO, + None, + ) + .into(), ) .unwrap(); @@ -705,16 +461,17 @@ mod tests { let parent_beacon_block_root_index = timestamp_index % history_buffer_length + history_buffer_length; - // get timestamp storage and compare - let timestamp_storage = - executor.state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap(); + let timestamp_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() + }); assert_eq!(timestamp_storage, U256::from(header.timestamp)); // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor - .state - 
.storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) - .expect("storage value should exist"); + let parent_beacon_block_root_storage = executor.with_state_mut(|state| { + state + .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) + .expect("storage value should exist") + }); assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); } @@ -744,7 +501,7 @@ mod tests { // attempt to execute an empty block with parent beacon block root, this should not fail provider - .batch_executor(StateProviderDatabase::new(&db)) + .batch_executor(StateProviderDatabase::new(&db), None) .execute_and_verify_one( ( &BlockWithSenders { @@ -755,7 +512,6 @@ mod tests { ommers: vec![], withdrawals: None, sidecars: None, - requests: None, }, }, senders: vec![], @@ -798,7 +554,7 @@ mod tests { ..Header::default() }; - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db), None); // attempt to execute an empty block with parent beacon block root, this should not fail executor @@ -812,7 +568,6 @@ mod tests { ommers: vec![], withdrawals: None, sidecars: None, - requests: None, }, }, senders: vec![], @@ -827,7 +582,8 @@ mod tests { ); // ensure that the nonce of the system address account has not changed - let nonce = executor.state_mut().basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce; + let nonce = + executor.with_state_mut(|state| state.basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce); assert_eq!(nonce, 0); } @@ -845,7 +601,7 @@ mod tests { let mut header = chain_spec.genesis_header().clone(); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db), None); // attempt to execute the genesis block with non-zero parent beacon block root, expect err header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); @@ -887,11 +643,12 @@ mod tests { // there is no system contract call so there should be NO STORAGE CHANGES // this means we'll check the transition state - let transition_state = executor - .state_mut() - .transition_state - .take() - .expect("the evm should be initialized with bundle updates"); + let transition_state = executor.with_state_mut(|state| { + state + .transition_state + .take() + .expect("the evm should be initialized with bundle updates") + }); // assert that it is the default (empty) transition state assert_eq!(transition_state, TransitionState::default()); @@ -922,7 +679,7 @@ mod tests { let provider = executor_provider(chain_spec); // execute header - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db), None); // Now execute a block with the fixed header, ensure that it does not fail executor @@ -950,17 +707,15 @@ mod tests { timestamp_index % history_buffer_length + history_buffer_length; // get timestamp storage and compare - let timestamp_storage = executor - .state_mut() - .storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)) - .unwrap(); + let timestamp_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() + }); assert_eq!(timestamp_storage, U256::from(header.timestamp)); // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor - .state_mut() - .storage(BEACON_ROOTS_ADDRESS, 
U256::from(parent_beacon_block_root_index)) - .unwrap(); + let parent_beacon_block_root_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)).unwrap() + }); assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); } @@ -986,7 +741,6 @@ mod tests { db } - #[test] fn eip_2935_pre_fork() { let db = create_state_provider_with_block_hashes(1); @@ -999,7 +753,7 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db), None); // construct the header for block one let header = Header { timestamp: 1, number: 1, ..Header::default() }; @@ -1026,12 +780,11 @@ mod tests { // // we load the account first, because revm expects it to be // loaded - executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap(); - assert!(executor - .state_mut() + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) .unwrap() - .is_zero()); + .is_zero())); } #[test] @@ -1047,7 +800,7 @@ mod tests { let header = chain_spec.genesis_header().clone(); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db), None); // attempt to execute genesis block, this should not fail executor @@ -1071,12 +824,11 @@ mod tests { // // we load the account first, because revm expects it to be // loaded - executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap(); - assert!(executor - .state_mut() + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) .unwrap() - .is_zero()); + .is_zero())); } #[test] @@ -1095,11 +847,11 @@ mod tests { parent_hash: B256::random(), timestamp: 1, number: fork_activation_block, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db), None); // attempt to execute the fork activation block, this should not fail executor @@ -1119,21 +871,20 @@ mod tests { ); // the hash for the ancestor of the fork activation block should be present - assert!(executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()); + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); assert_ne!( - executor - .state_mut() + executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block - 1)) - .unwrap(), + .unwrap()), U256::ZERO ); // the hash of the block itself should not be in storage - assert!(executor - .state_mut() + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block)) .unwrap() - .is_zero()); + .is_zero())); } #[test] @@ -1149,13 +900,13 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db), None); let header = Header { parent_hash: B256::random(), 
timestamp: 1, number: fork_activation_block, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; @@ -1177,15 +928,15 @@ mod tests { ); // the hash for the ancestor of the fork activation block should be present - assert!(executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()); + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); assert_ne!( - executor - .state_mut() + executor.with_state_mut(|state| state .storage( HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block % BLOCKHASH_SERVE_WINDOW as u64 - 1) ) - .unwrap(), + .unwrap()), U256::ZERO ); } @@ -1202,11 +953,11 @@ mod tests { ); let mut header = chain_spec.genesis_header().clone(); - header.requests_root = Some(EMPTY_ROOT_HASH); + header.requests_hash = Some(EMPTY_REQUESTS_HASH); let header_hash = header.hash_slow(); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db), None); // attempt to execute the genesis block, this should not fail executor @@ -1229,19 +980,18 @@ mod tests { // // we load the account first, because revm expects it to be // loaded - executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap(); - assert!(executor - .state_mut() + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) .unwrap() - .is_zero()); + .is_zero())); // attempt to execute block 1, this should not fail let header = Header { parent_hash: header_hash, timestamp: 1, number: 1, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; let header_hash = header.hash_slow(); @@ -1263,23 +1013,25 @@ mod tests { ); // the block hash of genesis should now be in storage, but not block 1 - assert!(executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()); + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); assert_ne!( - executor.state_mut().storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap(), + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) + .unwrap()), U256::ZERO ); - assert!(executor - .state_mut() + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) .unwrap() - .is_zero()); + .is_zero())); // attempt to execute block 2, this should not fail let header = Header { parent_hash: header_hash, timestamp: 1, number: 2, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; @@ -1300,20 +1052,24 @@ mod tests { ); // the block hash of genesis and block 1 should now be in storage, but not block 2 - assert!(executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()); + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); assert_ne!( - executor.state_mut().storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap(), + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) + .unwrap()), U256::ZERO ); assert_ne!( - executor.state_mut().storage(HISTORY_STORAGE_ADDRESS, U256::from(1)).unwrap(), + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) + .unwrap()), U256::ZERO ); - assert!(executor - .state_mut() + 
assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::from(2)) .unwrap() - .is_zero()); + .is_zero())); } #[test] @@ -1338,15 +1094,16 @@ mod tests { HashMap::default(), ); - // https://github.com/lightclient/7002asm/blob/e0d68e04d15f25057af7b6d180423d94b6b3bdb3/test/Contract.t.sol.in#L49-L64 + // https://github.com/lightclient/sys-asm/blob/9282bdb9fd64e024e27f60f507486ffb2183cba2/test/Withdrawal.t.sol.in#L36 let validator_public_key = fixed_bytes!("111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); - let withdrawal_amount = fixed_bytes!("2222222222222222"); + let withdrawal_amount = fixed_bytes!("0203040506070809"); let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); assert_eq!(input.len(), 56); let mut header = chain_spec.genesis_header().clone(); header.gas_limit = 1_500_000; - header.gas_used = 134_807; + // measured + header.gas_used = 135_856; header.receipts_root = b256!("b31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); @@ -1356,10 +1113,10 @@ mod tests { chain_id: Some(chain_spec.chain.id()), nonce: 1, gas_price: header.base_fee_per_gas.unwrap().into(), - gas_limit: 134_807, + gas_limit: header.gas_used, to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), // `MIN_WITHDRAWAL_REQUEST_FEE` - value: U256::from(1), + value: U256::from(2), input, }), ); @@ -1387,11 +1144,9 @@ mod tests { let receipt = receipts.first().unwrap(); assert!(receipt.success); - let request = requests.first().unwrap(); - let withdrawal_request = request.as_withdrawal_request().unwrap(); - assert_eq!(withdrawal_request.source_address, sender_address); - assert_eq!(withdrawal_request.validator_pubkey, validator_public_key); - assert_eq!(withdrawal_request.amount, u64::from_be_bytes(withdrawal_amount.into())); + assert!(requests[0].is_empty(), "there should be no deposits"); + assert!(!requests[1].is_empty(), "there should be a withdrawal"); + assert!(requests[2].is_empty(), "there should be no consolidations"); } #[test] diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index a71f26f703..1c340c0927 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -1,4 +1,10 @@ //! EVM config for vanilla ethereum. +//! +//! # Revm features
+//!
+//! This crate does __not__ enforce specific revm features such as `blst` or `c-kzg`, which are
+//! critical for revm's EVM internals; it is the responsibility of the implementer to ensure that
+//! the proper features are selected.
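
To recap the EIP-7002 test rework above: instead of the removed typed `WithdrawalRequest` accessors, the executor output now carries three flat, per-type request lists (deposits, withdrawals, consolidations), asserted on by index. A minimal sketch of what decoding one withdrawal record from such a flat payload could look like (the helper is hypothetical and not part of this change; the 20/48/8-byte field layout comes from EIP-7002):

    /// Hypothetical helper (not in this change): decode one EIP-7002
    /// withdrawal request from the flat byte payload at index 1 of a
    /// block's EIP-7685 request lists.
    fn decode_withdrawal_request(data: &[u8]) -> Option<([u8; 20], [u8; 48], u64)> {
        // Each record is 20 + 48 + 8 = 76 bytes:
        // source address | validator pubkey | big-endian amount (gwei).
        if data.len() < 76 {
            return None;
        }
        let mut source = [0u8; 20];
        source.copy_from_slice(&data[..20]);
        let mut pubkey = [0u8; 48];
        pubkey.copy_from_slice(&data[20..68]);
        let amount = u64::from_be_bytes(data[68..76].try_into().ok()?);
        Some((source, pubkey, amount))
    }

Under that layout, the new `withdrawal_amount` fixture `0203040506070809` decodes to `u64::from_be_bytes([2, 3, 4, 5, 6, 7, 8, 9])`, the same value the removed assertion used to check directly.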
#![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -11,7 +17,9 @@ extern crate alloc; -use alloc::vec::Vec; +use core::convert::Infallible; + +use alloc::{sync::Arc, vec::Vec}; use alloy_primitives::{Address, Bytes, TxKind, U256}; use reth_chainspec::{ChainSpec, Head}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; @@ -19,12 +27,11 @@ use reth_primitives::{transaction::FillTxEnv, Header, TransactionSigned}; use revm_primitives::{ AnalysisKind, BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, Env, SpecId, TxEnv, }; -use std::sync::Arc; mod config; +use alloy_eips::eip1559::INITIAL_BASE_FEE; pub use config::{revm_spec, revm_spec_by_timestamp_after_merge}; use reth_ethereum_forks::EthereumHardfork; -use reth_primitives::constants::EIP1559_INITIAL_BASE_FEE; pub mod execute; @@ -54,6 +61,7 @@ impl EthEvmConfig { impl ConfigureEvmEnv for EthEvmConfig { type Header = Header; + type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { transaction.fill_tx_env(tx_env, sender); @@ -126,7 +134,7 @@ impl ConfigureEvmEnv for EthEvmConfig { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { // configure evm env based on parent block let cfg = CfgEnv::default().with_chain_id(self.chain_spec.chain().id()); @@ -134,17 +142,10 @@ impl ConfigureEvmEnv for EthEvmConfig { let spec_id = revm_spec_by_timestamp_after_merge(&self.chain_spec, attributes.timestamp); // if the parent block did not have excess blob gas (i.e. it was pre-cancun), but it is - // cancun now, we need to set the excess blob gas to the default value + // cancun now, we need to set the excess blob gas to the default value (0) let blob_excess_gas_and_price = parent .next_block_excess_blob_gas() - .or_else(|| { - if spec_id == SpecId::CANCUN { - // default excess blob gas is zero - Some(0) - } else { - None - } - }) + .or_else(|| (spec_id == SpecId::CANCUN).then_some(0)) .map(BlobExcessGasAndPrice::new); let mut basefee = parent.next_block_base_fee( @@ -165,7 +166,7 @@ impl ConfigureEvmEnv for EthEvmConfig { gas_limit *= U256::from(elasticity_multiplier); // set the base fee to the initial base fee from the EIP-1559 spec - basefee = Some(EIP1559_INITIAL_BASE_FEE) + basefee = Some(INITIAL_BASE_FEE) } let block_env = BlockEnv { @@ -181,7 +182,7 @@ impl ConfigureEvmEnv for EthEvmConfig { blob_excess_gas_and_price, }; - (CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env) + Ok((CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env)) } } @@ -194,13 +195,14 @@ impl ConfigureEvm for EthEvmConfig { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::constants::KECCAK_EMPTY; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; use reth_chainspec::{Chain, ChainSpec, MAINNET}; use reth_evm::execute::ProviderError; use reth_primitives::{ revm_primitives::{BlockEnv, CfgEnv, SpecId}, - Header, KECCAK_EMPTY, + Header, }; use reth_revm::{ db::{CacheDB, EmptyDBTyped}, diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 598cf7ce82..eb386e60ab 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -21,6 +21,7 @@ reth-tracing.workspace = true reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network.workspace = true +reth-evm.workspace = true
reth-evm-ethereum.workspace = true reth-consensus.workspace = true reth-auto-seal-consensus.workspace = true @@ -29,6 +30,11 @@ reth-rpc.workspace = true reth-node-api.workspace = true reth-chainspec.workspace = true reth-primitives.workspace = true +reth-revm = { workspace = true, features = ["std"] } +reth-trie-db.workspace = true + +# revm with required ethereum features +revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } # bsc reth-bsc-consensus.workspace = true @@ -42,16 +48,36 @@ reth-chainspec.workspace = true reth-db.workspace = true reth-exex.workspace = true reth-node-api.workspace = true -reth-node-core.workspace = true reth-e2e-test-utils.workspace = true reth-tasks.workspace = true futures.workspace = true alloy-primitives.workspace = true alloy-genesis.workspace = true tokio.workspace = true -futures-util.workspace = true serde_json.workspace = true +alloy-consensus.workspace = true +alloy-provider.workspace = true +rand.workspace = true +alloy-signer.workspace = true +alloy-eips.workspace = true +alloy-sol-types.workspace = true +alloy-contract.workspace = true +alloy-rpc-types-beacon.workspace = true [features] default = [] -test-utils = ["reth-node-builder/test-utils"] +test-utils = [ + "reth-node-builder/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-network/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-evm/test-utils" +] diff --git a/crates/ethereum/node/src/evm.rs b/crates/ethereum/node/src/evm.rs index d710d8d8d4..bcdcaac6bf 100644 --- a/crates/ethereum/node/src/evm.rs +++ b/crates/ethereum/node/src/evm.rs @@ -1,6 +1,8 @@ //! 
Ethereum EVM support #[doc(inline)] -pub use reth_evm_ethereum::execute::EthExecutorProvider; +pub use reth_evm::execute::BasicBlockExecutorProvider; +#[doc(inline)] +pub use reth_evm_ethereum::execute::{EthExecutionStrategyFactory, EthExecutorProvider}; #[doc(inline)] pub use reth_evm_ethereum::EthEvmConfig; diff --git a/crates/ethereum/node/src/lib.rs b/crates/ethereum/node/src/lib.rs index 44ec6836c8..421cee37fb 100644 --- a/crates/ethereum/node/src/lib.rs +++ b/crates/ethereum/node/src/lib.rs @@ -8,10 +8,15 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use reth_revm as _; +use revm as _; + pub use reth_ethereum_engine_primitives::EthEngineTypes; pub mod evm; -pub use evm::{EthEvmConfig, EthExecutorProvider}; +pub use evm::{ + BasicBlockExecutorProvider, EthEvmConfig, EthExecutionStrategyFactory, EthExecutorProvider, +}; pub mod node; pub use node::EthereumNode; diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 78d28f29dd..05a4fcf6b8 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -10,19 +10,24 @@ use reth_chainspec::ChainSpec; use reth_ethereum_engine_primitives::{ EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, EthereumEngineValidator, }; -use reth_evm_ethereum::execute::EthExecutorProvider; -use reth_network::NetworkHandle; -use reth_node_api::{ConfigureEvm, EngineValidator, FullNodeComponents, NodeAddOns}; +use reth_evm::execute::BasicBlockExecutorProvider; +use reth_evm_ethereum::execute::EthExecutionStrategyFactory; +use reth_network::{NetworkHandle, PeersInfo}; +use reth_node_api::{ + AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodePrimitives, + NodeTypesWithDB, +}; use reth_node_builder::{ components::{ - ComponentsBuilder, ConsensusBuilder, EngineValidatorBuilder, ExecutorBuilder, - NetworkBuilder, ParliaBuilder, PayloadServiceBuilder, PoolBuilder, + ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, ParliaBuilder, + PayloadServiceBuilder, PoolBuilder, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - BuilderContext, Node, PayloadBuilderConfig, PayloadTypes, + rpc::{EngineValidatorBuilder, RpcAddOns}, + BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::Header; +use reth_primitives::{Block, Header}; use reth_provider::CanonStateSubscriptions; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; @@ -30,9 +35,18 @@ use reth_transaction_pool::{ blobstore::DiskFileBlobStore, EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, }; +use reth_trie_db::MerklePatriciaTrie; use crate::{EthEngineTypes, EthEvmConfig}; +/// Ethereum primitive types. +#[derive(Debug)] +pub struct EthPrimitives; + +impl NodePrimitives for EthPrimitives { + type Block = Block; +} + /// Type configuration for a regular Ethereum node. 
#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] @@ -47,7 +61,6 @@ impl EthereumNode { EthereumNetworkBuilder, EthereumExecutorBuilder, EthereumConsensusBuilder, - EthereumEngineValidatorBuilder, EthereumParliaBuilder, > where @@ -65,14 +78,14 @@ impl EthereumNode { .network(EthereumNetworkBuilder::default()) .executor(EthereumExecutorBuilder::default()) .consensus(EthereumConsensusBuilder::default()) - .engine_validator(EthereumEngineValidatorBuilder::default()) .parlia(EthereumParliaBuilder::default()) } } impl NodeTypes for EthereumNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; } impl NodeTypesWithEngine for EthereumNode { @@ -80,17 +93,20 @@ impl NodeTypesWithEngine for EthereumNode { } /// Add-ons w.r.t. l1 ethereum. -#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub struct EthereumAddOns; - -impl NodeAddOns for EthereumAddOns { - type EthApi = EthApi; -} +pub type EthereumAddOns = RpcAddOns< + N, + EthApi< + ::Provider, + ::Pool, + NetworkHandle, + ::Evm, + >, + EthereumEngineValidatorBuilder, +>; impl Node for EthereumNode where - Types: NodeTypesWithEngine, + Types: NodeTypesWithDB + NodeTypesWithEngine, N: FullNodeTypes, { type ComponentsBuilder = ComponentsBuilder< @@ -100,11 +116,12 @@ where EthereumNetworkBuilder, EthereumExecutorBuilder, EthereumConsensusBuilder, - EthereumEngineValidatorBuilder, EthereumParliaBuilder, >; - type AddOns = EthereumAddOns; + type AddOns = EthereumAddOns< + NodeAdapter>::Components>, + >; fn components_builder(&self) -> Self::ComponentsBuilder { Self::components() @@ -126,7 +143,7 @@ where Node: FullNodeTypes, { type EVM = EthEvmConfig; - type Executor = EthExecutorProvider; + type Executor = BasicBlockExecutorProvider; async fn build_evm( self, @@ -134,7 +151,8 @@ where ) -> eyre::Result<(Self::EVM, Self::Executor)> { let chain_spec = ctx.chain_spec(); let evm_config = EthEvmConfig::new(ctx.chain_spec()); - let executor = EthExecutorProvider::new(chain_spec, evm_config.clone()); + let strategy_factory = EthExecutionStrategyFactory::new(chain_spec, evm_config.clone()); + let executor = BasicBlockExecutorProvider::new(strategy_factory); Ok((evm_config, executor)) } @@ -302,7 +320,7 @@ where ) -> eyre::Result { let network = ctx.network_builder().await?; let handle = ctx.start_network(network, pool); - + info!(target: "reth::cli", enode=%handle.local_node_record(), "P2P networking initialized"); Ok(handle) } } @@ -336,13 +354,13 @@ pub struct EthereumEngineValidatorBuilder; impl EngineValidatorBuilder for EthereumEngineValidatorBuilder where Types: NodeTypesWithEngine, - Node: FullNodeTypes, + Node: FullNodeComponents, EthereumEngineValidator: EngineValidator, { type Validator = EthereumEngineValidator; - async fn build_validator(self, ctx: &BuilderContext) -> eyre::Result { - Ok(EthereumEngineValidator::new(ctx.chain_spec())) + async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { + Ok(EthereumEngineValidator::new(ctx.config.chain.clone())) } } diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index 9390b34f44..976727bc81 100644 --- a/crates/ethereum/node/tests/e2e/blobs.rs +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -1,7 +1,7 @@ use std::sync::Arc; +use alloy_consensus::constants::MAINNET_GENESIS_HASH; use alloy_genesis::Genesis; -use alloy_primitives::b256; use reth::{ args::RpcServerArgs, builder::{NodeBuilder, NodeConfig, NodeHandle}, @@ -41,7 +41,7 @@ async fn 
can_handle_blobs() -> eyre::Result<()> { .launch() .await?; - let mut node = NodeTestContext::new(node).await?; + let mut node = NodeTestContext::new(node, eth_payload_attributes).await?; let wallets = Wallet::new(2).gen(); let blob_wallet = wallets.first().unwrap(); @@ -51,7 +51,7 @@ async fn can_handle_blobs() -> eyre::Result<()> { let raw_tx = TransactionTestContext::transfer_tx_bytes(1, second_wallet.clone()).await; let tx_hash = node.rpc.inject_tx(raw_tx).await?; // build payload with normal tx - let (payload, attributes) = node.new_payload(eth_payload_attributes).await?; + let (payload, attributes) = node.new_payload().await?; // clean the pool node.inner.pool.remove_transactions(vec![tx_hash]); @@ -64,28 +64,24 @@ async fn can_handle_blobs() -> eyre::Result<()> { // fetch it from rpc let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; // validate sidecar - let versioned_hashes = TransactionTestContext::validate_sidecar(envelope); + TransactionTestContext::validate_sidecar(envelope); // build a payload - let (blob_payload, blob_attr) = node.new_payload(eth_payload_attributes).await?; + let (blob_payload, blob_attr) = node.new_payload().await?; // submit the blob payload - let blob_block_hash = node - .engine_api - .submit_payload(blob_payload, blob_attr, PayloadStatusEnum::Valid, versioned_hashes.clone()) - .await?; - - let genesis_hash = b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); + let blob_block_hash = + node.engine_api.submit_payload(blob_payload, blob_attr, PayloadStatusEnum::Valid).await?; let (_, _) = tokio::join!( // send fcu with blob hash - node.engine_api.update_forkchoice(genesis_hash, blob_block_hash), + node.engine_api.update_forkchoice(MAINNET_GENESIS_HASH, blob_block_hash), // send fcu with normal hash - node.engine_api.update_forkchoice(genesis_hash, payload.block().hash()) + node.engine_api.update_forkchoice(MAINNET_GENESIS_HASH, payload.block().hash()) ); // submit normal payload - node.engine_api.submit_payload(payload, attributes, PayloadStatusEnum::Valid, vec![]).await?; + node.engine_api.submit_payload(payload, attributes, PayloadStatusEnum::Valid).await?; tokio::time::sleep(std::time::Duration::from_secs(3)).await; diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index 2ef6e08c7e..f0fcaf6452 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,31 +1,70 @@ use std::sync::Arc; +use crate::utils::eth_payload_attributes; use alloy_genesis::Genesis; use alloy_primitives::{b256, hex}; use futures::StreamExt; -use reth::core::rpc::eth::helpers::EthTransactions; +use reth::{args::DevArgs, rpc::api::eth::helpers::EthTransactions}; use reth_chainspec::ChainSpec; use reth_e2e_test_utils::setup; -use reth_provider::CanonStateSubscriptions; - -use crate::utils::EthNode; +use reth_node_api::FullNodeComponents; +use reth_node_builder::{ + rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, +}; +use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; +use reth_provider::{providers::BlockchainProvider2, CanonStateSubscriptions}; +use reth_tasks::TaskManager; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let (mut nodes, _tasks, _) = setup(1, custom_chain(), true).await?; + let (mut nodes, _tasks, _) = + setup::(1, custom_chain(), true, eth_payload_attributes).await?; + + assert_chain_advances(nodes.pop().unwrap().inner).await; + Ok(()) +} + 
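
A pattern worth calling out across these e2e changes: the payload-attributes builder moves from a per-call argument (`node.new_payload(eth_payload_attributes)`, `node.advance_block(vec![], eth_payload_attributes)`) to a constructor argument (`NodeTestContext::new(node, eth_payload_attributes)` and the extra parameter on `setup`), after which `new_payload()` and `advance_block()` take no arguments. A plain-Rust sketch of the shape, with reth's types stubbed out (all names here are illustrative only):

    // `TestContext` stands in for `NodeTestContext`; the `String` payload
    // stands in for real payload attributes.
    struct TestContext<F: Fn(u64) -> String> {
        attributes_fn: F,
        height: u64,
    }

    impl<F: Fn(u64) -> String> TestContext<F> {
        fn new(attributes_fn: F) -> Self {
            Self { attributes_fn, height: 0 }
        }

        // No per-call attributes argument: the builder stored at
        // construction time is reused for every block.
        fn advance_block(&mut self) -> String {
            self.height += 1;
            (self.attributes_fn)(self.height)
        }
    }

    fn main() {
        let mut ctx = TestContext::new(|timestamp| format!("attrs@{timestamp}"));
        assert_eq!(ctx.advance_block(), "attrs@1");
        assert_eq!(ctx.advance_block(), "attrs@2");
    }

Storing the builder once keeps each call site argument-free and lets helpers such as `advance` drive many blocks without re-threading the attributes function.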
+#[tokio::test] +async fn can_run_dev_node_new_engine() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let node_config = NodeConfig::test() + .with_chain(custom_chain()) + .with_dev(DevArgs { dev: true, ..Default::default() }); + let NodeHandle { node, .. } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .with_types_and_provider::>() + .with_components(EthereumNode::components()) + .with_add_ons(EthereumAddOns::default()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + Default::default(), + ); + builder.launch_with(launcher) + }) + .await?; + + assert_chain_advances(node).await; - assert_chain_advances(nodes.pop().unwrap()).await; Ok(()) } -async fn assert_chain_advances(node: EthNode) { - let mut notifications = node.inner.provider.canonical_state_stream(); +async fn assert_chain_advances(node: FullNode) +where + N: FullNodeComponents, + AddOns: RethRpcAddOns, +{ + let mut notifications = node.provider.canonical_state_stream(); // submit tx through rpc let raw_tx = hex!("02f876820a28808477359400847735940082520894ab0840c0e43688012c1adb0f5e3fc665188f83d28a029d394a5d630544000080c080a0a044076b7e67b5deecc63f61a8d7913fab86ca365b344b5759d1fe3563b4c39ea019eab979dd000da04dfc72bb0377c092d30fd9e1cab5ae487de49586cc8b0090"); - let eth_api = node.inner.rpc_registry.eth_api(); + let eth_api = node.rpc_registry.eth_api(); let hash = eth_api.send_raw_transaction(raw_tx.into()).await.unwrap(); diff --git a/crates/ethereum/node/tests/e2e/eth.rs b/crates/ethereum/node/tests/e2e/eth.rs index 14bfb92d47..cb7517c0c9 100644 --- a/crates/ethereum/node/tests/e2e/eth.rs +++ b/crates/ethereum/node/tests/e2e/eth.rs @@ -26,6 +26,7 @@ async fn can_run_eth_node() -> eyre::Result<()> { .build(), ), false, + eth_payload_attributes, ) .await?; @@ -36,7 +37,7 @@ async fn can_run_eth_node() -> eyre::Result<()> { let tx_hash = node.rpc.inject_tx(raw_tx).await?; // make the node advance - let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; + let (payload, _) = node.advance_block().await?; let block_hash = payload.block().hash(); let block_number = payload.block().number; @@ -74,7 +75,7 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { .node(EthereumNode::default()) .launch() .await?; - let mut node = NodeTestContext::new(node).await?; + let mut node = NodeTestContext::new(node, eth_payload_attributes).await?; // Configure wallet from test mnemonic and create dummy transfer tx let wallet = Wallet::default(); @@ -84,7 +85,7 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { let tx_hash = node.rpc.inject_tx(raw_tx).await?; // make the node advance - let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; + let (payload, _) = node.advance_block().await?; let block_hash = payload.block().hash(); let block_number = payload.block().number; @@ -120,7 +121,7 @@ async fn test_failed_run_eth_node_with_no_auth_engine_api_over_ipc_opts() -> eyr .launch() .await?; - let node = NodeTestContext::new(node).await?; + let node = NodeTestContext::new(node, eth_payload_attributes).await?; // Ensure that the engine api client is not available let client = node.inner.engine_ipc_client().await; diff --git a/crates/ethereum/node/tests/e2e/main.rs b/crates/ethereum/node/tests/e2e/main.rs index 5dff7be17e..4ed8ac5fcb 100644 --- 
a/crates/ethereum/node/tests/e2e/main.rs +++ b/crates/ethereum/node/tests/e2e/main.rs @@ -4,6 +4,7 @@ mod blobs; mod dev; mod eth; mod p2p; +mod rpc; mod utils; const fn main() {} diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index a40c1b3f4b..180b88bbd5 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -1,7 +1,19 @@ use crate::utils::eth_payload_attributes; +use alloy_consensus::TxType; +use alloy_primitives::bytes; +use alloy_provider::{ + network::{ + Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder, TransactionBuilder7702, + }, + Provider, ProviderBuilder, SendableTx, +}; +use alloy_signer::SignerSync; +use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use reth::rpc::types::TransactionRequest; use reth_chainspec::{ChainSpecBuilder, MAINNET}; -use reth_e2e_test_utils::{setup, transaction::TransactionTestContext}; +use reth_e2e_test_utils::{setup, setup_engine, transaction::TransactionTestContext}; use reth_node_ethereum::EthereumNode; +use revm::primitives::{AccessListItem, Authorization}; use std::sync::Arc; #[tokio::test] @@ -18,6 +30,7 @@ async fn can_sync() -> eyre::Result<()> { .build(), ), false, + eth_payload_attributes, ) .await?; @@ -29,7 +42,7 @@ async fn can_sync() -> eyre::Result<()> { let tx_hash = first_node.rpc.inject_tx(raw_tx).await?; // make the node advance - let (payload, _) = first_node.advance_block(vec![], eth_payload_attributes).await?; + let (payload, _) = first_node.advance_block().await?; let block_hash = payload.block().hash(); let block_number = payload.block().number; @@ -45,3 +58,115 @@ async fn can_sync() -> eyre::Result<()> { Ok(()) } + +#[tokio::test] +async fn e2e_test_send_transactions() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::thread_rng().gen(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {:?}", seed); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .prague_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(2, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let signers = wallet.gen(); + let provider = ProviderBuilder::new().with_recommended_fillers().on_http(node.rpc_url()); + + // simple contract which writes to storage on any call + let dummy_bytecode = bytes!("6080604052348015600f57600080fd5b50602880601d6000396000f3fe4360a09081523360c0526040608081905260e08152902080805500fea164736f6c6343000810000a"); + let mut call_destinations = signers.iter().map(|s| s.address()).collect::>(); + + // Produce 100 random blocks with random transactions + for _ in 0..100 { + let tx_count = rng.gen_range(1..20); + + let mut pending = vec![]; + for _ in 0..tx_count { + let signer = signers.choose(&mut rng).unwrap(); + let tx_type = TxType::try_from(rng.gen_range(0..=4)).unwrap(); + + let mut tx = TransactionRequest::default().with_from(signer.address()); + + let should_create = + rng.gen::() && tx_type != TxType::Eip4844 && tx_type != TxType::Eip7702; + if should_create { + tx = tx.into_create().with_input(dummy_bytecode.clone()); + } else { + tx = tx.with_to(*call_destinations.choose(&mut rng).unwrap()).with_input( + (0..rng.gen_range(0..10000)).map(|_| rng.gen()).collect::>(), + ); + } + + if matches!(tx_type, TxType::Legacy | TxType::Eip2930) { + tx = 
tx.with_gas_price(provider.get_gas_price().await?); + } + + if rng.gen::() || tx_type == TxType::Eip2930 { + tx = tx.with_access_list( + vec![AccessListItem { + address: *call_destinations.choose(&mut rng).unwrap(), + storage_keys: (0..rng.gen_range(0..100)).map(|_| rng.gen()).collect(), + }] + .into(), + ); + } + + if tx_type == TxType::Eip7702 { + let signer = signers.choose(&mut rng).unwrap(); + let auth = Authorization { + chain_id: provider.get_chain_id().await?, + address: *call_destinations.choose(&mut rng).unwrap(), + nonce: provider.get_transaction_count(signer.address()).await?, + }; + let sig = signer.sign_hash_sync(&auth.signature_hash())?; + tx = tx.with_authorization_list(vec![auth.into_signed(sig)]) + } + + let SendableTx::Builder(tx) = provider.fill(tx).await? else { unreachable!() }; + let tx = + NetworkWallet::::sign_request(&EthereumWallet::new(signer.clone()), tx) + .await?; + + pending.push(provider.send_tx_envelope(tx).await?); + } + + let (payload, _) = node.advance_block().await?; + assert!(payload.block().raw_transactions().len() == tx_count); + + for pending in pending { + let receipt = pending.get_receipt().await?; + if let Some(address) = receipt.contract_address { + call_destinations.push(address); + } + } + } + + let second_node = nodes.pop().unwrap(); + let second_provider = + ProviderBuilder::new().with_recommended_fillers().on_http(second_node.rpc_url()); + + assert_eq!(second_provider.get_block_number().await?, 0); + + let head = provider.get_block_by_number(Default::default(), false).await?.unwrap().header.hash; + second_node.engine_api.update_forkchoice(head, head).await?; + + let start = std::time::Instant::now(); + + while provider.get_block_number().await? != second_provider.get_block_number().await? { + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + assert!(start.elapsed() <= std::time::Duration::from_secs(10), "timed out"); + } + + Ok(()) +} diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs new file mode 100644 index 0000000000..1f7ac32e04 --- /dev/null +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -0,0 +1,267 @@ +use crate::utils::eth_payload_attributes; +use alloy_eips::{calc_next_block_base_fee, eip2718::Encodable2718}; +use alloy_primitives::{Address, B256, U256}; +use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder, SendableTx}; +use alloy_rpc_types_beacon::relay::{BidTrace, SignedBidSubmissionV3, SignedBidSubmissionV4}; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use reth::{ + payload::BuiltPayload, + rpc::{ + api::{BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4}, + compat::engine::payload::block_to_payload_v3, + types::{engine::BlobsBundleV1, TransactionRequest}, + }, +}; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_e2e_test_utils::setup_engine; +use reth_node_ethereum::EthereumNode; +use std::sync::Arc; + +alloy_sol_types::sol! 
{ + #[sol(rpc, bytecode = "6080604052348015600f57600080fd5b5060405160db38038060db833981016040819052602a91607a565b60005b818110156074576040805143602082015290810182905260009060600160408051601f19818403018152919052805160209091012080555080606d816092565b915050602d565b505060b8565b600060208284031215608b57600080fd5b5051919050565b60006001820160b157634e487b7160e01b600052601160045260246000fd5b5060010190565b60168060c56000396000f3fe6080604052600080fdfea164736f6c6343000810000a")] + contract GasWaster { + constructor(uint256 iterations) { + for (uint256 i = 0; i < iterations; i++) { + bytes32 slot = keccak256(abi.encode(block.number, i)); + assembly { + sstore(slot, slot) + } + } + } + } +} + +#[tokio::test] +async fn test_fee_history() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::thread_rng().gen(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {:?}", seed); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(EthereumWallet::new(wallet.gen().swap_remove(0))) + .on_http(node.rpc_url()); + + let fee_history = provider.get_fee_history(10, 0_u64.into(), &[]).await?; + + let genesis_base_fee = chain_spec.initial_base_fee().unwrap() as u128; + let expected_first_base_fee = genesis_base_fee - + genesis_base_fee / chain_spec.base_fee_params_at_block(0).max_change_denominator; + assert_eq!(fee_history.base_fee_per_gas[0], genesis_base_fee); + assert_eq!(fee_history.base_fee_per_gas[1], expected_first_base_fee,); + + // Spend some gas + let builder = GasWaster::deploy_builder(&provider, U256::from(500)).send().await?; + node.advance_block().await?; + let receipt = builder.get_receipt().await?; + assert!(receipt.status()); + + let block = provider.get_block_by_number(1.into(), false).await?.unwrap(); + assert_eq!(block.header.gas_used as u128, receipt.gas_used,); + assert_eq!(block.header.base_fee_per_gas.unwrap(), expected_first_base_fee as u64); + + for _ in 0..100 { + let _ = + GasWaster::deploy_builder(&provider, U256::from(rng.gen_range(0..1000))).send().await?; + + node.advance_block().await?; + } + + let latest_block = provider.get_block_number().await?; + + for _ in 0..100 { + let latest_block = rng.gen_range(0..=latest_block); + let block_count = rng.gen_range(1..=(latest_block + 1)); + + let fee_history = provider.get_fee_history(block_count, latest_block.into(), &[]).await?; + + let mut prev_header = provider + .get_block_by_number((latest_block + 1 - block_count).into(), false) + .await? 
+ .unwrap() + .header; + for block in (latest_block + 2 - block_count)..=latest_block { + let expected_base_fee = calc_next_block_base_fee( + prev_header.gas_used, + prev_header.gas_limit, + prev_header.base_fee_per_gas.unwrap(), + chain_spec.base_fee_params_at_block(block), + ); + + let header = provider.get_block_by_number(block.into(), false).await?.unwrap().header; + + assert_eq!(header.base_fee_per_gas.unwrap(), expected_base_fee as u64); + assert_eq!( + header.base_fee_per_gas.unwrap(), + fee_history.base_fee_per_gas[(block + block_count - 1 - latest_block) as usize] + as u64 + ); + + prev_header = header; + } + } + + Ok(()) +} + +#[tokio::test] +async fn test_flashbots_validate_v3() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(EthereumWallet::new(wallet.gen().swap_remove(0))) + .on_http(node.rpc_url()); + + node.advance(100, |_| { + let provider = provider.clone(); + Box::pin(async move { + let SendableTx::Envelope(tx) = + provider.fill(TransactionRequest::default().to(Address::ZERO)).await.unwrap() + else { + unreachable!() + }; + + tx.encoded_2718().into() + }) + }) + .await?; + + let _ = provider.send_transaction(TransactionRequest::default().to(Address::ZERO)).await?; + let (payload, attrs) = node.new_payload().await?; + + let mut request = BuilderBlockValidationRequestV3 { + request: SignedBidSubmissionV3 { + message: BidTrace { + parent_hash: payload.block().parent_hash, + block_hash: payload.block().hash(), + gas_used: payload.block().gas_used, + gas_limit: payload.block().gas_limit, + ..Default::default() + }, + execution_payload: block_to_payload_v3(payload.block().clone()), + blobs_bundle: BlobsBundleV1::new([]), + signature: Default::default(), + }, + parent_beacon_block_root: attrs.parent_beacon_block_root.unwrap(), + registered_gas_limit: payload.block().gas_limit, + }; + + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,)) + .await + .is_ok()); + + request.registered_gas_limit -= 1; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,)) + .await + .is_err()); + request.registered_gas_limit += 1; + + request.request.execution_payload.payload_inner.payload_inner.state_root = B256::ZERO; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,)) + .await + .is_err()); + Ok(()) +} + +#[tokio::test] +async fn test_flashbots_validate_v4() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .prague_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(EthereumWallet::new(wallet.gen().swap_remove(0))) + .on_http(node.rpc_url()); + + node.advance(100, |_| { + let provider = provider.clone(); + Box::pin(async 
move { + let SendableTx::Envelope(tx) = + provider.fill(TransactionRequest::default().to(Address::ZERO)).await.unwrap() + else { + unreachable!() + }; + + tx.encoded_2718().into() + }) + }) + .await?; + + let _ = provider.send_transaction(TransactionRequest::default().to(Address::ZERO)).await?; + let (payload, attrs) = node.new_payload().await?; + + let mut request = BuilderBlockValidationRequestV4 { + request: SignedBidSubmissionV4 { + message: BidTrace { + parent_hash: payload.block().parent_hash, + block_hash: payload.block().hash(), + gas_used: payload.block().gas_used, + gas_limit: payload.block().gas_limit, + ..Default::default() + }, + execution_payload: block_to_payload_v3(payload.block().clone()), + blobs_bundle: BlobsBundleV1::new([]), + execution_requests: payload.requests().unwrap_or_default().to_vec(), + signature: Default::default(), + }, + parent_beacon_block_root: attrs.parent_beacon_block_root.unwrap(), + registered_gas_limit: payload.block().gas_limit, + }; + + provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,)) + .await + .expect("request should validate"); + + request.registered_gas_limit -= 1; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,)) + .await + .is_err()); + request.registered_gas_limit += 1; + + request.request.execution_payload.payload_inner.payload_inner.state_root = B256::ZERO; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,)) + .await + .is_err()); + Ok(()) +} diff --git a/crates/ethereum/node/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs index 5a79509991..6e534f5dc0 100644 --- a/crates/ethereum/node/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -1,12 +1,7 @@ use alloy_primitives::{Address, B256}; use reth::rpc::types::engine::PayloadAttributes; -use reth_e2e_test_utils::NodeHelperType; -use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; use reth_payload_builder::EthPayloadBuilderAttributes; -/// Ethereum Node Helper type -pub(crate) type EthNode = NodeHelperType; - /// Helper function to create a new eth payload attributes pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttributes { let attributes = PayloadAttributes { diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index f169d58f7e..443e837b2e 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -33,6 +33,8 @@ revm.workspace = true revm-primitives.workspace = true # alloy +alloy-eips.workspace = true +alloy-consensus.workspace = true alloy-primitives.workspace = true # misc diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index dc997aa962..65106b00d5 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -9,6 +9,8 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(clippy::useless_let_if_seq)] +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_eips::{eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::Requests, merge::BEACON_NONCE}; use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, @@ -23,15 +25,15 @@ use reth_execution_types::ExecutionOutcome; use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes}; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ - 
constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE}, - proofs::{self, calculate_requests_root}, + proofs::{self}, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, EthereumHardforks, Header, Receipt, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, EthereumHardforks, Header, Receipt, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool, + noop::NoopTransactionPool, BestTransactions, BestTransactionsAttributes, TransactionPool, + ValidPoolTransaction, }; use reth_trie::HashedPostState; use revm::{ @@ -43,6 +45,10 @@ use revm_primitives::calc_excess_blob_gas; use std::sync::Arc; use tracing::{debug, trace, warn}; +type BestTransactionsIter = Box< + dyn BestTransactions::Transaction>>>, +>; + /// Ethereum payload builder #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct EthereumPayloadBuilder { @@ -67,7 +73,7 @@ where &self, config: &PayloadConfig, parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { let next_attributes = NextBlockEnvAttributes { timestamp: config.attributes.timestamp(), suggested_fee_recipient: config.attributes.suggested_fee_recipient(), @@ -91,8 +97,14 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env) + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; + + let pool = args.pool.clone(); + default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env, |attributes| { + pool.best_transactions_with_attributes(attributes) + }) } fn build_empty_payload( @@ -100,19 +112,27 @@ where client: &Client, config: PayloadConfig, ) -> Result { - let args = BuildArguments { + let args = BuildArguments::new( client, - config, // we use defaults here because for the empty payload we don't need to execute anything - pool: NoopTransactionPool::default(), - cached_reads: Default::default(), - cancel: Default::default(), - best_payload: None, - }; - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env)? - .into_payload() - .ok_or_else(|| PayloadBuilderError::MissingPayload) + NoopTransactionPool::default(), + Default::default(), + config, + Default::default(), + None, + ); + + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; + + let pool = args.pool.clone(); + + default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env, |attributes| { + pool.best_transactions_with_attributes(attributes) + })? + .into_payload() + .ok_or_else(|| PayloadBuilderError::MissingPayload) } } @@ -122,27 +142,29 @@ where /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. #[inline] -pub fn default_ethereum_payload( +pub fn default_ethereum_payload( evm_config: EvmConfig, args: BuildArguments, initialized_cfg: CfgEnvWithHandlerCfg, initialized_block_env: BlockEnv, + best_txs: F, ) -> Result, PayloadBuilderError> where EvmConfig: ConfigureEvm
, Client: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, + F: FnOnce(BestTransactionsAttributes) -> BestTransactionsIter, { let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let chain_spec = client.chain_spec(); - let state_provider = client.state_by_block_hash(config.parent_block.hash())?; + let state_provider = client.state_by_block_hash(config.parent_header.hash())?; let state = StateProviderDatabase::new(state_provider); let mut db = - State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); - let PayloadConfig { parent_block, extra_data, attributes } = config; + State::builder().with_database(cached_reads.as_db_mut(state)).with_bundle_update().build(); + let PayloadConfig { parent_header, extra_data, attributes } = config; - debug!(target: "payload_builder", id=%attributes.id, parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); + debug!(target: "payload_builder", id=%attributes.id, parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; let block_gas_limit: u64 = initialized_block_env.gas_limit.to::(); @@ -151,16 +173,15 @@ where let mut executed_txs = Vec::new(); let mut executed_senders = Vec::new(); - let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new( + let mut best_txs = best_txs(BestTransactionsAttributes::new( base_fee, initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), )); - let mut total_fees = U256::ZERO; let block_number = initialized_block_env.number.to::(); - let mut system_caller = SystemCaller::new(&evm_config, chain_spec.clone()); + let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); // apply eip-4788 pre block contract call system_caller @@ -172,7 +193,7 @@ where ) .map_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_hash=%parent_header.hash(), %err, "failed to apply beacon root contract call for payload" ); @@ -184,10 +205,10 @@ where &mut db, &initialized_cfg, &initialized_block_env, - parent_block.hash(), + parent_header.hash(), ) .map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to update blockhashes for payload"); + warn!(target: "payload_builder", parent_hash=%parent_header.hash(), %err, "failed to update parent header blockhashes for payload"); PayloadBuilderError::Internal(err.into()) })?; @@ -228,7 +249,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env( initialized_cfg.clone(), initialized_block_env.clone(), - evm_config.tx_env(&tx), + evm_config.tx_env(tx.as_signed(), tx.signer()), ); // Configure the environment for the block. 
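
The fee-history test earlier in this diff validates each header against `calc_next_block_base_fee`, and the payload code above feeds the resulting `base_fee` into `BestTransactionsAttributes`. For reference, the EIP-1559 update rule being exercised, written out as a self-contained sketch (mainnet parameters assumed: elasticity multiplier 2, max change denominator 8; this is illustrative, not reth's implementation):

    // EIP-1559 base-fee update rule (illustrative sketch).
    fn next_base_fee(gas_used: u64, gas_limit: u64, base_fee: u64) -> u64 {
        let target = gas_limit / 2; // gas_limit / elasticity_multiplier
        if gas_used > target {
            // Blocks above target raise the fee by at least 1 wei.
            let delta =
                (base_fee as u128 * (gas_used - target) as u128 / target as u128 / 8) as u64;
            base_fee + delta.max(1)
        } else if gas_used < target {
            let delta =
                (base_fee as u128 * (target - gas_used) as u128 / target as u128 / 8) as u64;
            base_fee - delta
        } else {
            base_fee
        }
    }

An empty block therefore lowers the fee by `base_fee / 8`, which is exactly the `expected_first_base_fee = genesis_base_fee - genesis_base_fee / max_change_denominator` value computed in the test.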
@@ -307,9 +328,7 @@ where } // calculate the requests and the requests root - let (requests, requests_root) = if chain_spec - .is_prague_active_at_timestamp(attributes.timestamp) - { + let requests = if chain_spec.is_prague_active_at_timestamp(attributes.timestamp) { let deposit_requests = parse_deposits_from_receipts(&chain_spec, receipts.iter().flatten()) .map_err(|err| PayloadBuilderError::Internal(RethError::Execution(err.into())))?; let withdrawal_requests = system_caller @@ -327,11 +346,9 @@ where ) .map_err(|err| PayloadBuilderError::Internal(err.into()))?; - let requests = [deposit_requests, withdrawal_requests, consolidation_requests].concat(); - let requests_root = calculate_requests_root(&requests); - (Some(requests.into()), Some(requests_root)) + Some(Requests::new(vec![deposit_requests, withdrawal_requests, consolidation_requests])) } else { - (None, None) + None }; let WithdrawalsOutcome { withdrawals_root, withdrawals } = @@ -341,9 +358,10 @@ where // and 4788 contract call db.merge_transitions(BundleRetention::Reverts); + let requests_hash = requests.as_ref().map(|requests| requests.requests_hash()); let execution_outcome = ExecutionOutcome::new( db.take_bundle(), - vec![receipts.clone()].into(), + vec![receipts].into(), block_number, vec![requests.clone().unwrap_or_default()], ); @@ -354,10 +372,9 @@ where // calculate the state root let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); let (state_root, trie_output) = { - let state_provider = db.database.0.inner.borrow_mut(); - state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { + db.database.inner().state_root_with_updates(hashed_state.clone()).inspect_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_hash=%parent_header.hash(), %err, "failed to calculate state root for payload" ); @@ -379,9 +396,9 @@ where executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(), )?; - excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { - let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); - let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); + excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_header.timestamp) { + let parent_excess_blob_gas = parent_header.excess_blob_gas.unwrap_or_default(); + let parent_blob_gas_used = parent_header.blob_gas_used.unwrap_or_default(); Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) } else { // for the first post-fork block, both parent.blob_gas_used and @@ -393,7 +410,7 @@ where } let header = Header { - parent_hash: parent_block.hash(), + parent_hash: parent_header.hash(), ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: initialized_block_env.coinbase, state_root, @@ -405,7 +422,7 @@ where mix_hash: attributes.prev_randao, nonce: BEACON_NONCE.into(), base_fee_per_gas: Some(base_fee), - number: parent_block.number + 1, + number: parent_header.number + 1, gas_limit: block_gas_limit, difficulty: U256::ZERO, gas_used: cumulative_gas_used, @@ -413,19 +430,13 @@ where parent_beacon_block_root: attributes.parent_beacon_block_root, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), - requests_root, + requests_hash, }; // seal the block let block = Block { header, - body: BlockBody { - transactions: executed_txs, - ommers: vec![], - withdrawals, - sidecars: None, - requests, - }, + body: BlockBody { transactions: 
executed_txs, ommers: vec![], withdrawals, sidecars: None }, }; let sealed_block = block.seal_slow(); @@ -440,10 +451,11 @@ where trie: Arc::new(trie_output), }; - let mut payload = EthBuiltPayload::new(attributes.id, sealed_block, total_fees, Some(executed)); + let mut payload = + EthBuiltPayload::new(attributes.id, sealed_block, total_fees, Some(executed), requests); // extend the payload with the blob sidecars from the executed txs - payload.extend_sidecars(blob_sidecars); + payload.extend_sidecars(blob_sidecars.into_iter().map(Arc::unwrap_or_clone)); Ok(BuildOutcome::Better { payload, cached_reads }) } diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 1a82ebd554..9b7a5e2174 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -13,16 +13,19 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-consensus.workspace = true +reth-consensus-common.workspace = true reth-execution-errors.workspace = true +reth-execution-types.workspace = true +reth-metrics = { workspace = true, optional = true } reth-primitives.workspace = true reth-primitives-traits.workspace = true -revm-primitives.workspace = true reth-prune-types.workspace = true -reth-metrics = { workspace = true, optional = true } +reth-revm.workspace = true reth-storage-errors.workspace = true -reth-execution-types.workspace = true revm.workspace = true +revm-primitives.workspace = true # alloy alloy-primitives.workspace = true @@ -36,8 +39,31 @@ tokio = { workspace = true, features = ["sync", "time"] } [dev-dependencies] parking_lot.workspace = true +reth-ethereum-forks.workspace = true +alloy-consensus.workspace = true [features] default = ["std"] -std = ["dep:metrics", "dep:reth-metrics"] -test-utils = ["dep:parking_lot"] +std = [ + "dep:metrics", + "dep:reth-metrics", + "reth-consensus/std", + "reth-primitives/std", + "reth-primitives-traits/std", + "reth-revm/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-consensus/std", + "revm-primitives/std", + "revm/std", +] +test-utils = [ + "dep:parking_lot", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-revm/test-utils", + "revm/test-utils", + "reth-prune-types/test-utils" +] diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml index d4f8534e75..721c805511 100644 --- a/crates/evm/execution-errors/Cargo.toml +++ b/crates/evm/execution-errors/Cargo.toml @@ -26,4 +26,9 @@ derive_more.workspace = true [features] default = ["std"] -std = ["reth-consensus/std"] +std = [ + "reth-consensus/std", + "alloy-eips/std", + "alloy-primitives/std", + "revm-primitives/std" +] diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 9bd6537326..b6af3dee9a 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -25,7 +25,6 @@ serde = { workspace = true, optional = true } serde_with = { workspace = true, optional = true } [dev-dependencies] -alloy-eips.workspace = true arbitrary.workspace = true bincode.workspace = true rand.workspace = true @@ -34,6 +33,24 @@ reth-primitives = { workspace = true, features = ["arbitrary", "test-utils"] } [features] default = ["std"] optimism = ["reth-primitives/optimism", "revm/optimism"] -serde = ["dep:serde", "reth-trie/serde", "revm/serde"] -serde-bincode-compat = ["reth-primitives/serde-bincode-compat", "reth-trie/serde-bincode-compat", "serde_with"] -std = [] +serde = [ + "dep:serde", + 
"reth-trie/serde", + "revm/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "rand/serde" +] +serde-bincode-compat = [ + "reth-primitives/serde-bincode-compat", + "reth-trie/serde-bincode-compat", + "serde_with", + "alloy-eips/serde-bincode-compat" +] +std = [ + "reth-primitives/std", + "alloy-eips/std", + "alloy-primitives/std", + "revm/std", + "serde?/std" +] diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 77f082bb4f..c997889fc1 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -52,7 +52,7 @@ impl Chain { execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { - let blocks = BTreeMap::from_iter(blocks.into_iter().map(|b| (b.number, b))); + let blocks = blocks.into_iter().map(|b| (b.number, b)).collect::>(); debug_assert!(!blocks.is_empty(), "Chain should have at least one block"); Self { blocks, execution_outcome, trie_updates } @@ -228,11 +228,11 @@ impl Chain { /// /// Attachment includes block number, block hash, transaction hash and transaction index. pub fn receipts_with_attachment(&self) -> Vec { - let mut receipt_attach = Vec::new(); + let mut receipt_attach = Vec::with_capacity(self.blocks().len()); for ((block_num, block), receipts) in self.blocks().iter().zip(self.execution_outcome.receipts().iter()) { - let mut tx_receipts = Vec::new(); + let mut tx_receipts = Vec::with_capacity(receipts.len()); for (tx, receipt) in block.body.transactions().zip(receipts.iter()) { tx_receipts.push(( tx.hash(), @@ -453,7 +453,7 @@ impl IntoIterator for ChainBlocks<'_> { } /// Used to hold receipts and their attachment. -#[derive(Default, Clone, Debug)] +#[derive(Default, Clone, Debug, PartialEq, Eq)] pub struct BlockReceipts { /// Block identifier pub block: BlockNumHash, @@ -656,7 +656,6 @@ pub(super) mod serde_bincode_compat { mod tests { use super::*; use alloy_primitives::B256; - use reth_primitives::{Receipt, Receipts, TxType}; use revm::primitives::{AccountInfo, HashMap}; #[test] @@ -789,7 +788,10 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn receipts_by_block_hash() { + use reth_primitives::{Receipt, Receipts, TxType}; + // Create a default SealedBlockWithSenders object let block = SealedBlockWithSenders::default(); @@ -811,10 +813,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create another random receipt object, receipt2 @@ -823,10 +821,6 @@ mod tests { cumulative_gas_used: 1325345, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object with a vector of receipt vectors diff --git a/crates/evm/execution-types/src/execute.rs b/crates/evm/execution-types/src/execute.rs index 2bb8c58e47..0fa012cb85 100644 --- a/crates/evm/execution-types/src/execute.rs +++ b/crates/evm/execution-types/src/execute.rs @@ -1,5 +1,6 @@ +use alloy_eips::eip7685::Requests; use alloy_primitives::{map::HashMap, B256, U256}; -use reth_primitives::{parlia::Snapshot, Request}; +use reth_primitives::parlia::Snapshot; use revm::db::BundleState; /// A helper type for ethereum block inputs that consists of a block and the total difficulty. @@ -47,8 +48,8 @@ pub struct BlockExecutionOutput { pub state: BundleState, /// All the receipts of the transactions in the block. 
pub receipts: Vec, - /// All the EIP-7685 requests of the transactions in the block. - pub requests: Vec, + /// All the EIP-7685 requests in the block. + pub requests: Requests, /// The total gas used by the block. pub gas_used: u64, diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 74c7a3b21b..e73fe6aa0d 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -1,7 +1,8 @@ use crate::BlockExecutionOutput; +use alloy_eips::eip7685::Requests; use alloy_primitives::{Address, BlockNumber, Bloom, Log, B256, U256}; use reth_primitives::{ - logs_bloom, parlia::Snapshot, Account, Bytecode, Receipt, Receipts, Requests, StorageEntry, + logs_bloom, parlia::Snapshot, Account, Bytecode, Receipt, Receipts, StorageEntry, }; use reth_trie::HashedPostState; use revm::{ @@ -186,7 +187,7 @@ impl ExecutionOutcome { } /// Transform block number to the index of block. - fn block_number_to_index(&self, block_number: BlockNumber) -> Option { + pub fn block_number_to_index(&self, block_number: BlockNumber) -> Option { if self.first_block > block_number { return None } @@ -376,7 +377,7 @@ impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { bundle: value.0.state, receipts: Receipts::from(value.0.receipts), first_block: value.1, - requests: vec![Requests::from(value.0.requests)], + requests: vec![value.0.requests], snapshots: vec![value.0.snapshot.unwrap_or_default()], } } @@ -385,12 +386,15 @@ impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { #[cfg(test)] mod tests { use super::*; - use alloy_eips::{eip6110::DepositRequest, eip7002::WithdrawalRequest}; - use alloy_primitives::{Address, FixedBytes, LogData, B256}; - use reth_primitives::{Receipts, Request, Requests, TxType}; - use std::collections::HashMap; + #[cfg(not(feature = "optimism"))] + use alloy_primitives::bytes; + use alloy_primitives::{Address, B256}; + use reth_primitives::Receipts; + #[cfg(not(feature = "optimism"))] + use reth_primitives::{LogData, TxType}; #[test] + #[cfg(not(feature = "optimism"))] fn test_initialisation() { // Create a new BundleState object with initial data let bundle = BundleState::new( @@ -406,36 +410,11 @@ mod tests { cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; - // Create a Requests object with a vector of requests, including DepositRequest and - // WithdrawalRequest - let requests = vec![Requests(vec![ - Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([1; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 1111, - signature: FixedBytes::<96>::from([2; 96]), - index: 222, - }), - Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([23; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 34343, - signature: FixedBytes::<96>::from([43; 96]), - index: 1212, - }), - Request::WithdrawalRequest(WithdrawalRequest { - source_address: Address::from([1; 20]), - validator_pubkey: FixedBytes::<48>::from([10; 48]), - amount: 72, - }), - ])]; + // Create a Requests object with a vector of requests + let requests = vec![Requests::new(vec![bytes!("dead"), bytes!("beef"), bytes!("beebee")])]; // Define the first block number let first_block = 123; @@ -485,6 +464,7 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn 
test_block_number_to_index() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { @@ -493,10 +473,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -524,6 +500,7 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_get_logs() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { @@ -532,10 +509,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -560,6 +533,7 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_receipts_by_block() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { @@ -568,10 +542,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -599,15 +569,12 @@ mod tests { cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })] ); } #[test] + #[cfg(not(feature = "optimism"))] fn test_receipts_len() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { @@ -616,10 +583,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -662,6 +625,7 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_revert_to() { // Create a random receipt object let receipt = Receipt { @@ -669,10 +633,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object with a vector of receipt vectors @@ -683,17 +643,12 @@ mod tests { // Define the first block number let first_block = 123; - // Create a DepositRequest object with specific attributes. - let request = Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([1; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 1111, - signature: FixedBytes::<96>::from([2; 96]), - index: 222, - }); + // Create a request. + let request = bytes!("deadbeef"); // Create a vector of Requests containing the request. - let requests = vec![Requests(vec![request]), Requests(vec![request])]; + let requests = + vec![Requests::new(vec![request.clone()]), Requests::new(vec![request.clone()])]; // Create a ExecutionOutcome object with the created bundle, receipts, requests, and // first_block @@ -712,7 +667,7 @@ mod tests { assert_eq!(exec_res.receipts, Receipts { receipt_vec: vec![vec![Some(receipt)]] }); // Assert that the requests are properly cut after reverting to the initial block number. - assert_eq!(exec_res.requests, vec![Requests(vec![request])]); + assert_eq!(exec_res.requests, vec![Requests::new(vec![request])]); // Assert that the revert_to method returns false when attempting to revert to a block // number greater than the initial block number. 
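The test rewrites above and below all follow from one migration: EIP-7685 requests now travel through execution as opaque, already-encoded byte blobs (`alloy_eips::eip7685::Requests`) instead of parsed `Request` enum variants. A minimal sketch of the new shape, using only APIs that appear in this diff (`Requests::new`, `Requests::default`, `Requests::extend`, and the `bytes!` macro); the payloads are placeholder bytes, not real encoded requests:

use alloy_eips::eip7685::Requests;
use alloy_primitives::bytes;

fn requests_shape_sketch() {
    // One `Bytes` blob per request; the contents are opaque to the execution layer.
    let block_requests = Requests::new(vec![bytes!("dead"), bytes!("beef")]);

    // Batch execution folds per-block requests into a single accumulated set,
    // the same way `MockExecutorProvider` combines them later in this diff.
    let mut accumulated = Requests::default();
    accumulated.extend(block_requests);
}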
@@ -724,6 +679,7 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_extend_execution_outcome() { // Create a Receipt object with specific attributes. let receipt = Receipt { @@ -731,26 +687,16 @@ mod tests { cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object containing the receipt. let receipts = Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }; - // Create a DepositRequest object with specific attributes. - let request = Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([1; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 1111, - signature: FixedBytes::<96>::from([2; 96]), - index: 222, - }); + // Create a request. + let request = bytes!("deadbeef"); // Create a vector of Requests containing the request. - let requests = vec![Requests(vec![request])]; + let requests = vec![Requests::new(vec![request.clone()])]; // Define the initial block number. let first_block = 123; @@ -775,7 +721,7 @@ mod tests { receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]] }, - requests: vec![Requests(vec![request]), Requests(vec![request])], + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], first_block: 123, snapshots: vec![], } @@ -783,6 +729,7 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_split_at_execution_outcome() { // Create a random receipt object let receipt = Receipt { @@ -790,10 +737,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object with a vector of receipt vectors @@ -808,18 +751,15 @@ mod tests { // Define the first block number let first_block = 123; - // Create a DepositRequest object with specific attributes. - let request = Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([1; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 1111, - signature: FixedBytes::<96>::from([2; 96]), - index: 222, - }); + // Create a request. + let request = bytes!("deadbeef"); // Create a vector of Requests containing the request. 
- let requests = - vec![Requests(vec![request]), Requests(vec![request]), Requests(vec![request])]; + let requests = vec![ + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + ]; // Create a ExecutionOutcome object with the created bundle, receipts, requests, and // first_block @@ -838,7 +778,7 @@ mod tests { let lower_execution_outcome = ExecutionOutcome { bundle: Default::default(), receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }, - requests: vec![Requests(vec![request])], + requests: vec![Requests::new(vec![request.clone()])], first_block, snapshots: vec![], }; @@ -849,7 +789,7 @@ mod tests { receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]], }, - requests: vec![Requests(vec![request]), Requests(vec![request])], + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], first_block: 124, snapshots: vec![], }; diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index 1824c66398..0f19059ffe 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -44,13 +44,17 @@ where } } - fn batch_executor(&self, db: DB) -> Self::BatchExecutor + fn batch_executor( + &self, + db: DB, + prefetch_tx: Option>, + ) -> Self::BatchExecutor where DB: Database + Display>, { match self { - Self::Left(a) => Either::Left(a.batch_executor(db)), - Self::Right(b) => Either::Right(b.batch_executor(db)), + Self::Left(a) => Either::Left(a.batch_executor(db, prefetch_tx)), + Self::Right(b) => Either::Right(b.batch_executor(db, prefetch_tx)), } } } @@ -102,7 +106,7 @@ where state_hook: F, ) -> Result where - F: OnStateHook, + F: OnStateHook + 'static, { match self { Self::Left(a) => a.execute_with_state_hook(input, state_hook), diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 6d64bd47da..ffda21e583 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,22 +1,27 @@ //! Traits for execution. // Re-export execution types +use crate::system_calls::OnStateHook; +use alloc::{boxed::Box, vec::Vec}; +use alloy_eips::eip7685::Requests; +use alloy_primitives::BlockNumber; +use core::{fmt::Display, marker::PhantomData}; +use reth_consensus::ConsensusError; pub use reth_execution_errors::{ BlockExecutionError, BlockValidationError, InternalBlockExecutionError, }; pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; -pub use reth_storage_errors::provider::ProviderError; - -use alloy_primitives::BlockNumber; -use core::fmt::Display; use reth_primitives::{BlockWithSenders, Header, Receipt}; use reth_prune_types::PruneModes; -use revm::State; -use revm_primitives::{db::Database, EvmState}; +use reth_revm::batch::BlockBatchRecord; +pub use reth_storage_errors::provider::ProviderError; +use revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + State, +}; +use revm_primitives::{db::Database, EvmState, U256}; use tokio::sync::mpsc::UnboundedSender; -use crate::system_calls::OnStateHook; - /// A general purpose executor trait that executes an input (e.g. block) and produces an output /// (e.g. state changes and receipts). 
/// @@ -57,7 +62,7 @@ pub trait Executor { state_hook: F, ) -> Result where - F: OnStateHook; + F: OnStateHook + 'static; } /// A general purpose executor that can execute multiple inputs in sequence, validate the outputs, @@ -154,7 +159,7 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { fn executor( &self, db: DB, - prefetch_rx: Option>, + prefetch_tx: Option>, ) -> Self::Executor where DB: Database + Display>; @@ -163,17 +168,351 @@ /// /// Batch executor is used to execute multiple blocks in sequence and keep track of the state /// during historical sync which involves executing multiple blocks in sequence. - fn batch_executor(&self, db: DB) -> Self::BatchExecutor + fn batch_executor( + &self, + db: DB, + prefetch_tx: Option>, + ) -> Self::BatchExecutor + where + DB: Database + Display>; +} + +/// Helper type for the output of executing a block. +#[derive(Debug, Clone)] +pub struct ExecuteOutput { + /// Receipts obtained after executing a block. + pub receipts: Vec, + /// Cumulative gas used in the block execution. + pub gas_used: u64, +} + +/// Defines the strategy for executing a single block. +pub trait BlockExecutionStrategy +where + DB: Database, +{ + /// The error type returned by this strategy's methods. + type Error: From + core::error::Error; + + /// Applies any necessary changes before executing the block's transactions. + fn apply_pre_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(), Self::Error>; + + /// Executes all transactions in the block. + fn execute_transactions( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + tx: Option>, + ) -> Result; + + /// Applies any necessary changes after executing the block's transactions. + fn apply_post_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + receipts: &[Receipt], + ) -> Result; + + /// Returns a reference to the current state. + fn state_ref(&self) -> &State; + + /// Returns a mutable reference to the current state. + fn state_mut(&mut self) -> &mut State; + + /// Sets a hook to be called after each state change during execution. + fn with_state_hook(&mut self, _hook: Option>) {} + + /// Returns the final bundle state. + fn finish(&mut self) -> BundleState { + self.state_mut().merge_transitions(BundleRetention::Reverts); + self.state_mut().take_bundle() + } + + /// Validate a block with regard to execution results. + fn validate_block_post_execution( + &self, + _block: &BlockWithSenders, + _receipts: &[Receipt], + _requests: &Requests, + ) -> Result<(), ConsensusError> { + Ok(()) + } +} + +/// A strategy factory that can create block execution strategies. +pub trait BlockExecutionStrategyFactory: Send + Sync + Clone + Unpin + 'static { + /// Associated strategy type. + type Strategy + Display>>: BlockExecutionStrategy< + DB, + Error = BlockExecutionError, + >; + + /// Creates a strategy using the given database. + fn create_strategy(&self, db: DB) -> Self::Strategy where DB: Database + Display>; } +impl Clone for BasicBlockExecutorProvider +where + F: Clone, +{ + fn clone(&self) -> Self { + Self { strategy_factory: self.strategy_factory.clone() } + } +} + +/// A generic block executor provider that can create executors using a strategy factory.
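Before the `BasicBlockExecutorProvider` definition that follows, a hypothetical wiring sketch of how the strategy abstraction is meant to be consumed; `MyStrategyFactory`, `db`, `batch_db`, and `block` are invented bindings, while the calls mirror the trait signatures above and the tests later in this file:

// Hypothetical: `MyStrategyFactory` implements `BlockExecutionStrategyFactory`.
let provider = BasicBlockExecutorProvider::new(MyStrategyFactory::default());

// One-shot executor; passing `None` opts out of the prefetch channel.
let executor = provider.executor(db, None);
let output = executor.execute(BlockExecutionInput::new(&block, U256::ZERO, None))?;

// Batch executor, used during historical sync across many blocks.
let mut batch = provider.batch_executor(batch_db, None);
batch.execute_and_verify_one(BlockExecutionInput::new(&block, U256::ZERO, None))?;
let outcome = batch.finalize();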
+#[allow(missing_debug_implementations)] +pub struct BasicBlockExecutorProvider { + strategy_factory: F, +} + +impl BasicBlockExecutorProvider { + /// Creates a new `BasicBlockExecutorProvider` with the given strategy factory. + pub const fn new(strategy_factory: F) -> Self { + Self { strategy_factory } + } +} + +impl BlockExecutorProvider for BasicBlockExecutorProvider +where + F: BlockExecutionStrategyFactory, +{ + type Executor + Display>> = + BasicBlockExecutor, DB>; + + type BatchExecutor + Display>> = + BasicBatchExecutor, DB>; + + fn executor( + &self, + db: DB, + prefetch_tx: Option>, + ) -> Self::Executor + where + DB: Database + Display>, + { + let strategy = self.strategy_factory.create_strategy(db); + BasicBlockExecutor::new(strategy, prefetch_tx) + } + + fn batch_executor( + &self, + db: DB, + prefetch_tx: Option>, + ) -> Self::BatchExecutor + where + DB: Database + Display>, + { + let strategy = self.strategy_factory.create_strategy(db); + let batch_record = BlockBatchRecord::default(); + BasicBatchExecutor::new(strategy, batch_record, prefetch_tx) + } +} + +/// A generic block executor that uses a [`BlockExecutionStrategy`] to +/// execute blocks. +#[allow(missing_debug_implementations, dead_code)] +pub struct BasicBlockExecutor +where + S: BlockExecutionStrategy, + DB: Database, +{ + /// Block execution strategy. + pub(crate) strategy: S, + /// Channel to send prefetch requests. + pub(crate) prefetch_tx: Option>, + _phantom: PhantomData, +} + +impl BasicBlockExecutor +where + S: BlockExecutionStrategy, + DB: Database, +{ + /// Creates a new `BasicBlockExecutor` with the given strategy. + pub const fn new(strategy: S, prefetch_tx: Option>) -> Self { + Self { strategy, prefetch_tx, _phantom: PhantomData } + } +} + +// TODO: FIX BasicBlockExecutor and remove this comment +impl Executor for BasicBlockExecutor +where + S: BlockExecutionStrategy, + DB: Database + Display>, +{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders, Header>; + type Output = BlockExecutionOutput; + type Error = S::Error; + + fn execute(mut self, input: Self::Input<'_>) -> Result { + let BlockExecutionInput { block, total_difficulty, .. } = input; + + self.strategy.apply_pre_execution_changes(block, total_difficulty)?; + let ExecuteOutput { receipts, gas_used } = + self.strategy.execute_transactions(block, total_difficulty, self.prefetch_tx)?; + let requests = + self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; + let state = self.strategy.finish(); + + Ok(BlockExecutionOutput { state, receipts, requests, gas_used, snapshot: None }) + } + + fn execute_with_state_closure( + mut self, + input: Self::Input<'_>, + mut state: F, + ) -> Result + where + F: FnMut(&State), + { + let BlockExecutionInput { block, total_difficulty, .. } = input; + + self.strategy.apply_pre_execution_changes(block, total_difficulty)?; + let ExecuteOutput { receipts, gas_used } = + self.strategy.execute_transactions(block, total_difficulty, self.prefetch_tx)?; + let requests = + self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; + + state(self.strategy.state_ref()); + + let state = self.strategy.finish(); + + Ok(BlockExecutionOutput { state, receipts, requests, gas_used, snapshot: None }) + } + + fn execute_with_state_hook( + mut self, + input: Self::Input<'_>, + state_hook: H, + ) -> Result + where + H: OnStateHook + 'static, + { + let BlockExecutionInput { block, total_difficulty, .. 
} = input; + + self.strategy.with_state_hook(Some(Box::new(state_hook))); + + self.strategy.apply_pre_execution_changes(block, total_difficulty)?; + let ExecuteOutput { receipts, gas_used } = + self.strategy.execute_transactions(block, total_difficulty, self.prefetch_tx)?; + let requests = + self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; + + let state = self.strategy.finish(); + + Ok(BlockExecutionOutput { state, receipts, requests, gas_used, snapshot: None }) + } +} + +/// A generic batch executor that uses a [`BlockExecutionStrategy`] to +/// execute batches. +#[allow(missing_debug_implementations)] +pub struct BasicBatchExecutor +where + S: BlockExecutionStrategy, + DB: Database, +{ + /// Batch execution strategy. + pub(crate) strategy: S, + /// Keeps track of batch execution receipts and requests. + pub(crate) batch_record: BlockBatchRecord, + /// Channel to send prefetch requests. + pub(crate) prefetch_tx: Option>, + _phantom: PhantomData, +} + +impl BasicBatchExecutor +where + S: BlockExecutionStrategy, + DB: Database, +{ + /// Creates a new `BasicBatchExecutor` with the given strategy. + pub const fn new( + strategy: S, + batch_record: BlockBatchRecord, + prefetch_tx: Option>, + ) -> Self { + Self { strategy, batch_record, prefetch_tx, _phantom: PhantomData } + } +} + +impl BatchExecutor for BasicBatchExecutor +where + S: BlockExecutionStrategy, + DB: Database + Display>, +{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders, Header>; + type Output = ExecutionOutcome; + type Error = BlockExecutionError; + + fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + let BlockExecutionInput { block, total_difficulty, .. } = input; + + if self.batch_record.first_block().is_none() { + self.batch_record.set_first_block(block.number); + } + + self.strategy.apply_pre_execution_changes(block, total_difficulty)?; + let ExecuteOutput { receipts, .. 
} = self.strategy.execute_transactions( + block, + total_difficulty, + self.prefetch_tx.clone(), + )?; + let requests = + self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; + + self.strategy.validate_block_post_execution(block, &receipts, &requests)?; + + // prepare the state according to the prune mode + let retention = self.batch_record.bundle_retention(block.number); + self.strategy.state_mut().merge_transitions(retention); + + // store receipts in the set + self.batch_record.save_receipts(receipts)?; + + // store requests in the set + self.batch_record.save_requests(requests); + + Ok(()) + } + + fn finalize(mut self) -> Self::Output { + ExecutionOutcome::new( + self.strategy.state_mut().take_bundle(), + self.batch_record.take_receipts(), + self.batch_record.first_block().unwrap_or_default(), + self.batch_record.take_requests(), + ) + } + + fn set_tip(&mut self, tip: BlockNumber) { + self.batch_record.set_tip(tip); + } + + fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.batch_record.set_prune_modes(prune_modes); + } + + fn size_hint(&self) -> Option { + Some(self.strategy.state_ref().bundle_state.size_hint()) + } +} + #[cfg(test)] mod tests { use super::*; use alloy_primitives::U256; + use reth_chainspec::{ChainSpec, MAINNET}; use revm::db::{CacheDB, EmptyDBTyped}; - use std::marker::PhantomData; + use revm_primitives::bytes; + use std::sync::Arc; #[derive(Clone, Default)] struct TestExecutorProvider; @@ -193,7 +532,11 @@ mod tests { TestExecutor(PhantomData) } - fn batch_executor(&self, _db: DB) -> Self::BatchExecutor + fn batch_executor( + &self, + _db: DB, + _: Option>, + ) -> Self::BatchExecutor where DB: Database + Display>, { @@ -261,6 +604,110 @@ mod tests { } } + struct TestExecutorStrategy { + // chain spec and evm config here only to illustrate how the strategy + // factory can use them in a real use case. 
+ _chain_spec: Arc, + _evm_config: EvmConfig, + state: State, + execute_transactions_result: ExecuteOutput, + apply_post_execution_changes_result: Requests, + finish_result: BundleState, + } + + #[derive(Clone)] + struct TestExecutorStrategyFactory { + execute_transactions_result: ExecuteOutput, + apply_post_execution_changes_result: Requests, + finish_result: BundleState, + } + + impl BlockExecutionStrategyFactory for TestExecutorStrategyFactory { + type Strategy + Display>> = + TestExecutorStrategy; + + fn create_strategy(&self, db: DB) -> Self::Strategy + where + DB: Database + Display>, + { + let state = State::builder() + .with_database(db) + .with_bundle_update() + .without_state_clear() + .build(); + + TestExecutorStrategy { + _chain_spec: MAINNET.clone(), + _evm_config: TestEvmConfig {}, + execute_transactions_result: self.execute_transactions_result.clone(), + apply_post_execution_changes_result: self + .apply_post_execution_changes_result + .clone(), + finish_result: self.finish_result.clone(), + state, + } + } + } + + impl BlockExecutionStrategy for TestExecutorStrategy + where + DB: Database, + { + type Error = BlockExecutionError; + + fn apply_pre_execution_changes( + &mut self, + _block: &BlockWithSenders, + _total_difficulty: U256, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn execute_transactions( + &mut self, + _block: &BlockWithSenders, + _total_difficulty: U256, + _tx: Option>, + ) -> Result { + Ok(self.execute_transactions_result.clone()) + } + + fn apply_post_execution_changes( + &mut self, + _block: &BlockWithSenders, + _total_difficulty: U256, + _receipts: &[Receipt], + ) -> Result { + Ok(self.apply_post_execution_changes_result.clone()) + } + + fn state_ref(&self) -> &State { + &self.state + } + + fn state_mut(&mut self) -> &mut State { + &mut self.state + } + + fn with_state_hook(&mut self, _hook: Option>) {} + + fn finish(&mut self) -> BundleState { + self.finish_result.clone() + } + + fn validate_block_post_execution( + &self, + _block: &BlockWithSenders, + _receipts: &[Receipt], + _requests: &Requests, + ) -> Result<(), ConsensusError> { + Ok(()) + } + } + + #[derive(Clone)] + struct TestEvmConfig {} + #[test] fn test_provider() { let provider = TestExecutorProvider; @@ -268,4 +715,33 @@ mod tests { let executor = provider.executor(db, None); let _ = executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO, None)); } + + #[test] + fn test_strategy() { + let expected_gas_used = 10; + let expected_receipts = vec![Receipt::default()]; + let expected_execute_transactions_result = + ExecuteOutput { receipts: expected_receipts.clone(), gas_used: expected_gas_used }; + let expected_apply_post_execution_changes_result = Requests::new(vec![bytes!("deadbeef")]); + let expected_finish_result = BundleState::default(); + + let strategy_factory = TestExecutorStrategyFactory { + execute_transactions_result: expected_execute_transactions_result, + apply_post_execution_changes_result: expected_apply_post_execution_changes_result + .clone(), + finish_result: expected_finish_result.clone(), + }; + let provider = BasicBlockExecutorProvider::new(strategy_factory); + let db = CacheDB::>::default(); + let executor = provider.executor(db, None); + let result = + executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO, None)); + + assert!(result.is_ok()); + let block_execution_output = result.unwrap(); + assert_eq!(block_execution_output.gas_used, expected_gas_used); + assert_eq!(block_execution_output.receipts, expected_receipts); + 
assert_eq!(block_execution_output.requests, expected_apply_post_execution_changes_result); + assert_eq!(block_execution_output.state, expected_finish_result); + } } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 6fcb3d9f8c..e30ff9b1a7 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -1,4 +1,10 @@ //! Traits for configuring EVM specifics. +//! +//! # Revm features +//! +//! This crate does __not__ enforce specific revm features such as `blst` or `c-kzg`, which are +//! critical for revm's EVM internals; it is the responsibility of the implementer to ensure the +//! proper features are selected. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -11,11 +17,9 @@ extern crate alloc; -use core::ops::Deref; - use crate::builder::RethEvmBuilder; use alloy_primitives::{Address, Bytes, B256, U256}; -use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::TransactionSigned; use reth_primitives_traits::BlockHeader; use revm::{Database, Evm, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv}; @@ -27,6 +31,7 @@ pub mod execute; pub mod metrics; pub mod noop; pub mod provider; +pub mod state_change; pub mod system_calls; #[cfg(any(test, feature = "test-utils"))] @@ -111,10 +116,13 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { /// The header type used by the EVM. type Header: BlockHeader; - /// Returns a [`TxEnv`] from a [`TransactionSignedEcRecovered`]. - fn tx_env(&self, transaction: &TransactionSignedEcRecovered) -> TxEnv { + /// The error type that is returned by [`Self::next_cfg_and_block_env`]. + type Error: core::error::Error + Send + Sync; + + /// Returns a [`TxEnv`] from a [`TransactionSigned`] and [`Address`]. + fn tx_env(&self, transaction: &TransactionSigned, signer: Address) -> TxEnv { let mut tx_env = TxEnv::default(); - self.fill_tx_env(&mut tx_env, transaction.deref(), transaction.signer()); + self.fill_tx_env(&mut tx_env, transaction, signer); tx_env } @@ -187,7 +195,7 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv); + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error>; } /// Represents additional attributes required to configure the next block. diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index df3aae0d51..946f7230a0 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -35,7 +35,11 @@ impl BlockExecutorProvider for NoopBlockExecutorProvider { Self } - fn batch_executor(&self, _: DB) -> Self::BatchExecutor + fn batch_executor( + &self, + _: DB, + _: Option>, + ) -> Self::BatchExecutor where DB: Database + Display>, { diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index 8db828ec4a..84c38db0dc 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -1,7 +1,7 @@ //! Provider trait for populating the EVM environment.
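A brief call-site sketch for the two `ConfigureEvmEnv` changes above; `evm_config`, `tx`, `signer`, `parent`, and `attributes` are assumed bindings:

// The recovered signer is now passed explicitly next to a plain
// `TransactionSigned`, instead of being bundled in a
// `TransactionSignedEcRecovered`.
let tx_env = evm_config.tx_env(&tx, signer);

// Deriving the next block's environment is now fallible and surfaces
// failures through the new associated `Error` type.
let (cfg_env, block_env) = evm_config.next_cfg_and_block_env(&parent, attributes)?;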
use crate::ConfigureEvmEnv; -use alloy_eips::eip1898::BlockHashOrNumber; +use alloy_eips::BlockHashOrNumber; use reth_primitives::Header; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; diff --git a/crates/revm/src/state_change.rs b/crates/evm/src/state_change.rs similarity index 98% rename from crates/revm/src/state_change.rs rename to crates/evm/src/state_change.rs index afe92561bc..2d91ac30ee 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/evm/src/state_change.rs @@ -1,7 +1,10 @@ +//! State changes that are not related to transactions. + +use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{map::HashMap, Address, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus_common::calc; -use reth_primitives::{Block, Withdrawal, Withdrawals}; +use reth_primitives::{Block, Withdrawals}; /// Collect all balance changes at the end of the block. /// @@ -89,9 +92,9 @@ pub fn insert_post_block_withdrawals_balance_increments Result, BlockExecutionError> { - let mut data = match result { +pub(crate) fn post_commit(result: ExecutionResult) -> Result { + match result { ExecutionResult::Success { output, .. } => Ok(output.into_data()), ExecutionResult::Revert { output, .. } => { Err(BlockValidationError::WithdrawalRequestsContractCall { message: format!("execution reverted: {output}"), - }) + } + .into()) } ExecutionResult::Halt { reason, .. } => { Err(BlockValidationError::WithdrawalRequestsContractCall { message: format!("execution halted: {reason:?}"), - }) - } - }?; - - // Withdrawals are encoded as a series of withdrawal requests, each with the following - // format: - // - // +------+--------+--------+ - // | addr | pubkey | amount | - // +------+--------+--------+ - // 20 48 8 - - const WITHDRAWAL_REQUEST_SIZE: usize = 20 + 48 + 8; - let mut withdrawal_requests = Vec::with_capacity(data.len() / WITHDRAWAL_REQUEST_SIZE); - while data.has_remaining() { - if data.remaining() < WITHDRAWAL_REQUEST_SIZE { - return Err(BlockValidationError::WithdrawalRequestsContractCall { - message: "invalid withdrawal request length".to_string(), } .into()) } - - let mut source_address = Address::ZERO; - data.copy_to_slice(source_address.as_mut_slice()); - - let mut validator_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(validator_pubkey.as_mut_slice()); - - let amount = data.get_u64(); - - withdrawal_requests - .push(WithdrawalRequest { source_address, validator_pubkey, amount }.into()); } - - Ok(withdrawal_requests) } diff --git a/crates/evm/src/system_calls/eip7251.rs b/crates/evm/src/system_calls/eip7251.rs index f09d4be81a..7a55c7a5ae 100644 --- a/crates/evm/src/system_calls/eip7251.rs +++ b/crates/evm/src/system_calls/eip7251.rs @@ -1,10 +1,10 @@ //! [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) system call implementation. 
use crate::ConfigureEvm; -use alloc::{boxed::Box, format, string::ToString, vec::Vec}; -use alloy_eips::eip7251::{ConsolidationRequest, CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS}; -use alloy_primitives::{bytes::Buf, Address, Bytes, FixedBytes}; +use alloc::{boxed::Box, format}; +use alloy_eips::eip7251::CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS; +use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{Header, Request}; +use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::{ExecutionResult, ResultAndState}; @@ -64,56 +64,23 @@ where Ok(res) } -/// Parses the consolidation requests from the execution output. +/// Calls the consolidation requests system contract, and returns the requests from the execution +/// output. #[inline] -pub(crate) fn post_commit(result: ExecutionResult) -> Result, BlockExecutionError> { - let mut data = match result { +pub(crate) fn post_commit(result: ExecutionResult) -> Result { + match result { ExecutionResult::Success { output, .. } => Ok(output.into_data()), ExecutionResult::Revert { output, .. } => { Err(BlockValidationError::ConsolidationRequestsContractCall { message: format!("execution reverted: {output}"), - }) + } + .into()) } ExecutionResult::Halt { reason, .. } => { Err(BlockValidationError::ConsolidationRequestsContractCall { message: format!("execution halted: {reason:?}"), - }) - } - }?; - - // Consolidations are encoded as a series of consolidation requests, each with the following - // format: - // - // +------+--------+---------------+ - // | addr | pubkey | target pubkey | - // +------+--------+---------------+ - // 20 48 48 - - const CONSOLIDATION_REQUEST_SIZE: usize = 20 + 48 + 48; - let mut consolidation_requests = Vec::with_capacity(data.len() / CONSOLIDATION_REQUEST_SIZE); - while data.has_remaining() { - if data.remaining() < CONSOLIDATION_REQUEST_SIZE { - return Err(BlockValidationError::ConsolidationRequestsContractCall { - message: "invalid consolidation request length".to_string(), } .into()) } - - let mut source_address = Address::ZERO; - data.copy_to_slice(source_address.as_mut_slice()); - - let mut source_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(source_pubkey.as_mut_slice()); - - let mut target_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(target_pubkey.as_mut_slice()); - - consolidation_requests.push(Request::ConsolidationRequest(ConsolidationRequest { - source_address, - source_pubkey, - target_pubkey, - })); } - - Ok(consolidation_requests) } diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index 43baa1c766..7fdb31d967 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -1,11 +1,13 @@ //! System contract call functions. use crate::ConfigureEvm; -use alloc::vec::Vec; +use alloc::{boxed::Box, sync::Arc, vec}; +use alloy_eips::eip7685::Requests; +use alloy_primitives::Bytes; use core::fmt::Display; use reth_chainspec::EthereumHardforks; use reth_execution_errors::BlockExecutionError; -use reth_primitives::{Block, Header, Request}; +use reth_primitives::{Block, Header}; use revm::{Database, DatabaseCommit, Evm}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, B256}; @@ -42,27 +44,26 @@ impl OnStateHook for NoopHook { /// /// This can be used to chain system transaction calls. 
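A hedged usage sketch of the reworked `SystemCaller` defined just below; `evm_config`, `chain_spec`, `evm`, and `MyHook` are assumed bindings. The caller now owns its EVM config, takes the chain spec behind an `Arc`, and installs the optional state hook as a boxed trait object rather than a type parameter:

let mut caller = SystemCaller::new(evm_config, chain_spec.clone());
caller.with_state_hook(Some(Box::new(MyHook::default())));

// Post-execution system calls now return the flattened EIP-7685 `Requests`
// instead of a `Vec<Request>`.
let requests = caller.apply_post_execution_changes(&mut evm)?;
caller.finish();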
#[allow(missing_debug_implementations)] -pub struct SystemCaller<'a, EvmConfig, Chainspec, Hook = NoopHook> { - evm_config: &'a EvmConfig, - chain_spec: Chainspec, +pub struct SystemCaller { + evm_config: EvmConfig, + chain_spec: Arc, /// Optional hook to be called after each state change. - hook: Option, + hook: Option>, } -impl<'a, EvmConfig, Chainspec> SystemCaller<'a, EvmConfig, Chainspec, NoopHook> { +impl SystemCaller { /// Creates a new system caller with the given EVM config and chain spec. - pub const fn new(evm_config: &'a EvmConfig, chain_spec: Chainspec) -> Self { + pub const fn new(evm_config: EvmConfig, chain_spec: Arc) -> Self { Self { evm_config, chain_spec, hook: None } } + /// Installs a custom hook to be called after each state change. - pub fn with_state_hook( - self, - hook: Option, - ) -> SystemCaller<'a, EvmConfig, Chainspec, H> { - let Self { evm_config, chain_spec, .. } = self; - SystemCaller { evm_config, chain_spec, hook } + pub fn with_state_hook(&mut self, hook: Option>) -> &mut Self { + self.hook = hook; + self } + /// Convenience method to consume the type and drop borrowed fields pub fn finish(self) {} } @@ -85,11 +86,10 @@ where .build() } -impl SystemCaller<'_, EvmConfig, Chainspec, Hook> +impl SystemCaller where EvmConfig: ConfigureEvm
, Chainspec: EthereumHardforks, - Hook: OnStateHook, { /// Apply pre execution changes. pub fn apply_pre_execution_changes( @@ -121,17 +121,18 @@ where pub fn apply_post_execution_changes( &mut self, evm: &mut Evm<'_, Ext, DB>, - ) -> Result, BlockExecutionError> + ) -> Result where DB: Database + DatabaseCommit, DB::Error: Display, { + // todo // Collect all EIP-7685 requests let withdrawal_requests = self.apply_withdrawal_requests_contract_call(evm)?; // Collect all EIP-7251 requests let consolidation_requests = self.apply_consolidation_requests_contract_call(evm)?; - Ok([withdrawal_requests, consolidation_requests].concat()) + Ok(Requests::new(vec![withdrawal_requests, consolidation_requests])) } /// Applies the pre-block call to the EIP-2935 blockhashes contract. @@ -170,7 +171,7 @@ where DB::Error: Display, { let result_and_state = eip2935::transact_blockhashes_contract_call( - &self.evm_config.clone(), + &self.evm_config, &self.chain_spec, timestamp, block_number, @@ -225,7 +226,7 @@ where DB::Error: Display, { let result_and_state = eip4788::transact_beacon_root_contract_call( - &self.evm_config.clone(), + &self.evm_config, &self.chain_spec, timestamp, block_number, @@ -249,7 +250,7 @@ where db: &mut DB, initialized_cfg: &CfgEnvWithHandlerCfg, initialized_block_env: &BlockEnv, - ) -> Result, BlockExecutionError> + ) -> Result where DB: Database + DatabaseCommit, DB::Error: Display, @@ -265,7 +266,7 @@ where pub fn apply_withdrawal_requests_contract_call( &mut self, evm: &mut Evm<'_, Ext, DB>, - ) -> Result, BlockExecutionError> + ) -> Result where DB: Database + DatabaseCommit, DB::Error: Display, @@ -287,7 +288,7 @@ where db: &mut DB, initialized_cfg: &CfgEnvWithHandlerCfg, initialized_block_env: &BlockEnv, - ) -> Result, BlockExecutionError> + ) -> Result where DB: Database + DatabaseCommit, DB::Error: Display, @@ -303,7 +304,7 @@ where pub fn apply_consolidation_requests_contract_call( &mut self, evm: &mut Evm<'_, Ext, DB>, - ) -> Result, BlockExecutionError> + ) -> Result where DB: Database + DatabaseCommit, DB::Error: Display, diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index e6076b8cc3..78e6893508 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -2,15 +2,17 @@ use crate::{ execute::{ - BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, + BasicBatchExecutor, BasicBlockExecutor, BatchExecutor, BlockExecutionInput, + BlockExecutionOutput, BlockExecutionStrategy, BlockExecutorProvider, Executor, }, system_calls::OnStateHook, }; +use alloy_eips::eip7685::Requests; use alloy_primitives::BlockNumber; use parking_lot::Mutex; use reth_execution_errors::BlockExecutionError; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockWithSenders, Header, Receipt}; +use reth_primitives::{BlockWithSenders, Header, Receipt, Receipts}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; @@ -43,7 +45,11 @@ impl BlockExecutorProvider for MockExecutorProvider { self.clone() } - fn batch_executor(&self, _: DB) -> Self::BatchExecutor + fn batch_executor( + &self, + _: DB, + _: Option>, + ) -> Self::BatchExecutor where DB: Database + Display>, { @@ -62,7 +68,10 @@ impl Executor for MockExecutorProvider { Ok(BlockExecutionOutput { state: bundle, receipts: receipts.into_iter().flatten().flatten().collect(), - requests: requests.into_iter().flatten().collect(), + requests: requests.into_iter().fold(Requests::default(), |mut reqs, req| { + 
reqs.extend(req); + reqs + }), gas_used: 0, snapshot: None, }) @@ -112,3 +121,52 @@ impl BatchExecutor for MockExecutorProvider { None } } + +impl BasicBlockExecutor +where + S: BlockExecutionStrategy, + DB: Database, +{ + /// Provides safe read access to the state + pub fn with_state(&self, f: F) -> R + where + F: FnOnce(&State) -> R, + { + f(self.strategy.state_ref()) + } + + /// Provides safe write access to the state + pub fn with_state_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut State) -> R, + { + f(self.strategy.state_mut()) + } +} + +impl BasicBatchExecutor +where + S: BlockExecutionStrategy, + DB: Database, +{ + /// Provides safe read access to the state + pub fn with_state(&self, f: F) -> R + where + F: FnOnce(&State) -> R, + { + f(self.strategy.state_ref()) + } + + /// Provides safe write access to the state + pub fn with_state_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut State) -> R, + { + f(self.strategy.state_mut()) + } + + /// Accessor for batch executor receipts. + pub const fn receipts(&self) -> &Receipts { + self.batch_record.receipts() + } +} diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 6a3815e404..903e11e784 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -17,7 +17,10 @@ reth-chain-state.workspace = true reth-chainspec.workspace = true reth-config.workspace = true reth-evm.workspace = true -reth-exex-types = { workspace = true, features = ["serde", "serde-bincode-compat"] } +reth-exex-types = { workspace = true, features = [ + "serde", + "serde-bincode-compat", +] } reth-fs-util.workspace = true reth-metrics.workspace = true reth-node-api.workspace = true @@ -51,7 +54,6 @@ tracing.workspace = true [dev-dependencies] reth-blockchain-tree.workspace = true -reth-db-api.workspace = true reth-db-common.workspace = true reth-evm-ethereum.workspace = true reth-node-api.workspace = true @@ -68,4 +70,14 @@ tempfile.workspace = true [features] default = [] -serde = ["reth-provider/serde", "reth-exex-types/serde"] +serde = [ + "reth-provider/serde", + "reth-exex-types/serde", + "reth-revm/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "parking_lot/serde", + "rand/serde", + "secp256k1/serde" +] diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 18bd3f8021..013bee9faa 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -72,9 +72,12 @@ where "Executing block range" ); - let mut executor = self.executor.batch_executor(StateProviderDatabase::new( - self.provider.history_by_block_number(self.range.start().saturating_sub(1))?, - )); + let mut executor = self.executor.batch_executor( + StateProviderDatabase::new( + self.provider.history_by_block_number(self.range.start().saturating_sub(1))?, + ), + None, + ); executor.set_prune_modes(self.prune_modes.clone()); let mut fetch_block_duration = Duration::default(); diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index a372c6171f..6b0ad366b6 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use alloy_consensus::TxEip2930; +use alloy_consensus::{constants::ETH_TO_WEI, TxEip2930}; use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::{b256, Address, TxKind, U256}; use eyre::OptionExt; @@ -10,8 +10,7 @@ use reth_evm::execute::{ }; use reth_evm_ethereum::execute::EthExecutorProvider; use 
reth_primitives::{ - constants::ETH_TO_WEI, Block, BlockBody, BlockWithSenders, Header, Receipt, Requests, - SealedBlockWithSenders, Transaction, + Block, BlockBody, BlockWithSenders, Header, Receipt, SealedBlockWithSenders, Transaction, }; use reth_provider::{ providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, @@ -29,7 +28,7 @@ pub(crate) fn to_execution_outcome( bundle: block_execution_output.state.clone(), receipts: block_execution_output.receipts.clone().into(), first_block: block_number, - requests: vec![Requests(block_execution_output.requests.clone())], + requests: vec![block_execution_output.requests.clone()], snapshots: vec![], } } @@ -202,10 +201,13 @@ where let provider = provider_factory.provider()?; - let executor = - EthExecutorProvider::ethereum(chain_spec).batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new(provider.tx_ref(), provider.static_file_provider()), - )); + let executor = EthExecutorProvider::ethereum(chain_spec).batch_executor( + StateProviderDatabase::new(LatestStateProviderRef::new( + provider.tx_ref(), + provider.static_file_provider(), + )), + None, + ); let mut execution_outcome = executor.execute_and_verify_batch(vec![ (&block1, U256::ZERO, None).into(), diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index 9af12e260a..23d772b738 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -7,7 +7,7 @@ use reth_primitives::Head; use reth_tasks::TaskExecutor; use tokio::sync::mpsc::UnboundedSender; -use crate::{ExExEvent, ExExNotifications}; +use crate::{ExExContextDyn, ExExEvent, ExExNotifications, ExExNotificationsStream}; /// Captures the context that an `ExEx` has access to. pub struct ExExContext { @@ -55,7 +55,22 @@ where } } -impl ExExContext { +impl ExExContext +where + Node: FullNodeComponents, + Node::Provider: Debug, + Node::Executor: Debug, +{ + /// Returns dynamic version of the context + pub fn into_dyn(self) -> ExExContextDyn { + ExExContextDyn::from(self) + } +} + +impl ExExContext +where + Node: FullNodeComponents, +{ /// Returns the transaction pool of the node. pub fn pool(&self) -> &Node::Pool { self.components.pool() @@ -106,3 +121,34 @@ impl ExExContext { self.notifications.set_with_head(head); } } + +#[cfg(test)] +mod tests { + use reth_exex_types::ExExHead; + use reth_node_api::FullNodeComponents; + + use crate::ExExContext; + + /// + #[test] + const fn issue_12054() { + #[allow(dead_code)] + struct ExEx { + ctx: ExExContext, + } + + impl ExEx { + async fn _test_bounds(mut self) -> eyre::Result<()> { + self.ctx.pool(); + self.ctx.block_executor(); + self.ctx.provider(); + self.ctx.network(); + self.ctx.payload_builder(); + self.ctx.task_executor(); + self.ctx.set_notifications_without_head(); + self.ctx.set_notifications_with_head(ExExHead { block: Default::default() }); + Ok(()) + } + } + } +} diff --git a/crates/exex/exex/src/dyn_context.rs b/crates/exex/exex/src/dyn_context.rs new file mode 100644 index 0000000000..b48a6ebc95 --- /dev/null +++ b/crates/exex/exex/src/dyn_context.rs @@ -0,0 +1,70 @@ +//! Mirrored version of [`ExExContext`](`crate::ExExContext`) +//! 
without generic abstraction over [Node](`reth_node_api::FullNodeComponents`) + +use std::fmt::Debug; + +use reth_chainspec::{EthChainSpec, Head}; +use reth_node_api::FullNodeComponents; +use reth_node_core::node_config::NodeConfig; +use tokio::sync::mpsc; + +use crate::{ExExContext, ExExEvent, ExExNotificationsStream}; + +// TODO(0xurb) - add `node` after abstractions +/// Captures the context that an `ExEx` has access to. +pub struct ExExContextDyn { + /// The current head of the blockchain at launch. + pub head: Head, + /// The config of the node + pub config: NodeConfig>, + /// The loaded node config + pub reth_config: reth_config::Config, + /// Channel used to send [`ExExEvent`]s to the rest of the node. + /// + /// # Important + /// + /// The exex should emit a `FinishedHeight` whenever a processed block is safe to prune. + /// Additionally, the exex can pre-emptively emit a `FinishedHeight` event to specify what + /// blocks to receive notifications for. + pub events: mpsc::UnboundedSender, + /// Channel to receive [`ExExNotification`](crate::ExExNotification)s. + /// + /// # Important + /// + /// Once an [`ExExNotification`](crate::ExExNotification) is sent over the channel, it is + /// considered delivered by the node. + pub notifications: Box, +} + +impl Debug for ExExContextDyn { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ExExContext") + .field("head", &self.head) + .field("config", &self.config) + .field("reth_config", &self.reth_config) + .field("events", &self.events) + .field("notifications", &"...") + .finish() + } +} + +impl From> for ExExContextDyn +where + Node: FullNodeComponents, + Node::Provider: Debug, + Node::Executor: Debug, +{ + fn from(ctx: ExExContext) -> Self { + let config = + ctx.config.map_chainspec(|chainspec| Box::new(chainspec) as Box); + let notifications = Box::new(ctx.notifications) as Box; + + Self { + head: ctx.head, + config, + reth_config: ctx.reth_config, + events: ctx.events, + notifications, + } + } +} diff --git a/crates/exex/exex/src/event.rs b/crates/exex/exex/src/event.rs index 1215ea2a50..bbd79addc9 100644 --- a/crates/exex/exex/src/event.rs +++ b/crates/exex/exex/src/event.rs @@ -1,4 +1,4 @@ -use reth_primitives::BlockNumHash; +use alloy_eips::BlockNumHash; /// Events emitted by an `ExEx`. 
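To illustrate the dynamic context introduced in `dyn_context.rs` above, a hypothetical ExEx entrypoint that erases the `Node` generic up front; the function name and `eyre::Result` return are assumptions, while the bounds mirror the `From` impl:

use std::fmt::Debug;

use reth_exex::{ExExContext, ExExContextDyn};
use reth_node_api::FullNodeComponents;

async fn my_exex<Node>(ctx: ExExContext<Node>) -> eyre::Result<()>
where
    Node: FullNodeComponents,
    Node::Provider: Debug,
    Node::Executor: Debug,
{
    // Equivalent to `ExExContextDyn::from(ctx)`.
    let ctx: ExExContextDyn = ctx.into_dyn();
    // `ctx.notifications` is now a boxed `ExExNotificationsStream`.
    let _ = ctx;
    Ok(())
}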
#[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/crates/exex/exex/src/lib.rs b/crates/exex/exex/src/lib.rs index edc9e40d44..ce6641ff67 100644 --- a/crates/exex/exex/src/lib.rs +++ b/crates/exex/exex/src/lib.rs @@ -40,6 +40,9 @@ pub use backfill::*; mod context; pub use context::*; +mod dyn_context; +pub use dyn_context::*; + mod event; pub use event::*; diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index 8c1518f309..a17de66086 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1,13 +1,14 @@ use crate::{ wal::Wal, ExExEvent, ExExNotification, ExExNotifications, FinishedExExHeight, WalHandle, }; +use alloy_eips::BlockNumHash; use futures::StreamExt; use itertools::Itertools; use metrics::Gauge; use reth_chain_state::ForkChoiceStream; use reth_chainspec::Head; use reth_metrics::{metrics::Counter, Metrics}; -use reth_primitives::{BlockNumHash, SealedHeader}; +use reth_primitives::SealedHeader; use reth_provider::HeaderProvider; use reth_tracing::tracing::debug; use std::{ diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index d0c94d34f6..14cfe9be4d 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -21,6 +21,40 @@ pub struct ExExNotifications { inner: ExExNotificationsInner, } +/// A trait that represents a stream of [`ExExNotification`]s. The stream will emit notifications +/// for all blocks. If the stream is configured with a head via [`ExExNotifications::set_with_head`] +/// or [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head. +pub trait ExExNotificationsStream: Stream> + Unpin { +/// Sets [`ExExNotificationsStream`] to a stream of [`ExExNotification`]s without a head. + /// + /// It's a no-op if the stream has already been configured without a head. + /// + /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. + fn set_without_head(&mut self); + + /// Sets [`ExExNotificationsStream`] to a stream of [`ExExNotification`]s with the provided + /// head. + /// + /// It's a no-op if the stream has already been configured with a head. + /// + /// See the documentation of [`ExExNotificationsWithHead`] for more details. + fn set_with_head(&mut self, exex_head: ExExHead); + + /// Returns a new [`ExExNotificationsStream`] without a head. + /// + /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. + fn without_head(self) -> Self + where + Self: Sized; + + /// Returns a new [`ExExNotificationsStream`] with the provided head. + /// + /// See the documentation of [`ExExNotificationsWithHead`] for more details. + fn with_head(self, exex_head: ExExHead) -> Self + where + Self: Sized; +} + #[derive(Debug)] enum ExExNotificationsInner { /// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. @@ -52,13 +86,14 @@ impl ExExNotifications { )), } } +} - /// Sets [`ExExNotifications`] to a stream of [`ExExNotification`]s without a head. - /// - /// It's a no-op if the stream has already been configured without a head. - /// - /// See the documentation of [`ExExNotificationsWithoutHead`] for more details.
- pub fn set_without_head(&mut self) { +impl ExExNotificationsStream for ExExNotifications +where + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider + Clone + Unpin + 'static, +{ + fn set_without_head(&mut self) { let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); self.inner = ExExNotificationsInner::WithoutHead(match current { ExExNotificationsInner::WithoutHead(notifications) => notifications, @@ -73,20 +108,7 @@ impl ExExNotifications { }); } - /// Returns a new [`ExExNotifications`] without a head. - /// - /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. - pub fn without_head(mut self) -> Self { - self.set_without_head(); - self - } - - /// Sets [`ExExNotifications`] to a stream of [`ExExNotification`]s with the provided head. - /// - /// It's a no-op if the stream has already been configured with a head. - /// - /// See the documentation of [`ExExNotificationsWithHead`] for more details. - pub fn set_with_head(&mut self, exex_head: ExExHead) { + fn set_with_head(&mut self, exex_head: ExExHead) { let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); self.inner = ExExNotificationsInner::WithHead(match current { ExExNotificationsInner::WithoutHead(notifications) => { @@ -104,10 +126,12 @@ impl ExExNotifications { }); } - /// Returns a new [`ExExNotifications`] with the provided head. - /// - /// See the documentation of [`ExExNotificationsWithHead`] for more details. - pub fn with_head(mut self, exex_head: ExExHead) -> Self { + fn without_head(mut self) -> Self { + self.set_without_head(); + self + } + + fn with_head(mut self, exex_head: ExExHead) -> Self { self.set_with_head(exex_head); self } diff --git a/crates/exex/exex/src/wal/storage.rs b/crates/exex/exex/src/wal/storage.rs index af3a590e58..aaa4398fd0 100644 --- a/crates/exex/exex/src/wal/storage.rs +++ b/crates/exex/exex/src/wal/storage.rs @@ -9,6 +9,8 @@ use reth_exex_types::ExExNotification; use reth_tracing::tracing::debug; use tracing::instrument; +static FILE_EXTENSION: &str = "wal"; + /// The underlying WAL storage backed by a directory of files. /// /// Each notification is represented by a single file that contains a MessagePack-encoded @@ -29,7 +31,7 @@ impl Storage { } fn file_path(&self, id: u32) -> PathBuf { - self.path.join(format!("{id}.wal")) + self.path.join(format!("{id}.{FILE_EXTENSION}")) } fn parse_filename(filename: &str) -> eyre::Result { @@ -70,11 +72,14 @@ impl Storage { for entry in reth_fs_util::read_dir(&self.path)? 
{ let entry = entry?; - let file_name = entry.file_name(); - let file_id = Self::parse_filename(&file_name.to_string_lossy())?; - min_id = min_id.map_or(Some(file_id), |min_id: u32| Some(min_id.min(file_id))); - max_id = max_id.map_or(Some(file_id), |max_id: u32| Some(max_id.max(file_id))); + if entry.path().extension() == Some(FILE_EXTENSION.as_ref()) { + let file_name = entry.file_name(); + let file_id = Self::parse_filename(&file_name.to_string_lossy())?; + + min_id = min_id.map_or(Some(file_id), |min_id: u32| Some(min_id.min(file_id))); + max_id = max_id.map_or(Some(file_id), |max_id: u32| Some(max_id.max(file_id))); + } } Ok(min_id.zip(max_id).map(|(min_id, max_id)| min_id..=max_id)) @@ -167,7 +172,7 @@ impl Storage { #[cfg(test)] mod tests { - use std::sync::Arc; + use std::{fs::File, sync::Arc}; use eyre::OptionExt; use reth_exex_types::ExExNotification; @@ -206,4 +211,24 @@ mod tests { Ok(()) } + + #[test] + fn test_files_range() -> eyre::Result<()> { + let temp_dir = tempfile::tempdir()?; + let storage = Storage::new(&temp_dir)?; + + // Create WAL files + File::create(storage.file_path(1))?; + File::create(storage.file_path(2))?; + File::create(storage.file_path(3))?; + + // Create non-WAL files that should be ignored + File::create(temp_dir.path().join("0.tmp"))?; + File::create(temp_dir.path().join("4.tmp"))?; + + // Check files range + assert_eq!(storage.files_range()?, Some(1..=3)); + + Ok(()) + } } diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index 514e34ab7d..13f32f03fe 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -31,7 +31,10 @@ reth-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-tasks.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } -reth-ethereum-engine-primitives.workspace = true +reth-trie-db.workspace = true + +## alloy +alloy-eips.workspace = true ## async futures-util.workspace = true diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 52da48f9b3..a0b11575e8 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -15,6 +15,7 @@ use std::{ task::Poll, }; +use alloy_eips::BlockNumHash; use futures_util::FutureExt; use reth_blockchain_tree::noop::NoopBlockchainTree; use reth_chainspec::{ChainSpec, MAINNET}; @@ -24,7 +25,6 @@ use reth_db::{ DatabaseEnv, }; use reth_db_common::init::init_genesis; -use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_evm::test_utils::MockExecutorProvider; use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications, Wal}; @@ -41,14 +41,11 @@ use reth_node_builder::{ }; use reth_node_core::node_config::NodeConfig; use reth_node_ethereum::{ - node::{ - EthereumAddOns, EthereumEngineValidatorBuilder, EthereumNetworkBuilder, - EthereumParliaBuilder, EthereumPayloadBuilder, - }, + node::{EthereumAddOns, EthereumNetworkBuilder, EthereumParliaBuilder, EthereumPayloadBuilder}, EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{BlockNumHash, Head, SealedBlockWithSenders}; +use reth_primitives::{Head, SealedBlockWithSenders}; use reth_provider::{ providers::{BlockchainProvider, StaticFileProvider}, BlockReader, ProviderFactory, @@ -123,6 +120,7 @@ pub struct TestNode; impl NodeTypes for TestNode { type Primitives = (); type ChainSpec = ChainSpec; + type StateCommitment = 
reth_trie_db::MerklePatriciaTrie; } impl NodeTypesWithEngine for TestNode { @@ -140,10 +138,11 @@ where EthereumNetworkBuilder, TestExecutorBuilder, TestConsensusBuilder, - EthereumEngineValidatorBuilder, EthereumParliaBuilder, >; - type AddOns = EthereumAddOns; + type AddOns = EthereumAddOns< + NodeAdapter>::Components>, + >; fn components_builder(&self) -> Self::ComponentsBuilder { ComponentsBuilder::default() @@ -153,7 +152,6 @@ where .network(EthereumNetworkBuilder::default()) .executor(TestExecutorBuilder::default()) .consensus(TestConsensusBuilder::default()) - .engine_validator(EthereumEngineValidatorBuilder::default()) .parlia(EthereumParliaBuilder::default()) } @@ -274,17 +272,16 @@ pub async fn test_exex_context_with_chain_spec( let network_manager = NetworkManager::new( NetworkConfigBuilder::new(SecretKey::new(&mut rand::thread_rng())) .with_unused_discovery_port() + .with_unused_listener_port() .build(provider_factory.clone()), ) .await?; let network = network_manager.handle().clone(); - - let (_, payload_builder) = NoopPayloadBuilderService::::new(); - let tasks = TaskManager::current(); let task_executor = tasks.executor(); + tasks.executor().spawn(network_manager); - let engine_validator = EthereumEngineValidator::new(chain_spec.clone()); + let (_, payload_builder) = NoopPayloadBuilderService::::new(); let components = NodeAdapter::, _>, _> { components: Components { @@ -294,7 +291,6 @@ pub async fn test_exex_context_with_chain_spec( consensus, network, payload_builder, - engine_validator, #[cfg(feature = "bsc")] parlia: Default::default(), }, diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index a146cbc227..51097d6109 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -33,5 +33,16 @@ rand.workspace = true [features] default = [] -serde = ["dep:serde", "reth-execution-types/serde"] -serde-bincode-compat = ["reth-execution-types/serde-bincode-compat", "serde_with"] +serde = [ + "dep:serde", + "reth-execution-types/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "rand/serde" +] +serde-bincode-compat = [ + "reth-execution-types/serde-bincode-compat", + "serde_with", + "reth-primitives/serde-bincode-compat", + "alloy-eips/serde-bincode-compat" +] diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index fde652ef39..f1c8410eeb 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -42,14 +42,24 @@ parking_lot.workspace = true rand = { workspace = true, optional = true } generic-array.workspace = true serde = { workspace = true, optional = true } +itertools.workspace = true [dev-dependencies] assert_matches.workspace = true rand.workspace = true -tokio = { workspace = true, features = ["macros"] } +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } reth-tracing.workspace = true [features] default = ["serde"] -serde = ["dep:serde"] +serde = [ + "dep:serde", + "alloy-primitives/serde", + "discv5/serde", + "enr/serde", + "generic-array/serde", + "parking_lot/serde", + "rand?/serde", + "secp256k1/serde" +] test-utils = ["dep:rand"] diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs index 4fae31f585..38467304db 100644 --- a/crates/net/discv4/src/config.rs +++ b/crates/net/discv4/src/config.rs @@ -8,8 +8,6 @@ use alloy_rlp::Encodable; use reth_net_banlist::BanList; use reth_net_nat::{NatResolver, ResolveNatInterval}; use reth_network_peers::NodeRecord; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; use std::{ 
collections::{HashMap, HashSet}, time::Duration, @@ -17,7 +15,7 @@ /// Configuration parameters that define the performance of the discovery network. #[derive(Clone, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Discv4Config { /// Whether to enable the incoming packet filter. Default: false. pub enable_packet_filter: bool, @@ -25,7 +23,7 @@ pub struct Discv4Config { pub udp_egress_message_buffer: usize, /// Size of the channel buffer for incoming messages. pub udp_ingress_message_buffer: usize, - /// The number of allowed failures for `FindNode` requests. Default: 5. + /// The number of allowed consecutive failures for `FindNode` requests. Default: 5. pub max_find_node_failures: u8, /// The interval to use when checking for expired nodes that need to be re-pinged. Default: /// 10min. @@ -118,7 +116,7 @@ impl Default for Discv4Config { // Every outgoing request will eventually lead to an incoming response udp_ingress_message_buffer: 1024, max_find_node_failures: 5, - ping_interval: Duration::from_secs(60 * 10), + ping_interval: Duration::from_secs(10), // Unified expiration and timeout durations, mirrors geth's `expiration` duration ping_expiration: Duration::from_secs(20), bond_expiration: Duration::from_secs(60 * 60), @@ -144,7 +142,7 @@ impl Default for Discv4Config { /// Builder type for [`Discv4Config`] #[derive(Clone, Debug, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Discv4ConfigBuilder { config: Discv4Config, } diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 7c14eac9b6..788e93048f 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -38,6 +38,7 @@ use discv5::{ ConnectionDirection, ConnectionState, }; use enr::Enr; +use itertools::Itertools; use parking_lot::Mutex; use proto::{EnrRequest, EnrResponse}; use reth_ethereum_forks::ForkId; @@ -743,7 +744,8 @@ impl Discv4Service { trace!(target: "discv4", ?target, "Starting lookup"); let target_key = kad_key(target); - // Start a lookup context with the 16 (MAX_NODES_PER_BUCKET) closest nodes + // Start a lookup context with the 16 (MAX_NODES_PER_BUCKET) closest nodes to which we have + // a valid endpoint proof let ctx = LookupContext::new( target_key.clone(), self.kbuckets @@ -772,7 +774,10 @@ impl Discv4Service { trace!(target: "discv4", ?target, num = closest.len(), "Start lookup closest nodes"); for node in closest { - self.find_node(&node, ctx.clone()); + // here we still want to check against previous request failures and if necessary + // re-establish the endpoint proof because it can be the case that the other node lost + // our entry and no longer has an endpoint proof on their end + self.find_node_checked(&node, ctx.clone()); } } @@ -788,6 +793,22 @@ impl Discv4Service { self.pending_find_nodes.insert(node.id, FindNodeRequest::new(ctx)); } + /// Sends a new `FindNode` packet to the node with `target` as the lookup target but checks + /// whether we should send a new ping first to renew the endpoint proof by checking the + /// previously failed findNode requests. It could be that the node is no longer reachable or + /// lost our entry.
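The comments above capture the core subtlety of this change: a peer that keeps ignoring our `FindNode` requests may have evicted our entry, and with it the endpoint proof, so a lookup step must fall back to a fresh ping. A minimal standalone sketch of that decision, before the actual `find_node_checked` below (hypothetical function and names, not reth API):

```rust
/// Sketch only: the branch find_node_checked takes, with the service
/// internals reduced to two counters.
fn next_lookup_action(consecutive_failures: u8, max_failures: u8) -> &'static str {
    if consecutive_failures >= max_failures {
        // The peer likely dropped us: renew the endpoint proof first;
        // the lookup resumes from the pong handler.
        "ping"
    } else {
        // Endpoint proof assumed valid; query the peer directly.
        "find_node"
    }
}

fn main() {
    assert_eq!(next_lookup_action(5, 5), "ping");
    assert_eq!(next_lookup_action(0, 5), "find_node");
}
```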
+ fn find_node_checked(&mut self, node: &NodeRecord, ctx: LookupContext) { + let max_failures = self.config.max_find_node_failures; + let needs_ping = self + .on_entry(node.id, |entry| entry.exceeds_find_node_failures(max_failures)) + .unwrap_or(true); + if needs_ping { + self.try_ping(*node, PingReason::Lookup(*node, ctx)) + } else { + self.find_node(node, ctx) + } + } + /// Notifies all listeners. /// /// Removes all listeners that are closed. @@ -829,6 +850,24 @@ impl Discv4Service { /// table. Returns `true` if the node was in the table and `false` otherwise. pub fn remove_node(&mut self, node_id: PeerId) -> bool { let key = kad_key(node_id); + self.remove_key(node_id, key) + } + + /// Removes a `node_id` from the routing table but only if there are enough other nodes in the + /// bucket (bucket must be at least half full) + /// + /// Returns `true` if the node was removed + pub fn soft_remove_node(&mut self, node_id: PeerId) -> bool { + let key = kad_key(node_id); + let Some(bucket) = self.kbuckets.get_bucket(&key) else { return false }; + if bucket.num_entries() < MAX_NODES_PER_BUCKET / 2 { + // skip half empty bucket + return false + } + self.remove_key(node_id, key) + } + + fn remove_key(&mut self, node_id: PeerId, key: discv5::Key<PeerId>) -> bool { let removed = self.kbuckets.remove(&key); if removed { trace!(target: "discv4", ?node_id, "removed node"); @@ -842,7 +881,7 @@ impl Discv4Service { self.kbuckets.buckets_iter().fold(0, |count, bucket| count + bucket.num_connected()) } - /// Check if the peer has a bond + /// Check if the peer has an active bond. fn has_bond(&self, remote_id: PeerId, remote_ip: IpAddr) -> bool { if let Some(timestamp) = self.received_pongs.last_pong(remote_id, remote_ip) { if timestamp.elapsed() < self.config.bond_expiration { @@ -852,7 +891,22 @@ impl Discv4Service { false } - /// Update the entry on RE-ping + /// Applies a closure on the pending or present [`NodeEntry`]. + fn on_entry<F, R>(&mut self, peer_id: PeerId, f: F) -> Option<R> + where + F: FnOnce(&NodeEntry) -> R, + { + let key = kad_key(peer_id); + match self.kbuckets.entry(&key) { + BucketEntry::Present(entry, _) => Some(f(entry.value())), + BucketEntry::Pending(mut entry, _) => Some(f(entry.value())), + _ => None, + } + } + + /// Update the entry on RE-ping. + /// + /// Invoked when we receive the Pong for our [`PingReason::RePing`] ping. /// /// On re-ping we check for a changed `enr_seq` if eip868 is enabled and when it changed we sent /// a followup request to retrieve the updated ENR @@ -909,7 +963,7 @@ impl Discv4Service { match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, old_status) => { // endpoint is now proven - entry.value_mut().has_endpoint_proof = true; + entry.value_mut().establish_proof(); entry.value_mut().update_with_enr(last_enr_seq); if !old_status.is_connected() { @@ -925,7 +979,7 @@ impl Discv4Service { } kbucket::Entry::Pending(mut entry, mut status) => { // endpoint is now proven - entry.value().has_endpoint_proof = true; + entry.value().establish_proof(); entry.value().update_with_enr(last_enr_seq); if !status.is_connected() { @@ -1028,11 +1082,23 @@ impl Discv4Service { let old_enr = match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, _) => { - is_proven = entry.value().has_endpoint_proof; + if entry.value().is_expired() { + // If no communication with the sender has occurred within the last 12h, a ping + // should be sent in addition to pong in order to receive an endpoint proof.
+ needs_bond = true; + } else { + is_proven = entry.value().has_endpoint_proof; + } entry.value_mut().update_with_enr(ping.enr_sq) } kbucket::Entry::Pending(mut entry, _) => { - is_proven = entry.value().has_endpoint_proof; + if entry.value().is_expired() { + // If no communication with the sender has occurred within the last 12h, a ping + // should be sent in addition to pong in order to receive an endpoint proof. + needs_bond = true; + } else { + is_proven = entry.value().has_endpoint_proof; + } entry.value().update_with_enr(ping.enr_sq) } kbucket::Entry::Absent(entry) => { @@ -1097,6 +1163,8 @@ impl Discv4Service { // try to send it ctx.unmark_queried(record.id); } else { + // we just received a ping from that peer so we can send a find node request + // directly self.find_node(&record, ctx); } } @@ -1205,7 +1273,8 @@ impl Discv4Service { self.update_on_pong(node, pong.enr_sq); } PingReason::EstablishBond => { - // nothing to do here + // same as `InitialInsert` which renews the bond if the peer is in the table + self.update_on_pong(node, pong.enr_sq); } PingReason::RePing => { self.update_on_reping(node, pong.enr_sq); @@ -1338,6 +1407,16 @@ impl Discv4Service { } }; + // log the peers we discovered + trace!(target: "discv4", + target=format!("{:#?}", node_id), + peers_count=msg.nodes.len(), + peers=format!("[{:#}]", msg.nodes.iter() + .map(|node_rec| node_rec.id + ).format(", ")), + "Received peers from Neighbours packet" + ); + // This is the recursive lookup step where we initiate new FindNode requests for new nodes // that were discovered. for node in msg.nodes.into_iter().map(NodeRecord::into_ipv4_mapped) { @@ -1386,14 +1465,28 @@ impl Discv4Service { BucketEntry::SelfEntry => { // we received our own node entry } - BucketEntry::Present(mut entry, _) => { - if entry.value_mut().has_endpoint_proof { - self.find_node(&closest, ctx.clone()); + BucketEntry::Present(entry, _) => { + if entry.value().has_endpoint_proof { + if entry + .value() + .exceeds_find_node_failures(self.config.max_find_node_failures) + { + self.try_ping(closest, PingReason::Lookup(closest, ctx.clone())) + } else { + self.find_node(&closest, ctx.clone()); + } } } BucketEntry::Pending(mut entry, _) => { if entry.value().has_endpoint_proof { - self.find_node(&closest, ctx.clone()); + if entry + .value() + .exceeds_find_node_failures(self.config.max_find_node_failures) + { + self.try_ping(closest, PingReason::Lookup(closest, ctx.clone())) + } else { + self.find_node(&closest, ctx.clone()); + } } } } @@ -1431,11 +1524,12 @@ impl Discv4Service { true }); - trace!(target: "discv4", num=%failed_pings.len(), "evicting nodes due to failed pong"); - - // remove nodes that failed to pong - for node_id in failed_pings { - self.remove_node(node_id); + if !failed_pings.is_empty() { + // remove nodes that failed to pong + trace!(target: "discv4", num=%failed_pings.len(), "evicting nodes due to failed pong"); + for node_id in failed_pings { + self.remove_node(node_id); + } } let mut failed_lookups = Vec::new(); @@ -1446,34 +1540,40 @@ impl Discv4Service { } true }); - trace!(target: "discv4", num=%failed_lookups.len(), "evicting nodes due to failed lookup"); - // remove nodes that failed the e2e lookup process, so we can restart it - for node_id in failed_lookups { - self.remove_node(node_id); + if !failed_lookups.is_empty() { + // remove nodes that failed the e2e lookup process, so we can restart it + trace!(target: "discv4", num=%failed_lookups.len(), "evicting nodes due to failed lookup"); + for node_id in failed_lookups { + 
self.remove_node(node_id); + } } - self.evict_failed_neighbours(now); + self.evict_failed_find_nodes(now); } /// Handles failed responses to `FindNode` - fn evict_failed_neighbours(&mut self, now: Instant) { - let mut failed_neighbours = Vec::new(); + fn evict_failed_find_nodes(&mut self, now: Instant) { + let mut failed_find_nodes = Vec::new(); self.pending_find_nodes.retain(|node_id, find_node_request| { if now.duration_since(find_node_request.sent_at) > self.config.neighbours_expiration { if !find_node_request.answered { // node actually responded but with fewer entries than expected, but we don't // treat this as a hard error since it responded. - failed_neighbours.push(*node_id); + failed_find_nodes.push(*node_id); } return false } true }); - trace!(target: "discv4", num=%failed_neighbours.len(), "processing failed neighbours"); + if failed_find_nodes.is_empty() { + return + } + + trace!(target: "discv4", num=%failed_find_nodes.len(), "processing failed find nodes"); - for node_id in failed_neighbours { + for node_id in failed_find_nodes { let key = kad_key(node_id); let failures = match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, _) => { @@ -1490,14 +1590,8 @@ // if the node failed to respond anything useful multiple times, remove the node from // the table, but only if there are enough other nodes in the bucket (bucket must be at // least half full) - if failures > (self.config.max_find_node_failures as usize) { - if let Some(bucket) = self.kbuckets.get_bucket(&key) { - if bucket.num_entries() < MAX_NODES_PER_BUCKET / 2 { - // skip half empty bucket - continue - } - } - self.remove_node(node_id); + if failures > self.config.max_find_node_failures { + self.soft_remove_node(node_id); } } } @@ -2189,8 +2283,8 @@ struct NodeEntry { last_enr_seq: Option<u64>, /// `ForkId` if retrieved via ENR requests. fork_id: Option<ForkId>, - /// Counter for failed findNode requests. - find_node_failures: usize, + /// Counter for failed _consecutive_ findNode requests. + find_node_failures: u8, /// Whether the endpoint of the peer is proven. has_endpoint_proof: bool, } @@ -2217,6 +2311,17 @@ impl NodeEntry { node } + /// Marks the entry with an established proof and resets the consecutive failure counter. + fn establish_proof(&mut self) { + self.has_endpoint_proof = true; + self.find_node_failures = 0; + } + + /// Returns true if the tracked find node failures reached the max amount + const fn exceeds_find_node_failures(&self, max_failures: u8) -> bool { + self.find_node_failures >= max_failures + } + /// Updates the last timestamp and sets the enr seq fn update_with_enr(&mut self, last_enr_seq: Option<u64>) -> Option<u64> { self.update_now(|s| std::mem::replace(&mut s.last_enr_seq, last_enr_seq)) @@ -2247,7 +2352,7 @@ impl NodeEntry { impl NodeEntry { /// Returns true if the node should be re-pinged. fn is_expired(&self) -> bool { - self.last_seen.elapsed() > ENDPOINT_PROOF_EXPIRATION + self.last_seen.elapsed() > (ENDPOINT_PROOF_EXPIRATION / 2) } } @@ -2256,8 +2361,7 @@ impl NodeEntry { enum PingReason { /// Initial ping to a previously unknown peer that was inserted into the table. InitialInsert, - /// Initial ping to a previously unknown peer that didn't fit into the table. But we still want - /// to establish a bond. + /// A ping to a peer to establish a bond (endpoint proof). EstablishBond, /// Re-ping a peer.
RePing, @@ -2324,9 +2428,9 @@ mod tests { let original = EnrForkIdEntry { fork_id: ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 0 }, }; - let mut encoded = Vec::new(); - original.encode(&mut encoded); let expected: [u8; 8] = [0xc7, 0xc6, 0x84, 0xdc, 0xe9, 0x6c, 0x2d, 0x80]; + let mut encoded = Vec::with_capacity(expected.len()); + original.encode(&mut encoded); assert_eq!(&expected[..], encoded.as_slice()); } @@ -2634,6 +2738,45 @@ mod tests { assert_eq!(ctx.inner.closest_nodes.borrow().len(), 1); } + #[tokio::test] + async fn test_reping_on_find_node_failures() { + reth_tracing::init_test_tracing(); + + let config = Discv4Config::builder().build(); + let (_discv4, mut service) = create_discv4_with_config(config).await; + + let target = PeerId::random(); + + let id = PeerId::random(); + let key = kad_key(id); + let record = NodeRecord::new("0.0.0.0:0".parse().unwrap(), id); + + let mut entry = NodeEntry::new_proven(record); + entry.find_node_failures = u8::MAX; + let _ = service.kbuckets.insert_or_update( + &key, + entry, + NodeStatus { + direction: ConnectionDirection::Incoming, + state: ConnectionState::Connected, + }, + ); + + service.lookup(target); + assert_eq!(service.pending_find_nodes.len(), 0); + assert_eq!(service.pending_pings.len(), 1); + + service.update_on_pong(record, None); + + service + .on_entry(record.id, |entry| { + // reset on pong + assert_eq!(entry.find_node_failures, 0); + assert!(entry.has_endpoint_proof); + }) + .unwrap(); + } + #[tokio::test] async fn test_service_commands() { reth_tracing::init_test_tracing(); diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 0684c263b8..203ef76134 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -412,7 +412,7 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0)); if let Some(discv5_addr) = discv5_addr_ipv4 { - warn!(target: "discv5", + warn!(target: "net::discv5", %discv5_addr, %rlpx_addr, "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" @@ -429,7 +429,7 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4)); if let Some(discv5_addr) = discv5_addr_ipv6 { - warn!(target: "discv5", + warn!(target: "net::discv5", %discv5_addr, %rlpx_addr, "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index d4e8e928fd..da54d0b526 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -95,14 +95,14 @@ impl Discv5 { /// CAUTION: The value **must** be rlp encoded pub fn set_eip868_in_local_enr(&self, key: Vec, rlp: Bytes) { let Ok(key_str) = std::str::from_utf8(&key) else { - error!(target: "discv5", + error!(target: "net::discv5", err="key not utf-8", "failed to update local enr" ); return }; if let Err(err) = self.discv5.enr_insert(key_str, &rlp) { - error!(target: "discv5", + error!(target: "net::discv5", %err, "failed to update local enr" ); @@ -131,7 +131,7 @@ impl Discv5 { self.discv5.ban_node(&node_id, None); self.ban_ip(ip); } - Err(err) => error!(target: "discv5", + Err(err) => error!(target: "net::discv5", %err, "failed to ban peer" ), diff --git a/crates/net/discv5/src/network_stack_id.rs b/crates/net/discv5/src/network_stack_id.rs index f707c7de7b..a7b6944f35 100644 --- a/crates/net/discv5/src/network_stack_id.rs +++ 
b/crates/net/discv5/src/network_stack_id.rs @@ -20,12 +20,11 @@ impl NetworkStackId { /// ENR fork ID kv-pair key, for an Optimism CL node. pub const OPSTACK: &'static [u8] = b"opstack"; - #[allow(clippy::missing_const_for_fn)] /// Returns the [`NetworkStackId`] that matches the given chain spec. pub fn id(chain: impl EthChainSpec) -> Option<&'static [u8]> { - if chain.chain().is_optimism() { + if chain.is_optimism() { return Some(Self::OPEL) - } else if chain.chain().is_ethereum() { + } else if chain.is_ethereum() { return Some(Self::ETH) } diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 2af72afcef..a52f650574 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -48,4 +48,15 @@ reth-tracing.workspace = true rand.workspace = true [features] -serde = ["dep:serde", "dep:serde_with"] +serde = [ + "dep:serde", + "dep:serde_with", + "alloy-chains/serde", + "alloy-primitives/serde", + "enr/serde", + "linked_hash_set/serde", + "parking_lot/serde", + "rand/serde", + "secp256k1/serde", + "trust-dns-resolver/serde" +] diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 5e7f4dd47a..272db6fc6d 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -71,10 +71,14 @@ tempfile.workspace = true [features] test-utils = [ - "dep:tempfile", - "dep:reth-db-api", - "reth-db/test-utils", - "reth-consensus/test-utils", - "reth-network-p2p/test-utils", - "reth-testing-utils", + "dep:tempfile", + "dep:reth-db-api", + "reth-db/test-utils", + "reth-consensus/test-utils", + "reth-network-p2p/test-utils", + "reth-testing-utils", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-db-api?/test-utils", + "reth-provider/test-utils" ] diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 83dcc657bc..e4266d9a06 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -650,6 +650,8 @@ impl ECIES { out.extend_from_slice(tag.as_slice()); } + /// Reads the `RLPx` header from the slice, setting up the MAC and AES, returning the body + /// size contained in the header. pub fn read_header(&mut self, data: &mut [u8]) -> Result { // If the data is not large enough to fit the header and mac bytes, return an error // @@ -677,7 +679,7 @@ impl ECIES { self.body_size = Some(body_size); - Ok(self.body_size.unwrap()) + Ok(body_size) } pub const fn header_len() -> usize { diff --git a/crates/net/ecies/src/codec.rs b/crates/net/ecies/src/codec.rs index c3e9b8d58c..b5a10284cf 100644 --- a/crates/net/ecies/src/codec.rs +++ b/crates/net/ecies/src/codec.rs @@ -1,12 +1,15 @@ //! This contains the main codec for `RLPx` ECIES messages -use crate::{algorithm::ECIES, ECIESError, EgressECIESValue, IngressECIESValue}; +use crate::{algorithm::ECIES, ECIESError, ECIESErrorImpl, EgressECIESValue, IngressECIESValue}; use alloy_primitives::{bytes::BytesMut, B512 as PeerId}; use secp256k1::SecretKey; use std::{fmt::Debug, io}; use tokio_util::codec::{Decoder, Encoder}; use tracing::{instrument, trace}; +/// The max size that the initial handshake packet can be. Currently 2KiB. +const MAX_INITIAL_HANDSHAKE_SIZE: usize = 2048; + /// Tokio codec for ECIES #[derive(Debug)] pub struct ECIESCodec { @@ -26,6 +29,11 @@ pub enum ECIESState { /// message containing the nonce and other metadata. Ack, + /// This is the same as the [`ECIESState::Header`] stage, but occurs only after the first + /// [`ECIESState::Ack`] message. 
This is so that the initial handshake message can be properly + /// validated. + InitialHeader, + /// The third stage of the ECIES handshake, where header is parsed, message integrity checks /// performed, and message is decrypted. Header, @@ -70,7 +78,7 @@ impl Decoder for ECIESCodec { self.ecies.read_auth(&mut buf.split_to(total_size))?; - self.state = ECIESState::Header; + self.state = ECIESState::InitialHeader; return Ok(Some(IngressECIESValue::AuthReceive(self.ecies.remote_id()))) } ECIESState::Ack => { @@ -89,9 +97,29 @@ impl Decoder for ECIESCodec { self.ecies.read_ack(&mut buf.split_to(total_size))?; - self.state = ECIESState::Header; + self.state = ECIESState::InitialHeader; return Ok(Some(IngressECIESValue::Ack)) } + ECIESState::InitialHeader => { + if buf.len() < ECIES::header_len() { + trace!("current len {}, need {}", buf.len(), ECIES::header_len()); + return Ok(None) + } + + let body_size = + self.ecies.read_header(&mut buf.split_to(ECIES::header_len()))?; + + if body_size > MAX_INITIAL_HANDSHAKE_SIZE { + trace!(?body_size, max=?MAX_INITIAL_HANDSHAKE_SIZE, "Header exceeds max initial handshake size"); + return Err(ECIESErrorImpl::InitialHeaderBodyTooLarge { + body_size, + max_body_size: MAX_INITIAL_HANDSHAKE_SIZE, + } + .into()) + } + + self.state = ECIESState::Body; + } ECIESState::Header => { if buf.len() < ECIES::header_len() { trace!("current len {}, need {}", buf.len(), ECIES::header_len()); @@ -131,7 +159,7 @@ impl Encoder for ECIESCodec { Ok(()) } EgressECIESValue::Ack => { - self.state = ECIESState::Header; + self.state = ECIESState::InitialHeader; self.ecies.write_ack(buf); Ok(()) } diff --git a/crates/net/ecies/src/error.rs b/crates/net/ecies/src/error.rs index 79965f7330..9dabfc1618 100644 --- a/crates/net/ecies/src/error.rs +++ b/crates/net/ecies/src/error.rs @@ -62,6 +62,14 @@ pub enum ECIESErrorImpl { /// The encrypted data is not large enough for all fields #[error("encrypted data is not large enough for all fields")] EncryptedDataTooSmall, + /// The initial header body is too large. 
+ #[error("initial header body is {body_size} but the max is {max_body_size}")] + InitialHeaderBodyTooLarge { + /// The body size from the header + body_size: usize, + /// The max body size + max_body_size: usize, + }, /// Error when trying to split an array beyond its length #[error("requested {idx} but array len is {len}")] OutOfBounds { diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 82c9fe37a4..1d2b548724 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -45,10 +45,22 @@ alloy-consensus.workspace = true [features] arbitrary = [ - "reth-primitives/arbitrary", - "alloy-chains/arbitrary", - "dep:arbitrary", - "dep:proptest", - "dep:proptest-arbitrary-interop", + "reth-primitives/arbitrary", + "alloy-chains/arbitrary", + "dep:arbitrary", + "dep:proptest", + "dep:proptest-arbitrary-interop", + "reth-chainspec/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary" +] +serde = [ + "dep:serde", + "alloy-chains/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde" ] -serde = ["dep:serde"] diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 352d123e2a..d3aae69f33 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -113,9 +113,10 @@ mod tests { HeadersDirection, }; use alloy_consensus::TxLegacy; - use alloy_primitives::{hex, Parity, TxKind, U256}; + use alloy_eips::BlockHashOrNumber; + use alloy_primitives::{hex, Parity, Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; - use reth_primitives::{BlockHashOrNumber, Header, Signature, Transaction, TransactionSigned}; + use reth_primitives::{Header, Transaction, TransactionSigned}; use std::str::FromStr; use super::BlockBody; @@ -277,7 +278,7 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None }, ]), }.encode(&mut data); @@ -312,7 +313,7 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None }, ]), }; @@ -412,12 +413,11 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None }, ], withdrawals: None, sidecars: None, - requests: None } ]), }; @@ -489,16 +489,24 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None }, ], withdrawals: None, sidecars: None, - requests: None } ]), }; let result = RequestPair::decode(&mut &data[..]).unwrap(); assert_eq!(result, expected); } + + #[test] + fn empty_block_bodies_rlp() { + let body = BlockBodies::default(); + let mut buf = Vec::new(); + body.encode(&mut buf); + let decoded = BlockBodies::decode(&mut buf.as_slice()).unwrap(); + assert_eq!(body, decoded); + } } diff --git a/crates/net/eth-wire-types/src/header.rs b/crates/net/eth-wire-types/src/header.rs index 4075a4a92f..8c11bfa82b 100644 --- a/crates/net/eth-wire-types/src/header.rs +++ b/crates/net/eth-wire-types/src/header.rs @@ -87,6 +87,7 @@ impl From for bool { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, hex, Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::Header; @@ -123,7 +124,7 
@@ mod tests { .unwrap(); let header = Header { parent_hash: b256!("e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2a"), - ommers_hash: b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: address!("ba5e000000000000000000000000000000000000"), state_root: b256!("ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7"), transactions_root: b256!("50f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accf"), @@ -142,7 +143,7 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None }; assert_eq!(header.hash_slow(), expected_hash); } @@ -201,10 +202,7 @@ mod tests { gas_used: 0x0125b8, timestamp: 0x079e, extra_data: Bytes::from_str("42").unwrap(), - mix_hash: B256::from_str( - "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - ) - .unwrap(), + mix_hash: EMPTY_ROOT_HASH, base_fee_per_gas: Some(0x09), withdrawals_root: Some( B256::from_str("27f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973") @@ -230,10 +228,7 @@ mod tests { "3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6", ) .unwrap(), - ommers_hash: B256::from_str( - "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - ) - .unwrap(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: Address::from_str("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba").unwrap(), state_root: B256::from_str( "3c837fc158e3e93eafcaf2e658a02f5d8f99abc9f1c4c66cdea96c0ca26406ae", @@ -254,20 +249,14 @@ mod tests { gas_used: 0x02a865, timestamp: 0x079e, extra_data: Bytes::from(vec![0x42]), - mix_hash: B256::from_str( - "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - ) - .unwrap(), + mix_hash: EMPTY_ROOT_HASH, nonce: 0u64.into(), base_fee_per_gas: Some(9), - withdrawals_root: Some( - B256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - .unwrap(), - ), + withdrawals_root: Some(EMPTY_ROOT_HASH), blob_gas_used: Some(0x020000), excess_blob_gas: Some(0), parent_beacon_block_root: None, - requests_root: None, + requests_hash: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); @@ -288,15 +277,11 @@ mod tests { "13a7ec98912f917b3e804654e37c9866092043c13eb8eab94eb64818e886cff5", ) .unwrap(), - ommers_hash: b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: address!("f97e180c050e5ab072211ad2c213eb5aee4df134"), state_root: b256!("ec229dbe85b0d3643ad0f471e6ec1a36bbc87deffbbd970762d22a53b35d068a"), - transactions_root: b256!( - "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - ), - receipts_root: b256!( - "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - ), + transactions_root: EMPTY_ROOT_HASH, + receipts_root: EMPTY_ROOT_HASH, logs_bloom: Default::default(), difficulty: U256::from(0), number: 0x30598, @@ -307,13 +292,11 @@ mod tests { mix_hash: b256!("70ccadc40b16e2094954b1064749cc6fbac783c1712f1b271a8aac3eda2f2325"), nonce: 0u64.into(), base_fee_per_gas: Some(7), - withdrawals_root: Some(b256!( - "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - )), + withdrawals_root: Some(EMPTY_ROOT_HASH), parent_beacon_block_root: None, blob_gas_used: Some(0), excess_blob_gas: Some(0x1600000), - requests_root: None, + requests_hash: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); diff --git a/crates/net/eth-wire-types/src/message.rs 
b/crates/net/eth-wire-types/src/message.rs index 92928b54d6..e6d6dcc5b8 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -51,9 +51,17 @@ impl ProtocolMessage { let message = match message_type { EthMessageID::Status => EthMessage::Status(Status::decode(buf)?), EthMessageID::NewBlockHashes => { + if version.is_eth69() { + return Err(MessageError::Invalid(version, EthMessageID::NewBlockHashes)); + } EthMessage::NewBlockHashes(NewBlockHashes::decode(buf)?) } - EthMessageID::NewBlock => EthMessage::NewBlock(Box::new(NewBlock::decode(buf)?)), + EthMessageID::NewBlock => { + if version.is_eth69() { + return Err(MessageError::Invalid(version, EthMessageID::NewBlock)); + } + EthMessage::NewBlock(Box::new(NewBlock::decode(buf)?)) + } EthMessageID::Transactions => EthMessage::Transactions(Transactions::decode(buf)?), EthMessageID::NewPooledTransactionHashes => { if version >= EthVersion::Eth68 { @@ -507,7 +515,8 @@ where mod tests { use super::MessageError; use crate::{ - message::RequestPair, EthMessage, EthMessageID, GetNodeData, NodeData, ProtocolMessage, + message::RequestPair, EthMessage, EthMessageID, EthVersion, GetNodeData, NodeData, + ProtocolMessage, }; use alloy_primitives::hex; use alloy_rlp::{Decodable, Encodable, Error}; @@ -579,4 +588,17 @@ mod tests { let result = RequestPair::<Vec<u8>>::decode(&mut &*raw_pair); assert!(matches!(result, Err(Error::UnexpectedLength))); } + + #[test] + fn empty_block_bodies_protocol() { + let empty_block_bodies = ProtocolMessage::from(EthMessage::BlockBodies(RequestPair { + request_id: 0, + message: Default::default(), + })); + let mut buf = Vec::new(); + empty_block_bodies.encode(&mut buf); + let decoded = + ProtocolMessage::decode_message(EthVersion::Eth68, &mut buf.as_slice()).unwrap(); + assert_eq!(empty_block_bodies, decoded); + } } diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index a5e7530ec0..d9e8d4319b 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -19,7 +19,7 @@ use std::fmt::{Debug, Display}; pub struct Status { /// The current protocol version. For example, peers running `eth/66` would have a version of /// 66. - pub version: u8, + pub version: EthVersion, /// The chain id, as introduced in /// [EIP155](https://eips.ethereum.org/EIPS/eip-155#list-of-chain-ids). @@ -50,7 +50,7 @@ impl Status { /// Sets the [`EthVersion`] for the status. pub fn set_eth_version(&mut self, version: EthVersion) { - self.version = version as u8; + self.version = version; } /// Create a [`StatusBuilder`] from the given [`EthChainSpec`] and head block. @@ -122,7 +122,7 @@ impl Default for Status { fn default() -> Self { let mainnet_genesis = MAINNET.genesis_hash(); Self { - version: EthVersion::Eth68 as u8, + version: EthVersion::Eth68, chain: Chain::from_named(NamedChain::Mainnet), total_difficulty: U256::from(17_179_869_184u64), blockhash: mainnet_genesis, @@ -138,14 +138,14 @@ impl Default for Status { /// /// # Example /// ``` +/// use alloy_consensus::constants::MAINNET_GENESIS_HASH; /// use alloy_primitives::{B256, U256}; /// use reth_chainspec::{Chain, EthereumHardfork, MAINNET}; /// use reth_eth_wire_types::{EthVersion, Status}; -/// use reth_primitives::MAINNET_GENESIS_HASH; /// /// // this is just an example status message!
/// let status = Status::builder() -/// .version(EthVersion::Eth66.into()) +/// .version(EthVersion::Eth66) /// .chain(Chain::mainnet()) /// .total_difficulty(U256::from(100)) /// .blockhash(B256::from(MAINNET_GENESIS_HASH)) @@ -156,7 +156,7 @@ impl Default for Status { /// assert_eq!( /// status, /// Status { -/// version: EthVersion::Eth66.into(), +/// version: EthVersion::Eth66, /// chain: Chain::mainnet(), /// total_difficulty: U256::from(100), /// blockhash: B256::from(MAINNET_GENESIS_HASH), @@ -177,7 +177,7 @@ impl StatusBuilder { } /// Sets the protocol version. - pub const fn version(mut self, version: u8) -> Self { + pub const fn version(mut self, version: EthVersion) -> Self { self.status.version = version; self } @@ -216,6 +216,7 @@ impl StatusBuilder { #[cfg(test)] mod tests { use crate::{EthVersion, Status}; + use alloy_consensus::constants::MAINNET_GENESIS_HASH; use alloy_genesis::Genesis; use alloy_primitives::{hex, B256, U256}; use alloy_rlp::{Decodable, Encodable}; @@ -228,17 +229,14 @@ mod tests { fn encode_eth_status_message() { let expected = hex!("f85643018a07aac59dabcdd74bc567a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13da0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d80"); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: Chain::from_named(NamedChain::Mainnet), total_difficulty: U256::from(36206751599115524359527u128), blockhash: B256::from_str( "feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d", ) .unwrap(), - genesis: B256::from_str( - "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", - ) - .unwrap(), + genesis: MAINNET_GENESIS_HASH, forkid: ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 }, }; @@ -251,17 +249,14 @@ mod tests { fn decode_eth_status_message() { let data = hex!("f85643018a07aac59dabcdd74bc567a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13da0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d80"); let expected = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: Chain::from_named(NamedChain::Mainnet), total_difficulty: U256::from(36206751599115524359527u128), blockhash: B256::from_str( "feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d", ) .unwrap(), - genesis: B256::from_str( - "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", - ) - .unwrap(), + genesis: MAINNET_GENESIS_HASH, forkid: ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 }, }; let status = Status::decode(&mut &data[..]).unwrap(); @@ -272,7 +267,7 @@ mod tests { fn encode_network_status_message() { let expected = hex!("f850423884024190faa0f8514c4680ef27700751b08f37645309ce65a449616a3ea966bf39dd935bb27ba00d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5bc6845d43d2fd80"); let status = Status { - version: EthVersion::Eth66 as u8, + version: EthVersion::Eth66, chain: Chain::from_named(NamedChain::BinanceSmartChain), total_difficulty: U256::from(37851386u64), blockhash: B256::from_str( @@ -295,7 +290,7 @@ mod tests { fn decode_network_status_message() { let data = hex!("f850423884024190faa0f8514c4680ef27700751b08f37645309ce65a449616a3ea966bf39dd935bb27ba00d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5bc6845d43d2fd80"); let expected = Status { - version: EthVersion::Eth66 as u8, + version: EthVersion::Eth66, chain: Chain::from_named(NamedChain::BinanceSmartChain), total_difficulty: U256::from(37851386u64), 
blockhash: B256::from_str( @@ -316,7 +311,7 @@ mod tests { fn decode_another_network_status_message() { let data = hex!("f86142820834936d68fcffffffffffffffffffffffffdeab81b8a0523e8163a6d620a4cc152c547a05f28a03fec91a2a615194cb86df9731372c0ca06499dccdc7c7def3ebb1ce4c6ee27ec6bd02aee570625ca391919faf77ef27bdc6841a67ccd880"); let expected = Status { - version: EthVersion::Eth66 as u8, + version: EthVersion::Eth66, chain: Chain::from_id(2100), total_difficulty: U256::from_str( "0x000000000000000000000000006d68fcffffffffffffffffffffffffdeab81b8", diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index ab65aa178e..7c66f657a1 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -78,10 +78,10 @@ impl FromIterator for PooledTransactions { mod tests { use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions}; use alloy_consensus::{TxEip1559, TxLegacy}; - use alloy_primitives::{hex, Parity, TxKind, U256}; + use alloy_primitives::{hex, Parity, Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; use reth_chainspec::MIN_TRANSACTION_GAS; - use reth_primitives::{PooledTransactionsElement, Signature, Transaction, TransactionSigned}; + use reth_primitives::{PooledTransactionsElement, Transaction, TransactionSigned}; use std::str::FromStr; #[test] diff --git a/crates/net/eth-wire-types/src/version.rs b/crates/net/eth-wire-types/src/version.rs index 4fd3e792dc..40d51cb551 100644 --- a/crates/net/eth-wire-types/src/version.rs +++ b/crates/net/eth-wire-types/src/version.rs @@ -15,15 +15,17 @@ pub struct ParseVersionError(String); /// The `eth` protocol version. #[repr(u8)] #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Display)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] pub enum EthVersion { /// The `eth` protocol version 66. Eth66 = 66, - /// The `eth` protocol version 67. Eth67 = 67, - /// The `eth` protocol version 68. Eth68 = 68, + /// The `eth` protocol version 69. + Eth69 = 69, } impl EthVersion { @@ -38,6 +40,8 @@ impl EthVersion { // eth/67,68 are eth/66 minus GetNodeData and NodeData messages 13 } + // eth69 is both eth67 and eth68 minus NewBlockHashes and NewBlock + Self::Eth69 => 11, } } @@ -55,6 +59,31 @@ impl EthVersion { pub const fn is_eth68(&self) -> bool { matches!(self, Self::Eth68) } + + /// Returns true if the version is eth/69 + pub const fn is_eth69(&self) -> bool { + matches!(self, Self::Eth69) + } +} + +/// RLP encodes `EthVersion` as a single byte (66-69). +impl Encodable for EthVersion { + fn encode(&self, out: &mut dyn BufMut) { + (*self as u8).encode(out) + } + + fn length(&self) -> usize { + (*self as u8).length() + } +} + +/// RLP decodes a single byte into `EthVersion`. +/// Returns error if byte is not a valid version (66-69). +impl Decodable for EthVersion { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let version = u8::decode(buf)?; + Self::try_from(version).map_err(|_| RlpError::Custom("invalid eth version")) + } } /// Allow for converting from a `&str` to an `EthVersion`. 
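Because 66 through 69 are all below RLP's 0x80 single-byte threshold, the `Encodable`/`Decodable` impls added above make a version occupy exactly one raw byte on the wire. A hedged round-trip sketch of using them (assumes `EthVersion` is the type from `reth_eth_wire_types`; otherwise standalone):

```rust
use alloy_rlp::{Decodable, Encodable};
use reth_eth_wire_types::EthVersion;

// Round-trip one version through RLP: values 66..=69 are < 0x80, so the
// encoding is the single raw byte and decoding only validates the range.
fn roundtrip(version: EthVersion) {
    let mut buf = Vec::new();
    version.encode(&mut buf);
    assert_eq!(buf, [version as u8]);
    assert_eq!(EthVersion::decode(&mut buf.as_slice()).unwrap(), version);
}

fn main() {
    roundtrip(EthVersion::Eth69);
}
```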
@@ -75,6 +104,7 @@ impl TryFrom<&str> for EthVersion { "66" => Ok(Self::Eth66), "67" => Ok(Self::Eth67), "68" => Ok(Self::Eth68), + "69" => Ok(Self::Eth69), _ => Err(ParseVersionError(s.to_string())), } } @@ -98,6 +128,7 @@ impl TryFrom<u8> for EthVersion { 66 => Ok(Self::Eth66), 67 => Ok(Self::Eth67), 68 => Ok(Self::Eth68), + 69 => Ok(Self::Eth69), _ => Err(ParseVersionError(u.to_string())), } } @@ -126,6 +157,7 @@ impl From<EthVersion> for &'static str { EthVersion::Eth66 => "66", EthVersion::Eth67 => "67", EthVersion::Eth68 => "68", + EthVersion::Eth69 => "69", } } } @@ -173,13 +205,16 @@ impl Decodable for ProtocolVersion { #[cfg(test)] mod tests { use super::{EthVersion, ParseVersionError}; + use alloy_rlp::{Decodable, Encodable, Error as RlpError}; + use bytes::BytesMut; #[test] fn test_eth_version_try_from_str() { assert_eq!(EthVersion::Eth66, EthVersion::try_from("66").unwrap()); assert_eq!(EthVersion::Eth67, EthVersion::try_from("67").unwrap()); assert_eq!(EthVersion::Eth68, EthVersion::try_from("68").unwrap()); - assert_eq!(Err(ParseVersionError("69".to_string())), EthVersion::try_from("69")); + assert_eq!(EthVersion::Eth69, EthVersion::try_from("69").unwrap()); + assert_eq!(Err(ParseVersionError("70".to_string())), EthVersion::try_from("70")); } #[test] @@ -187,6 +222,48 @@ mod tests { assert_eq!(EthVersion::Eth66, "66".parse().unwrap()); assert_eq!(EthVersion::Eth67, "67".parse().unwrap()); assert_eq!(EthVersion::Eth68, "68".parse().unwrap()); - assert_eq!(Err(ParseVersionError("69".to_string())), "69".parse::<EthVersion>()); + assert_eq!(EthVersion::Eth69, "69".parse().unwrap()); + assert_eq!(Err(ParseVersionError("70".to_string())), "70".parse::<EthVersion>()); + } + + #[test] + fn test_eth_version_rlp_encode() { + let versions = [EthVersion::Eth66, EthVersion::Eth67, EthVersion::Eth68, EthVersion::Eth69]; + + for version in versions { + let mut encoded = BytesMut::new(); + version.encode(&mut encoded); + + assert_eq!(encoded.len(), 1); + assert_eq!(encoded[0], version as u8); + } + } + #[test] + fn test_eth_version_rlp_decode() { + let test_cases = [ + (66_u8, Ok(EthVersion::Eth66)), + (67_u8, Ok(EthVersion::Eth67)), + (68_u8, Ok(EthVersion::Eth68)), + (69_u8, Ok(EthVersion::Eth69)), + (70_u8, Err(RlpError::Custom("invalid eth version"))), + (65_u8, Err(RlpError::Custom("invalid eth version"))), + ]; + + for (input, expected) in test_cases { + let mut encoded = BytesMut::new(); + input.encode(&mut encoded); + + let mut slice = encoded.as_ref(); + let result = EthVersion::decode(&mut slice); + assert_eq!(result, expected); + } + } + + #[test] + fn test_eth_version_total_messages() { + assert_eq!(EthVersion::Eth66.total_messages(), 15); + assert_eq!(EthVersion::Eth67.total_messages(), 13); + assert_eq!(EthVersion::Eth68.total_messages(), 13); + assert_eq!(EthVersion::Eth69.total_messages(), 11); } } diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 79c3dd7044..09b50e6e7a 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -66,12 +66,25 @@ alloy-eips.workspace = true [features] arbitrary = [ - "reth-primitives/arbitrary", - "reth-eth-wire-types/arbitrary", - "dep:arbitrary", + "reth-primitives/arbitrary", + "reth-eth-wire-types/arbitrary", + "dep:arbitrary", + "reth-chainspec/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-codecs/arbitrary" +] +serde = [ + "dep:serde", + "reth-eth-wire-types/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", + "secp256k1/serde", + "reth-codecs/serde"
] bsc = ["reth-primitives/bsc"] -serde = ["dep:serde", "reth-eth-wire-types/serde"] [[test]] name = "fuzz_roundtrip" diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index 557fbd66a0..1f8b995afd 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -5,6 +5,7 @@ use crate::{ }; use alloy_primitives::B256; use reth_chainspec::Chain; +use reth_eth_wire_types::EthVersion; use reth_primitives::{GotExpected, GotExpectedBoxed, ValidationError}; use std::io; @@ -88,7 +89,7 @@ pub enum EthHandshakeError { MismatchedGenesis(GotExpectedBoxed<B256>), #[error("mismatched protocol version in status message: {0}")] /// Mismatched protocol versions in status messages. - MismatchedProtocolVersion(GotExpected<u8>), + MismatchedProtocolVersion(GotExpected<EthVersion>), #[error("mismatched chain in status message: {0}")] /// Mismatch in chain details in status messages. MismatchedChain(GotExpected<Chain>), diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index def6a3c5eb..b11fbb3de0 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -23,6 +23,9 @@ use tracing::{debug, trace}; // https://github.com/ethereum/go-ethereum/blob/30602163d5d8321fbc68afdcbbaf2362b2641bde/eth/protocols/eth/protocol.go#L50 pub const MAX_MESSAGE_SIZE: usize = 10 * 1024 * 1024; +/// [`MAX_STATUS_SIZE`] is the maximum cap on the size of the initial status message +pub(crate) const MAX_STATUS_SIZE: usize = 500 * 1024; + /// An un-authenticated [`EthStream`]. This is consumed and returns a [`EthStream`] after the /// `Status` handshake is completed. #[pin_project] @@ -99,12 +102,12 @@ where } }?; - if their_msg.len() > MAX_MESSAGE_SIZE { + if their_msg.len() > MAX_STATUS_SIZE { self.inner.disconnect(DisconnectReason::ProtocolBreach).await?; return Err(EthStreamError::MessageTooBig(their_msg.len())); } - let version = EthVersion::try_from(status.version)?; + let version = status.version; let msg = match ProtocolMessage::decode_message(version, &mut their_msg.as_ref()) { Ok(m) => m, Err(err) => { @@ -432,7 +435,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::ZERO, blockhash: B256::random(), @@ -479,7 +482,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::from(2).pow(U256::from(100)) - U256::from(1), blockhash: B256::random(), @@ -526,7 +529,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::from(2).pow(U256::from(100)), blockhash: B256::random(), @@ -667,7 +670,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::ZERO, blockhash: B256::random(), @@ -738,7 +741,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain:
NamedChain::Mainnet.into(), total_difficulty: U256::ZERO, blockhash: B256::random(), diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs index e516c0aee7..d7a3aa582b 100644 --- a/crates/net/eth-wire/src/test_utils.rs +++ b/crates/net/eth-wire/src/test_utils.rs @@ -37,7 +37,7 @@ pub fn eth_handshake() -> (Status, ForkFilter) { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: Chain::mainnet(), total_difficulty: U256::ZERO, blockhash: B256::random(), diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index 650d749048..6d410e9db2 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -40,4 +40,10 @@ derive_more.workspace = true [features] default = ["serde"] -serde = ["dep:serde"] +serde = [ + "dep:serde", + "reth-eth-wire-types/serde", + "reth-network-types/serde", + "alloy-primitives/serde", + "enr/serde" +] diff --git a/crates/net/network-types/Cargo.toml b/crates/net/network-types/Cargo.toml index 97c8e65cbb..c9b8fdd5bf 100644 --- a/crates/net/network-types/Cargo.toml +++ b/crates/net/network-types/Cargo.toml @@ -22,7 +22,7 @@ serde = { workspace = true, optional = true } humantime-serde = { workspace = true, optional = true } serde_json = { workspace = true } -# misc +# misc tracing.workspace = true [features] diff --git a/crates/net/network-types/src/peers/config.rs b/crates/net/network-types/src/peers/config.rs index 97a8bb3cac..890679f5d3 100644 --- a/crates/net/network-types/src/peers/config.rs +++ b/crates/net/network-types/src/peers/config.rs @@ -24,6 +24,9 @@ pub const DEFAULT_MAX_COUNT_PEERS_INBOUND: u32 = 30; /// This restricts how many outbound dials can be performed concurrently. pub const DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS: usize = 15; +/// A temporary timeout for ips on incoming connection attempts. +pub const INBOUND_IP_THROTTLE_DURATION: Duration = Duration::from_secs(30); + /// The durations to use when a backoff should be applied to a peer. /// /// See also [`BackoffKind`]. @@ -155,6 +158,11 @@ pub struct PeersConfig { /// /// The backoff duration increases with number of backoff attempts. pub backoff_durations: PeerBackoffDurations, + /// How long to temporarily ban ips on incoming connection attempts. + /// + /// This acts as an IP based rate limit. + #[cfg_attr(feature = "serde", serde(default, with = "humantime_serde"))] + pub incoming_ip_throttle_duration: Duration, } impl Default for PeersConfig { @@ -171,6 +179,7 @@ impl Default for PeersConfig { trusted_nodes_only: false, basic_nodes: Default::default(), max_backoff_count: 5, + incoming_ip_throttle_duration: INBOUND_IP_THROTTLE_DURATION, } } } diff --git a/crates/net/network-types/src/peers/state.rs b/crates/net/network-types/src/peers/state.rs index f6ab1a39f8..1e2466c805 100644 --- a/crates/net/network-types/src/peers/state.rs +++ b/crates/net/network-types/src/peers/state.rs @@ -31,6 +31,12 @@ impl PeerConnectionState { } } + /// Returns true if this is the idle state. + #[inline] + pub const fn is_idle(&self) -> bool { + matches!(self, Self::Idle) + } + /// Returns true if this is an active incoming connection. 
#[inline] pub const fn is_incoming(&self) -> bool { diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index deed760b8f..04d2f50c30 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -101,8 +101,36 @@ criterion = { workspace = true, features = ["async_tokio", "html_reports"] } [features] default = ["serde"] geth-tests = [] -serde = ["dep:serde", "secp256k1/serde", "enr/serde", "reth-network-types/serde"] -test-utils = ["dep:reth-provider", "reth-provider?/test-utils", "dep:tempfile", "reth-transaction-pool/test-utils", "reth-network-types/test-utils"] +serde = [ + "dep:serde", + "secp256k1/serde", + "enr/serde", + "reth-network-types/serde", + "reth-dns-discovery/serde", + "reth-eth-wire/serde", + "reth-provider?/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "discv5/serde", + "parking_lot/serde", + "rand/serde", + "smallvec/serde", + "url/serde" +] +test-utils = [ + "dep:reth-provider", + "reth-provider?/test-utils", + "dep:tempfile", + "reth-transaction-pool/test-utils", + "reth-network-types/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-discv4/test-utils", + "reth-network/test-utils", + "reth-network-p2p/test-utils", + "reth-primitives/test-utils" +] bsc = ["reth-eth-wire/bsc"] [[bench]] diff --git a/crates/net/network/src/budget.rs b/crates/net/network/src/budget.rs index 0b5e3d3a90..5f5d888618 100644 --- a/crates/net/network/src/budget.rs +++ b/crates/net/network/src/budget.rs @@ -5,8 +5,8 @@ pub const DEFAULT_BUDGET_TRY_DRAIN_STREAM: u32 = 10; /// Default budget to try and drain headers and bodies download streams. /// -/// Default is 4 iterations. -pub const DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS: u32 = 4; +/// Default is 2 iterations. +pub const DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS: u32 = 2; /// Default budget to try and drain [`Swarm`](crate::swarm::Swarm). /// @@ -55,7 +55,7 @@ macro_rules! 
poll_nested_stream_with_budget { let mut f = $on_ready_some; f(item); - budget = budget.saturating_sub(1); + budget -= 1; if budget == 0 { break true } diff --git a/crates/net/network/src/cache.rs b/crates/net/network/src/cache.rs index fb2daca666..758b491679 100644 --- a/crates/net/network/src/cache.rs +++ b/crates/net/network/src/cache.rs @@ -42,7 +42,7 @@ impl LruCache { pub fn insert_and_get_evicted(&mut self, entry: T) -> (bool, Option) { let new = self.inner.peek(&entry).is_none(); let evicted = - if new && (self.limit as usize) <= self.inner.len() { self.remove_lru() } else { None }; + (new && (self.limit as usize) <= self.inner.len()).then(|| self.remove_lru()).flatten(); _ = self.inner.get_or_insert(entry, || ()); (new, evicted) diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index d366027d68..5b2bb788f4 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -214,6 +214,10 @@ impl Discovery { fn on_node_record_update(&mut self, record: NodeRecord, fork_id: Option) { let peer_id = record.id; let tcp_addr = record.tcp_addr(); + if tcp_addr.port() == 0 { + // useless peer for p2p + return + } let udp_addr = record.udp_addr(); let addr = PeerAddr::new(tcp_addr, Some(udp_addr)); _ = diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index 9197ac185b..b38cafbe70 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -88,6 +88,8 @@ pub struct SessionManagerMetrics { pub(crate) total_dial_successes: Counter, /// Number of dropped outgoing peer messages. pub(crate) total_outgoing_peer_messages_dropped: Counter, + /// Number of queued outgoing messages + pub(crate) queued_outgoing_messages: Gauge, } /// Metrics for the [`TransactionsManager`](crate::transactions::TransactionsManager). diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index 3d5ff7a0d4..4855ff5e74 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -84,6 +84,8 @@ pub struct PeersManager { max_backoff_count: u8, /// Tracks the connection state of the node net_connection_state: NetworkConnectionState, + /// How long to temporarily ban ip on an incoming connection attempt. + incoming_ip_throttle_duration: Duration, } impl PeersManager { @@ -100,6 +102,7 @@ impl PeersManager { trusted_nodes_only, basic_nodes, max_backoff_count, + incoming_ip_throttle_duration, } = config; let (manager_tx, handle_rx) = mpsc::unbounded_channel(); let now = Instant::now(); @@ -148,6 +151,7 @@ impl PeersManager { last_tick: Instant::now(), max_backoff_count, net_connection_state: NetworkConnectionState::default(), + incoming_ip_throttle_duration, } } @@ -218,6 +222,11 @@ impl PeersManager { self.backed_off_peers.len() } + /// Returns the number of idle trusted peers. + fn num_idle_trusted_peers(&self) -> usize { + self.peers.iter().filter(|(_, peer)| peer.kind.is_trusted() && peer.state.is_idle()).count() + } + /// Invoked when a new _incoming_ tcp connection is accepted. 
/// /// returns an error if the inbound ip address is on the ban list @@ -229,12 +238,40 @@ impl PeersManager { return Err(InboundConnectionError::IpBanned) } - if !self.connection_info.has_in_capacity() && self.trusted_peer_ids.is_empty() { - // if we don't have any inbound slots and no trusted peers, we don't accept any new - // connections + // check if we even have slots for a new incoming connection + if !self.connection_info.has_in_capacity() { + if self.trusted_peer_ids.is_empty() { + // if we don't have any incoming slots and no trusted peers, we don't accept any new + // connections + return Err(InboundConnectionError::ExceedsCapacity) + } + + // there's an edge case here where no incoming connections besides trusted peers + // are allowed (max_inbound == 0), in which case we still need to allow new pending + // incoming connections until all trusted peers are connected. + let num_idle_trusted_peers = self.num_idle_trusted_peers(); + if num_idle_trusted_peers <= self.trusted_peer_ids.len() { + // we still want to limit concurrent pending connections + let max_inbound = + self.trusted_peer_ids.len().max(self.connection_info.config.max_inbound); + if self.connection_info.num_pending_in <= max_inbound { + self.connection_info.inc_pending_in(); + } + return Ok(()) + } + + // all trusted peers are either connected or connecting + return Err(InboundConnectionError::ExceedsCapacity) + } + + // also cap the incoming connections we can process at once + if !self.connection_info.has_in_pending_capacity() { return Err(InboundConnectionError::ExceedsCapacity) } + + // apply the rate limit + self.throttle_incoming_ip(addr); + self.connection_info.inc_pending_in(); Ok(()) } @@ -353,6 +390,12 @@ impl PeersManager { self.ban_list.ban_ip_until(ip, std::time::Instant::now() + self.ban_duration); } + /// Bans the IP temporarily to rate limit inbound connection attempts per IP. + fn throttle_incoming_ip(&mut self, ip: IpAddr) { + self.ban_list + .ban_ip_until(ip, std::time::Instant::now() + self.incoming_ip_throttle_duration); + } + /// Temporarily puts the peer in timeout by inserting it into the backedoff peers set fn backoff_peer_until(&mut self, peer_id: PeerId, until: std::time::Instant) { trace!(target: "net::peers", ?peer_id, "backing off"); @@ -968,17 +1011,22 @@ impl ConnectionInfo { Self { config, num_outbound: 0, num_pending_out: 0, num_inbound: 0, num_pending_in: 0 } } - /// Returns `true` if there's still capacity for a new outgoing connection. + /// Returns `true` if there's still capacity to perform an outgoing connection. const fn has_out_capacity(&self) -> bool { self.num_pending_out < self.config.max_concurrent_outbound_dials && self.num_outbound < self.config.max_outbound } - /// Returns `true` if there's still capacity for a new incoming connection. + /// Returns `true` if there's still capacity to accept a new incoming connection. const fn has_in_capacity(&self) -> bool { self.num_inbound < self.config.max_inbound } + /// Returns `true` if we can handle an additional incoming pending connection.
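Condensed, the admission path added above performs its checks in a fixed order: ban list, inbound capacity (with a trusted-peer escape hatch), then pending capacity, and finally the per-IP throttle on acceptance. The sketch below restates that order with the manager state reduced to plain booleans (hypothetical helper, simplified: the real code also bounds concurrent pending connections for trusted peers):

```rust
#[derive(Debug, PartialEq)]
enum Admission {
    Accept,
    IpBanned,
    ExceedsCapacity,
}

// Simplified restatement of on_incoming_pending_session's decision order.
fn admit_incoming(
    ip_banned: bool,
    has_in_capacity: bool,
    has_in_pending_capacity: bool,
    has_trusted_peers: bool,
    all_trusted_connected: bool,
) -> Admission {
    if ip_banned {
        return Admission::IpBanned;
    }
    if !has_in_capacity {
        // Inbound slots are full: only admit while trusted peers still need one.
        if !has_trusted_peers || all_trusted_connected {
            return Admission::ExceedsCapacity;
        }
        return Admission::Accept;
    }
    if !has_in_pending_capacity {
        // Cap how many incoming connections are processed at once.
        return Admission::ExceedsCapacity;
    }
    // Accepting also temporarily bans the source IP for
    // incoming_ip_throttle_duration, acting as a per-IP rate limit.
    Admission::Accept
}
```

For instance, `admit_incoming(false, false, true, true, true)` yields `ExceedsCapacity`, matching the "all trusted peers are either connected or connecting" branch above.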
+ const fn has_in_pending_capacity(&self) -> bool { + self.num_pending_in < self.config.max_inbound + } + fn decr_state(&mut self, state: PeerConnectionState) { match state { PeerConnectionState::Idle => {} @@ -1094,15 +1142,6 @@ impl Display for InboundConnectionError { #[cfg(test)] mod tests { - use std::{ - future::{poll_fn, Future}, - io, - net::{IpAddr, Ipv4Addr, SocketAddr}, - pin::Pin, - task::{Context, Poll}, - time::Duration, - }; - use alloy_primitives::B512; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError, P2PHandshakeError, P2PStreamError}, @@ -1114,6 +1153,14 @@ mod tests { use reth_network_types::{ peers::reputation::DEFAULT_REPUTATION, BackoffKind, ReputationChangeKind, }; + use std::{ + future::{poll_fn, Future}, + io, + net::{IpAddr, Ipv4Addr, SocketAddr}, + pin::Pin, + task::{Context, Poll}, + time::Duration, + }; use url::Host; use super::PeersManager; @@ -1597,6 +1644,23 @@ mod tests { assert_eq!(peers.connection_info.num_pending_in, 0); } + #[tokio::test] + async fn test_reject_incoming_at_pending_capacity() { + let mut peers = PeersManager::default(); + + for count in 1..=peers.connection_info.config.max_inbound { + let socket_addr = + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, count as u8)), 8008); + assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_ok()); + assert_eq!(peers.connection_info.num_pending_in, count); + } + assert!(peers.connection_info.has_in_capacity()); + assert!(!peers.connection_info.has_in_pending_capacity()); + + let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 100)), 8008); + assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_err()); + } + #[tokio::test] async fn test_closed_incoming() { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); @@ -2278,6 +2342,39 @@ mod tests { ); } + #[tokio::test] + async fn test_incoming_rate_limit() { + let config = PeersConfig { + incoming_ip_throttle_duration: Duration::from_millis(100), + ..PeersConfig::test() + }; + let mut peers = PeersManager::new(config); + + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(168, 0, 1, 2)), 8009); + assert!(peers.on_incoming_pending_session(addr.ip()).is_ok()); + assert_eq!( + peers.on_incoming_pending_session(addr.ip()).unwrap_err(), + InboundConnectionError::IpBanned + ); + + peers.release_interval.reset_immediately(); + tokio::time::sleep(peers.incoming_ip_throttle_duration).await; + + // await unban + poll_fn(|cx| loop { + if peers.poll(cx).is_pending() { + return Poll::Ready(()); + } + }) + .await; + + assert!(peers.on_incoming_pending_session(addr.ip()).is_ok()); + assert_eq!( + peers.on_incoming_pending_session(addr.ip()).unwrap_err(), + InboundConnectionError::IpBanned + ); + } + #[tokio::test] async fn test_tick() { let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)); diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 9df322c966..444d497e00 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -12,6 +12,7 @@ use std::{ }; use futures::{stream::Fuse, SinkExt, StreamExt}; +use metrics::Gauge; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError, P2PStreamError}, message::{EthBroadcastMessage, RequestPair}, @@ -87,7 +88,7 @@ pub(crate) struct ActiveSession { /// All requests that were sent by the remote peer and we're waiting on an internal response pub(crate) received_requests_from_remote: Vec, /// Buffered messages that should be handled and sent to the 
peer. - pub(crate) queued_outgoing: VecDeque<OutgoingMessage>, + pub(crate) queued_outgoing: QueuedOutgoingMessages, /// The maximum time we wait for a response from a peer. pub(crate) internal_request_timeout: Arc<AtomicU64>, /// Interval when to check for timed out requests. @@ -761,6 +762,32 @@ fn calculate_new_timeout(current_timeout: Duration, estimated_rtt: Duration) -> smoothened_timeout.clamp(MINIMUM_TIMEOUT, MAXIMUM_TIMEOUT) } + +/// A helper struct that wraps the queue of outgoing messages and a metric to track their count. +pub(crate) struct QueuedOutgoingMessages { + messages: VecDeque<OutgoingMessage>, + count: Gauge, +} + +impl QueuedOutgoingMessages { + pub(crate) const fn new(metric: Gauge) -> Self { + Self { messages: VecDeque::new(), count: metric } + } + + pub(crate) fn push_back(&mut self, message: OutgoingMessage) { + self.messages.push_back(message); + self.count.increment(1); + } + + pub(crate) fn pop_front(&mut self) -> Option<OutgoingMessage> { + self.messages.pop_front().inspect(|_| self.count.decrement(1)) + } + + pub(crate) fn shrink_to_fit(&mut self) { + self.messages.shrink_to_fit(); + } +} + #[cfg(test)] mod tests { use super::*; @@ -886,7 +913,7 @@ mod tests { internal_request_tx: ReceiverStream::new(messages_rx).fuse(), inflight_requests: Default::default(), conn, - queued_outgoing: Default::default(), + queued_outgoing: QueuedOutgoingMessages::new(Gauge::noop()), received_requests_from_remote: Default::default(), internal_request_timeout_interval: tokio::time::interval( INITIAL_REQUEST_TIMEOUT, diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 74f303df7b..712f076b47 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -5,6 +5,7 @@ mod conn; mod counter; mod handle; +use active::QueuedOutgoingMessages; pub use conn::EthRlpxConnection; pub use handle::{ ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, @@ -495,7 +496,9 @@ impl SessionManager { internal_request_tx: ReceiverStream::new(messages_rx).fuse(), inflight_requests: Default::default(), conn, - queued_outgoing: Default::default(), + queued_outgoing: QueuedOutgoingMessages::new( + self.metrics.queued_outgoing_messages.clone(), + ), received_requests_from_remote: Default::default(), internal_request_timeout_interval: tokio::time::interval( self.initial_internal_request_timeout, @@ -868,7 +871,7 @@ async fn authenticate( extra_handlers: RlpxSubProtocolHandlers, ) { let local_addr = stream.local_addr().ok(); - let stream = match get_eciess_stream(stream, secret_key, direction).await { + let stream = match get_ecies_stream(stream, secret_key, direction).await { Ok(stream) => stream, Err(error) => { let _ = events @@ -917,7 +920,7 @@ async fn authenticate( /// Returns an [`ECIESStream`] if it can be built. If not, send a /// [`PendingSessionEvent::EciesAuthError`] and returns `None` -async fn get_eciess_stream<Io: AsyncRead + AsyncWrite + Unpin>( +async fn get_ecies_stream<Io: AsyncRead + AsyncWrite + Unpin>( stream: Io, secret_key: SecretKey, direction: Direction, diff --git a/crates/net/network/src/transactions/config.rs b/crates/net/network/src/transactions/config.rs index 81ec293ea1..b838f7cfe7 100644 --- a/crates/net/network/src/transactions/config.rs +++ b/crates/net/network/src/transactions/config.rs @@ -47,7 +47,7 @@ pub enum TransactionPropagationMode { } impl TransactionPropagationMode { - /// Returns the number of peers that should + /// Returns the number of peers full transactions should be propagated to.
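The new wrapper keeps the `queued_outgoing_messages` gauge in lock-step with every queue mutation. A stripped-down model of that bookkeeping, with a plain integer standing in for the `metrics::Gauge` handle:

use std::collections::VecDeque;

struct CountedQueue<T> {
    messages: VecDeque<T>,
    depth: usize, // stands in for Gauge::increment / Gauge::decrement
}

impl<T> CountedQueue<T> {
    fn new() -> Self {
        Self { messages: VecDeque::new(), depth: 0 }
    }

    fn push_back(&mut self, msg: T) {
        self.messages.push_back(msg);
        self.depth += 1;
    }

    fn pop_front(&mut self) -> Option<T> {
        let msg = self.messages.pop_front();
        if msg.is_some() {
            // decrement only when something was actually popped
            self.depth -= 1;
        }
        msg
    }
}

fn main() {
    let mut queue = CountedQueue::new();
    queue.push_back("ping");
    assert_eq!(queue.depth, 1);
    queue.pop_front();
    assert_eq!(queue.depth, 0);
}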
pub(crate) fn full_peer_count(&self, peer_count: usize) -> usize { match self { Self::Sqrt => (peer_count as f64).sqrt().round() as usize, diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 9276219d59..3e85695155 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -912,17 +912,6 @@ impl TransactionFetcher { // fallback peers let GetPooledTxResponse { peer_id, mut requested_hashes, result } = response; - debug_assert!( - self.active_peers.get(&peer_id).is_some(), - "`{}` has been removed from `@active_peers` before inflight request(s) resolved, broken invariant `@active_peers` and `@inflight_requests`, `%peer_id`: {}, `@hashes_fetch_inflight_and_pending_fetch` for `%requested_hashes`: {:?}", - peer_id, - peer_id, - requested_hashes.iter().map(|hash| { - let metadata = self.hashes_fetch_inflight_and_pending_fetch.get(hash); - (*hash, metadata.map(|m| (m.retries, m.tx_encoded_length))) - }).collect::)>)>>() - ); - self.decrement_inflight_request_count_for(&peer_id); match result { diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 0c488ff919..4e23c8527b 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -153,6 +153,14 @@ impl TransactionsHandle { self.send(TransactionsCommand::PropagateTransactionsTo(transactions, peer)) } + /// Manually propagate the given transactions to all peers. + /// + /// It's up to the [`TransactionsManager`] whether the transactions are sent as hashes or in + /// full. + pub fn propagate_transactions(&self, transactions: Vec<TxHash>) { + self.send(TransactionsCommand::PropagateTransactions(transactions)) + } + /// Request the transaction hashes known by specific peers. pub async fn get_transaction_hashes( &self, @@ -398,10 +406,17 @@ where trace!(target: "net::tx", num_hashes=?hashes.len(), "Start propagating transactions"); - // This fetches all transaction from the pool, including the 4844 blob transactions but - // __without__ their sidecar, because 4844 transactions are only ever announced as hashes. + self.propagate_all(hashes); + } + + /// Propagates the given transactions to the peers. + /// + /// This fetches all transactions from the pool, including the 4844 blob transactions but + /// __without__ their sidecar, because 4844 transactions are only ever announced as hashes. + fn propagate_all(&mut self, hashes: Vec<TxHash>) { let propagated = self.propagate_transactions( self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(), + PropagationMode::Basic, ); // notify pool so events get fired @@ -417,6 +432,7 @@ where fn propagate_transactions( &mut self, to_propagate: Vec<PropagateTransaction>, + propagation_mode: PropagationMode, ) -> PropagatedTransactions { let mut propagated = PropagatedTransactions::default(); if self.network.tx_gossip_disabled() { @@ -435,14 +451,18 @@ where PropagateTransactionsBuilder::full(peer.version) }; - // Iterate through the transactions to propagate and fill the hashes and full - // transaction lists, before deciding whether or not to send full transactions to the - // peer.
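The `PropagationMode` parameter decides whether the per-peer seen-cache is consulted at all. A toy version of that selection, with integers standing in for transaction hashes (the mode names mirror the patch, everything else is illustrative):

use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq)]
enum Mode {
    Basic,  // only transactions the peer hasn't seen
    Forced, // everything, regardless of the seen-cache
}

fn select<'a>(txs: &'a [u64], seen: &HashSet<u64>, mode: Mode) -> Vec<&'a u64> {
    txs.iter().filter(|tx| mode == Mode::Forced || !seen.contains(tx)).collect()
}

fn main() {
    let seen: HashSet<u64> = [1].into();
    assert_eq!(select(&[1, 2], &seen, Mode::Basic), vec![&2]);
    assert_eq!(select(&[1, 2], &seen, Mode::Forced), vec![&1, &2]);
}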
- for tx in &to_propagate { - // Only proceed if the transaction is not in the peer's list of seen transactions - if !peer.seen_transactions.contains(&tx.hash()) { - // add transaction to the list of hashes to propagate - builder.push(tx); + if propagation_mode.is_forced() { + builder.extend(to_propagate.iter()); + } else { + // Iterate through the transactions to propagate and fill the hashes and full + // transaction lists, before deciding whether or not to send full transactions to + // the peer. + for tx in &to_propagate { + // Only proceed if the transaction is not in the peer's list of seen + // transactions + if !peer.seen_transactions.contains(&tx.hash()) { + builder.push(tx); + } } } @@ -500,6 +520,7 @@ where &mut self, txs: Vec<TxHash>, peer_id: PeerId, + propagation_mode: PropagationMode, ) -> Option<PropagatedTransactions> { trace!(target: "net::tx", ?peer_id, "Propagating transactions to peer"); @@ -511,10 +532,17 @@ where let to_propagate = self.pool.get_all(txs).into_iter().map(PropagateTransaction::new); - // Iterate through the transactions to propagate and fill the hashes and full transaction - for tx in to_propagate { - if !peer.seen_transactions.contains(&tx.hash()) { - full_transactions.push(&tx); + if propagation_mode.is_forced() { + // skip cache check if forced + full_transactions.extend(to_propagate); + } else { + // Iterate through the transactions to propagate and fill the hashes and full + // transaction list + for tx in to_propagate { + if !peer.seen_transactions.contains(&tx.hash()) { + // Only include if the peer hasn't seen the transaction + full_transactions.push(&tx); + } } } @@ -532,6 +560,7 @@ where // mark transaction as seen by peer peer.seen_transactions.insert(hash); } + // send hashes of transactions self.network.send_transactions_hashes(peer_id, new_pooled_hashes); } @@ -543,6 +572,7 @@ where // mark transaction as seen by peer peer.seen_transactions.insert(tx.hash()); } + // send full transactions self.network.send_transactions(peer_id, new_full_transactions); } @@ -556,7 +586,12 @@ where /// Propagate the transaction hashes to the given peer /// /// Note: This will only send the hashes for transactions that exist in the pool.
- fn propagate_hashes_to(&mut self, hashes: Vec<TxHash>, peer_id: PeerId) { + fn propagate_hashes_to( + &mut self, + hashes: Vec<TxHash>, + peer_id: PeerId, + propagation_mode: PropagationMode, + ) { trace!(target: "net::tx", "Start propagating transactions as hashes"); // This fetches a transactions from the pool, including the blob transactions, which are @@ -575,9 +610,14 @@ where // check if transaction is known to peer let mut hashes = PooledTransactionsHashesBuilder::new(peer.version); - for tx in to_propagate { - if !peer.seen_transactions.insert(tx.hash()) { - hashes.push(&tx); + if propagation_mode.is_forced() { + hashes.extend(to_propagate) + } else { + for tx in to_propagate { + if !peer.seen_transactions.contains(&tx.hash()) { + // Include if the peer hasn't seen it + hashes.push(&tx); + } } } @@ -866,17 +906,20 @@ where self.on_new_pending_transactions(vec![hash]) } TransactionsCommand::PropagateHashesTo(hashes, peer) => { - self.propagate_hashes_to(hashes, peer) + self.propagate_hashes_to(hashes, peer, PropagationMode::Forced) } TransactionsCommand::GetActivePeers(tx) => { let peers = self.peers.keys().copied().collect::<HashSet<_>>(); tx.send(peers).ok(); } - TransactionsCommand::PropagateTransactionsTo(txs, _peer) => { - if let Some(propagated) = self.propagate_full_transactions_to_peer(txs, _peer) { + TransactionsCommand::PropagateTransactionsTo(txs, peer) => { + if let Some(propagated) = + self.propagate_full_transactions_to_peer(txs, peer, PropagationMode::Forced) + { self.pool.on_propagated(propagated); } } + TransactionsCommand::PropagateTransactions(txs) => self.propagate_all(txs), TransactionsCommand::GetTransactionHashes { peers, tx } => { let mut res = HashMap::with_capacity(peers.len()); for peer_id in peers { @@ -1240,29 +1283,6 @@ where // yield back control to tokio. See `NetworkManager` for more context on the design // pattern. - // Advance pool imports (flush txns to pool). - // - // Note, this is done in batches. A batch is filled from one `Transactions` - // broadcast messages or one `PooledTransactions` response at a time. The - // minimum batch size is 1 transaction (and might often be the case with blob - // transactions). - // - // The smallest decodable transaction is an empty legacy transaction, 10 bytes - // (2 MiB / 10 bytes > 200k transactions). - // - // Since transactions aren't validated until they are inserted into the pool, - // this can potentially validate >200k transactions. More if the message size - // is bigger than the soft limit on a `PooledTransactions` response which is - // 2 MiB (`Transactions` broadcast messages is smaller, 128 KiB). - let maybe_more_pool_imports = metered_poll_nested_stream_with_budget!( - poll_durations.acc_pending_imports, - "net::tx", - "Batched pool imports stream", - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, - this.pool_imports.poll_next_unpin(cx), - |batch_results| this.on_batch_import_result(batch_results) - ); - // Advance network/peer related events (update peers map). let maybe_more_network_events = metered_poll_nested_stream_with_budget!( poll_durations.acc_network_events, @@ -1336,6 +1356,29 @@ where |event| this.on_network_tx_event(event), ); + // Advance pool imports (flush txns to pool). + // + // Note, this is done in batches. A batch is filled from one `Transactions` + // broadcast message or one `PooledTransactions` response at a time. The + // minimum batch size is 1 transaction (and might often be the case with blob + // transactions).
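Each block moved here is drained through the budgeted-poll macro so one busy stream cannot monopolize the manager's poll loop. A simplified, iterator-based model of that budget pattern (assumes a non-zero budget, as the macro does):

fn drain_with_budget<I: Iterator<Item = u32>>(
    iter: &mut I,
    mut budget: u32,
    mut on_item: impl FnMut(u32),
) -> bool {
    // returns true if the source may still hold items (budget exhausted first)
    for item in iter {
        on_item(item);
        budget -= 1;
        if budget == 0 {
            return true;
        }
    }
    false
}

fn main() {
    let mut source = 0..10u32;
    let mut seen = Vec::new();
    let maybe_more = drain_with_budget(&mut source, 4, |i| seen.push(i));
    assert!(maybe_more);
    assert_eq!(seen, vec![0, 1, 2, 3]);
}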
+ // + // The smallest decodable transaction is an empty legacy transaction, 10 bytes + // (2 MiB / 10 bytes > 200k transactions). + // + // Since transactions aren't validated until they are inserted into the pool, + // this can potentially validate >200k transactions. More if the message size + // is bigger than the soft limit on a `PooledTransactions` response which is + // 2 MiB (`Transactions` broadcast messages is smaller, 128 KiB). + let maybe_more_pool_imports = metered_poll_nested_stream_with_budget!( + poll_durations.acc_pending_imports, + "net::tx", + "Batched pool imports stream", + DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, + this.pool_imports.poll_next_unpin(cx), + |batch_results| this.on_batch_import_result(batch_results) + ); + // Tries to drain hashes pending fetch cache if the tx manager currently has // capacity for this (fetch txns). // @@ -1380,6 +1423,29 @@ where } } +/// Represents the different modes of transaction propagation. +/// +/// This enum is used to determine how transactions are propagated to peers in the network. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +enum PropagationMode { + /// Default propagation mode. + /// + /// Transactions are only sent to peers that haven't seen them yet. + Basic, + /// Forced propagation mode. + /// + /// Transactions are sent to all peers regardless of whether they have been sent or received + /// before. + Forced, +} + +impl PropagationMode { + /// Returns `true` if the propagation kind is `Forced`. + const fn is_forced(self) -> bool { + matches!(self, Self::Forced) + } +} + /// A transaction that's about to be propagated to multiple peers. #[derive(Debug, Clone)] struct PropagateTransaction { @@ -1426,6 +1492,13 @@ impl PropagateTransactionsBuilder { Self::Full(FullTransactionsBuilder::new(version)) } + /// Appends all transactions. + fn extend<'a>(&mut self, txs: impl IntoIterator<Item = &'a PropagateTransaction>) { + for tx in txs { + self.push(tx); + } + } + /// Appends a transaction to the list. fn push(&mut self, transaction: &PropagateTransaction) { match self { @@ -1487,6 +1560,13 @@ impl FullTransactionsBuilder { } } + /// Appends all transactions. + fn extend(&mut self, txs: impl IntoIterator<Item = PropagateTransaction>) { + for tx in txs { + self.push(&tx) + } + } + /// Append a transaction to the list of full transaction if the total message bytes size doesn't /// exceed the soft maximum target byte size. The limit is soft, meaning if one single /// transaction goes over the limit, it will be broadcasted in its own [`Transactions`] @@ -1566,6 +1646,13 @@ impl PooledTransactionsHashesBuilder { } } + /// Appends all hashes. + fn extend(&mut self, txs: impl IntoIterator<Item = PropagateTransaction>) { + for tx in txs { + self.push(&tx); + } + } + fn push(&mut self, tx: &PropagateTransaction) { match self { Self::Eth66(msg) => msg.0.push(tx.hash()), @@ -1581,7 +1668,7 @@ impl PooledTransactionsHashesBuilder { fn new(version: EthVersion) -> Self { match version { EthVersion::Eth66 | EthVersion::Eth67 => Self::Eth66(Default::default()), - EthVersion::Eth68 => Self::Eth68(Default::default()), + EthVersion::Eth68 | EthVersion::Eth69 => Self::Eth68(Default::default()), } } @@ -1653,6 +1740,8 @@ enum TransactionsCommand { GetActivePeers(oneshot::Sender<HashSet<PeerId>>), /// Propagate a collection of full transactions to a specific peer. PropagateTransactionsTo(Vec<TxHash>, PeerId), + /// Propagate a collection of full transactions to all peers. + PropagateTransactions(Vec<TxHash>), /// Request transaction hashes known by specific peers from the [`TransactionsManager`].
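`FullTransactionsBuilder::push` (context above) enforces a soft byte limit: transactions are appended while the running message size stays under the target, and a single oversized transaction is broadcast in its own message. A reduced sketch of that accounting; the limit value and all names here are illustrative:

const SOFT_LIMIT: usize = 128 * 1024;

struct SizedBatch {
    total: usize,
    items: Vec<usize>, // encoded lengths stand in for transactions
}

impl SizedBatch {
    /// Returns `false` if the item should go into its own message instead.
    fn push(&mut self, encoded_len: usize) -> bool {
        // soft limit: only reject when the batch already has content and
        // this item would push it over the target size
        if !self.items.is_empty() && self.total + encoded_len > SOFT_LIMIT {
            return false;
        }
        self.total += encoded_len;
        self.items.push(encoded_len);
        true
    }
}

fn main() {
    let mut batch = SizedBatch { total: 0, items: Vec::new() };
    assert!(batch.push(100 * 1024));
    assert!(!batch.push(60 * 1024)); // would exceed the soft limit
}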
GetTransactionHashes { peers: Vec, @@ -2371,7 +2460,8 @@ mod tests { let eip4844_tx = Arc::new(factory.create_eip4844()); propagate.push(PropagateTransaction::new(eip4844_tx.clone())); - let propagated = tx_manager.propagate_transactions(propagate.clone()); + let propagated = + tx_manager.propagate_transactions(propagate.clone(), PropagationMode::Basic); assert_eq!(propagated.0.len(), 2); let prop_txs = propagated.0.get(eip1559_tx.transaction.hash()).unwrap(); assert_eq!(prop_txs.len(), 1); @@ -2387,7 +2477,7 @@ mod tests { peer.seen_transactions.contains(eip4844_tx.transaction.hash()); // propagate again - let propagated = tx_manager.propagate_transactions(propagate); + let propagated = tx_manager.propagate_transactions(propagate, PropagationMode::Basic); assert!(propagated.0.is_empty()); } } diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs b/crates/net/network/tests/it/big_pooled_txs_req.rs index 3a645da6c9..29b62708ee 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -1,4 +1,4 @@ -use alloy_primitives::B256; +use alloy_primitives::{Signature, B256}; use reth_eth_wire::{GetPooledTransactions, PooledTransactions}; use reth_network::{ test_utils::{NetworkEventStream, Testnet}, @@ -6,7 +6,7 @@ use reth_network::{ }; use reth_network_api::{NetworkInfo, Peers}; use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState}; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ test_utils::{testing_pool, MockTransaction}, diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 3a56b304f3..5a113051c9 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use alloy_consensus::TxEip2930; -use alloy_primitives::{Bytes, Parity, TxKind, U256}; +use alloy_primitives::{Bytes, Parity, Signature, TxKind, U256}; use rand::Rng; use reth_eth_wire::HeadersDirection; use reth_network::{ @@ -16,7 +16,7 @@ use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::{HeadersClient, HeadersRequest}, }; -use reth_primitives::{Block, Header, Signature, Transaction, TransactionSigned}; +use reth_primitives::{Block, Header, Transaction, TransactionSigned}; use reth_provider::test_utils::MockEthProvider; /// Returns a new [`TransactionSigned`] with some random parameters diff --git a/crates/net/network/tests/it/session.rs b/crates/net/network/tests/it/session.rs index 6bc029d8a7..3f74db3d37 100644 --- a/crates/net/network/tests/it/session.rs +++ b/crates/net/network/tests/it/session.rs @@ -33,7 +33,7 @@ async fn test_session_established_with_highest_version() { } NetworkEvent::SessionEstablished { peer_id, status, .. } => { assert_eq!(handle1.peer_id(), &peer_id); - assert_eq!(status.version, EthVersion::Eth68 as u8); + assert_eq!(status.version, EthVersion::Eth68); } ev => { panic!("unexpected event {ev:?}") @@ -71,7 +71,7 @@ async fn test_session_established_with_different_capability() { } NetworkEvent::SessionEstablished { peer_id, status, .. 
} => { assert_eq!(handle1.peer_id(), &peer_id); - assert_eq!(status.version, EthVersion::Eth66 as u8); + assert_eq!(status.version, EthVersion::Eth66); } ev => { panic!("unexpected event: {ev:?}") diff --git a/crates/net/network/tests/it/startup.rs b/crates/net/network/tests/it/startup.rs index 89889a8694..d84ff492e5 100644 --- a/crates/net/network/tests/it/startup.rs +++ b/crates/net/network/tests/it/startup.rs @@ -113,6 +113,7 @@ async fn test_node_record_address_with_nat() { .add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap()))) .disable_discv4_discovery() .disable_dns_discovery() + .listener_port(0) .build_with_noop_provider(MAINNET.clone()); let network = NetworkManager::new(config).await.unwrap(); @@ -127,6 +128,7 @@ async fn test_node_record_address_with_nat_disable_discovery() { let config = NetworkConfigBuilder::new(secret_key) .add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap()))) .disable_discovery() + .listener_port(0) .build_with_noop_provider(MAINNET.clone()); let network = NetworkManager::new(config).await.unwrap(); diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index 70ac67bb5b..f08a2b2eb9 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use alloy_consensus::TxLegacy; -use alloy_primitives::U256; +use alloy_primitives::{Signature, U256}; use futures::StreamExt; use rand::thread_rng; use reth_network::{test_utils::Testnet, NetworkEvent, NetworkEventListenerProvider}; use reth_network_api::PeersInfo; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool}; diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index c43f7f5b34..3b6d74c9db 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -43,5 +43,15 @@ tokio = { workspace = true, features = ["full"] } [features] default = ["std"] -test-utils = ["reth-consensus/test-utils", "parking_lot"] -std = ["reth-consensus/std"] +test-utils = [ + "reth-consensus/test-utils", + "parking_lot", + "reth-network-types/test-utils", + "reth-primitives/test-utils" +] +std = [ + "reth-consensus/std", + "reth-primitives/std", + "alloy-eips/std", + "alloy-primitives/std" +] diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 0116f13488..e5129b6867 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -347,22 +347,6 @@ fn ensure_valid_body_response( _ => return Err(ConsensusError::WithdrawalsRootUnexpected), } - match (header.requests_root, &block.requests) { - (Some(header_requests_root), Some(requests)) => { - let requests = requests.0.as_slice(); - let requests_root = reth_primitives::proofs::calculate_requests_root(requests); - if requests_root != header_requests_root { - return Err(ConsensusError::BodyRequestsRootDiff( - GotExpected { got: requests_root, expected: header_requests_root }.into(), - )) - } - } - (None, None) => { - // this is ok because we assume the fork is not active in this case - } - _ => return Err(ConsensusError::RequestsRootUnexpected), - } - Ok(()) } diff --git a/crates/net/p2p/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs index acc01a60ef..8a13f69325 100644 --- a/crates/net/p2p/src/test_utils/full_block.rs +++ 
b/crates/net/p2p/src/test_utils/full_block.rs @@ -185,15 +185,15 @@ impl HeadersClient for TestFullBlockClient { .filter_map(|_| { headers.iter().find_map(|(hash, header)| { // Checks if the header matches the specified block or number. - if BlockNumHash::new(header.number, *hash).matches_block_or_num(&block) { - match request.direction { - HeadersDirection::Falling => block = header.parent_hash.into(), - HeadersDirection::Rising => block = (header.number + 1).into(), - } - Some(header.clone()) - } else { - None - } + BlockNumHash::new(header.number, *hash).matches_block_or_num(&block).then( + || { + match request.direction { + HeadersDirection::Falling => block = header.parent_hash.into(), + HeadersDirection::Rising => block = (header.number + 1).into(), + } + header.clone() + }, + ) }) }) .collect::>(); diff --git a/crates/net/peers/src/lib.rs b/crates/net/peers/src/lib.rs index e80331f904..1d60994d8e 100644 --- a/crates/net/peers/src/lib.rs +++ b/crates/net/peers/src/lib.rs @@ -117,6 +117,7 @@ pub enum AnyNode { impl AnyNode { /// Returns the peer id of the node. + #[allow(clippy::missing_const_for_fn)] pub fn peer_id(&self) -> PeerId { match self { Self::NodeRecord(record) => record.id, @@ -127,6 +128,7 @@ impl AnyNode { } /// Returns the full node record if available. + #[allow(clippy::missing_const_for_fn)] pub fn node_record(&self) -> Option { match self { Self::NodeRecord(record) => Some(*record), diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index e7685acc84..c803b5f1d8 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -12,6 +12,8 @@ workspace = true [dependencies] # reth +reth-beacon-consensus.workspace = true +reth-consensus.workspace = true reth-evm.workspace = true reth-provider.workspace = true reth-engine-primitives.workspace = true @@ -19,7 +21,12 @@ reth-transaction-pool.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true -reth-rpc-eth-api.workspace = true reth-network-api.workspace = true reth-node-types.workspace = true reth-primitives.workspace = true +reth-node-core.workspace = true +reth-bsc-consensus.workspace = true + +alloy-rpc-types-engine.workspace = true + +eyre.workspace = true \ No newline at end of file diff --git a/crates/node/api/src/lib.rs b/crates/node/api/src/lib.rs index 7692ed6f2c..099cf82b5f 100644 --- a/crates/node/api/src/lib.rs +++ b/crates/node/api/src/lib.rs @@ -25,5 +25,3 @@ pub use node::*; // re-export for convenience pub use reth_node_types::*; pub use reth_provider::FullProvider; - -pub use reth_rpc_eth_api::EthApiTypes; diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index ce6b16c8ff..13577bab09 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,14 +1,18 @@ //! Traits for configuring a node. 
-use std::marker::PhantomData; +use std::{future::Future, marker::PhantomData}; +use alloy_rpc_types_engine::JwtSecret; +use reth_beacon_consensus::BeaconConsensusEngineHandle; +use reth_bsc_consensus::BscTraceHelper; +use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; -use reth_node_types::{NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_core::node_config::NodeConfig; +use reth_node_types::{NodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::Header; use reth_provider::FullProvider; -use reth_rpc_eth_api::EthApiTypes; use reth_tasks::TaskExecutor; use reth_transaction_pool::TransactionPool; @@ -54,6 +58,9 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// The type that knows how to execute blocks. type Executor: BlockExecutorProvider; + /// The consensus type of the node. + type Consensus: Consensus + Clone + Unpin + 'static; + /// Network API. type Network: FullNetwork; @@ -66,8 +73,8 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// Returns the node's executor type. fn block_executor(&self) -> &Self::Executor; - /// Returns the provider of the node. - fn provider(&self) -> &Self::Provider; + /// Returns the node's consensus type. + fn consensus(&self) -> &Self::Consensus; /// Returns the handle to the network fn network(&self) -> &Self::Network; @@ -77,37 +84,46 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { &self, ) -> &PayloadBuilderHandle<<Self::Types as NodeTypesWithEngine>::Engine>; + /// Returns the provider of the node. + fn provider(&self) -> &Self::Provider; + /// Returns handle to runtime. fn task_executor(&self) -> &TaskExecutor; } -/// Customizable node add-on types. -pub trait NodeAddOns<N: FullNodeComponents>: Send + Sync + Unpin + Clone + 'static { - /// The core `eth` namespace API type to install on the RPC server (see - /// `reth_rpc_eth_api::EthApiServer`). - type EthApi: EthApiTypes + Send + Clone; -} - -impl<N: FullNodeComponents> NodeAddOns<N> for () { - type EthApi = (); +/// Context passed to [`NodeAddOns::launch_add_ons`]. +#[derive(Debug, Clone)] +pub struct AddOnsContext<'a, N: FullNodeComponents> { + /// Node with all configured components. + pub node: N, + /// Node configuration. + pub config: &'a NodeConfig<<N::Types as NodeTypes>::ChainSpec>, + /// Handle to the beacon consensus engine. + pub beacon_engine_handle: + BeaconConsensusEngineHandle<<N::Types as NodeTypesWithEngine>::Engine>, + /// JWT secret for the node. + pub jwt_secret: JwtSecret, + + /// Handles bsc trace rpc calls. + pub bsc_trace_helper: Option<BscTraceHelper>, } -/// Returns the builder for type. -pub trait BuilderProvider<N: FullNodeComponents>: Send { - /// Context required to build type. - type Ctx<'a>; - - /// Returns builder for type. - #[allow(clippy::type_complexity)] - fn builder() -> Box<dyn for<'a> Fn(Self::Ctx<'a>) -> Self + Send>; +/// Customizable node add-on types. +pub trait NodeAddOns<N: FullNodeComponents>: Send { + /// Handle to add-ons. + type Handle: Send + Sync + Clone; + + /// Configures and launches the add-ons.
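The reworked trait follows an async-launcher shape: the add-ons consume themselves plus an `AddOnsContext` and resolve to a handle. A reduced standalone model of that design (requires Rust 1.75+ for async fn in traits; `Ctx`, `Noop`, and the `String` error are placeholders for the real context type and `eyre`):

use std::future::Future;

struct Ctx; // stands in for AddOnsContext<'_, N>

trait AddOns: Send {
    /// Handle returned once the add-ons are running.
    type Handle: Send + Sync + Clone;

    /// Consumes the add-ons and launches them.
    fn launch(self, ctx: Ctx) -> impl Future<Output = Result<Self::Handle, String>> + Send;
}

/// No-op add-ons, mirroring the `()` impl in the patch.
struct Noop;

impl AddOns for Noop {
    type Handle = ();

    async fn launch(self, _ctx: Ctx) -> Result<(), String> {
        Ok(())
    }
}

fn main() {}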
+ fn launch_add_ons( + self, + ctx: AddOnsContext<'_, N>, + ) -> impl Future> + Send; } -impl BuilderProvider for () { - type Ctx<'a> = (); +impl NodeAddOns for () { + type Handle = (); - fn builder() -> Box Fn(Self::Ctx<'a>) -> Self + Send> { - Box::new(noop_builder) + async fn launch_add_ons(self, _components: AddOnsContext<'_, N>) -> eyre::Result { + Ok(()) } } - -const fn noop_builder(_: ()) {} diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index df5c3d3bd9..226c352aab 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -26,6 +26,7 @@ reth-db = { workspace = true, features = ["mdbx"], optional = true } reth-db-api.workspace = true reth-db-common.workspace = true reth-downloaders.workspace = true +reth-engine-local.workspace = true reth-engine-service.workspace = true reth-engine-tree.workspace = true reth-engine-util.workspace = true @@ -99,9 +100,26 @@ tempfile.workspace = true [features] default = [] -test-utils = ["reth-db/test-utils"] +test-utils = [ + "reth-db/test-utils", + "reth-blockchain-tree/test-utils", + "reth-chain-state/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-engine-tree/test-utils", + "reth-evm/test-utils", + "reth-downloaders/test-utils", + "reth-network/test-utils", + "reth-network-p2p/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-stages/test-utils", + "reth-db-api/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils" +] bsc = [ "reth-bsc-engine/bsc", "reth-beacon-consensus/bsc", "reth-bsc-consensus/bsc", -] \ No newline at end of file +] diff --git a/crates/node/builder/src/builder/add_ons.rs b/crates/node/builder/src/builder/add_ons.rs index 26d7553bb8..7be0411b2f 100644 --- a/crates/node/builder/src/builder/add_ons.rs +++ b/crates/node/builder/src/builder/add_ons.rs @@ -1,8 +1,8 @@ //! Node add-ons. Depend on core [`NodeComponents`](crate::NodeComponents). -use reth_node_api::{EthApiTypes, FullNodeComponents, NodeAddOns}; +use reth_node_api::{FullNodeComponents, NodeAddOns}; -use crate::{exex::BoxedLaunchExEx, hooks::NodeHooks, rpc::RpcHooks}; +use crate::{exex::BoxedLaunchExEx, hooks::NodeHooks}; /// Additional node extensions. /// @@ -12,16 +12,6 @@ pub struct AddOns> { pub hooks: NodeHooks, /// The `ExExs` (execution extensions) of the node. pub exexs: Vec<(String, Box>)>, - /// Additional RPC add-ons. - pub rpc: RpcAddOns, /// Additional captured addons. - pub addons: AddOns, -} - -/// Captures node specific addons that can be installed on top of the type configured node and are -/// required for launching the node, such as RPC. -#[derive(Default)] -pub struct RpcAddOns { - /// Additional RPC hooks. 
- pub hooks: RpcHooks, + pub add_ons: AddOns, } diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 23d14984fe..01394435b4 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -13,7 +13,7 @@ use crate::{ common::WithConfigs, components::NodeComponentsBuilder, node::FullNode, - rpc::{EthApiBuilderProvider, RethRpcServerHandles, RpcContext}, + rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, DefaultNodeLauncher, LaunchNode, Node, NodeHandle, }; use futures::Future; @@ -37,7 +37,6 @@ use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, primitives::Head, - rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, }; use reth_primitives::revm_primitives::EnvKzgSettings; use reth_provider::{providers::BlockchainProvider, ChainSpecProvider, FullProvider}; @@ -162,6 +161,26 @@ impl NodeBuilder<(), ChainSpec> { pub const fn new(config: NodeConfig) -> Self { Self { config, database: () } } + + /// Apply a function to the builder + pub fn apply(self, f: F) -> Self + where + F: FnOnce(Self) -> Self, + { + f(self) + } + + /// Apply a function to the builder, if the condition is `true`. + pub fn apply_if(self, cond: bool, f: F) -> Self + where + F: FnOnce(Self) -> Self, + { + if cond { + f(self) + } else { + self + } + } } impl NodeBuilder { @@ -169,6 +188,11 @@ impl NodeBuilder { pub const fn config(&self) -> &NodeConfig { &self.config } + + /// Returns a mutable reference to the node builder's config. + pub fn config_mut(&mut self) -> &mut NodeConfig { + &mut self.config + } } impl NodeBuilder { @@ -333,19 +357,11 @@ where > where N: Node, ChainSpec = ChainSpec>, - N::AddOns: NodeAddOns< + N::AddOns: RethRpcAddOns< NodeAdapter< RethFullAdapter, >>::Components, >, - EthApi: EthApiBuilderProvider< - NodeAdapter< - RethFullAdapter, - >>::Components, - > - > - + FullEthApiServer - + AddDevSigners >, { self.node(node).launch().await @@ -393,13 +409,33 @@ impl WithLaunchContext> where T: FullNodeTypes, CB: NodeComponentsBuilder, - AO: NodeAddOns, EthApi: FullEthApiServer + AddDevSigners>, + AO: RethRpcAddOns>, { /// Returns a reference to the node builder's config. pub const fn config(&self) -> &NodeConfig<::ChainSpec> { &self.builder.config } + /// Apply a function to the builder + pub fn apply(self, f: F) -> Self + where + F: FnOnce(Self) -> Self, + { + f(self) + } + + /// Apply a function to the builder, if the condition is `true`. + pub fn apply_if(self, cond: bool, f: F) -> Self + where + F: FnOnce(Self) -> Self, + { + if cond { + f(self) + } else { + self + } + } + /// Sets the hook that is run once the node's components are initialized. pub fn on_component_initialized(self, hook: F) -> Self where @@ -421,6 +457,14 @@ where Self { builder: self.builder.on_node_started(hook), task_executor: self.task_executor } } + /// Modifies the addons with the given closure. + pub fn map_add_ons(self, f: F) -> Self + where + F: FnOnce(AO) -> AO, + { + Self { builder: self.builder.map_add_ons(f), task_executor: self.task_executor } + } + /// Sets the hook that is run once the rpc server is started. pub fn on_rpc_started(self, hook: F) -> Self where @@ -461,6 +505,24 @@ where } } + /// Installs an `ExEx` (Execution Extension) in the node if the condition is true. + /// + /// # Note + /// + /// The `ExEx` ID must be unique. 
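`apply` and `apply_if` are plain self-consuming combinators that keep conditional configuration inside a single builder chain. A standalone sketch of the pattern on a toy builder:

#[derive(Default)]
struct Builder {
    verbose: bool,
}

impl Builder {
    fn apply<F: FnOnce(Self) -> Self>(self, f: F) -> Self {
        f(self)
    }

    fn apply_if<F: FnOnce(Self) -> Self>(self, cond: bool, f: F) -> Self {
        if cond { f(self) } else { self }
    }

    fn verbose(mut self) -> Self {
        self.verbose = true;
        self
    }
}

fn main() {
    let debug = std::env::var("DEBUG").is_ok();
    let builder = Builder::default().apply_if(debug, |b| b.verbose());
    assert_eq!(builder.verbose, debug);
}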
+ pub fn install_exex_if(self, cond: bool, exex_id: impl Into, exex: F) -> Self + where + F: FnOnce(ExExContext>) -> R + Send + 'static, + R: Future> + Send, + E: Future> + Send, + { + if cond { + self.install_exex(exex_id, exex) + } else { + self + } + } + /// Launches the node with the given launcher. pub async fn launch_with(self, launcher: L) -> eyre::Result where @@ -490,12 +552,7 @@ where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, T: NodeTypesWithEngine, CB: NodeComponentsBuilder>, - AO: NodeAddOns< - NodeAdapter, CB::Components>, - EthApi: EthApiBuilderProvider, CB::Components>> - + FullEthApiServer - + AddDevSigners, - >, + AO: RethRpcAddOns, CB::Components>>, { /// Launches the node with the [`DefaultNodeLauncher`] that sets up engine API consensus and rpc pub async fn launch( diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index 80930ef743..ca5a57d0db 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -11,10 +11,7 @@ use reth_exex::ExExContext; use reth_node_api::{ FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, }; -use reth_node_core::{ - node_config::NodeConfig, - rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, -}; +use reth_node_core::node_config::NodeConfig; use reth_payload_builder::PayloadBuilderHandle; use reth_tasks::TaskExecutor; @@ -22,8 +19,8 @@ use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::NodeHooks, launch::LaunchNode, - rpc::{EthApiBuilderProvider, RethRpcServerHandles, RpcContext, RpcHooks}, - AddOns, FullNode, RpcAddOns, + rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, + AddOns, FullNode, }; /// A node builder that also has the configured types. @@ -54,12 +51,7 @@ impl NodeBuilderWithTypes { config, adapter, components_builder, - add_ons: AddOns { - hooks: NodeHooks::default(), - rpc: RpcAddOns { hooks: RpcHooks::default() }, - exexs: Vec::new(), - addons: (), - }, + add_ons: AddOns { hooks: NodeHooks::default(), exexs: Vec::new(), add_ons: () }, } } } @@ -83,8 +75,8 @@ impl fmt::Debug for NodeTypesAdapter { } } -/// Container for the node's types and the components and other internals that can be used by addons -/// of the node. +/// Container for the node's types and the components and other internals that can be used by +/// addons of the node. pub struct NodeAdapter> { /// The components of the node. pub components: C, @@ -103,6 +95,7 @@ impl> FullNodeComponents for NodeAdapter< type Pool = C::Pool; type Evm = C::Evm; type Executor = C::Executor; + type Consensus = C::Consensus; type Network = C::Network; fn pool(&self) -> &Self::Pool { @@ -117,8 +110,8 @@ impl> FullNodeComponents for NodeAdapter< self.components.block_executor() } - fn provider(&self) -> &Self::Provider { - &self.provider + fn consensus(&self) -> &Self::Consensus { + self.components.consensus() } fn network(&self) -> &Self::Network { @@ -129,6 +122,10 @@ impl> FullNodeComponents for NodeAdapter< self.components.payload_builder() } + fn provider(&self) -> &Self::Provider { + &self.provider + } + fn task_executor(&self) -> &TaskExecutor { &self.task_executor } @@ -169,7 +166,7 @@ where { /// Advances the state of the node builder to the next state where all customizable /// [`NodeAddOns`] types are configured. 
- pub fn with_add_ons(self, addons: AO) -> NodeBuilderWithComponents + pub fn with_add_ons(self, add_ons: AO) -> NodeBuilderWithComponents where AO: NodeAddOns>, { @@ -179,12 +176,7 @@ where config, adapter, components_builder, - add_ons: AddOns { - hooks: NodeHooks::default(), - rpc: RpcAddOns { hooks: RpcHooks::default() }, - exexs: Vec::new(), - addons, - }, + add_ons: AddOns { hooks: NodeHooks::default(), exexs: Vec::new(), add_ons }, } } } @@ -215,31 +207,6 @@ where self } - /// Sets the hook that is run once the rpc server is started. - pub fn on_rpc_started(mut self, hook: F) -> Self - where - F: FnOnce( - RpcContext<'_, NodeAdapter, AO::EthApi>, - RethRpcServerHandles, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.add_ons.rpc.hooks.set_on_rpc_started(hook); - self - } - - /// Sets the hook that is run to configure the rpc modules. - pub fn extend_rpc_modules(mut self, hook: F) -> Self - where - F: FnOnce(RpcContext<'_, NodeAdapter, AO::EthApi>) -> eyre::Result<()> - + Send - + 'static, - { - self.add_ons.rpc.hooks.set_extend_rpc_modules(hook); - self - } - /// Installs an `ExEx` (Execution Extension) in the node. /// /// # Note @@ -269,18 +236,22 @@ where pub const fn check_launch(self) -> Self { self } + + /// Modifies the addons with the given closure. + pub fn map_add_ons(mut self, f: F) -> Self + where + F: FnOnce(AO) -> AO, + { + self.add_ons.add_ons = f(self.add_ons.add_ons); + self + } } impl NodeBuilderWithComponents where T: FullNodeTypes, CB: NodeComponentsBuilder, - AO: NodeAddOns< - NodeAdapter, - EthApi: EthApiBuilderProvider> - + FullEthApiServer - + AddDevSigners, - >, + AO: RethRpcAddOns>, { /// Launches the node with the given launcher. pub async fn launch_with(self, launcher: L) -> eyre::Result @@ -289,4 +260,33 @@ where { launcher.launch_node(self).await } + + /// Sets the hook that is run once the rpc server is started. + pub fn on_rpc_started(self, hook: F) -> Self + where + F: FnOnce( + RpcContext<'_, NodeAdapter, AO::EthApi>, + RethRpcServerHandles, + ) -> eyre::Result<()> + + Send + + 'static, + { + self.map_add_ons(|mut add_ons| { + add_ons.hooks_mut().set_on_rpc_started(hook); + add_ons + }) + } + + /// Sets the hook that is run to configure the rpc modules. + pub fn extend_rpc_modules(self, hook: F) -> Self + where + F: FnOnce(RpcContext<'_, NodeAdapter, AO::EthApi>) -> eyre::Result<()> + + Send + + 'static, + { + self.map_add_ons(|mut add_ons| { + add_ons.hooks_mut().set_extend_rpc_modules(hook); + add_ons + }) + } } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index bc36ca5084..ea9aa03b7f 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -4,20 +4,17 @@ use std::{future::Future, marker::PhantomData}; use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; -use reth_node_api::{EngineValidator, NodeTypesWithEngine}; use reth_primitives::Header; use reth_transaction_pool::TransactionPool; use crate::{ components::{ Components, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, NodeComponents, - PayloadServiceBuilder, PoolBuilder, + ParliaBuilder, PayloadServiceBuilder, PoolBuilder, }, BuilderContext, ConfigureEvm, FullNodeTypes, }; -use super::{EngineValidatorBuilder, ParliaBuilder}; - /// A generic, general purpose and customizable [`NodeComponentsBuilder`] implementation. /// /// This type is stateful and captures the configuration of the node's components. 
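`map_add_ons` threads a closure through the builder so hook setters such as `on_rpc_started` no longer reach into the add-ons' fields directly. A toy model of that layering (the method names mirror the patch, the types are reduced to the bare minimum):

struct AddOns {
    hooks: Vec<&'static str>,
}

struct Builder {
    add_ons: AddOns,
}

impl Builder {
    fn map_add_ons<F: FnOnce(AddOns) -> AddOns>(mut self, f: F) -> Self {
        self.add_ons = f(self.add_ons);
        self
    }

    fn on_rpc_started(self) -> Self {
        // expressed via map_add_ons, as in the patch
        self.map_add_ons(|mut add_ons| {
            add_ons.hooks.push("on_rpc_started");
            add_ons
        })
    }
}

fn main() {
    let builder = Builder { add_ons: AddOns { hooks: Vec::new() } }.on_rpc_started();
    assert_eq!(builder.add_ons.hooks, ["on_rpc_started"]);
}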
@@ -38,24 +35,23 @@ use super::{EngineValidatorBuilder, ParliaBuilder}; /// All component builders are captured in the builder state and will be consumed once the node is /// launched. #[derive(Debug)] -pub struct ComponentsBuilder { +pub struct ComponentsBuilder { pool_builder: PoolB, payload_builder: PayloadB, network_builder: NetworkB, executor_builder: ExecB, consensus_builder: ConsB, - engine_validator_builder: EVB, parlia_builder: ParliaB, _marker: PhantomData, } -impl - ComponentsBuilder +impl + ComponentsBuilder { /// Configures the node types. pub fn node_types( self, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where Types: FullNodeTypes, { @@ -65,7 +61,6 @@ impl network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, parlia_builder, _marker, } = self; @@ -75,7 +70,6 @@ impl payload_builder, network_builder, consensus_builder, - engine_validator_builder, parlia_builder, _marker: Default::default(), } @@ -89,7 +83,6 @@ impl network_builder: self.network_builder, executor_builder: self.executor_builder, consensus_builder: self.consensus_builder, - engine_validator_builder: self.engine_validator_builder, parlia_builder: self.parlia_builder, _marker: self._marker, } @@ -103,7 +96,6 @@ impl network_builder: self.network_builder, executor_builder: self.executor_builder, consensus_builder: self.consensus_builder, - engine_validator_builder: self.engine_validator_builder, parlia_builder: self.parlia_builder, _marker: self._marker, } @@ -117,7 +109,6 @@ impl network_builder: f(self.network_builder), executor_builder: self.executor_builder, consensus_builder: self.consensus_builder, - engine_validator_builder: self.engine_validator_builder, parlia_builder: self.parlia_builder, _marker: self._marker, } @@ -131,7 +122,6 @@ impl network_builder: self.network_builder, executor_builder: f(self.executor_builder), consensus_builder: self.consensus_builder, - engine_validator_builder: self.engine_validator_builder, parlia_builder: self.parlia_builder, _marker: self._marker, } @@ -145,15 +135,27 @@ impl network_builder: self.network_builder, executor_builder: self.executor_builder, consensus_builder: f(self.consensus_builder), - engine_validator_builder: self.engine_validator_builder, parlia_builder: self.parlia_builder, _marker: self._marker, } } + + /// Apply a function to the parlia builder. 
+ pub fn map_parlia(self, f: impl FnOnce(ParliaB) -> ParliaB) -> Self { + Self { + pool_builder: self.pool_builder, + payload_builder: self.payload_builder, + network_builder: self.network_builder, + executor_builder: self.executor_builder, + consensus_builder: self.consensus_builder, + parlia_builder: f(self.parlia_builder), + _marker: self._marker, + } + } } -impl - ComponentsBuilder +impl + ComponentsBuilder where Node: FullNodeTypes, { @@ -164,7 +166,7 @@ where pub fn pool( self, pool_builder: PB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where PB: PoolBuilder, { @@ -174,7 +176,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, parlia_builder, _marker, } = self; @@ -184,15 +185,14 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, parlia_builder, _marker, } } } -impl - ComponentsBuilder +impl + ComponentsBuilder where Node: FullNodeTypes, PoolB: PoolBuilder, @@ -204,7 +204,7 @@ where pub fn network( self, network_builder: NB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where NB: NetworkBuilder, { @@ -214,7 +214,6 @@ where network_builder: _, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, parlia_builder, _marker, } = self; @@ -224,7 +223,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, parlia_builder, _marker, } @@ -237,7 +235,7 @@ where pub fn payload( self, payload_builder: PB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where PB: PayloadServiceBuilder, { @@ -247,7 +245,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, parlia_builder, _marker, } = self; @@ -257,7 +254,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, parlia_builder, _marker, } @@ -270,7 +266,7 @@ where pub fn executor( self, executor_builder: EB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where EB: ExecutorBuilder, { @@ -280,7 +276,6 @@ where network_builder, executor_builder: _, consensus_builder, - engine_validator_builder, parlia_builder, _marker, } = self; @@ -290,7 +285,6 @@ where network_builder, executor_builder, consensus_builder, - engine_validator_builder, parlia_builder, _marker, } @@ -303,7 +297,7 @@ where pub fn consensus( self, consensus_builder: CB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where CB: ConsensusBuilder, { @@ -313,40 +307,6 @@ where network_builder, executor_builder, consensus_builder: _, - engine_validator_builder, - parlia_builder, - _marker, - } = self; - ComponentsBuilder { - pool_builder, - payload_builder, - network_builder, - executor_builder, - consensus_builder, - engine_validator_builder, - parlia_builder, - _marker, - } - } - - /// Configures the consensus builder. - /// - /// This accepts a [`ConsensusBuilder`] instance that will be used to create the node's - /// components for consensus. - pub fn engine_validator( - self, - engine_validator_builder: EngineVB, - ) -> ComponentsBuilder - where - EngineVB: EngineValidatorBuilder, - { - let Self { - pool_builder, - payload_builder, - network_builder, - executor_builder, - consensus_builder, - engine_validator_builder: _, parlia_builder, _marker, } = self; @@ -356,7 +316,6 @@ where network_builder, executor_builder, consensus_builder, - engine_validator_builder, parlia_builder, _marker, } @@ -364,12 +323,12 @@ where /// Configures the parlia builder. 
/// - /// This accepts a [`ParliaBuilder`] instance that will be used to create the node's - /// components for parlia. + /// This accepts a [`ParliaBuilder`] instance that will be used to create the node's components + /// for parlia. pub fn parlia( self, parlia_builder: PB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where PB: ParliaBuilder, { @@ -379,7 +338,6 @@ where network_builder, executor_builder, consensus_builder, - engine_validator_builder, parlia_builder: _, _marker, } = self; @@ -389,15 +347,14 @@ where network_builder, executor_builder, consensus_builder, - engine_validator_builder, parlia_builder, _marker, } } } -impl NodeComponentsBuilder - for ComponentsBuilder +impl NodeComponentsBuilder + for ComponentsBuilder where Node: FullNodeTypes, PoolB: PoolBuilder, @@ -405,17 +362,9 @@ where PayloadB: PayloadServiceBuilder, ExecB: ExecutorBuilder, ConsB: ConsensusBuilder, - EVB: EngineValidatorBuilder, ParliaB: ParliaBuilder, { - type Components = Components< - Node, - PoolB::Pool, - ExecB::EVM, - ExecB::Executor, - ConsB::Consensus, - EVB::Validator, - >; + type Components = Components; async fn build_components( self, @@ -427,7 +376,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, parlia_builder: _parlia_builder, _marker, } = self; @@ -437,7 +385,6 @@ where let network = network_builder.build_network(context, pool.clone()).await?; let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; let consensus = consensus_builder.build_consensus(context).await?; - let engine_validator = engine_validator_builder.build_validator(context).await?; #[cfg(feature = "bsc")] let parlia = _parlia_builder.build_parlia(context).await?; @@ -448,14 +395,13 @@ where payload_builder, executor, consensus, - engine_validator, #[cfg(feature = "bsc")] parlia, }) } } -impl Default for ComponentsBuilder<(), (), (), (), (), (), (), ()> { +impl Default for ComponentsBuilder<(), (), (), (), (), (), ()> { fn default() -> Self { Self { pool_builder: (), @@ -463,7 +409,6 @@ impl Default for ComponentsBuilder<(), (), (), (), (), (), (), ()> { network_builder: (), executor_builder: (), consensus_builder: (), - engine_validator_builder: (), parlia_builder: (), _marker: Default::default(), } @@ -490,18 +435,17 @@ pub trait NodeComponentsBuilder: Send { ) -> impl Future> + Send; } -impl NodeComponentsBuilder for F +impl NodeComponentsBuilder for F where Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future>> + Send, + Fut: Future>> + Send, Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm
, Executor: BlockExecutorProvider, Cons: Consensus + Clone + Unpin + 'static, - Val: EngineValidator<::Engine> + Clone + Unpin + 'static, { - type Components = Components; + type Components = Components; fn build_components( self, diff --git a/crates/node/builder/src/components/engine.rs b/crates/node/builder/src/components/engine.rs deleted file mode 100644 index a6d0e8afbc..0000000000 --- a/crates/node/builder/src/components/engine.rs +++ /dev/null @@ -1,62 +0,0 @@ -//! Consensus component for the node builder. -use reth_node_api::{EngineValidator, NodeTypesWithEngine}; - -use crate::{BuilderContext, FullNodeTypes}; -use reth_bsc_consensus::Parlia; -use std::future::Future; - -/// A type that knows how to build the engine validator. -pub trait EngineValidatorBuilder: Send { - /// The consensus implementation to build. - type Validator: EngineValidator<::Engine> - + Clone - + Unpin - + 'static; - - /// Creates the engine validator. - fn build_validator( - self, - ctx: &BuilderContext, - ) -> impl Future> + Send; -} - -impl EngineValidatorBuilder for F -where - Node: FullNodeTypes, - Validator: - EngineValidator<::Engine> + Clone + Unpin + 'static, - F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future> + Send, -{ - type Validator = Validator; - - fn build_validator( - self, - ctx: &BuilderContext, - ) -> impl Future> { - self(ctx) - } -} - -/// Needed for bsc parlia consensus. -pub trait ParliaBuilder: Send { - /// Creates the parlia. - fn build_parlia( - self, - ctx: &BuilderContext, - ) -> impl Future> + Send; -} - -impl ParliaBuilder for F -where - Node: FullNodeTypes, - F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future> + Send, -{ - fn build_parlia( - self, - ctx: &BuilderContext, - ) -> impl Future> { - self(ctx) - } -} diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 7d114ae1ba..50f96f7be4 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -9,17 +9,17 @@ mod builder; mod consensus; -mod engine; mod execute; mod network; +mod parlia; mod payload; mod pool; pub use builder::*; pub use consensus::*; -pub use engine::*; pub use execute::*; pub use network::*; +pub use parlia::*; pub use payload::*; pub use pool::*; #[cfg(feature = "bsc")] @@ -28,7 +28,7 @@ use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; -use reth_node_api::{EngineValidator, NodeTypesWithEngine}; +use reth_node_api::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::Header; use reth_transaction_pool::TransactionPool; @@ -58,9 +58,6 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati /// Network API. type Network: FullNetwork; - /// Validator for the engine API. - type EngineValidator: EngineValidator<::Engine>; - /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -79,9 +76,6 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati /// Returns the handle to the payload builder service. fn payload_builder(&self) -> &PayloadBuilderHandle<::Engine>; - /// Returns the engine validator. - fn engine_validator(&self) -> &Self::EngineValidator; - #[cfg(feature = "bsc")] /// Returns the parlia. fn parlia(&self) -> &Parlia; @@ -91,7 +85,7 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati /// /// This provides access to all the components of the node. 
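The blanket `impl NodeComponentsBuilder for F` above, like the `ParliaBuilder` blanket impl later in this diff, lets a bare closure act as a builder. A reduced, synchronous model of that pattern (the real impls are async and take a `BuilderContext`; these names are illustrative):

trait Build: Send {
    type Out;
    fn build(self, ctx: &str) -> Self::Out;
}

// any suitable closure automatically implements the builder trait
impl<F, Out> Build for F
where
    F: FnOnce(&str) -> Out + Send,
{
    type Out = Out;

    fn build(self, ctx: &str) -> Out {
        self(ctx)
    }
}

fn main() {
    let consensus = (|ctx: &str| format!("consensus for {ctx}")).build("mainnet");
    assert_eq!(consensus, "consensus for mainnet");
}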
#[derive(Debug)] -pub struct Components { +pub struct Components { /// The transaction pool of the node. pub transaction_pool: Pool, /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. @@ -104,29 +98,25 @@ pub struct Components::Engine>, - /// The validator for the engine API. - pub engine_validator: Validator, #[cfg(feature = "bsc")] /// The parlia consensus. pub parlia: Parlia, } -impl NodeComponents - for Components +impl NodeComponents + for Components where Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm
, Executor: BlockExecutorProvider, Cons: Consensus + Clone + Unpin + 'static, - Val: EngineValidator<::Engine> + Clone + Unpin + 'static, { type Pool = Pool; type Evm = EVM; type Executor = Executor; type Consensus = Cons; type Network = NetworkHandle; - type EngineValidator = Val; fn pool(&self) -> &Self::Pool { &self.transaction_pool @@ -154,25 +144,19 @@ where &self.payload_builder } - fn engine_validator(&self) -> &Self::EngineValidator { - &self.engine_validator - } - #[cfg(feature = "bsc")] fn parlia(&self) -> &Parlia { &self.parlia } } -impl Clone - for Components +impl Clone for Components where Node: FullNodeTypes, Pool: TransactionPool, EVM: ConfigureEvm
<Header = Header>,
    Executor: BlockExecutorProvider,
    Cons: Consensus + Clone,
-    Val: EngineValidator<<Node::Types as NodeTypesWithEngine>::Engine>,
 {
     fn clone(&self) -> Self {
         Self {
@@ -182,7 +166,6 @@ where
             consensus: self.consensus.clone(),
             network: self.network.clone(),
             payload_builder: self.payload_builder.clone(),
-            engine_validator: self.engine_validator.clone(),
             #[cfg(feature = "bsc")]
             parlia: self.parlia.clone(),
         }
diff --git a/crates/node/builder/src/components/parlia.rs b/crates/node/builder/src/components/parlia.rs
new file mode 100644
index 0000000000..5c0102169d
--- /dev/null
+++ b/crates/node/builder/src/components/parlia.rs
@@ -0,0 +1,27 @@
+//! Parlia component for the node builder.
+use crate::{BuilderContext, FullNodeTypes};
+use reth_bsc_consensus::Parlia;
+use std::future::Future;
+
+/// A type that knows how to build the Parlia consensus component for BSC.
+pub trait ParliaBuilder<Node: FullNodeTypes>: Send {
+    /// Creates the parlia.
+    fn build_parlia(
+        self,
+        ctx: &BuilderContext<Node>,
+    ) -> impl Future<Output = eyre::Result<Parlia>> + Send;
+}
+
+impl<Node, F, Fut> ParliaBuilder<Node> for F
+where
+    Node: FullNodeTypes,
+    F: FnOnce(&BuilderContext<Node>) -> Fut + Send,
+    Fut: Future<Output = eyre::Result<Parlia>> + Send,
+{
+    fn build_parlia(
+        self,
+        ctx: &BuilderContext<Node>,
+    ) -> impl Future<Output = eyre::Result<Parlia>> {
+        self(ctx)
+    }
+}
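The new `parlia.rs` above keeps only the Parlia half of the deleted `engine.rs`. Because of the blanket impl, any suitable closure is a `ParliaBuilder`; a minimal hedged sketch, where the re-export paths and the `Parlia::default()` constructor are assumptions for illustration:

```rust
use reth_bsc_consensus::Parlia;
use reth_node_builder::{components::ParliaBuilder, BuilderContext, FullNodeTypes};

// The blanket impl turns this closure into a `ParliaBuilder<Node>`; the
// context would give access to the chain spec if the real constructor
// needed it. `Parlia::default()` is assumed here.
fn example_parlia_builder<Node: FullNodeTypes>() -> impl ParliaBuilder<Node> {
    |_ctx: &BuilderContext<Node>| async { Ok::<_, eyre::Report>(Parlia::default()) }
}
```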
diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs
index 234455913c..436a80c52e 100644
--- a/crates/node/builder/src/components/pool.rs
+++ b/crates/node/builder/src/components/pool.rs
@@ -52,6 +52,8 @@ pub struct PoolBuilderConfigOverrides {
     pub minimal_protocol_basefee: Option<u64>,
     /// Addresses that will be considered as local. Above exemptions apply.
     pub local_addresses: HashSet<Address>,
+    /// Additional tasks to validate new transactions.
+    pub additional_validation_tasks: Option<usize>,
 }
 
 impl PoolBuilderConfigOverrides {
@@ -65,6 +67,7 @@ impl PoolBuilderConfigOverrides {
             max_account_slots,
             minimal_protocol_basefee,
             local_addresses,
+            additional_validation_tasks: _,
         } = self;
 
         if let Some(pending_limit) = pending_limit {
diff --git a/crates/node/builder/src/handle.rs b/crates/node/builder/src/handle.rs
index c81aa9420d..2997a8687a 100644
--- a/crates/node/builder/src/handle.rs
+++ b/crates/node/builder/src/handle.rs
@@ -1,13 +1,13 @@
 use std::fmt;
 
-use reth_node_api::{FullNodeComponents, NodeAddOns};
+use reth_node_api::FullNodeComponents;
 use reth_node_core::exit::NodeExitFuture;
 
-use crate::node::FullNode;
+use crate::{node::FullNode, rpc::RethRpcAddOns};
 
 /// A Handle to the launched node.
 #[must_use = "Needs to await the node exit future"]
-pub struct NodeHandle<Node: FullNodeComponents, AddOns: NodeAddOns<Node>> {
+pub struct NodeHandle<Node: FullNodeComponents, AddOns: RethRpcAddOns<Node>> {
     /// All node components.
     pub node: FullNode<Node, AddOns>,
     /// The exit future of the node.
@@ -17,7 +17,7 @@ pub struct NodeHandle<Node: FullNodeComponents, AddOns: RethRpcAddOns<Node>> {
 impl<Node, AddOns> NodeHandle<Node, AddOns>
 where
     Node: FullNodeComponents,
-    AddOns: NodeAddOns<Node>,
+    AddOns: RethRpcAddOns<Node>,
 {
     /// Waits for the node to exit, if it was configured to exit.
     pub async fn wait_for_node_exit(self) -> eyre::Result<()> {
@@ -28,7 +28,7 @@ where
 impl<Node, AddOns> fmt::Debug for NodeHandle<Node, AddOns>
 where
     Node: FullNodeComponents,
-    AddOns: NodeAddOns<Node>,
+    AddOns: RethRpcAddOns<Node>,
 {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("NodeHandle")
diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs
index f62c18f5a8..c6d3a7c951 100644
--- a/crates/node/builder/src/launch/common.rs
+++ b/crates/node/builder/src/launch/common.rs
@@ -816,6 +816,15 @@ where
         Ok(initial_target)
     }
 
+    /// Returns true if the node should terminate after the initial backfill run.
+    ///
+    /// This is the case if any of these configs are set:
+    /// `--debug.max-block`
+    /// `--debug.terminate`
+    pub const fn terminate_after_initial_backfill(&self) -> bool {
+        self.node_config().debug.terminate || self.node_config().debug.max_block.is_some()
+    }
+
     /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less
     /// than the checkpoint of the first stage).
     ///
@@ -1073,7 +1082,7 @@ mod tests {
         let node_config = NodeConfig {
             pruning: PruningArgs {
                 full: true,
-                block_interval: 0,
+                block_interval: None,
                 sender_recovery_full: false,
                 sender_recovery_distance: None,
                 sender_recovery_before: None,
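Before moving on to the engine launcher: the termination rule added above is compact enough to pin down with a self-contained mirror of its logic. `DebugArgs` here is a stand-in struct, not the real clap type:

```rust
// Stand-in for the real `--debug.*` clap arguments.
struct DebugArgs {
    terminate: bool,
    max_block: Option<u64>,
}

// Mirrors `terminate_after_initial_backfill` from the hunk above.
const fn terminate_after_initial_backfill(debug: &DebugArgs) -> bool {
    debug.terminate || debug.max_block.is_some()
}

fn main() {
    // `--debug.max-block 1000000` alone is enough to request termination.
    let args = DebugArgs { terminate: false, max_block: Some(1_000_000) };
    assert!(terminate_after_initial_backfill(&args));
}
```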
diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs
index bc109f93ec..bae8ce482b 100644
--- a/crates/node/builder/src/launch/engine.rs
+++ b/crates/node/builder/src/launch/engine.rs
@@ -1,6 +1,5 @@
 //! Engine node related functionality.
-use alloy_rpc_types::engine::ClientVersionV1;
 use futures::{future::Either, stream, stream_select, StreamExt};
 use reth_beacon_consensus::{
     hooks::{EngineHooks, StaticFileHook},
@@ -13,6 +12,7 @@ use reth_bsc_consensus::BscTraceHelper;
 use reth_bsc_engine::ParliaEngineBuilder;
 use reth_chainspec::EthChainSpec;
 use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider};
+use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder, MiningMode};
 use reth_engine_service::service::{ChainEvent, EngineService};
 use reth_engine_tree::{
     engine::{EngineApiRequest, EngineRequestHandler},
@@ -24,19 +24,18 @@ use reth_network::{NetworkSyncUpdater, SyncState};
 #[cfg(feature = "bsc")]
 use reth_network_api::EngineRxProvider;
 use reth_network_api::{BlockDownloaderProvider, NetworkEventListenerProvider};
-use reth_node_api::{BuiltPayload, FullNodeTypes, NodeAddOns, NodeTypesWithEngine};
+use reth_node_api::{
+    BuiltPayload, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadTypes,
+};
 use reth_node_core::{
     dirs::{ChainPath, DataDirPath},
     exit::NodeExitFuture,
     primitives::Head,
-    rpc::eth::{helpers::AddDevSigners, FullEthApiServer},
-    version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA},
 };
 use reth_node_events::{cl::ConsensusLayerHealthEvents, node};
 use reth_payload_primitives::PayloadBuilder;
 use reth_primitives::EthereumHardforks;
 use reth_provider::providers::{BlockchainProvider2, ProviderNodeTypes};
-use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi};
 use reth_tasks::TaskExecutor;
 use reth_tokio_util::EventSender;
 use reth_tracing::tracing::{debug, error, info};
@@ -49,9 +48,9 @@ use tokio_stream::wrappers::UnboundedReceiverStream;
 use crate::{
     common::{Attached, LaunchContextWith, WithConfigs},
     hooks::NodeHooks,
-    rpc::{launch_rpc_servers, EthApiBuilderProvider},
+    rpc::{RethRpcAddOns, RpcHandle},
     setup::build_networked_pipeline,
-    AddOns, ExExLauncher, FullNode, LaunchContext, LaunchNode, NodeAdapter,
+    AddOns, AddOnsContext, ExExLauncher, FullNode, LaunchContext, LaunchNode, NodeAdapter,
     NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle,
     NodeTypesAdapter,
 };
@@ -82,11 +81,9 @@ where
     Types: ProviderNodeTypes + NodeTypesWithEngine,
     T: FullNodeTypes<Types = Types, Provider = BlockchainProvider2<Types>>,
     CB: NodeComponentsBuilder<T>,
-    AO: NodeAddOns<
-        NodeAdapter<T, CB::Components>,
-        EthApi: EthApiBuilderProvider<NodeAdapter<T, CB::Components>>
-            + FullEthApiServer
-            + AddDevSigners,
+    AO: RethRpcAddOns<NodeAdapter<T, CB::Components>>,
+    LocalPayloadAttributesBuilder<Types::ChainSpec>: PayloadAttributesBuilder<
+        <<Types as NodeTypesWithEngine>::Engine as PayloadTypes>::PayloadAttributes,
     >,
 {
     type Node = NodeHandle<NodeAdapter<T, CB::Components>, AO>;
@@ -99,7 +96,7 @@
         let NodeBuilderWithComponents {
             adapter: NodeTypesAdapter { database },
             components_builder,
-            add_ons: AddOns { hooks, rpc, exexs: installed_exex, .. },
+            add_ons: AddOns { hooks, exexs: installed_exex, add_ons },
             config,
         } = target;
         let NodeHooks { on_component_initialized, on_node_started, ..
} = hooks; @@ -219,8 +216,33 @@ where let pruner_events = pruner.events(); info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); - // Configure the consensus engine - let mut eth_service = { + let mut engine_service = if ctx.is_dev() { + let mining_mode = if let Some(block_time) = ctx.node_config().dev.block_time { + MiningMode::interval(block_time) + } else { + MiningMode::instant(ctx.components().pool().clone()) + }; + let eth_service = LocalEngineService::new( + ctx.consensus(), + ctx.components().block_executor().clone(), + ctx.provider_factory().clone(), + ctx.blockchain_db().clone(), + pruner, + ctx.components().payload_builder().clone(), + engine_tree_config, + ctx.invalid_block_hook()?, + ctx.sync_metrics_tx(), + consensus_engine_tx.clone(), + Box::pin(consensus_engine_stream), + mining_mode, + LocalPayloadAttributesBuilder::new(ctx.chain_spec()), + ctx.node_config().skip_state_root_validation, + ctx.node_config().enable_prefetch, + ctx.node_config().enable_execution_cache, + ); + + Either::Left(eth_service) + } else { #[cfg(not(feature = "bsc"))] { let eth_service = EngineService::new( @@ -242,11 +264,12 @@ where ctx.node_config().enable_prefetch, ctx.node_config().enable_execution_cache, ); - eth_service + + Either::Right(eth_service) } #[cfg(feature = "bsc")] { - let engine_rx = ctx.node_adapter().components.network().get_to_engine_rx(); + let engine_rx = ctx.components().network().get_to_engine_rx(); let client = ParliaEngineBuilder::new( ctx.chain_spec(), ctx.blockchain_db().clone(), @@ -278,7 +301,8 @@ where ctx.node_config().enable_prefetch, ctx.node_config().enable_execution_cache, ); - eth_service + + Either::Right(eth_service) } }; @@ -313,25 +337,6 @@ where ), ); - let client = ClientVersionV1 { - code: CLIENT_CODE, - name: NAME_CLIENT.to_string(), - version: CARGO_PKG_VERSION.to_string(), - commit: VERGEN_GIT_SHA.to_string(), - }; - let engine_api = EngineApi::new( - ctx.blockchain_db().clone(), - ctx.chain_spec(), - beacon_engine_handle, - ctx.components().payload_builder().clone().into(), - ctx.components().pool().clone(), - Box::new(ctx.task_executor().clone()), - client, - EngineCapabilities::default(), - ctx.components().engine_validator().clone(), - ); - info!(target: "reth::cli", "Engine API handler initialized"); - // extract the jwt secret from the args if possible let jwt_secret = ctx.auth_jwt_secret()?; @@ -341,16 +346,16 @@ where let bsc_trace_helper = Some(BscTraceHelper::new(Arc::new(ctx.components().parlia().clone()))); - // Start RPC servers - let (rpc_server_handles, rpc_registry) = launch_rpc_servers( - ctx.node_adapter().clone(), - engine_api, - ctx.node_config(), + let add_ons_ctx = AddOnsContext { + node: ctx.node_adapter().clone(), + config: ctx.node_config(), + beacon_engine_handle, jwt_secret, - rpc, bsc_trace_helper, - ) - .await?; + }; + + let RpcHandle { rpc_server_handles, rpc_registry } = + add_ons.launch_add_ons(add_ons_ctx).await?; // TODO: migrate to devmode with https://github.com/paradigmxyz/reth/issues/10104 if let Some(maybe_custom_etherscan_url) = ctx.node_config().debug.etherscan.clone() { @@ -395,11 +400,15 @@ where .fuse(); let chainspec = ctx.chain_spec(); let (exit, rx) = oneshot::channel(); + let terminate_after_backfill = ctx.terminate_after_initial_backfill(); + info!(target: "reth::cli", "Starting consensus engine"); ctx.task_executor().spawn_critical("consensus engine", async move { if let Some(initial_target) = initial_target { debug!(target: "reth::cli", %initial_target, 
"start backfill sync"); - eth_service.orchestrator_mut().start_backfill_sync(initial_target); + if let Either::Right(eth_service) = &mut engine_service { + eth_service.orchestrator_mut().start_backfill_sync(initial_target); + } } let mut res = Ok(()); @@ -410,14 +419,21 @@ where payload = built_payloads.select_next_some() => { if let Some(executed_block) = payload.executed_block() { debug!(target: "reth::cli", block=?executed_block.block().num_hash(), "inserting built payload"); - eth_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block).into()); + if let Either::Right(eth_service) = &mut engine_service { + eth_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block).into()); + } } } - event = eth_service.next() => { + event = engine_service.next() => { let Some(event) = event else { break }; debug!(target: "reth::cli", "Event: {event}"); match event { ChainEvent::BackfillSyncFinished => { + if terminate_after_backfill { + debug!(target: "reth::cli", "Terminating after initial backfill"); + break + } + network_handle.update_sync_state(SyncState::Idle); } ChainEvent::BackfillSyncStarted => { @@ -459,13 +475,12 @@ where provider: ctx.node_adapter().provider.clone(), payload_builder: ctx.components().payload_builder().clone(), task_executor: ctx.task_executor().clone(), - rpc_server_handles, - rpc_registry, config: ctx.node_config().clone(), data_dir: ctx.data_dir().clone(), + add_ons_handle: RpcHandle { rpc_server_handles, rpc_registry }, }; // Notify on node started - on_node_started.on_event(full_node.clone())?; + on_node_started.on_event(FullNode::clone(&full_node))?; let handle = NodeHandle { node_exit_future: NodeExitFuture::new( diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 1eec806954..97dd0bbb8b 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -10,7 +10,6 @@ use common::{Attached, LaunchContextWith, WithConfigs}; pub use exex::ExExLauncher; use alloy_primitives::utils::format_ether; -use alloy_rpc_types::engine::ClientVersionV1; use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, @@ -28,18 +27,14 @@ use reth_exex::ExExManagerHandle; use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider}; #[cfg(feature = "bsc")] use reth_network_api::EngineRxProvider; -use reth_node_api::{ - FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypesWithDB, NodeTypesWithEngine, -}; +use reth_node_api::{AddOnsContext, FullNodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, - rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, - version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_provider::providers::BlockchainProvider; -use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; +use reth_rpc::eth::RpcNodeCore; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; @@ -54,19 +49,18 @@ use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::NodeHooks, node::FullNode, - rpc::EthApiBuilderProvider, + rpc::{RethRpcAddOns, RpcHandle}, AddOns, NodeBuilderWithComponents, NodeHandle, }; -/// Alias for 
[`reth_rpc_eth_types::EthApiBuilderCtx`], adapter for [`FullNodeComponents`]. -pub type EthApiBuilderCtx = reth_rpc_eth_types::EthApiBuilderCtx< - ::Provider, - ::Pool, - ::Evm, - ::Network, +/// Alias for [`reth_rpc_eth_types::EthApiBuilderCtx`], adapter for [`RpcNodeCore`]. +pub type EthApiBuilderCtx = reth_rpc_eth_types::EthApiBuilderCtx< + ::Provider, + ::Pool, + ::Evm, + ::Network, TaskExecutor, - ::Provider, - Eth, + ::Provider, >; /// A general purpose trait that launches a new node of any kind. @@ -116,12 +110,7 @@ where Types: NodeTypesWithDB + NodeTypesWithEngine, T: FullNodeTypes, Types = Types>, CB: NodeComponentsBuilder, - AO: NodeAddOns< - NodeAdapter, - EthApi: EthApiBuilderProvider> - + FullEthApiServer - + AddDevSigners, - >, + AO: RethRpcAddOns>, { type Node = NodeHandle, AO>; @@ -133,7 +122,7 @@ where let NodeBuilderWithComponents { adapter: NodeTypesAdapter { database }, components_builder, - add_ons: AddOns { hooks, rpc, exexs: installed_exex, .. }, + add_ons: AddOns { hooks, exexs: installed_exex, add_ons }, config, } = target; let NodeHooks { on_component_initialized, on_node_started, .. } = hooks; @@ -287,7 +276,7 @@ where )?; #[cfg(feature = "bsc")] { - let engine_rx = ctx.node_adapter().components.network().get_to_engine_rx(); + let engine_rx = ctx.components().network().get_to_engine_rx(); let client = ParliaEngineBuilder::new( ctx.chain_spec(), ctx.blockchain_db().clone(), @@ -364,25 +353,6 @@ where ), ); - let client = ClientVersionV1 { - code: CLIENT_CODE, - name: NAME_CLIENT.to_string(), - version: CARGO_PKG_VERSION.to_string(), - commit: VERGEN_GIT_SHA.to_string(), - }; - let engine_api = EngineApi::new( - ctx.blockchain_db().clone(), - ctx.chain_spec(), - beacon_engine_handle, - ctx.components().payload_builder().clone().into(), - ctx.components().pool().clone(), - Box::new(ctx.task_executor().clone()), - client, - EngineCapabilities::default(), - ctx.components().engine_validator().clone(), - ); - info!(target: "reth::cli", "Engine API handler initialized"); - // extract the jwt secret from the args if possible let jwt_secret = ctx.auth_jwt_secret()?; @@ -392,21 +362,16 @@ where let bsc_trace_helper = Some(BscTraceHelper::new(Arc::new(ctx.components().parlia().clone()))); - // Start RPC servers - let (rpc_server_handles, rpc_registry) = crate::rpc::launch_rpc_servers( - ctx.node_adapter().clone(), - engine_api, - ctx.node_config(), + let add_ons_ctx = AddOnsContext { + node: ctx.node_adapter().clone(), + config: ctx.node_config(), + beacon_engine_handle, jwt_secret, - rpc, bsc_trace_helper, - ) - .await?; + }; - // in dev mode we generate 20 random dev-signer accounts - if ctx.is_dev() { - rpc_registry.eth_api().with_dev_accounts(); - } + let RpcHandle { rpc_server_handles, rpc_registry } = + add_ons.launch_add_ons(add_ons_ctx).await?; // Run consensus engine to completion let (tx, rx) = oneshot::channel(); @@ -466,13 +431,12 @@ where provider: ctx.node_adapter().provider.clone(), payload_builder: ctx.components().payload_builder().clone(), task_executor: ctx.task_executor().clone(), - rpc_server_handles, - rpc_registry, config: ctx.node_config().clone(), data_dir: ctx.data_dir().clone(), + add_ons_handle: RpcHandle { rpc_server_handles, rpc_registry }, }; // Notify on node started - on_node_started.on_event(full_node.clone())?; + on_node_started.on_event(FullNode::clone(&full_node))?; let handle = NodeHandle { node_exit_future: NodeExitFuture::new( diff --git a/crates/node/builder/src/lib.rs b/crates/node/builder/src/lib.rs index cfe16074a5..899317f158 
100644 --- a/crates/node/builder/src/lib.rs +++ b/crates/node/builder/src/lib.rs @@ -20,10 +20,7 @@ pub mod components; pub use components::{NodeComponents, NodeComponentsBuilder}; mod builder; -pub use builder::{ - add_ons::{AddOns, RpcAddOns}, - *, -}; +pub use builder::{add_ons::AddOns, *}; mod launch; pub use launch::{engine::EngineNodeLauncher, *}; diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 3a70c08c10..62c710ea80 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -1,24 +1,24 @@ // re-export the node api types pub use reth_node_api::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}; -use std::{marker::PhantomData, sync::Arc}; +use std::{ + marker::PhantomData, + ops::{Deref, DerefMut}, + sync::Arc, +}; use reth_node_api::{EngineTypes, FullNodeComponents}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, - rpc::api::EngineApiClient, }; use reth_payload_builder::PayloadBuilderHandle; use reth_provider::ChainSpecProvider; +use reth_rpc_api::EngineApiClient; use reth_rpc_builder::{auth::AuthServerHandle, RpcServerHandle}; use reth_tasks::TaskExecutor; -use crate::{ - components::NodeComponentsBuilder, - rpc::{RethRpcServerHandles, RpcRegistry}, - NodeAdapter, NodeAddOns, -}; +use crate::{components::NodeComponentsBuilder, rpc::RethRpcAddOns, NodeAdapter, NodeAddOns}; /// A [`crate::Node`] is a [`NodeTypesWithEngine`] that comes with preconfigured components. /// @@ -69,6 +69,8 @@ where type Primitives = ::Primitives; type ChainSpec = ::ChainSpec; + + type StateCommitment = ::StateCommitment; } impl NodeTypesWithEngine for AnyNode @@ -84,7 +86,7 @@ impl Node for AnyNode where N: FullNodeTypes + Clone, C: NodeComponentsBuilder + Clone + Sync + Unpin + 'static, - AO: NodeAddOns>, + AO: NodeAddOns> + Clone + Sync + Unpin + 'static, { type ComponentsBuilder = C; type AddOns = AO; @@ -117,14 +119,12 @@ pub struct FullNode> { pub payload_builder: PayloadBuilderHandle<::Engine>, /// Task executor for the node. pub task_executor: TaskExecutor, - /// Handles to the node's rpc servers - pub rpc_server_handles: RethRpcServerHandles, - /// The configured rpc namespaces - pub rpc_registry: RpcRegistry, /// The initial node config. pub config: NodeConfig<::ChainSpec>, /// The data dir of the node. pub data_dir: ChainPath, + /// The handle to launched add-ons + pub add_ons_handle: AddOns::Handle, } impl> Clone for FullNode { @@ -137,10 +137,9 @@ impl> Clone for FullNode Arc<::ChainSpec> { self.provider.chain_spec() } +} +impl FullNode +where + Engine: EngineTypes, + Node: FullNodeComponents>, + AddOns: RethRpcAddOns, +{ /// Returns the [`RpcServerHandle`] to the started rpc server. pub const fn rpc_server_handle(&self) -> &RpcServerHandle { - &self.rpc_server_handles.rpc + &self.add_ons_handle.rpc_server_handles.rpc } /// Returns the [`AuthServerHandle`] to the started authenticated engine API server. pub const fn auth_server_handle(&self) -> &AuthServerHandle { - &self.rpc_server_handles.auth + &self.add_ons_handle.rpc_server_handles.auth } /// Returns the [`EngineApiClient`] interface for the authenticated engine API. 
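For orientation before the final `node.rs` hunk below (which adds `Deref`/`DerefMut` to the add-ons handle): a hedged sketch of how callers reach the RPC handles after this rework, assuming `node` is a launched `FullNode` whose add-ons produced an `RpcHandle`:

```rust
// Convenience accessors still exist but now route through `add_ons_handle`:
let http_addr = node.rpc_server_handle().http_local_addr();
let auth_addr = node.auth_server_handle().local_addr();

// The same handles, reached directly through the new field:
let _auth = &node.add_ons_handle.rpc_server_handles.auth;

// And via the Deref chain (FullNode -> RpcHandle -> RpcRegistry), registry
// helpers can be called on the node itself:
let _eth_api = node.eth_api();
```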
@@ -188,3 +194,17 @@ where self.auth_server_handle().ipc_client().await } } + +impl> Deref for FullNode { + type Target = AddOns::Handle; + + fn deref(&self) -> &Self::Target { + &self.add_ons_handle + } +} + +impl> DerefMut for FullNode { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.add_ons_handle + } +} diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index c65e137b82..d82d64f054 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -1,32 +1,38 @@ //! Builder support for rpc components. use std::{ - fmt, + fmt::{self, Debug}, + future::Future, + marker::PhantomData, ops::{Deref, DerefMut}, }; +use alloy_rpc_types::engine::ClientVersionV1; use futures::TryFutureExt; -use reth_bsc_consensus::BscTraceHelper; -use reth_node_api::{BuilderProvider, FullNodeComponents, NodeTypes, NodeTypesWithEngine}; +use reth_node_api::{ + AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodeTypes, NodeTypesWithEngine, +}; use reth_node_core::{ node_config::NodeConfig, - rpc::{ - api::EngineApiServer, - eth::{EthApiTypes, FullEthApiServer}, - }, + version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_payload_builder::PayloadBuilderHandle; use reth_provider::providers::ProviderNodeTypes; +use reth_rpc::{ + eth::{EthApiTypes, FullEthApiServer}, + EthApi, +}; +use reth_rpc_api::eth::helpers::AddDevSigners; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, config::RethRpcServerConfig, RpcModuleBuilder, RpcRegistryInner, RpcServerHandle, TransportRpcModules, }; -use reth_rpc_layer::JwtSecret; +use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; -use crate::{EthApiBuilderCtx, RpcAddOns}; +use crate::EthApiBuilderCtx; /// Contains the handles to the spawned RPC servers. /// @@ -192,6 +198,7 @@ pub struct RpcRegistry { Node::Provider, EthApi, Node::Executor, + Node::Consensus, >, } @@ -208,6 +215,7 @@ where Node::Provider, EthApi, Node::Executor, + Node::Consensus, >; fn deref(&self) -> &Self::Target { @@ -293,104 +301,282 @@ where } } -/// Launch the rpc servers. -pub async fn launch_rpc_servers( - node: Node, - engine_api: Engine, - config: &NodeConfig<::ChainSpec>, - jwt_secret: JwtSecret, - add_ons: RpcAddOns, - bsc_trace_helper: Option, -) -> eyre::Result<(RethRpcServerHandles, RpcRegistry)> +/// Handle to the launched RPC servers. +#[derive(Clone)] +pub struct RpcHandle { + /// Handles to launched servers. + pub rpc_server_handles: RethRpcServerHandles, + /// Configured RPC modules. 
+ pub rpc_registry: RpcRegistry, +} + +impl Deref for RpcHandle { + type Target = RpcRegistry; + + fn deref(&self) -> &Self::Target { + &self.rpc_registry + } +} + +impl Debug for RpcHandle where - Node: FullNodeComponents + Clone, - Engine: EngineApiServer<::Engine>, - EthApi: EthApiBuilderProvider + FullEthApiServer, + RpcRegistry: Debug, { - let auth_config = config.rpc.auth_server_config(jwt_secret)?; - let module_config = config.rpc.transport_rpc_module_config(); - debug!(target: "reth::cli", http=?module_config.http(), ws=?module_config.ws(), "Using RPC module config"); - - let (mut modules, mut auth_module, registry) = RpcModuleBuilder::default() - .with_provider(node.provider().clone()) - .with_pool(node.pool().clone()) - .with_network(node.network().clone()) - .with_events(node.provider().clone()) - .with_executor(node.task_executor().clone()) - .with_evm_config(node.evm_config().clone()) - .with_block_executor(node.block_executor().clone()) - .with_bsc_trace_helper(bsc_trace_helper) - .build_with_auth_server(module_config, engine_api, EthApi::eth_api_builder()); - - let mut registry = RpcRegistry { registry }; - let ctx = RpcContext { - node: node.clone(), - config, - registry: &mut registry, - modules: &mut modules, - auth_module: &mut auth_module, - }; - - let RpcAddOns { hooks, .. } = add_ons; - let RpcHooks { on_rpc_started, extend_rpc_modules } = hooks; - - extend_rpc_modules.extend_rpc_modules(ctx)?; - - let server_config = config.rpc.rpc_server_config(); - let cloned_modules = modules.clone(); - let launch_rpc = server_config.start(&cloned_modules).map_ok(|handle| { - if let Some(path) = handle.ipc_endpoint() { - info!(target: "reth::cli", %path, "RPC IPC server started"); - } - if let Some(addr) = handle.http_local_addr() { - info!(target: "reth::cli", url=%addr, "RPC HTTP server started"); - } - if let Some(addr) = handle.ws_local_addr() { - info!(target: "reth::cli", url=%addr, "RPC WS server started"); + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RpcHandle") + .field("rpc_server_handles", &self.rpc_server_handles) + .field("rpc_registry", &self.rpc_registry) + .finish() + } +} + +/// Node add-ons containing RPC server configuration, with customizable eth API handler. +#[allow(clippy::type_complexity)] +pub struct RpcAddOns { + /// Additional RPC add-ons. + pub hooks: RpcHooks, + /// Builder for `EthApi` + eth_api_builder: Box) -> EthApi + Send + Sync>, + /// Engine validator + engine_validator_builder: EV, + _pd: PhantomData<(Node, EthApi)>, +} + +impl Debug + for RpcAddOns +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RpcAddOns") + .field("hooks", &self.hooks) + .field("eth_api_builder", &"...") + .field("engine_validator_builder", &self.engine_validator_builder) + .finish() + } +} + +impl RpcAddOns { + /// Creates a new instance of the RPC add-ons. 
+ pub fn new( + eth_api_builder: impl FnOnce(&EthApiBuilderCtx) -> EthApi + Send + Sync + 'static, + engine_validator_builder: EV, + ) -> Self { + Self { + hooks: RpcHooks::default(), + eth_api_builder: Box::new(eth_api_builder), + engine_validator_builder, + _pd: PhantomData, } - handle - }); - - let launch_auth = auth_module.clone().start_server(auth_config).map_ok(|handle| { - let addr = handle.local_addr(); - if let Some(ipc_endpoint) = handle.ipc_endpoint() { - info!(target: "reth::cli", url=%addr, ipc_endpoint=%ipc_endpoint,"RPC auth server started"); - } else { - info!(target: "reth::cli", url=%addr, "RPC auth server started"); + } + + /// Sets the hook that is run once the rpc server is started. + pub fn on_rpc_started(mut self, hook: F) -> Self + where + F: FnOnce(RpcContext<'_, Node, EthApi>, RethRpcServerHandles) -> eyre::Result<()> + + Send + + 'static, + { + self.hooks.set_on_rpc_started(hook); + self + } + + /// Sets the hook that is run to configure the rpc modules. + pub fn extend_rpc_modules(mut self, hook: F) -> Self + where + F: FnOnce(RpcContext<'_, Node, EthApi>) -> eyre::Result<()> + Send + 'static, + { + self.hooks.set_extend_rpc_modules(hook); + self + } +} + +impl Default for RpcAddOns +where + Node: FullNodeComponents, + EthApi: EthApiTypes + EthApiBuilder, + EV: Default, +{ + fn default() -> Self { + Self::new(EthApi::build, EV::default()) + } +} + +impl NodeAddOns for RpcAddOns +where + N: FullNodeComponents, + EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, + EV: EngineValidatorBuilder, +{ + type Handle = RpcHandle; + + async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { + let Self { eth_api_builder, engine_validator_builder, hooks, _pd: _ } = self; + + let engine_validator = engine_validator_builder.build(&ctx).await?; + let AddOnsContext { node, config, beacon_engine_handle, jwt_secret, bsc_trace_helper } = + ctx; + + let client = ClientVersionV1 { + code: CLIENT_CODE, + name: NAME_CLIENT.to_string(), + version: CARGO_PKG_VERSION.to_string(), + commit: VERGEN_GIT_SHA.to_string(), + }; + + let engine_api = EngineApi::new( + node.provider().clone(), + config.chain.clone(), + beacon_engine_handle, + node.payload_builder().clone().into(), + node.pool().clone(), + Box::new(node.task_executor().clone()), + client, + EngineCapabilities::default(), + engine_validator, + ); + info!(target: "reth::cli", "Engine API handler initialized"); + + let auth_config = config.rpc.auth_server_config(jwt_secret)?; + let module_config = config.rpc.transport_rpc_module_config(); + debug!(target: "reth::cli", http=?module_config.http(), ws=?module_config.ws(), "Using RPC module config"); + + let (mut modules, mut auth_module, registry) = RpcModuleBuilder::default() + .with_provider(node.provider().clone()) + .with_pool(node.pool().clone()) + .with_network(node.network().clone()) + .with_events(node.provider().clone()) + .with_executor(node.task_executor().clone()) + .with_evm_config(node.evm_config().clone()) + .with_block_executor(node.block_executor().clone()) + .with_consensus(node.consensus().clone()) + .with_bsc_trace_helper(bsc_trace_helper) + .build_with_auth_server(module_config, engine_api, eth_api_builder); + + // in dev mode we generate 20 random dev-signer accounts + if config.dev.dev { + registry.eth_api().with_dev_accounts(); } - handle - }); - // launch servers concurrently - let (rpc, auth) = futures::future::try_join(launch_rpc, launch_auth).await?; - let handles = RethRpcServerHandles { rpc, auth }; + let mut registry 
= RpcRegistry { registry }; + let ctx = RpcContext { + node: node.clone(), + config, + registry: &mut registry, + modules: &mut modules, + auth_module: &mut auth_module, + }; + + let RpcHooks { on_rpc_started, extend_rpc_modules } = hooks; + + extend_rpc_modules.extend_rpc_modules(ctx)?; + + let server_config = config.rpc.rpc_server_config(); + let cloned_modules = modules.clone(); + let launch_rpc = server_config.start(&cloned_modules).map_ok(|handle| { + if let Some(path) = handle.ipc_endpoint() { + info!(target: "reth::cli", %path, "RPC IPC server started"); + } + if let Some(addr) = handle.http_local_addr() { + info!(target: "reth::cli", url=%addr, "RPC HTTP server started"); + } + if let Some(addr) = handle.ws_local_addr() { + info!(target: "reth::cli", url=%addr, "RPC WS server started"); + } + handle + }); + + let launch_auth = auth_module.clone().start_server(auth_config).map_ok(|handle| { + let addr = handle.local_addr(); + if let Some(ipc_endpoint) = handle.ipc_endpoint() { + info!(target: "reth::cli", url=%addr, ipc_endpoint=%ipc_endpoint,"RPC auth server started"); + } else { + info!(target: "reth::cli", url=%addr, "RPC auth server started"); + } + handle + }); + + // launch servers concurrently + let (rpc, auth) = futures::future::try_join(launch_rpc, launch_auth).await?; + + let handles = RethRpcServerHandles { rpc, auth }; + + let ctx = RpcContext { + node: node.clone(), + config, + registry: &mut registry, + modules: &mut modules, + auth_module: &mut auth_module, + }; + + on_rpc_started.on_rpc_started(ctx, handles.clone())?; + + Ok(RpcHandle { rpc_server_handles: handles, rpc_registry: registry }) + } +} + +/// Helper trait implemented for add-ons producing [`RpcHandle`]. Used by common node launcher +/// implementations. +pub trait RethRpcAddOns: + NodeAddOns> +{ + /// eth API implementation. + type EthApi: EthApiTypes; + + /// Returns a mutable reference to RPC hooks. + fn hooks_mut(&mut self) -> &mut RpcHooks; +} + +impl RethRpcAddOns for RpcAddOns +where + Self: NodeAddOns>, +{ + type EthApi = EthApi; - let ctx = RpcContext { - node, - config, - registry: &mut registry, - modules: &mut modules, - auth_module: &mut auth_module, - }; + fn hooks_mut(&mut self) -> &mut RpcHooks { + &mut self.hooks + } +} - on_rpc_started.on_rpc_started(ctx, handles.clone())?; +/// A `EthApi` that knows how to build itself from [`EthApiBuilderCtx`]. +pub trait EthApiBuilder: 'static { + /// Builds the `EthApi` from the given context. + fn build(ctx: &EthApiBuilderCtx) -> Self; +} - Ok((handles, registry)) +impl EthApiBuilder for EthApi { + fn build(ctx: &EthApiBuilderCtx) -> Self { + Self::with_spawner(ctx) + } } -/// Provides builder for the core `eth` API type. -pub trait EthApiBuilderProvider: BuilderProvider + EthApiTypes { - /// Returns the eth api builder. - #[allow(clippy::type_complexity)] - fn eth_api_builder() -> Box) -> Self + Send>; +/// A type that knows how to build the engine validator. +pub trait EngineValidatorBuilder: Send { + /// The consensus implementation to build. + type Validator: EngineValidator<::Engine> + + Clone + + Unpin + + 'static; + + /// Creates the engine validator. 
+ fn build( + self, + ctx: &AddOnsContext<'_, Node>, + ) -> impl Future> + Send; } -impl EthApiBuilderProvider for F +impl EngineValidatorBuilder for F where - N: FullNodeComponents, - for<'a> F: BuilderProvider = &'a EthApiBuilderCtx> + EthApiTypes, + Node: FullNodeComponents, + Validator: + EngineValidator<::Engine> + Clone + Unpin + 'static, + F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send, + Fut: Future> + Send, { - fn eth_api_builder() -> Box) -> Self + Send> { - F::builder() + type Validator = Validator; + + fn build( + self, + ctx: &AddOnsContext<'_, Node>, + ) -> impl Future> { + self(ctx) } } diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 55bfa541c1..a3de10834d 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -23,8 +23,6 @@ reth-network-p2p.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-rpc-types-compat.workspace = true -reth-rpc-api = { workspace = true, features = ["client"] } -reth-rpc-eth-api = { workspace = true, features = ["client"] } reth-transaction-pool.workspace = true reth-tracing.workspace = true reth-config.workspace = true @@ -38,7 +36,9 @@ reth-stages-types.workspace = true # ethereum alloy-primitives.workspace = true -alloy-rpc-types-engine = { workspace = true, features = ["jwt"] } +alloy-rpc-types-engine = { workspace = true, features = ["std", "jwt"] } +alloy-consensus.workspace = true +alloy-eips.workspace = true # misc eyre.workspace = true @@ -73,11 +73,12 @@ futures.workspace = true # test vectors generation proptest.workspace = true tokio.workspace = true -tempfile.workspace = true [features] optimism = [ - "reth-primitives/optimism" + "reth-primitives/optimism", + "reth-db/optimism", + "reth-chainspec/optimism" ] opbnb = [ "reth-primitives/opbnb", @@ -85,10 +86,9 @@ opbnb = [ bsc = [ "reth-primitives/bsc", ] - # Features for vergen to generate correct env vars -jemalloc = [] -asm-keccak = [] +jemalloc = ["reth-cli-util/jemalloc"] +asm-keccak = ["reth-primitives/asm-keccak", "alloy-primitives/asm-keccak"] [build-dependencies] vergen = { version = "8.0.0", features = ["build", "cargo", "git", "gitcl"] } diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 09e9de82f5..5b9d6ae61e 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -1,11 +1,14 @@ //! clap [Args](clap::Args) for database configuration +use std::{fmt, str::FromStr, time::Duration}; + use crate::version::default_client_version; use clap::{ builder::{PossibleValue, TypedValueParser}, error::ErrorKind, Arg, Args, Command, Error, }; +use reth_db::{mdbx::MaxReadTransactionDuration, ClientVersion}; use reth_storage_errors::db::LogLevel; /// Parameters for database configuration @@ -19,14 +22,41 @@ pub struct DatabaseArgs { /// NFS volume. #[arg(long = "db.exclusive")] pub exclusive: Option, + /// Maximum database size (e.g., 4TB, 8MB) + #[arg(long = "db.max-size", value_parser = parse_byte_size)] + pub max_size: Option, + /// Database growth step (e.g., 4GB, 4KB) + #[arg(long = "db.growth-step", value_parser = parse_byte_size)] + pub growth_step: Option, + /// Read transaction timeout in seconds, 0 means no timeout. + #[arg(long = "db.read-transaction-timeout")] + pub read_transaction_timeout: Option, } impl DatabaseArgs { /// Returns default database arguments with configured log level and client version. 
    pub fn database_args(&self) -> reth_db::mdbx::DatabaseArguments {
-        reth_db::mdbx::DatabaseArguments::new(default_client_version())
+        self.get_database_args(default_client_version())
+    }
+
+    /// Returns the database arguments with configured log level, client version,
+    /// max read transaction duration, and geometry.
+    pub fn get_database_args(
+        &self,
+        client_version: ClientVersion,
+    ) -> reth_db::mdbx::DatabaseArguments {
+        let max_read_transaction_duration = match self.read_transaction_timeout {
+            None => None, // if not specified, use default value
+            Some(0) => Some(MaxReadTransactionDuration::Unbounded), // if 0, disable timeout
+            Some(secs) => Some(MaxReadTransactionDuration::Set(Duration::from_secs(secs))),
+        };
+
+        reth_db::mdbx::DatabaseArguments::new(client_version)
             .with_log_level(self.log_level)
             .with_exclusive(self.exclusive)
+            .with_max_read_transaction_duration(max_read_transaction_duration)
+            .with_geometry_max_size(self.max_size)
+            .with_growth_step(self.growth_step)
     }
 }
 
@@ -68,10 +98,84 @@ impl TypedValueParser for LogLevelValueParser {
         Some(Box::new(values))
     }
 }
+
+/// Size in bytes.
+#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
+pub struct ByteSize(pub usize);
+
+impl From<ByteSize> for usize {
+    fn from(s: ByteSize) -> Self {
+        s.0
+    }
+}
+
+impl FromStr for ByteSize {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let s = s.trim().to_uppercase();
+        let parts: Vec<&str> = s.split_whitespace().collect();
+
+        let (num_str, unit) = match parts.len() {
+            1 => {
+                let (num, unit) =
+                    s.split_at(s.find(|c: char| c.is_alphabetic()).unwrap_or(s.len()));
+                (num, unit)
+            }
+            2 => (parts[0], parts[1]),
+            _ => {
+                return Err("Invalid format. Use '<number><unit>' or '<number> <unit>'.".to_string())
+            }
+        };
+
+        let num: usize = num_str.parse().map_err(|_| "Invalid number".to_string())?;
+
+        let multiplier = match unit {
+            "B" | "" => 1, // Assume bytes if no unit is specified
+            "KB" => 1024,
+            "MB" => 1024 * 1024,
+            "GB" => 1024 * 1024 * 1024,
+            "TB" => 1024 * 1024 * 1024 * 1024,
+            _ => return Err(format!("Invalid unit: {}. Use B, KB, MB, GB, or TB.", unit)),
+        };
+
+        Ok(Self(num * multiplier))
+    }
+}
+
+impl fmt::Display for ByteSize {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        const KB: usize = 1024;
+        const MB: usize = KB * 1024;
+        const GB: usize = MB * 1024;
+        const TB: usize = GB * 1024;
+
+        let (size, unit) = if self.0 >= TB {
+            (self.0 as f64 / TB as f64, "TB")
+        } else if self.0 >= GB {
+            (self.0 as f64 / GB as f64, "GB")
+        } else if self.0 >= MB {
+            (self.0 as f64 / MB as f64, "MB")
+        } else if self.0 >= KB {
+            (self.0 as f64 / KB as f64, "KB")
+        } else {
+            (self.0 as f64, "B")
+        };
+
+        write!(f, "{:.2}{}", size, unit)
+    }
+}
+
+/// Value parser function that supports various formats.
+fn parse_byte_size(s: &str) -> Result { + s.parse::().map(Into::into) +} + #[cfg(test)] mod tests { use super::*; use clap::Parser; + use reth_db::mdbx::{GIGABYTE, KILOBYTE, MEGABYTE, TERABYTE}; /// A helper type to parse Args more easily #[derive(Parser)] @@ -87,6 +191,101 @@ mod tests { assert_eq!(args, default_args); } + #[test] + fn test_command_parser_with_valid_max_size() { + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "4398046511104", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(TERABYTE * 4)); + } + + #[test] + fn test_command_parser_with_invalid_max_size() { + let result = + CommandParser::::try_parse_from(["reth", "--db.max-size", "invalid"]); + assert!(result.is_err()); + } + + #[test] + fn test_command_parser_with_valid_growth_step() { + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.growth-step", + "4294967296", + ]) + .unwrap(); + assert_eq!(cmd.args.growth_step, Some(GIGABYTE * 4)); + } + + #[test] + fn test_command_parser_with_invalid_growth_step() { + let result = + CommandParser::::try_parse_from(["reth", "--db.growth-step", "invalid"]); + assert!(result.is_err()); + } + + #[test] + fn test_command_parser_with_valid_max_size_and_growth_step_from_str() { + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "2TB", + "--db.growth-step", + "1GB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(TERABYTE * 2)); + assert_eq!(cmd.args.growth_step, Some(GIGABYTE)); + + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "12MB", + "--db.growth-step", + "2KB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12)); + assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2)); + + // with spaces + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "12 MB", + "--db.growth-step", + "2 KB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12)); + assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2)); + + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "1073741824", + "--db.growth-step", + "1048576", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(GIGABYTE)); + assert_eq!(cmd.args.growth_step, Some(MEGABYTE)); + } + + #[test] + fn test_command_parser_max_size_and_growth_step_from_str_invalid_unit() { + let result = + CommandParser::::try_parse_from(["reth", "--db.growth-step", "1 PB"]); + assert!(result.is_err()); + + let result = + CommandParser::::try_parse_from(["reth", "--db.max-size", "2PB"]); + assert!(result.is_err()); + } + #[test] fn test_possible_values() { // Initialize the LogLevelValueParser diff --git a/crates/node/core/src/args/log.rs b/crates/node/core/src/args/log.rs index aa2e0cf5f1..3d124fba22 100644 --- a/crates/node/core/src/args/log.rs +++ b/crates/node/core/src/args/log.rs @@ -78,7 +78,7 @@ impl LogArgs { format, self.verbosity.directive().to_string(), filter, - if use_color { Some(self.color.to_string()) } else { None }, + use_color.then(|| self.color.to_string()), ) } diff --git a/crates/node/core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs index aec35253af..cd7ba7dccf 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -1,12 +1,11 @@ use crate::{cli::config::PayloadBuilderConfig, version::default_extradata}; +use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; +use alloy_eips::{eip1559::ETHEREUM_BLOCK_GAS_LIMIT, merge::SLOT_DURATION}; use clap::{ 
builder::{RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; use reth_cli_util::{parse_duration_from_secs, parse_duration_from_secs_or_ms}; -use reth_primitives::constants::{ - ETHEREUM_BLOCK_GAS_LIMIT, MAXIMUM_EXTRA_DATA_SIZE, SLOT_DURATION, -}; use std::{borrow::Cow, ffi::OsStr, time::Duration}; /// Parameters for configuring the Payload Builder @@ -87,7 +86,7 @@ impl TypedValueParser for ExtradataValueParser { ) -> Result { let val = value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?; - if val.as_bytes().len() > MAXIMUM_EXTRA_DATA_SIZE { + if val.len() > MAXIMUM_EXTRA_DATA_SIZE { return Err(clap::Error::raw( clap::error::ErrorKind::InvalidValue, format!( diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index 190e3a06ac..d130eadfde 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -2,7 +2,7 @@ use crate::args::error::ReceiptsLogError; use alloy_primitives::{Address, BlockNumber}; -use clap::Args; +use clap::{builder::RangedU64ValueParser, Args}; use reth_chainspec::EthChainSpec; use reth_config::config::PruneConfig; use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE}; @@ -17,8 +17,8 @@ pub struct PruningArgs { pub full: bool, /// Minimum pruning interval measured in blocks. - #[arg(long, default_value_t = 0)] - pub block_interval: u64, + #[arg(long, value_parser = RangedU64ValueParser::::new().range(1..),)] + pub block_interval: Option, // Sender Recovery /// Prunes all sender recovery data. @@ -99,7 +99,7 @@ impl PruningArgs { // If --full is set, use full node defaults. if self.full { config = PruneConfig { - block_interval: 5, + block_interval: config.block_interval, recent_sidecars_kept_blocks: 0, segments: PruneModes { sender_recovery: Some(PruneMode::Full), @@ -124,6 +124,9 @@ impl PruningArgs { } // Override with any explicitly set prune.* flags. + if let Some(block_interval) = self.block_interval { + config.block_interval = block_interval as usize; + } if let Some(mode) = self.sender_recovery_prune_mode() { config.segments.sender_recovery = Some(mode); } diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs index 15771e9897..fe9b80cec4 100644 --- a/crates/node/core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -1,11 +1,13 @@ //! clap [Args](clap::Args) for RPC related arguments. use std::{ + collections::HashSet, ffi::OsStr, net::{IpAddr, Ipv4Addr}, path::PathBuf, }; +use alloy_primitives::Address; use alloy_rpc_types_engine::JwtSecret; use clap::{ builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, @@ -135,6 +137,11 @@ pub struct RpcServerArgs { pub rpc_max_connections: MaxU32, /// Maximum number of concurrent tracing requests. + /// + /// By default this chooses a sensible value based on the number of available cores. + /// Tracing requests are generally CPU bound. + /// Choosing a value that is higher than the available CPU cores can have a negative impact on + /// the performance of the node and affect the node's ability to maintain sync. 
#[arg(long = "rpc.max-tracing-requests", alias = "rpc-max-tracing-requests", value_name = "COUNT", default_value_t = constants::default_max_tracing_requests())] pub rpc_max_tracing_requests: usize, @@ -178,6 +185,11 @@ pub struct RpcServerArgs { #[arg(long = "rpc.proof-permits", alias = "rpc-proof-permits", value_name = "COUNT", default_value_t = constants::DEFAULT_PROOF_PERMITS)] pub rpc_proof_permits: usize, + /// Path to file containing disallowed addresses, json-encoded list of strings. Block + /// validation API will reject blocks containing transactions from these addresses. + #[arg(long = "builder.disallow", value_name = "PATH", value_parser = reth_cli_util::parsers::read_json_from_file::>)] + pub builder_disallow: Option>, + /// State cache configuration. #[command(flatten)] pub rpc_state_cache: RpcStateCacheArgs, @@ -194,6 +206,12 @@ impl RpcServerArgs { self } + /// Configures modules for the HTTP-RPC server. + pub fn with_http_api(mut self, http_api: RpcModuleSelection) -> Self { + self.http_api = Some(http_api); + self + } + /// Enables the WS-RPC server. pub const fn with_ws(mut self) -> Self { self.ws = true; @@ -313,6 +331,7 @@ impl Default for RpcServerArgs { gas_price_oracle: GasPriceOracleArgs::default(), rpc_state_cache: RpcStateCacheArgs::default(), rpc_proof_permits: constants::DEFAULT_PROOF_PERMITS, + builder_disallow: Default::default(), } } } diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 63f6c566ca..538315101a 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -1,9 +1,9 @@ //! Transaction pool arguments use crate::cli::config::RethTransactionPoolConfig; +use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use alloy_primitives::Address; use clap::Args; -use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use reth_transaction_pool::{ blobstore::disk::DEFAULT_MAX_CACHED_BLOBS, pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE}, diff --git a/crates/node/core/src/lib.rs b/crates/node/core/src/lib.rs index 6af822e22e..a69a255a3c 100644 --- a/crates/node/core/src/lib.rs +++ b/crates/node/core/src/lib.rs @@ -22,15 +22,6 @@ pub mod primitives { /// Re-export of `reth_rpc_*` crates. pub mod rpc { - /// Re-exported from `reth_rpc_api`. - pub mod api { - pub use reth_rpc_api::*; - } - /// Re-exported from `reth_rpc::eth`. - pub mod eth { - pub use reth_rpc_eth_api::*; - } - /// Re-exported from `reth_rpc::rpc`. pub mod result { pub use reth_rpc_server_types::result::*; diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 77573817a0..b8278c69bd 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -15,8 +15,9 @@ use reth_network_p2p::headers::client::HeadersClient; use serde::{de::DeserializeOwned, Serialize}; use std::{fs, path::Path}; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; -use reth_primitives::{BlockHashOrNumber, Head, SealedHeader}; +use reth_primitives::{Head, SealedHeader}; use reth_stages_types::StageId; use reth_storage_api::{ BlockHashReader, DatabaseProviderFactory, HeaderProvider, StageCheckpointReader, @@ -434,6 +435,32 @@ impl NodeConfig { Err(e) => Err(eyre!("Failed to load configuration: {e}")), } } + + /// Modifies the [`ChainSpec`] generic of the config using the provided closure. 
+    pub fn map_chainspec<F, C>(self, f: F) -> NodeConfig<C>
+    where
+        F: FnOnce(Arc<ChainSpec>) -> C,
+    {
+        let chain = Arc::new(f(self.chain));
+        NodeConfig {
+            chain,
+            datadir: self.datadir,
+            config: self.config,
+            metrics: self.metrics,
+            instance: self.instance,
+            network: self.network,
+            rpc: self.rpc,
+            txpool: self.txpool,
+            builder: self.builder,
+            debug: self.debug,
+            db: self.db,
+            dev: self.dev,
+            pruning: self.pruning,
+            enable_prefetch: self.enable_prefetch,
+            skip_state_root_validation: self.skip_state_root_validation,
+            enable_execution_cache: self.enable_execution_cache,
+        }
+    }
 }
 
 impl Default for NodeConfig<ChainSpec> {
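A hedged usage sketch for the new `map_chainspec` helper; the `OpChainSpec` target type and its conversion from the Ethereum spec are assumptions for illustration:

```rust
// Rewraps a NodeConfig<ChainSpec> into a NodeConfig<OpChainSpec>, keeping all
// other fields; the closure receives the old Arc<ChainSpec>.
let op_config: NodeConfig<OpChainSpec> =
    config.map_chainspec(|spec| OpChainSpec::from((*spec).clone()));
```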
diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs
index a64d121145..a04d4e324e 100644
--- a/crates/node/core/src/utils.rs
+++ b/crates/node/core/src/utils.rs
@@ -1,6 +1,7 @@
 //! Utility functions for node startup and shutdown, for example path parsing and retrieving single
 //! blocks from the network.
 
+use alloy_eips::BlockHashOrNumber;
 use alloy_primitives::Sealable;
 use alloy_rpc_types_engine::{JwtError, JwtSecret};
 use eyre::Result;
@@ -11,7 +12,7 @@ use reth_network_p2p::{
     headers::client::{HeadersClient, HeadersDirection, HeadersRequest},
     priority::Priority,
 };
-use reth_primitives::{BlockHashOrNumber, SealedBlock, SealedHeader};
+use reth_primitives::{SealedBlock, SealedHeader};
 use std::{
     env::VarError,
     path::{Path, PathBuf},
diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml
index 9c56c2da9b..6af3d8cbeb 100644
--- a/crates/node/events/Cargo.toml
+++ b/crates/node/events/Cargo.toml
@@ -19,12 +19,13 @@ reth-network-api.workspace = true
 reth-stages.workspace = true
 reth-prune.workspace = true
 reth-static-file.workspace = true
-reth-primitives.workspace = true
 reth-primitives-traits.workspace = true
 
 # ethereum
 alloy-primitives.workspace = true
 alloy-rpc-types-engine.workspace = true
+alloy-consensus.workspace = true
+alloy-eips.workspace = true
 
 # async
 tokio.workspace = true
diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs
index c856c0ec9e..fb0f4d48d7 100644
--- a/crates/node/events/src/node.rs
+++ b/crates/node/events/src/node.rs
@@ -1,6 +1,7 @@
 //! Support for handling events emitted by node components.
 
 use crate::cl::ConsensusLayerHealthEvent;
+use alloy_consensus::constants::GWEI_TO_WEI;
 use alloy_primitives::{BlockNumber, B256};
 use alloy_rpc_types_engine::ForkchoiceState;
 use futures::Stream;
@@ -9,7 +10,6 @@ use reth_beacon_consensus::{
 };
 use reth_network::NetworkEvent;
 use reth_network_api::PeersInfo;
-use reth_primitives::constants;
 use reth_primitives_traits::{format_gas, format_gas_throughput};
 use reth_prune::PrunerEvent;
 use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId};
@@ -263,9 +263,9 @@ impl NodeState {
                         gas=%format_gas(block.header.gas_used),
                         gas_throughput=%format_gas_throughput(block.header.gas_used, elapsed),
                         full=%format!("{:.1}%", block.header.gas_used as f64 * 100.0 / block.header.gas_limit as f64),
-                        base_fee=%format!("{:.2}gwei", block.header.base_fee_per_gas.unwrap_or(0) as f64 / constants::GWEI_TO_WEI as f64),
-                        blobs=block.header.blob_gas_used.unwrap_or(0) / constants::eip4844::DATA_GAS_PER_BLOB,
-                        excess_blobs=block.header.excess_blob_gas.unwrap_or(0) / constants::eip4844::DATA_GAS_PER_BLOB,
+                        base_fee=%format!("{:.2}gwei", block.header.base_fee_per_gas.unwrap_or(0) as f64 / GWEI_TO_WEI as f64),
+                        blobs=block.header.blob_gas_used.unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB,
+                        excess_blobs=block.header.excess_blob_gas.unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB,
                         ?elapsed,
                         "Block added to canonical chain"
                     );
@@ -504,7 +504,7 @@ where
         } else if let Some(latest_block) = this.state.latest_block {
             let now =
                 SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
-            if now - this.state.latest_block_time.unwrap_or(0) > 60 {
+            if now.saturating_sub(this.state.latest_block_time.unwrap_or(0)) > 60 {
                 // Once we start receiving consensus nodes, don't emit status unless stalled for
                 // 1 minute
                 info!(
diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml
index 76a3a7f663..9efdbd4959 100644
--- a/crates/node/metrics/Cargo.toml
+++ b/crates/node/metrics/Cargo.toml
@@ -35,7 +35,6 @@ procfs = "0.16.0"
 
 [dev-dependencies]
 reqwest.workspace = true
-reth-chainspec.workspace = true
 socket2 = { version = "0.5", default-features = false }
 reth-provider = { workspace = true, features = ["test-utils"] }
diff --git a/crates/node/types/Cargo.toml b/crates/node/types/Cargo.toml
index f04925d9cd..21facae546 100644
--- a/crates/node/types/Cargo.toml
+++ b/crates/node/types/Cargo.toml
@@ -14,4 +14,7 @@ workspace = true
 # reth
 reth-chainspec.workspace = true
 reth-db-api.workspace = true
-reth-engine-primitives.workspace = true
\ No newline at end of file
+reth-engine-primitives.workspace = true
+reth-primitives.workspace = true
+reth-primitives-traits.workspace = true
+reth-trie-db.workspace = true
diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs
index 2c72e02d3e..38e194bd4f 100644
--- a/crates/node/types/src/lib.rs
+++ b/crates/node/types/src/lib.rs
@@ -8,6 +8,8 @@
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
 
+pub use reth_primitives_traits::{Block, BlockBody};
+
 use std::marker::PhantomData;
 
 use reth_chainspec::EthChainSpec;
@@ -16,13 +18,17 @@ use reth_db_api::{
     Database,
 };
 use reth_engine_primitives::EngineTypes;
+use reth_trie_db::StateCommitment;
 
 /// Configures all the primitive types of the node.
-// TODO(mattsse): this is currently a placeholder
-pub trait NodePrimitives {}
+pub trait NodePrimitives {
+    /// Block primitive.
+    type Block;
+}
 
-// TODO(mattsse): Placeholder
-impl NodePrimitives for () {}
+impl NodePrimitives for () {
+    type Block = reth_primitives::Block;
+}
 
 /// The type that configures the essential types of an Ethereum-like node.
 ///
@@ -34,6 +40,8 @@ pub trait NodeTypes: Send + Sync + Unpin + 'static {
     type Primitives: NodePrimitives;
     /// The type used for configuration of the EVM.
     type ChainSpec: EthChainSpec;
+    /// The type used to perform state commitment operations.
+    type StateCommitment: StateCommitment;
 }
 
 /// The type that configures an Ethereum-like node with an engine for consensus.
@@ -84,6 +92,7 @@ where
 {
     type Primitives = Types::Primitives;
     type ChainSpec = Types::ChainSpec;
+
+    type StateCommitment = Types::StateCommitment;
 }
 
 impl<Types, DB> NodeTypesWithEngine for NodeTypesWithDBAdapter<Types, DB>
@@ -104,70 +113,85 @@ where
 
 /// A [`NodeTypes`] type builder.
 #[derive(Default, Debug)]
-pub struct AnyNodeTypes<P = (), C = ()>(PhantomData<P>, PhantomData<C>);
+pub struct AnyNodeTypes<P = (), C = (), S = ()>(PhantomData<P>, PhantomData<C>, PhantomData<S>);
 
-impl<P, C> AnyNodeTypes<P, C> {
+impl<P, C, S> AnyNodeTypes<P, C, S> {
     /// Sets the `Primitives` associated type.
-    pub const fn primitives<T>(self) -> AnyNodeTypes<T, C> {
-        AnyNodeTypes::<T, C>(PhantomData::<T>, PhantomData::<C>)
+    pub const fn primitives<T>(self) -> AnyNodeTypes<T, C, S> {
+        AnyNodeTypes::<T, C, S>(PhantomData::<T>, PhantomData::<C>, PhantomData::<S>)
     }
 
     /// Sets the `ChainSpec` associated type.
-    pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T> {
-        AnyNodeTypes::<P, T>(PhantomData::<P>, PhantomData::<T>)
+    pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T, S> {
+        AnyNodeTypes::<P, T, S>(PhantomData::<P>, PhantomData::<T>, PhantomData::<S>)
+    }
+
+    /// Sets the `StateCommitment` associated type.
+    pub const fn state_commitment<T>(self) -> AnyNodeTypes<P, C, T> {
+        AnyNodeTypes::<P, C, T>(PhantomData::<P>, PhantomData::<C>, PhantomData::<T>)
     }
 }
 
-impl<P, C> NodeTypes for AnyNodeTypes<P, C>
+impl<P, C, S> NodeTypes for AnyNodeTypes<P, C, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     C: EthChainSpec + 'static,
+    S: StateCommitment,
 {
     type Primitives = P;
     type ChainSpec = C;
+    type StateCommitment = S;
 }

{ +pub struct AnyNodeTypesWithEngine

{ /// Embedding the basic node types. - base: AnyNodeTypes, + base: AnyNodeTypes, /// Phantom data for the engine. _engine: PhantomData, } -impl AnyNodeTypesWithEngine { +impl AnyNodeTypesWithEngine { /// Sets the `Primitives` associated type. - pub const fn primitives(self) -> AnyNodeTypesWithEngine { + pub const fn primitives(self) -> AnyNodeTypesWithEngine { AnyNodeTypesWithEngine { base: self.base.primitives::(), _engine: PhantomData } } /// Sets the `Engine` associated type. - pub const fn engine(self) -> AnyNodeTypesWithEngine { + pub const fn engine(self) -> AnyNodeTypesWithEngine { AnyNodeTypesWithEngine { base: self.base, _engine: PhantomData:: } } /// Sets the `ChainSpec` associated type. - pub const fn chain_spec(self) -> AnyNodeTypesWithEngine { + pub const fn chain_spec(self) -> AnyNodeTypesWithEngine { AnyNodeTypesWithEngine { base: self.base.chain_spec::(), _engine: PhantomData } } + + /// Sets the `StateCommitment` associated type. + pub const fn state_commitment(self) -> AnyNodeTypesWithEngine { + AnyNodeTypesWithEngine { base: self.base.state_commitment::(), _engine: PhantomData } + } } -impl NodeTypes for AnyNodeTypesWithEngine +impl NodeTypes for AnyNodeTypesWithEngine where P: NodePrimitives + Send + Sync + Unpin + 'static, E: EngineTypes + Send + Sync + Unpin, C: EthChainSpec + 'static, + S: StateCommitment, { type Primitives = P; type ChainSpec = C; + type StateCommitment = S; } -impl NodeTypesWithEngine for AnyNodeTypesWithEngine +impl NodeTypesWithEngine for AnyNodeTypesWithEngine where P: NodePrimitives + Send + Sync + Unpin + 'static, E: EngineTypes + Send + Sync + Unpin, C: EthChainSpec + 'static, + S: StateCommitment, { type Engine = E; } diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index de2518c347..cd80ad51c8 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -37,7 +37,19 @@ tracy-allocator = ["reth-cli-util/tracy-allocator"] asm-keccak = ["reth-optimism-cli/asm-keccak", "reth-optimism-node/asm-keccak"] -optimism = ["reth-optimism-cli/optimism", "reth-optimism-node/optimism"] +optimism = [ + "reth-optimism-cli/optimism", + "reth-optimism-node/optimism", + "reth-optimism-consensus/optimism", + "reth-optimism-evm/optimism", + "reth-optimism-payload-builder/optimism", + "reth-optimism-rpc/optimism", + "reth-provider/optimism" +] + +dev = [ + "reth-optimism-cli/dev" +] min-error-logs = ["tracing/release_max_level_error"] min-warn-logs = ["tracing/release_max_level_warn"] diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 6822a6a50e..6c440f4349 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -6,7 +6,6 @@ use clap::Parser; use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher}; use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; use reth_optimism_node::{args::RollupArgs, node::OptimismAddOns, OptimismNode}; -use reth_optimism_rpc::SequencerClient; use reth_provider::providers::BlockchainProvider2; use tracing as _; @@ -24,27 +23,20 @@ fn main() { if let Err(err) = Cli::::parse().run(|builder, rollup_args| async move { - let enable_engine2 = rollup_args.experimental; + if rollup_args.experimental { + tracing::warn!(target: "reth::cli", "Experimental engine is default now, and the --engine.experimental flag is deprecated. 
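For orientation, a minimal sketch of how the extended builders are meant to be used downstream (illustrative only: `MerklePatriciaTrie` is assumed here to be the `reth_trie_db` implementor of `StateCommitment`, and `ChainSpec` stands in for whatever chain-spec type the node actually uses):

    use reth_chainspec::ChainSpec;
    use reth_node_types::AnyNodeTypes;
    use reth_trie_db::MerklePatriciaTrie;

    // Each setter only swaps one type parameter; the builder value itself is
    // zero-sized, and the resulting type implements `NodeTypes` with the
    // chosen associated types.
    let types = AnyNodeTypes::default()
        .primitives::<()>() // `()` maps `Block` to `reth_primitives::Block`
        .chain_spec::<ChainSpec>()
        .state_commitment::<MerklePatriciaTrie>();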
diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml
index de2518c347..cd80ad51c8 100644
--- a/crates/optimism/bin/Cargo.toml
+++ b/crates/optimism/bin/Cargo.toml
@@ -37,7 +37,19 @@ tracy-allocator = ["reth-cli-util/tracy-allocator"]

 asm-keccak = ["reth-optimism-cli/asm-keccak", "reth-optimism-node/asm-keccak"]

-optimism = ["reth-optimism-cli/optimism", "reth-optimism-node/optimism"]
+optimism = [
+    "reth-optimism-cli/optimism",
+    "reth-optimism-node/optimism",
+    "reth-optimism-consensus/optimism",
+    "reth-optimism-evm/optimism",
+    "reth-optimism-payload-builder/optimism",
+    "reth-optimism-rpc/optimism",
+    "reth-provider/optimism"
+]
+
+dev = [
+    "reth-optimism-cli/dev"
+]

 min-error-logs = ["tracing/release_max_level_error"]
 min-warn-logs = ["tracing/release_max_level_warn"]
diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs
index 6822a6a50e..6c440f4349 100644
--- a/crates/optimism/bin/src/main.rs
+++ b/crates/optimism/bin/src/main.rs
@@ -6,7 +6,6 @@ use clap::Parser;
 use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher};
 use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli};
 use reth_optimism_node::{args::RollupArgs, node::OptimismAddOns, OptimismNode};
-use reth_optimism_rpc::SequencerClient;
 use reth_provider::providers::BlockchainProvider2;
 use tracing as _;

@@ -24,27 +23,20 @@ fn main() {
     if let Err(err) = Cli::<OpChainSpecParser>::parse().run(|builder, rollup_args| async move {
-        let enable_engine2 = rollup_args.experimental;
+        if rollup_args.experimental {
+            tracing::warn!(target: "reth::cli", "Experimental engine is default now, and the --engine.experimental flag is deprecated. To enable the legacy functionality, use --engine.legacy.");
+        }
+        let use_legacy_engine = rollup_args.legacy;
         let sequencer_http_arg = rollup_args.sequencer_http.clone();
-        match enable_engine2 {
-            true => {
+        match use_legacy_engine {
+            false => {
                 let engine_tree_config = TreeConfig::default()
                     .with_persistence_threshold(rollup_args.persistence_threshold)
                     .with_memory_block_buffer_target(rollup_args.memory_block_buffer_target);
                 let handle = builder
                     .with_types_and_provider::<OptimismNode, BlockchainProvider2<_>>()
                     .with_components(OptimismNode::components(rollup_args))
-                    .with_add_ons(OptimismAddOns::new(sequencer_http_arg.clone()))
-                    .extend_rpc_modules(move |ctx| {
-                        // register sequencer tx forwarder
-                        if let Some(sequencer_http) = sequencer_http_arg {
-                            ctx.registry
-                                .eth_api()
-                                .set_sequencer_client(SequencerClient::new(sequencer_http))?;
-                        }
-
-                        Ok(())
-                    })
+                    .with_add_ons(OptimismAddOns::new(sequencer_http_arg))
                     .launch_with_fn(|builder| {
                         let launcher = EngineNodeLauncher::new(
                             builder.task_executor().clone(),
@@ -57,21 +49,9 @@ fn main() {

                 handle.node_exit_future.await
             }
-            false => {
-                let handle = builder
-                    .node(OptimismNode::new(rollup_args.clone()))
-                    .extend_rpc_modules(move |ctx| {
-                        // register sequencer tx forwarder
-                        if let Some(sequencer_http) = sequencer_http_arg {
-                            ctx.registry
-                                .eth_api()
-                                .set_sequencer_client(SequencerClient::new(sequencer_http))?;
-                        }
-
-                        Ok(())
-                    })
-                    .launch()
-                    .await?;
+            true => {
+                let handle =
+                    builder.node(OptimismNode::new(rollup_args.clone())).launch().await?;

                 handle.node_exit_future.await
             }
diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml
index e13b95056c..4e573ce299 100644
--- a/crates/optimism/chainspec/Cargo.toml
+++ b/crates/optimism/chainspec/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 [dependencies]
 # reth
-reth-chainspec = { workspace = true, features = ["optimism"] }
+reth-chainspec.workspace = true
 reth-ethereum-forks.workspace = true
 reth-primitives-traits.workspace = true
 reth-network-peers.workspace = true
@@ -25,6 +25,8 @@ reth-optimism-forks.workspace = true
 alloy-chains.workspace = true
 alloy-genesis.workspace = true
 alloy-primitives.workspace = true
+alloy-consensus.workspace = true
+alloy-eips.workspace = true

 # op
 op-alloy-rpc-types.workspace = true
@@ -43,4 +45,16 @@ op-alloy-rpc-types.workspace = true

 [features]
 default = ["std"]
-std = []
\ No newline at end of file
+std = [
+    "alloy-chains/std",
+    "alloy-genesis/std",
+    "alloy-primitives/std",
+    "alloy-eips/std",
+    "op-alloy-rpc-types/std",
+    "reth-chainspec/std",
+    "reth-ethereum-forks/std",
+    "reth-primitives-traits/std",
+    "reth-optimism-forks/std",
+    "alloy-consensus/std",
+    "once_cell/std",
+]
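The chain-spec sources that follow replace `once_cell::sync::Lazy` with a crate-local `LazyLock` alias so the statics compile both with and without `std`. The alias is defined in lib.rs further down; the pattern is roughly:

    // With std, lean on the standard library's lazily-initialized static...
    #[cfg(feature = "std")]
    pub(crate) use std::sync::LazyLock;
    // ...and without std, re-export once_cell's Lazy under the same name, so
    // the static definitions themselves need no cfg attributes at all.
    #[cfg(not(feature = "std"))]
    pub(crate) use once_cell::sync::Lazy as LazyLock;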
diff --git a/crates/optimism/chainspec/src/base.rs b/crates/optimism/chainspec/src/base.rs
index 3d986b3bcb..7aa26bf9a6 100644
--- a/crates/optimism/chainspec/src/base.rs
+++ b/crates/optimism/chainspec/src/base.rs
@@ -1,18 +1,17 @@
 //! Chain specification for the Base Mainnet network.

-use alloc::sync::Arc;
+use alloc::{sync::Arc, vec};

 use alloy_chains::Chain;
 use alloy_primitives::{b256, U256};
-use once_cell::sync::Lazy;
 use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec};
 use reth_ethereum_forks::EthereumHardfork;
 use reth_optimism_forks::OptimismHardfork;

-use crate::OpChainSpec;
+use crate::{LazyLock, OpChainSpec};

 /// The Base mainnet spec
-pub static BASE_MAINNET: Lazy<Arc<OpChainSpec>> = Lazy::new(|| {
+pub static BASE_MAINNET: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
     OpChainSpec {
         inner: ChainSpec {
             chain: Chain::base_mainnet(),
diff --git a/crates/optimism/chainspec/src/base_sepolia.rs b/crates/optimism/chainspec/src/base_sepolia.rs
index 5b85f5a6b0..b992dcabaf 100644
--- a/crates/optimism/chainspec/src/base_sepolia.rs
+++ b/crates/optimism/chainspec/src/base_sepolia.rs
@@ -1,18 +1,17 @@
 //! Chain specification for the Base Sepolia testnet network.

-use alloc::sync::Arc;
+use alloc::{sync::Arc, vec};

 use alloy_chains::Chain;
 use alloy_primitives::{b256, U256};
-use once_cell::sync::Lazy;
 use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec};
 use reth_ethereum_forks::EthereumHardfork;
 use reth_optimism_forks::OptimismHardfork;

-use crate::OpChainSpec;
+use crate::{LazyLock, OpChainSpec};

 /// The Base Sepolia spec
-pub static BASE_SEPOLIA: Lazy<Arc<OpChainSpec>> = Lazy::new(|| {
+pub static BASE_SEPOLIA: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
     OpChainSpec {
         inner: ChainSpec {
             chain: Chain::base_sepolia(),
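As a usage note (a sketch, not part of the diff): the statics hand out `Arc`-wrapped specs, so sharing one with a component is a pointer clone rather than a deep copy:

    use reth_optimism_chainspec::BASE_MAINNET;

    // `OpChainSpec` derefs to the inner `ChainSpec`, so fields like `chain`
    // are reachable directly through the `Arc`.
    let spec = BASE_MAINNET.clone();
    assert_eq!(spec.chain, alloy_chains::Chain::base_mainnet());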
diff --git a/crates/optimism/chainspec/src/dev.rs b/crates/optimism/chainspec/src/dev.rs
index 4724e2801b..eae25f73e0 100644
--- a/crates/optimism/chainspec/src/dev.rs
+++ b/crates/optimism/chainspec/src/dev.rs
@@ -3,19 +3,18 @@
 use alloc::sync::Arc;

 use alloy_chains::Chain;
+use alloy_consensus::constants::DEV_GENESIS_HASH;
 use alloy_primitives::U256;
-use once_cell::sync::Lazy;
 use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec};
 use reth_optimism_forks::DEV_HARDFORKS;
-use reth_primitives_traits::constants::DEV_GENESIS_HASH;

-use crate::OpChainSpec;
+use crate::{LazyLock, OpChainSpec};

 /// OP dev testnet specification
 ///
 /// Includes 20 prefunded accounts with `10_000` ETH each derived from mnemonic "test test test test
 /// test test test test test test test junk".
-pub static OP_DEV: Lazy<Arc<OpChainSpec>> = Lazy::new(|| {
+pub static OP_DEV: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
     OpChainSpec {
         inner: ChainSpec {
             chain: Chain::dev(),
diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs
index 816ef57d51..39acc97c4f 100644
--- a/crates/optimism/chainspec/src/lib.rs
+++ b/crates/optimism/chainspec/src/lib.rs
@@ -6,6 +6,7 @@
     issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
 )]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+#![cfg_attr(not(feature = "std"), no_std)]

 extern crate alloc;

@@ -19,28 +20,172 @@ mod opbnb;
 mod opbnb_qa;
 mod opbnb_testnet;

-use std::fmt::Display;
-
+use alloc::{boxed::Box, vec, vec::Vec};
+use alloy_chains::Chain;
 use alloy_genesis::Genesis;
-use alloy_primitives::{Parity, Signature, B256, U256};
+use alloy_primitives::{Bytes, Parity, Signature, B256, U256};
 pub use base::BASE_MAINNET;
 pub use base_sepolia::BASE_SEPOLIA;
+use derive_more::{Constructor, Deref, Display, From, Into};
 pub use dev::OP_DEV;
+#[cfg(not(feature = "std"))]
+pub(crate) use once_cell::sync::Lazy as LazyLock;
 pub use op::OP_MAINNET;
 pub use op_sepolia::OP_SEPOLIA;
 pub use opbnb::OPBNB_MAINNET;
 pub use opbnb_qa::OPBNB_QA;
 pub use opbnb_testnet::OPBNB_TESTNET;
-use derive_more::{Constructor, Deref, Into};
-use once_cell::sync::OnceCell;
 use reth_chainspec::{
-    BaseFeeParams, BaseFeeParamsKind, ChainSpec, DepositContract, EthChainSpec, EthereumHardforks,
-    ForkFilter, ForkId, Hardforks, Head,
+    BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, EthChainSpec,
+    EthereumHardforks, ForkFilter, ForkId, Hardforks, Head,
 };
-use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition};
+use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork};
 use reth_network_peers::NodeRecord;
+use reth_optimism_forks::OptimismHardforks;
 use reth_primitives_traits::Header;
+#[cfg(feature = "std")]
+pub(crate) use std::sync::LazyLock;
+
+/// Chain spec builder for an OP stack chain.
+#[derive(Debug, Default, From)]
+pub struct OpChainSpecBuilder {
+    /// [`ChainSpecBuilder`]
+    inner: ChainSpecBuilder,
+}
+
+impl OpChainSpecBuilder {
+    /// Construct a new builder from the base mainnet chain spec.
+    pub fn base_mainnet() -> Self {
+        let mut inner = ChainSpecBuilder::default()
+            .chain(BASE_MAINNET.chain)
+            .genesis(BASE_MAINNET.genesis.clone());
+        let forks = BASE_MAINNET.hardforks.clone();
+        inner = inner.with_forks(forks);
+
+        Self { inner }
+    }
+
+    /// Construct a new builder from the optimism mainnet chain spec.
+    pub fn optimism_mainnet() -> Self {
+        let mut inner =
+            ChainSpecBuilder::default().chain(OP_MAINNET.chain).genesis(OP_MAINNET.genesis.clone());
+        let forks = OP_MAINNET.hardforks.clone();
+        inner = inner.with_forks(forks);
+
+        Self { inner }
+    }
+}
+
+impl OpChainSpecBuilder {
+    /// Set the chain ID
+    pub fn chain(mut self, chain: Chain) -> Self {
+        self.inner = self.inner.chain(chain);
+        self
+    }
+
+    /// Set the genesis block.
+    pub fn genesis(mut self, genesis: Genesis) -> Self {
+        self.inner = self.inner.genesis(genesis);
+        self
+    }
+
+    /// Add the given fork with the given activation condition to the spec.
+    pub fn with_fork<H: Hardfork>(mut self, fork: H, condition: ForkCondition) -> Self {
+        self.inner = self.inner.with_fork(fork, condition);
+        self
+    }
+
+    /// Add the given forks with the given activation condition to the spec.
+    pub fn with_forks(mut self, forks: ChainHardforks) -> Self {
+        self.inner = self.inner.with_forks(forks);
+        self
+    }
+
+    /// Remove the given fork from the spec.
+    pub fn without_fork(mut self, fork: reth_optimism_forks::OptimismHardfork) -> Self {
+        self.inner = self.inner.without_fork(fork);
+        self
+    }
+
+    /// Enable Bedrock at genesis
+    pub fn bedrock_activated(mut self) -> Self {
+        self.inner = self.inner.paris_activated();
+        self.inner = self
+            .inner
+            .with_fork(reth_optimism_forks::OptimismHardfork::Bedrock, ForkCondition::Block(0));
+        self
+    }
+
+    /// Enable Regolith at genesis
+    pub fn regolith_activated(mut self) -> Self {
+        self = self.bedrock_activated();
+        self.inner = self.inner.with_fork(
+            reth_optimism_forks::OptimismHardfork::Regolith,
+            ForkCondition::Timestamp(0),
+        );
+        self
+    }
+
+    /// Enable Canyon at genesis
+    pub fn canyon_activated(mut self) -> Self {
+        self = self.regolith_activated();
+        // Canyon also activates changes from L1's Shanghai hardfork
+        self.inner = self.inner.with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0));
+        self.inner = self
+            .inner
+            .with_fork(reth_optimism_forks::OptimismHardfork::Canyon, ForkCondition::Timestamp(0));
+        self
+    }
+
+    /// Enable Ecotone at genesis
+    pub fn ecotone_activated(mut self) -> Self {
+        self = self.canyon_activated();
+        self.inner = self.inner.with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(0));
+        self.inner = self
+            .inner
+            .with_fork(reth_optimism_forks::OptimismHardfork::Ecotone, ForkCondition::Timestamp(0));
+        self
+    }
+
+    /// Enable Fjord at genesis
+    pub fn fjord_activated(mut self) -> Self {
+        self = self.ecotone_activated();
+        self.inner = self
+            .inner
+            .with_fork(reth_optimism_forks::OptimismHardfork::Fjord, ForkCondition::Timestamp(0));
+        self
+    }
+
+    /// Enable Granite at genesis
+    pub fn granite_activated(mut self) -> Self {
+        self = self.fjord_activated();
+        self.inner = self
+            .inner
+            .with_fork(reth_optimism_forks::OptimismHardfork::Granite, ForkCondition::Timestamp(0));
+        self
+    }
+
+    /// Enable Holocene at genesis
+    pub fn holocene_activated(mut self) -> Self {
+        self = self.granite_activated();
+        self.inner = self.inner.with_fork(
+            reth_optimism_forks::OptimismHardfork::Holocene,
+            ForkCondition::Timestamp(0),
+        );
+        self
+    }
+
+    /// Build the resulting [`OpChainSpec`].
+    ///
+    /// # Panics
+    ///
+    /// This function panics if the chain ID and genesis are not set ([`Self::chain`] and
+    /// [`Self::genesis`])
+    pub fn build(self) -> OpChainSpec {
+        OpChainSpec { inner: self.inner.build() }
+    }
+}

 /// OP stack chain spec type.
 #[derive(Debug, Clone, Deref, Into, Constructor, PartialEq, Eq)]
@@ -49,6 +194,75 @@ pub struct OpChainSpec {
     pub inner: ChainSpec,
 }

+impl OpChainSpec {
+    /// Read from parent to determine the base fee for the next block
+    pub fn next_block_base_fee(
+        &self,
+        parent: &Header,
+        timestamp: u64,
+    ) -> Result<U256, DecodeError> {
+        let is_holocene_activated = self.inner.is_fork_active_at_timestamp(
+            reth_optimism_forks::OptimismHardfork::Holocene,
+            timestamp,
+        );
+        // If we are in the Holocene, we need to use the base fee params
+        // from the parent block's extra data.
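+        // (Encoding note: Holocene stores these parameters in the header's
+        // extra data as a 9-byte field, one version byte followed by a
+        // big-endian u32 denominator and a big-endian u32 elasticity, which
+        // is exactly what `decode_holocene_1559_params` below unpacks.)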
+        // Else, use the base fee params (default values) from chainspec
+        if is_holocene_activated {
+            let (denominator, elasticity) = decode_holocene_1559_params(parent.extra_data.clone())?;
+            if elasticity == 0 && denominator == 0 {
+                return Ok(U256::from(
+                    parent
+                        .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp))
+                        .unwrap_or_default(),
+                ));
+            }
+            let base_fee_params = BaseFeeParams::new(denominator as u128, elasticity as u128);
+            Ok(U256::from(parent.next_block_base_fee(base_fee_params).unwrap_or_default()))
+        } else {
+            Ok(U256::from(
+                parent
+                    .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp))
+                    .unwrap_or_default(),
+            ))
+        }
+    }
+}
+
+#[derive(Clone, Debug, Display, Eq, PartialEq)]
+/// Error type for decoding Holocene 1559 parameters
+pub enum DecodeError {
+    #[display("Insufficient data to decode")]
+    /// Insufficient data to decode
+    InsufficientData,
+    #[display("Invalid denominator parameter")]
+    /// Invalid denominator parameter
+    InvalidDenominator,
+    #[display("Invalid elasticity parameter")]
+    /// Invalid elasticity parameter
+    InvalidElasticity,
+}
+
+impl core::error::Error for DecodeError {
+    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
+        // None of the errors have sub-errors
+        None
+    }
+}
+
+/// Extracts the Holocene 1559 parameters from the encoded form:
+///
+pub fn decode_holocene_1559_params(extra_data: Bytes) -> Result<(u32, u32), DecodeError> {
+    if extra_data.len() < 9 {
+        return Err(DecodeError::InsufficientData);
+    }
+    let denominator: [u8; 4] =
+        extra_data[1..5].try_into().map_err(|_| DecodeError::InvalidDenominator)?;
+    let elasticity: [u8; 4] =
+        extra_data[5..9].try_into().map_err(|_| DecodeError::InvalidElasticity)?;
+    Ok((u32::from_be_bytes(denominator), u32::from_be_bytes(elasticity)))
+}
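A quick sketch of the decode path with hypothetical values (the `?` assumes a caller that can propagate `DecodeError`):

    use alloy_primitives::Bytes;

    // [version(1) | denominator: u32 BE | elasticity: u32 BE]
    let extra_data = Bytes::from_static(&[0, 0, 0, 0, 8, 0, 0, 0, 8]);
    let (denominator, elasticity) = decode_holocene_1559_params(extra_data)?;
    assert_eq!((denominator, elasticity), (8, 8));

Note how this interacts with `next_block_base_fee` above: a decoded (0, 0) is treated as "parameters not set" and falls back to the chain spec's default base-fee parameters, mirroring the pre-Holocene branch.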
 /// Returns the signature for the optimism deposit transactions, which don't include a
 /// signature.
 pub fn optimism_deposit_tx_signature() -> Signature {
@@ -80,8 +294,8 @@ impl EthChainSpec for OpChainSpec {
         self.inner.prune_delete_limit()
     }

-    fn display_hardforks(&self) -> impl Display {
-        self.inner.display_hardforks()
+    fn display_hardforks(&self) -> Box<dyn Display> {
+        Box::new(ChainSpec::display_hardforks(self))
     }

     fn genesis_header(&self) -> &Header {
@@ -143,10 +357,12 @@ impl EthereumHardforks for OpChainSpec {
     }
 }

+impl OptimismHardforks for OpChainSpec {}
+
 impl From<Genesis> for OpChainSpec {
     fn from(genesis: Genesis) -> Self {
         use reth_optimism_forks::OptimismHardfork;
-        let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis);
+        let optimism_genesis_info = OpGenesisInfo::extract_from(&genesis);
         let genesis_info =
             optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default();
@@ -226,7 +442,6 @@ impl From<Genesis> for OpChainSpec {
             inner: ChainSpec {
                 chain: genesis.config.chain_id.into(),
                 genesis,
-                genesis_hash: OnceCell::new(),
                 hardforks: ChainHardforks::new(ordered_hardforks),
                 paris_block_and_final_difficulty,
                 base_fee_params: optimism_genesis_info.base_fee_params,
@@ -237,12 +452,12 @@
 }

 #[derive(Default, Debug)]
-struct OptimismGenesisInfo {
+struct OpGenesisInfo {
     optimism_chain_info: op_alloy_rpc_types::genesis::OpChainInfo,
     base_fee_params: BaseFeeParamsKind,
 }

-impl OptimismGenesisInfo {
+impl OpGenesisInfo {
     fn extract_from(genesis: &Genesis) -> Self {
         let mut info = Self {
             optimism_chain_info: op_alloy_rpc_types::genesis::OpChainInfo::extract_from(
@@ -286,6 +501,8 @@ impl OptimismGenesisInfo {

 #[cfg(test)]
 mod tests {
+    use std::sync::Arc;
+
     use alloy_genesis::{ChainConfig, Genesis};
     use alloy_primitives::b256;
     use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind};
@@ -296,6 +513,8 @@ mod tests {

     #[test]
     fn base_mainnet_forkids() {
+        let base_mainnet = OpChainSpecBuilder::base_mainnet().build();
+        let _ = base_mainnet.genesis_hash.set(BASE_MAINNET.genesis_hash.get().copied().unwrap());
         test_fork_ids(
             &BASE_MAINNET,
             &[
                 (
                     Head { number: 0, ..Default::default() },
@@ -382,8 +601,12 @@
     #[test]
     fn op_mainnet_forkids() {
+        let op_mainnet = OpChainSpecBuilder::optimism_mainnet().build();
+        // for OP mainnet we have to do this because the genesis header can't be properly computed
+        // from the genesis.json file
+        let _ = op_mainnet.genesis_hash.set(OP_MAINNET.genesis_hash());
         test_fork_ids(
-            &OP_MAINNET,
+            &op_mainnet,
             &[
                 (
                     Head { number: 0, ..Default::default() },
@@ -493,9 +716,19 @@
         )
     }

+    #[test]
+    fn latest_base_mainnet_fork_id_with_builder() {
+        let base_mainnet = OpChainSpecBuilder::base_mainnet().build();
+        assert_eq!(
+            ForkId { hash: ForkHash([0xbc, 0x38, 0xf9, 0xca]), next: 0 },
+            base_mainnet.latest_fork_id()
+        )
+    }
+
     #[test]
     fn is_bedrock_active() {
-        assert!(!OP_MAINNET.is_bedrock_active_at_block(1))
+        let op_mainnet = OpChainSpecBuilder::optimism_mainnet().build();
+        assert!(!op_mainnet.is_bedrock_active_at_block(1))
     }

     #[test]
@@ -784,4 +1017,87 @@
             .all(|(expected, actual)| &**expected == *actual));
         assert_eq!(expected_hardforks.len(), hardforks.len());
     }
+
+    #[test]
+    fn test_get_base_fee_pre_holocene() {
+        let op_chain_spec = &BASE_SEPOLIA;
+        let parent = Header {
+            base_fee_per_gas: Some(1),
+            gas_used: 15763614,
+            gas_limit: 144000000,
+            ..Default::default()
+        };
+        let base_fee = op_chain_spec.next_block_base_fee(&parent, 0);
+        assert_eq!(
+            base_fee.unwrap(),
+            U256::from(
+                parent
+                    .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0))
+                    .unwrap_or_default()
+            )
+        );
+    }
+    fn holocene_chainspec() -> Arc<OpChainSpec> {
+        let mut hardforks = OptimismHardfork::base_sepolia();
+        hardforks.insert(OptimismHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000));
+        Arc::new(OpChainSpec {
+            inner: ChainSpec {
+                chain: BASE_SEPOLIA.inner.chain,
+                genesis: BASE_SEPOLIA.inner.genesis.clone(),
+                genesis_hash: BASE_SEPOLIA.inner.genesis_hash.clone(),
+                paris_block_and_final_difficulty: Some((0, U256::from(0))),
+                hardforks,
+                base_fee_params: BASE_SEPOLIA.inner.base_fee_params.clone(),
+                max_gas_limit: crate::constants::BASE_SEPOLIA_MAX_GAS_LIMIT,
+                prune_delete_limit: 10000,
+                ..Default::default()
+            },
+        })
+    }
+
+    #[test]
+    fn test_get_base_fee_holocene_nonce_not_set() {
+        let op_chain_spec = holocene_chainspec();
+        let parent = Header {
+            base_fee_per_gas: Some(1),
+            gas_used: 15763614,
+            gas_limit: 144000000,
+            timestamp: 1800000003,
+            extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]),
+            ..Default::default()
+        };
+        let base_fee = op_chain_spec.next_block_base_fee(&parent, 1800000005);
+        assert_eq!(
+            base_fee.unwrap(),
+            U256::from(
+                parent
+                    .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0))
+                    .unwrap_or_default()
+            )
+        );
+    }
+
+    #[test]
+    fn test_get_base_fee_holocene_nonce_set() {
+        let op_chain_spec = holocene_chainspec();
+        let parent = Header {
+            base_fee_per_gas: Some(1),
+            gas_used: 15763614,
+            gas_limit: 144000000,
+            extra_data: Bytes::from_static(&[0, 0, 0, 0, 8, 0, 0, 0, 8]),
+            timestamp: 1800000003,
+            ..Default::default()
+        };
+
+        let base_fee = op_chain_spec.next_block_base_fee(&parent, 1800000005);
+        assert_eq!(
+            base_fee.unwrap(),
+            U256::from(
+                parent
+                    .next_block_base_fee(BaseFeeParams::new(0x00000008, 0x00000008))
+                    .unwrap_or_default()
+            )
+        );
+    }
 }
diff --git a/crates/optimism/chainspec/src/op.rs b/crates/optimism/chainspec/src/op.rs
index 4e8b130055..5afb236cd3 100644
--- a/crates/optimism/chainspec/src/op.rs
+++ b/crates/optimism/chainspec/src/op.rs
@@ -1,19 +1,18 @@
 //! Chain specification for the Optimism Mainnet network.

-use alloc::sync::Arc;
+use alloc::{sync::Arc, vec};

 use alloy_chains::Chain;
+use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT;
 use alloy_primitives::{b256, U256};
-use once_cell::sync::Lazy;
 use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec};
 use reth_ethereum_forks::EthereumHardfork;
 use reth_optimism_forks::OptimismHardfork;
-use reth_primitives_traits::constants::ETHEREUM_BLOCK_GAS_LIMIT;

-use crate::OpChainSpec;
+use crate::{LazyLock, OpChainSpec};

 /// The Optimism Mainnet spec
-pub static OP_MAINNET: Lazy<Arc<OpChainSpec>> = Lazy::new(|| {
+pub static OP_MAINNET: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
     OpChainSpec {
         inner: ChainSpec {
             chain: Chain::optimism_mainnet(),
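For comparison with the tests above, a sketch of getting a Holocene-enabled spec through the new builder instead of constructing it by hand (the `holocene_chainspec` helper builds one manually only because it needs a non-zero activation timestamp):

    use reth_optimism_chainspec::OpChainSpecBuilder;

    // Starts from the Base mainnet genesis; each *_activated() call enables
    // the named fork at genesis together with everything it builds on
    // (Holocene pulls in Granite, Fjord, Ecotone, Canyon, Regolith, Bedrock).
    let spec = OpChainSpecBuilder::base_mainnet().holocene_activated().build();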
diff --git a/crates/optimism/chainspec/src/op_sepolia.rs b/crates/optimism/chainspec/src/op_sepolia.rs
index 9d453c8055..31c9eda6bd 100644
--- a/crates/optimism/chainspec/src/op_sepolia.rs
+++ b/crates/optimism/chainspec/src/op_sepolia.rs
@@ -1,19 +1,18 @@
 //! Chain specification for the Optimism Sepolia testnet network.

-use alloc::sync::Arc;
+use alloc::{sync::Arc, vec};

 use alloy_chains::{Chain, NamedChain};
+use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT;
 use alloy_primitives::{b256, U256};
-use once_cell::sync::Lazy;
 use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec};
 use reth_ethereum_forks::EthereumHardfork;
 use reth_optimism_forks::OptimismHardfork;
-use reth_primitives_traits::constants::ETHEREUM_BLOCK_GAS_LIMIT;

-use crate::OpChainSpec;
+use crate::{LazyLock, OpChainSpec};

 /// The OP Sepolia spec
-pub static OP_SEPOLIA: Lazy<Arc<OpChainSpec>> = Lazy::new(|| {
+pub static OP_SEPOLIA: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
     OpChainSpec {
         inner: ChainSpec {
             chain: Chain::from_named(NamedChain::OptimismSepolia),
diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml
index d53270cd62..f9847771da 100644
--- a/crates/optimism/cli/Cargo.toml
+++ b/crates/optimism/cli/Cargo.toml
@@ -65,6 +65,13 @@ tokio-util = { workspace = true, features = ["codec"] }
 tracing.workspace = true
 eyre.workspace = true

+# reth test-vectors
+proptest = { workspace = true, optional = true }
+op-alloy-consensus = { workspace = true, features = [
+    "arbitrary",
+], optional = true }
+
+
 [dev-dependencies]
 tempfile.workspace = true
 reth-stages = { workspace = true, features = ["test-utils"] }
 reth-cli-commands.workspace = true

 [features]
 optimism = [
-    "reth-primitives/optimism",
-    "reth-optimism-evm/optimism",
-    "reth-provider/optimism",
-    "reth-node-core/optimism",
-    "reth-optimism-node/optimism",
+    "reth-primitives/optimism",
+    "reth-optimism-evm/optimism",
+    "reth-provider/optimism",
+    "reth-node-core/optimism",
+    "reth-optimism-node/optimism",
+    "reth-execution-types/optimism",
+    "reth-db/optimism",
+    "reth-db-api/optimism",
+    "reth-chainspec/optimism"
 ]
 asm-keccak = [
     "alloy-primitives/asm-keccak",
@@ -91,3 +102,9 @@ jemalloc = [
     "reth-node-core/jemalloc",
     "reth-node-metrics/jemalloc"
 ]
+
+dev = [
+    "dep:proptest",
+    "reth-cli-commands/arbitrary",
+    "op-alloy-consensus"
+]
diff --git a/crates/optimism/cli/src/commands/build_pipeline.rs b/crates/optimism/cli/src/commands/build_pipeline.rs
index 1cda04dc08..f57cc60b95 100644
--- a/crates/optimism/cli/src/commands/build_pipeline.rs
+++ b/crates/optimism/cli/src/commands/build_pipeline.rs
@@ -76,6 +76,7 @@ where
         .with_tip_sender(tip_tx)
         // we want to sync all blocks the file client provides or 0 if empty
         .with_max_block(max_block)
+        .with_fail_on_unwind(true)
         .add_stages(
             DefaultStages::new(
                 provider_factory.clone(),
diff --git a/crates/optimism/cli/src/commands/init_state/mod.rs b/crates/optimism/cli/src/commands/init_state.rs
similarity index 87%
rename from crates/optimism/cli/src/commands/init_state/mod.rs
rename to crates/optimism/cli/src/commands/init_state.rs
index 3537f89e75..68f5d9a585 100644
--- a/crates/optimism/cli/src/commands/init_state/mod.rs
+++ b/crates/optimism/cli/src/commands/init_state.rs
@@ -6,7 +6,8 @@ use reth_cli_commands::common::{AccessRights, Environment};
 use reth_db_common::init::init_from_state_dump;
 use reth_node_builder::NodeTypesWithEngine;
 use reth_optimism_chainspec::OpChainSpec;
-use reth_optimism_primitives::bedrock::BEDROCK_HEADER;
+use reth_optimism_primitives::bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD};
+use reth_primitives::SealedHeader;
 use reth_provider::{
     BlockNumReader, ChainSpecProvider, DatabaseProviderFactory, StaticFileProviderFactory,
     StaticFileWriter,
@@ -14,8 +15,6 @@ use reth_provider::{
 use std::{fs::File, io::BufReader};
 use tracing::info;

-mod bedrock;
-
 /// Initializes the database with the genesis block.
 #[derive(Debug, Parser)]
 pub struct InitStateCommandOp {
@@ -53,7 +52,12 @@ impl<N: NodeTypesWithEngine<ChainSpec = OpChainSpec>> InitStateCommandOp<N> {
         let last_block_number = provider_rw.last_block_number()?;

         if last_block_number == 0 {
-            bedrock::setup_op_mainnet_without_ovm(&provider_rw, &static_file_provider)?;
+            reth_cli_commands::init_state::without_evm::setup_without_evm(
+                &provider_rw,
+                &static_file_provider,
+                SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH),
+                BEDROCK_HEADER_TTD,
+            )?;

             // SAFETY: it's safe to commit static files, since in the event of a crash, they
             // will be unwound according to database checkpoints.
diff --git a/crates/optimism/cli/src/commands/mod.rs b/crates/optimism/cli/src/commands/mod.rs
index a7674ec2c9..d51f899329 100644
--- a/crates/optimism/cli/src/commands/mod.rs
+++ b/crates/optimism/cli/src/commands/mod.rs
@@ -16,6 +16,9 @@ pub mod import;
 pub mod import_receipts;
 pub mod init_state;

+#[cfg(feature = "dev")]
+pub mod test_vectors;
+
 /// Commands to be executed
 #[derive(Debug, Subcommand)]
 pub enum Commands
@@ -55,4 +58,8 @@ pub enum Commands
     ),
+    /// Generate Test Vectors
+    #[cfg(feature = "dev")]
+    #[command(name = "test-vectors")]
+    TestVectors(test_vectors::Command),
 }
diff --git a/crates/optimism/cli/src/commands/test_vectors.rs b/crates/optimism/cli/src/commands/test_vectors.rs
new file mode 100644
index 0000000000..093d63148e
--- /dev/null
+++ b/crates/optimism/cli/src/commands/test_vectors.rs
@@ -0,0 +1,72 @@
+//! Command for generating test vectors.
+
+use clap::{Parser, Subcommand};
+use op_alloy_consensus::TxDeposit;
+use proptest::test_runner::TestRunner;
+use reth_cli_commands::{
+    compact_types,
+    test_vectors::{
+        compact,
+        compact::{
+            generate_vector, read_vector, GENERATE_VECTORS as ETH_GENERATE_VECTORS,
+            READ_VECTORS as ETH_READ_VECTORS,
+        },
+        tables,
+    },
+};
+
+/// Generate test-vectors for different data types.
+#[derive(Debug, Parser)]
+pub struct Command {
+    #[command(subcommand)]
+    command: Subcommands,
+}
+
+#[derive(Subcommand, Debug)]
+/// `reth test-vectors` subcommands
+pub enum Subcommands {
+    /// Generates test vectors for specified tables. If no table is specified, generate for all.
+    Tables {
+        /// List of table names. Case-sensitive.
+        names: Vec<String>,
+    },
+    /// Generates test vectors for `Compact` types with `--write`. Reads and checks generated
+    /// vectors with `--read`.
+    #[group(multiple = false, required = true)]
+    Compact {
+        /// Write test vectors to a file.
+        #[arg(long)]
+        write: bool,
+
+        /// Read test vectors from a file.
+        #[arg(long)]
+        read: bool,
+    },
+}
+
+impl Command {
+    /// Execute the command
+    pub async fn execute(self) -> eyre::Result<()> {
+        match self.command {
+            Subcommands::Tables { names } => {
+                tables::generate_vectors(names)?;
+            }
+            Subcommands::Compact { write, ..
} => { + compact_types!( + regular: [ + TxDeposit + ], identifier: [] + ); + + if write { + compact::generate_vectors_with(ETH_GENERATE_VECTORS)?; + compact::generate_vectors_with(GENERATE_VECTORS)?; + } else { + compact::read_vectors_with(ETH_READ_VECTORS)?; + compact::read_vectors_with(READ_VECTORS)?; + } + } + } + Ok(()) + } +} diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index e6eed86bf7..43d1261648 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -54,6 +54,7 @@ use tracing::info; // This allows us to manually enable node metrics features, required for proper jemalloc metric // reporting use reth_node_metrics as _; +use reth_node_metrics::recorder::install_prometheus_recorder; /// The main op-reth cli interface. /// @@ -135,6 +136,9 @@ where let _guard = self.init_tracing()?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); + // Install the prometheus recorder to be sure to record all metrics + let _ = install_prometheus_recorder(); + let runner = CliRunner::default(); match self.command { Commands::Node(command) => { @@ -165,6 +169,8 @@ where runner.run_command_until_exit(|ctx| command.execute::(ctx)) } Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), + #[cfg(feature = "dev")] + Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), } } diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index b0c7b968c5..4aef95bc2f 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -25,6 +25,7 @@ reth-optimism-chainspec.workspace = true # ethereum alloy-primitives.workspace = true +alloy-consensus.workspace = true tracing.workspace = true @@ -33,6 +34,9 @@ alloy-primitives.workspace = true reth-optimism-chainspec.workspace = true [features] -optimism = ["reth-primitives/optimism"] +optimism = [ + "reth-primitives/optimism", + "reth-chainspec/optimism" +] opbnb = ["reth-primitives/opbnb"] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index b9e376fcb7..080694804c 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -9,6 +9,7 @@ // The `optimism` feature must be enabled to use this crate. #![cfg(feature = "optimism")] +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::{B64, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; @@ -20,9 +21,7 @@ use reth_consensus_common::validation::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OptimismHardforks; -use reth_primitives::{ - BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, -}; +use reth_primitives::{BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader}; use std::{sync::Arc, time::SystemTime}; mod proof; @@ -35,19 +34,19 @@ pub use validation::validate_block_post_execution; /// /// Provides basic checks as outlined in the execution specs. 
#[derive(Debug, Clone, PartialEq, Eq)] -pub struct OptimismBeaconConsensus { +pub struct OpBeaconConsensus { /// Configuration chain_spec: Arc, } -impl OptimismBeaconConsensus { - /// Create a new instance of [`OptimismBeaconConsensus`] +impl OpBeaconConsensus { + /// Create a new instance of [`OpBeaconConsensus`] pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } } } -impl Consensus for OptimismBeaconConsensus { +impl Consensus for OpBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validate_header_gas(header)?; validate_header_base_fee(header, &self.chain_spec) diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 248a3e5e4c..bcea96a9bb 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -20,10 +20,13 @@ reth-revm.workspace = true reth-execution-errors.workspace = true reth-execution-types.workspace = true reth-prune-types.workspace = true +reth-consensus.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true op-alloy-consensus.workspace = true +alloy-consensus.workspace = true # Optimism reth-optimism-consensus.workspace = true @@ -35,26 +38,40 @@ revm.workspace = true revm-primitives.workspace = true # misc -thiserror.workspace = true +derive_more.workspace = true tracing.workspace = true # async tokio = { workspace = true, features = ["sync", "time"] } [dev-dependencies] -alloy-eips.workspace = true - +reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } +reth-primitives = { workspace = true, features = ["test-utils"] } reth-optimism-chainspec.workspace = true alloy-genesis.workspace = true alloy-consensus.workspace = true [features] +default = ["std"] +std = [ + "reth-consensus/std", + "reth-primitives/std", + "reth-revm/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "revm-primitives/std", + "revm/std" +] optimism = [ - "reth-primitives/optimism", - "reth-execution-types/optimism", - "reth-optimism-consensus/optimism", - "reth-revm/optimism", + "reth-primitives/optimism", + "reth-execution-types/optimism", + "reth-optimism-consensus/optimism", + "revm/optimism", + "revm-primitives/optimism", + "reth-chainspec/optimism" ] opbnb = [ "reth-primitives/opbnb", diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index f5cc4e7930..a4774b986f 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -1,5 +1,5 @@ -use reth_chainspec::ChainSpec; use reth_ethereum_forks::{EthereumHardfork, Head}; +use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OptimismHardfork; /// Returns the revm [`SpecId`](revm_primitives::SpecId) at the given timestamp. @@ -9,10 +9,12 @@ use reth_optimism_forks::OptimismHardfork; /// This is only intended to be used after the Bedrock, when hardforks are activated by /// timestamp. 
pub fn revm_spec_by_timestamp_after_bedrock( - chain_spec: &ChainSpec, + chain_spec: &OpChainSpec, timestamp: u64, ) -> revm_primitives::SpecId { - if chain_spec.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) { + if chain_spec.fork(OptimismHardfork::Holocene).active_at_timestamp(timestamp) { + revm_primitives::HOLOCENE + } else if chain_spec.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) { revm_primitives::GRANITE } else if chain_spec.fork(OptimismHardfork::Fjord).active_at_timestamp(timestamp) { revm_primitives::FJORD @@ -34,8 +36,10 @@ pub fn revm_spec_by_timestamp_after_bedrock( } /// Map the latest active hardfork at the given block to a revm [`SpecId`](revm_primitives::SpecId). -pub fn revm_spec(chain_spec: &ChainSpec, block: &Head) -> revm_primitives::SpecId { - if chain_spec.fork(OptimismHardfork::Granite).active_at_head(block) { +pub fn revm_spec(chain_spec: &OpChainSpec, block: &Head) -> revm_primitives::SpecId { + if chain_spec.fork(OptimismHardfork::Holocene).active_at_head(block) { + revm_primitives::HOLOCENE + } else if chain_spec.fork(OptimismHardfork::Granite).active_at_head(block) { revm_primitives::GRANITE } else if chain_spec.fork(OptimismHardfork::Fjord).active_at_head(block) { revm_primitives::FJORD @@ -91,12 +95,13 @@ pub fn revm_spec(chain_spec: &ChainSpec, block: &Head) -> revm_primitives::SpecI mod tests { use super::*; use reth_chainspec::ChainSpecBuilder; + use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder}; #[test] fn test_revm_spec_by_timestamp_after_merge() { #[inline(always)] - fn op_cs(f: impl FnOnce(ChainSpecBuilder) -> ChainSpecBuilder) -> ChainSpec { - let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)); + fn op_cs(f: impl FnOnce(OpChainSpecBuilder) -> OpChainSpecBuilder) -> OpChainSpec { + let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)).into(); f(cs).build() } assert_eq!( @@ -128,8 +133,8 @@ mod tests { #[test] fn test_to_revm_spec() { #[inline(always)] - fn op_cs(f: impl FnOnce(ChainSpecBuilder) -> ChainSpecBuilder) -> ChainSpec { - let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)); + fn op_cs(f: impl FnOnce(OpChainSpecBuilder) -> OpChainSpecBuilder) -> OpChainSpec { + let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)).into(); f(cs).build() } assert_eq!( diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index c5c6a0a4a3..71f8709e1a 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -1,27 +1,30 @@ //! Error types for the Optimism EVM module. +use alloc::string::String; use reth_evm::execute::BlockExecutionError; /// Optimism Block Executor Errors -#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, derive_more::Display)] pub enum OptimismBlockExecutionError { /// Error when trying to parse L1 block info - #[error("could not get L1 block info from L2 block: {message:?}")] + #[display("could not get L1 block info from L2 block: {message}")] L1BlockInfoError { /// The inner error message message: String, }, /// Thrown when force deploy of create2deployer code fails. - #[error("failed to force create2deployer account code")] + #[display("failed to force create2deployer account code")] ForceCreate2DeployerFail, /// Thrown when a blob transaction is included in a sequencer's block. 
- #[error("blob transaction included in sequencer block")] + #[display("blob transaction included in sequencer block")] BlobTransactionRejected, /// Thrown when a database account could not be loaded. - #[error("failed to load account {0}")] + #[display("failed to load account {_0}")] AccountLoadFailed(alloy_primitives::Address), } +impl core::error::Error for OptimismBlockExecutionError {} + impl From for BlockExecutionError { fn from(err: OptimismBlockExecutionError) -> Self { Self::other(err) diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 88432a3bfd..ebefec841d 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,172 +1,147 @@ -//! Optimism block executor. - -use crate::{ - l1::ensure_create2_deployer, OpChainSpec, OptimismBlockExecutionError, OptimismEvmConfig, -}; -use alloy_primitives::{Address, BlockNumber, U256}; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +//! Optimism block execution strategy. + +use crate::{l1::ensure_create2_deployer, OpEvmConfig, OptimismBlockExecutionError}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use alloy_consensus::Transaction as _; +use alloy_eips::eip7685::Requests; +use alloy_primitives::Address; +use op_alloy_consensus::DepositTransaction; +use reth_chainspec::EthereumHardforks; +use reth_consensus::ConsensusError; use reth_evm::{ execute::{ - BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, - BlockExecutorProvider, BlockValidationError, Executor, ProviderError, + BasicBlockExecutorProvider, BlockExecutionError, BlockExecutionStrategy, + BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, ProviderError, }, - system_calls::{NoopHook, OnStateHook, SystemCaller}, + state_change::post_block_balance_increments, + system_calls::{OnStateHook, SystemCaller}, ConfigureEvm, }; -use reth_execution_types::ExecutionOutcome; +use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OptimismHardfork; -use reth_primitives::{BlockWithSenders, Header, Receipt, Receipts, TxType}; -use reth_prune_types::PruneModes; -use reth_revm::{ - batch::BlockBatchRecord, - db::states::{bundle_state::BundleRetention, StorageSlot}, - state_change::post_block_balance_increments, - Evm, State, -}; +use reth_primitives::{BlockWithSenders, Header, Receipt, TxType}; +use reth_revm::{db::states::StorageSlot, Database, State}; use revm_primitives::{ - db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, ResultAndState, + db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, + ResultAndState, U256, }; -use std::{collections::HashMap, fmt::Display, str::FromStr, sync::Arc}; +use std::{collections::HashMap, fmt::Display, str::FromStr}; use tokio::sync::mpsc::UnboundedSender; -use tracing::{debug, trace}; +use tracing::trace; -/// Provides executors to execute regular optimism blocks +/// Factory for [`OpExecutionStrategy`]. #[derive(Debug, Clone)] -pub struct OpExecutorProvider { +pub struct OpExecutionStrategyFactory { + /// The chainspec chain_spec: Arc, + /// How to create an EVM. evm_config: EvmConfig, } -impl OpExecutorProvider { - /// Creates a new default optimism executor provider. +impl OpExecutionStrategyFactory { + /// Creates a new default optimism executor strategy factory. 
     pub fn optimism(chain_spec: Arc<OpChainSpec>) -> Self {
-        Self::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec))
+        Self::new(chain_spec.clone(), OpEvmConfig::new(chain_spec))
     }
 }

-impl<EvmConfig> OpExecutorProvider<EvmConfig> {
-    /// Creates a new executor provider.
+impl<EvmConfig> OpExecutionStrategyFactory<EvmConfig> {
+    /// Creates a new executor strategy factory.
     pub const fn new(chain_spec: Arc<OpChainSpec>, evm_config: EvmConfig) -> Self {
         Self { chain_spec, evm_config }
     }
 }

-impl<EvmConfig> OpExecutorProvider<EvmConfig>
+impl<EvmConfig> BlockExecutionStrategyFactory for OpExecutionStrategyFactory<EvmConfig>
 where
-    EvmConfig: ConfigureEvm<Header = Header>,

+    EvmConfig:
+        Clone + Unpin + Sync + Send + 'static + ConfigureEvm<Header = Header>
, { - fn op_executor( - &self, - db: DB, - prefetch_tx: Option>, - ) -> OpBlockExecutor + type Strategy + Display>> = + OpExecutionStrategy; + + fn create_strategy(&self, db: DB) -> Self::Strategy where DB: Database + Display>, { - if let Some(tx) = prefetch_tx { - OpBlockExecutor::new_with_prefetch_tx( - self.chain_spec.clone(), - self.evm_config.clone(), - State::builder() - .with_database(db) - .with_bundle_update() - .without_state_clear() - .build(), - tx, - ) - } else { - OpBlockExecutor::new( - self.chain_spec.clone(), - self.evm_config.clone(), - State::builder() - .with_database(db) - .with_bundle_update() - .without_state_clear() - .build(), - ) - } + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + OpExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) } } -impl BlockExecutorProvider for OpExecutorProvider +/// Block execution strategy for Optimism. +#[allow(missing_debug_implementations)] +pub struct OpExecutionStrategy where - EvmConfig: ConfigureEvm
, + EvmConfig: Clone, { - type Executor + Display>> = - OpBlockExecutor; - - type BatchExecutor + Display>> = - OpBatchExecutor; - fn executor( - &self, - db: DB, - prefetch_tx: Option>, - ) -> Self::Executor - where - DB: Database + Display>, - { - self.op_executor(db, prefetch_tx) - } - - fn batch_executor(&self, db: DB) -> Self::BatchExecutor - where - DB: Database + Display>, - { - let executor = self.op_executor(db, None); - OpBatchExecutor { executor, batch_record: BlockBatchRecord::default() } - } -} - -/// Helper container type for EVM with chain spec. -#[derive(Debug, Clone)] -pub struct OpEvmExecutor { /// The chainspec chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, + /// Current state for block execution. + state: State, + /// Utility to call system smart contracts. + system_caller: SystemCaller, } -impl OpEvmExecutor +impl OpExecutionStrategy where - EvmConfig: ConfigureEvm
, + EvmConfig: Clone, { - /// Executes the transactions in the block and returns the receipts. - /// - /// This applies the pre-execution changes, and executes the transactions. - /// - /// The optional `state_hook` will be executed with the state changes if present. - /// - /// # Note + /// Creates a new [`OpExecutionStrategy`] + pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { + let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); + Self { state, chain_spec, evm_config, system_caller } + } +} + +impl OpExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ + /// Configures a new evm configuration and block environment for the given block. /// - /// It does __not__ apply post-execution changes. - fn execute_pre_and_transactions( - &self, + /// Caution: this does not initialize the tx environment. + fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl BlockExecutionStrategy for OpExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ + type Error = BlockExecutionError; + + fn apply_pre_execution_changes( + &mut self, block: &BlockWithSenders, - mut evm: Evm<'_, Ext, &mut State>, - state_hook: Option, - tx: Option>, - ) -> Result<(Vec, u64), BlockExecutionError> - where - DB: Database + Display>, - F: OnStateHook, - { - let mut system_caller = - SystemCaller::new(&self.evm_config, &self.chain_spec).with_state_hook(state_hook); + total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); + + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - // apply pre execution changes - system_caller.apply_beacon_root_contract_call( + self.system_caller.apply_beacon_root_contract_call( block.timestamp, block.number, block.parent_beacon_block_root, &mut evm, )?; - // execute transactions - let is_regolith = - self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); - // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that @@ -174,6 +149,21 @@ where ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?; + Ok(()) + } + + fn execute_transactions( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + _prefetch_rx: Option>, + ) -> Result { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + let is_regolith = + self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); + let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.transactions.len()); for (sender, transaction) in block.transactions_with_sender() { @@ -226,16 +216,8 @@ where ?transaction, "Executed transaction" ); - - system_caller.on_state(&result_and_state); + self.system_caller.on_state(&result_and_state); let ResultAndState { result, state } = result_and_state; - - if let Some(tx) = tx.as_ref() { - tx.send(state.clone()).unwrap_or_else(|err| { - debug!(target: "evm_executor", ?err, "Failed to send post state to prefetch channel") - }); - } - evm.db_mut().commit(state); // append gas used @@ -259,146 +241,22 @@ where .then_some(1), }); } - drop(evm); - - Ok((receipts, cumulative_gas_used)) - } -} - -/// A basic Optimism block executor. -/// -/// Expected usage: -/// - Create a new instance of the executor. -/// - Execute the block. -#[derive(Debug)] -pub struct OpBlockExecutor { - /// Chain specific evm config that's used to execute a block. - executor: OpEvmExecutor, - /// The state to use for execution - state: State, - /// Prefetch channel - prefetch_tx: Option>, -} - -impl OpBlockExecutor { - /// Creates a new Optimism block executor. - pub const fn new( - chain_spec: Arc, - evm_config: EvmConfig, - state: State, - ) -> Self { - Self { executor: OpEvmExecutor { chain_spec, evm_config }, state, prefetch_tx: None } - } - - /// Creates a new Optimism block executor with a prefetch channel. 
- pub const fn new_with_prefetch_tx( - chain_spec: Arc, - evm_config: EvmConfig, - state: State, - tx: UnboundedSender, - ) -> Self { - Self { executor: OpEvmExecutor { chain_spec, evm_config }, state, prefetch_tx: Some(tx) } - } - - /// Returns the chain spec. - #[inline] - pub fn chain_spec(&self) -> &ChainSpec { - &self.executor.chain_spec - } - - /// Returns mutable reference to the state that wraps the underlying database. - pub fn state_mut(&mut self) -> &mut State { - &mut self.state - } -} - -impl OpBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - /// Configures a new evm configuration and block environment for the given block. - /// - /// Caution: this does not initialize the tx environment. - fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.executor.evm_config.fill_cfg_and_block_env( - &mut cfg, - &mut block_env, - header, - total_difficulty, - ); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } - /// Convenience method to invoke `execute_without_verification_with_state_hook` setting the - /// state hook as `None`. - fn execute_without_verification( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - self.execute_without_verification_with_state_hook(block, total_difficulty, None::) + Ok(ExecuteOutput { receipts, gas_used: cumulative_gas_used }) } - /// Execute a single block and apply the state changes to the internal state. - /// - /// Returns the receipts of the transactions in the block and the total gas used. - /// - /// Returns an error if execution fails. - fn execute_without_verification_with_state_hook( + fn apply_post_execution_changes( &mut self, block: &BlockWithSenders, total_difficulty: U256, - state_hook: Option, - ) -> Result<(Vec, u64), BlockExecutionError> - where - F: OnStateHook, - { - // 1. prepare state on new block - self.on_new_block(&block.header); - - // 2. configure the evm and execute - let env = self.evm_env_for_block(&block.header, total_difficulty); - - let (receipts, gas_used) = { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - self.executor.execute_pre_and_transactions( - block, - evm, - state_hook, - self.prefetch_tx.clone(), - ) - }?; - - // 3. apply post execution changes - self.post_execution(block, total_difficulty)?; - - Ok((receipts, gas_used)) - } - - /// Apply settings before a new block is executed. - pub(crate) fn on_new_block(&mut self, header: &Header) { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); - self.state.set_state_clear_flag(state_clear_flag); - } - - /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO - /// hardfork state change. - pub fn post_execution( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { + _receipts: &[Receipt], + ) -> Result { let balance_increments = - post_block_balance_increments(self.chain_spec(), block, total_difficulty); + post_block_balance_increments(&self.chain_spec.clone(), block, total_difficulty); #[cfg(all(feature = "optimism", feature = "opbnb"))] if self - .chain_spec() + .chain_spec .fork(OptimismHardfork::PreContractForkBlock) .transitions_at_block(block.number) { @@ -439,171 +297,47 @@ where (governance_token_contract_address, governance_token_change), ]); } + // increment balances self.state .increment_balances(balance_increments) .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - Ok(()) - } -} - -impl Executor for OpBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders, Header>; - type Output = BlockExecutionOutput; - type Error = BlockExecutionError; - - /// Executes the block and commits the state changes. - /// - /// Returns the receipts of the transactions in the block. - /// - /// Returns an error if the block could not be executed or failed verification. - /// - /// State changes are committed to the database. - fn execute(mut self, input: Self::Input<'_>) -> Result { - let BlockExecutionInput { block, total_difficulty, .. } = input; - let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests: vec![], - gas_used, - snapshot: None, - }) + Ok(Requests::default()) } - fn execute_with_state_closure( - mut self, - input: Self::Input<'_>, - mut witness: F, - ) -> Result - where - F: FnMut(&State), - { - let BlockExecutionInput { block, total_difficulty, .. } = input; - let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - witness(&self.state); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests: vec![], - gas_used, - snapshot: None, - }) + fn state_ref(&self) -> &State { + &self.state } - fn execute_with_state_hook( - mut self, - input: Self::Input<'_>, - state_hook: F, - ) -> Result - where - F: OnStateHook, - { - let BlockExecutionInput { block, total_difficulty, .. } = input; - let (receipts, gas_used) = self.execute_without_verification_with_state_hook( - block, - total_difficulty, - Some(state_hook), - )?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests: vec![], - gas_used, - snapshot: None, - }) + fn state_mut(&mut self) -> &mut State { + &mut self.state } -} - -/// An executor for a batch of blocks. -/// -/// State changes are tracked until the executor is finalized. -#[derive(Debug)] -pub struct OpBatchExecutor { - /// The executor used to execute blocks. - executor: OpBlockExecutor, - /// Keeps track of the batch and record receipts based on the configured prune mode - batch_record: BlockBatchRecord, -} -impl OpBatchExecutor { - /// Returns the receipts of the executed blocks. - pub const fn receipts(&self) -> &Receipts { - self.batch_record.receipts() + fn with_state_hook(&mut self, hook: Option>) { + self.system_caller.with_state_hook(hook); } - /// Returns mutable reference to the state that wraps the underlying database. - pub fn state_mut(&mut self) -> &mut State { - self.executor.state_mut() + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + _requests: &Requests, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec.clone(), receipts) } } -impl BatchExecutor for OpBatchExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders, Header>; - type Output = ExecutionOutcome; - type Error = BlockExecutionError; - - fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { - let BlockExecutionInput { block, total_difficulty, .. } = input; - - if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); - } - let (receipts, _gas_used) = - self.executor.execute_without_verification(block, total_difficulty)?; - - validate_block_post_execution(block, self.executor.chain_spec(), &receipts)?; - - // prepare the state according to the prune mode - let retention = self.batch_record.bundle_retention(block.number); - self.executor.state.merge_transitions(retention); - - // store receipts in the set - self.batch_record.save_receipts(receipts)?; - - Ok(()) - } - - fn finalize(mut self) -> Self::Output { - ExecutionOutcome::new( - self.executor.state.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - self.batch_record.take_requests(), - ) - } - - fn set_tip(&mut self, tip: BlockNumber) { - self.batch_record.set_tip(tip); - } - - fn set_prune_modes(&mut self, prune_modes: PruneModes) { - self.batch_record.set_prune_modes(prune_modes); - } +/// Helper type with backwards compatible methods to obtain executor providers. +#[derive(Debug)] +pub struct OpExecutorProvider; - fn size_hint(&self) -> Option { - Some(self.executor.state.bundle_state.size_hint()) +impl OpExecutorProvider { + /// Creates a new default optimism executor strategy factory. + pub fn optimism( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + BasicBlockExecutorProvider::new(OpExecutionStrategyFactory::optimism(chain_spec)) } } @@ -612,10 +346,12 @@ mod tests { use super::*; use crate::OpChainSpec; use alloy_consensus::TxEip1559; - use alloy_primitives::{b256, Address, StorageKey, StorageValue}; - use reth_chainspec::{ChainSpecBuilder, MIN_TRANSACTION_GAS}; - use reth_optimism_chainspec::{optimism_deposit_tx_signature, BASE_MAINNET}; - use reth_primitives::{Account, Block, BlockBody, Signature, Transaction, TransactionSigned}; + use alloy_primitives::{b256, Address, Signature, StorageKey, StorageValue}; + use op_alloy_consensus::TxDeposit; + use reth_chainspec::MIN_TRANSACTION_GAS; + use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; + use reth_optimism_chainspec::OpChainSpecBuilder; + use reth_primitives::{Account, Block, BlockBody, Transaction, TransactionSigned}; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, }; @@ -648,9 +384,13 @@ mod tests { db } - fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { - let chain_spec = Arc::new(OpChainSpec::new(Arc::unwrap_or_clone(chain_spec))); - OpExecutorProvider { evm_config: OptimismEvmConfig::new(chain_spec.clone()), chain_spec } + fn executor_provider( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + let strategy_factory = + OpExecutionStrategyFactory::new(chain_spec.clone(), OpEvmConfig::new(chain_spec)); + + BasicBlockExecutorProvider::new(strategy_factory) } #[test] @@ -672,11 +412,7 @@ mod tests { let account = Account { balance: U256::MAX, ..Account::default() }; db.insert_account(addr, account, None, HashMap::default()); - let chain_spec = Arc::new( - ChainSpecBuilder::from(&Arc::new(BASE_MAINNET.inner.clone())) - .regolith_activated() - .build(), - ); + let chain_spec = 
Arc::new(OpChainSpecBuilder::base_mainnet().regolith_activated().build()); let tx = TransactionSigned::from_transaction_and_signature( Transaction::Eip1559(TxEip1559 { @@ -700,9 +436,12 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db), None); - executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + // make sure the L1 block contract state is preloaded. + executor.with_state_mut(|state| { + state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + }); // Attempt to execute a block with one deposit and one non-deposit transaction executor @@ -725,8 +464,9 @@ mod tests { ) .unwrap(); - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + let receipts = executor.receipts(); + let tx_receipt = receipts[0][0].as_ref().unwrap(); + let deposit_receipt = receipts[0][1].as_ref().unwrap(); // deposit_receipt_version is not present in pre canyon transactions assert!(deposit_receipt.deposit_receipt_version.is_none()); @@ -757,11 +497,7 @@ mod tests { db.insert_account(addr, account, None, HashMap::default()); - let chain_spec = Arc::new( - ChainSpecBuilder::from(&Arc::new(BASE_MAINNET.inner.clone())) - .canyon_activated() - .build(), - ); + let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().canyon_activated().build()); let tx = TransactionSigned::from_transaction_and_signature( Transaction::Eip1559(TxEip1559 { @@ -781,13 +517,16 @@ mod tests { gas_limit: MIN_TRANSACTION_GAS, ..Default::default() }), - optimism_deposit_tx_signature(), + TxDeposit::signature(), ); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db), None); - executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + // make sure the L1 block contract state is preloaded. + executor.with_state_mut(|state| { + state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + }); // attempt to execute an empty block with parent beacon block root, this should not fail executor @@ -810,8 +549,9 @@ mod tests { ) .expect("Executing a block while canyon is active should not fail"); - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + let receipts = executor.receipts(); + let tx_receipt = receipts[0][0].as_ref().unwrap(); + let deposit_receipt = receipts[0][1].as_ref().unwrap(); // deposit_receipt_version is set to 1 for post canyon deposit transactions assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 18ccbed951..e0668ab020 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,17 +1,17 @@ //! 
Optimism-specific implementation and utilities for the executor use crate::OptimismBlockExecutionError; +use alloc::{string::ToString, sync::Arc}; use alloy_primitives::{address, b256, hex, Address, Bytes, B256, U256}; use reth_chainspec::ChainSpec; use reth_execution_errors::BlockExecutionError; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OptimismHardfork; -use reth_primitives::Block; +use reth_primitives::BlockBody; use revm::{ primitives::{Bytecode, HashMap, SpecId}, DatabaseCommit, L1BlockInfo, }; -use std::sync::Arc; use tracing::trace; /// The address of the create2 deployer @@ -31,9 +31,8 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); /// transaction in the L2 block. /// /// Returns an error if the L1 info transaction is not found, if the block is empty. -pub fn extract_l1_info(block: &Block) -> Result { - let l1_info_tx_data = block - .body +pub fn extract_l1_info(body: &BlockBody) -> Result { + let l1_info_tx_data = body .transactions .first() .ok_or_else(|| OptimismBlockExecutionError::L1BlockInfoError { @@ -302,7 +301,7 @@ mod tests { use alloy_eips::eip2718::Decodable2718; use reth_optimism_chainspec::OP_MAINNET; use reth_optimism_forks::OptimismHardforks; - use reth_primitives::{BlockBody, TransactionSigned}; + use reth_primitives::{Block, BlockBody, TransactionSigned}; use super::*; @@ -318,7 +317,7 @@ mod tests { body: BlockBody { transactions: vec![l1_info_tx], ..Default::default() }, }; - let l1_info: L1BlockInfo = extract_l1_info(&mock_block).unwrap(); + let l1_info: L1BlockInfo = extract_l1_info(&mock_block.body).unwrap(); assert_eq!(l1_info.l1_base_fee, U256::from(652_114)); assert_eq!(l1_info.l1_fee_overhead, Some(U256::from(2100))); assert_eq!(l1_info.l1_base_fee_scalar, U256::from(1_000_000)); @@ -358,7 +357,7 @@ mod tests { // test - let l1_block_info: L1BlockInfo = extract_l1_info(&block).unwrap(); + let l1_block_info: L1BlockInfo = extract_l1_info(&block.body).unwrap(); assert_eq!(l1_block_info.l1_base_fee, expected_l1_base_fee); assert_eq!(l1_block_info.l1_base_fee_scalar, expected_l1_base_fee_scalar); diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index b220b6056d..c33a4338d5 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -6,19 +6,22 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] // The `optimism` feature must be enabled to use this crate. #![cfg(feature = "optimism")] +extern crate alloc; + +use alloc::{sync::Arc, vec::Vec}; use alloy_primitives::{Address, U256}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; -use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_chainspec::{DecodeError, OpChainSpec}; use reth_primitives::{ revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, transaction::FillTxEnv, Head, Header, TransactionSigned, }; use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; -use std::sync::Arc; mod config; pub use config::{revm_spec, revm_spec_by_timestamp_after_bedrock}; @@ -35,12 +38,12 @@ use revm_primitives::{ /// Optimism-related EVM configuration. #[derive(Debug, Clone)] -pub struct OptimismEvmConfig { +pub struct OpEvmConfig { chain_spec: Arc, } -impl OptimismEvmConfig { - /// Creates a new [`OptimismEvmConfig`] with the given chain spec. +impl OpEvmConfig { + /// Creates a new [`OpEvmConfig`] with the given chain spec. 
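(Given the `OpExecutorProvider` rework earlier in this diff and the `OpEvmConfig` rename here, a minimal sketch of how a caller now obtains a block executor; it mirrors `OpExecutorProvider::optimism` and the test helper above, and assumes a `chain_spec: Arc<OpChainSpec>` already in scope:)

    use reth_evm::execute::BasicBlockExecutorProvider;

    // Build the EVM config, wrap it in the strategy factory, and hand that to the
    // generic provider that now drives both one-shot and batch execution.
    let evm_config = OpEvmConfig::new(chain_spec.clone());
    let strategy_factory = OpExecutionStrategyFactory::new(chain_spec.clone(), evm_config);
    let provider = BasicBlockExecutorProvider::new(strategy_factory);

    // Equivalent shorthand added by this change:
    let provider = OpExecutorProvider::optimism(chain_spec);
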
pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } } @@ -51,8 +54,9 @@ impl OptimismEvmConfig { } } -impl ConfigureEvmEnv for OptimismEvmConfig { +impl ConfigureEvmEnv for OpEvmConfig { type Header = Header; + type Error = DecodeError; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { transaction.fill_tx_env(tx_env, sender); @@ -124,14 +128,14 @@ impl ConfigureEvmEnv for OptimismEvmConfig { cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; cfg_env.handler_cfg.spec_id = spec_id; - cfg_env.handler_cfg.is_optimism = self.chain_spec.is_optimism(); + cfg_env.handler_cfg.is_optimism = true; } fn next_cfg_and_block_env( &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { // configure evm env based on parent block let cfg = CfgEnv::default().with_chain_id(self.chain_spec.chain().id()); @@ -139,17 +143,10 @@ impl ConfigureEvmEnv for OptimismEvmConfig { let spec_id = revm_spec_by_timestamp_after_bedrock(&self.chain_spec, attributes.timestamp); // if the parent block did not have excess blob gas (i.e. it was pre-cancun), but it is - // cancun now, we need to set the excess blob gas to the default value + // cancun now, we need to set the excess blob gas to the default value(0) let blob_excess_gas_and_price = parent .next_block_excess_blob_gas() - .or_else(|| { - if spec_id.is_enabled_in(SpecId::CANCUN) { - // default excess blob gas is zero - Some(0) - } else { - None - } - }) + .or_else(|| (spec_id.is_enabled_in(SpecId::CANCUN)).then_some(0)) .map(BlobExcessGasAndPrice::new); let block_env = BlockEnv { @@ -160,13 +157,7 @@ impl ConfigureEvmEnv for OptimismEvmConfig { prevrandao: Some(attributes.prev_randao), gas_limit: U256::from(parent.gas_limit), // calculate basefee based on parent block's gas usage - basefee: U256::from( - parent - .next_block_base_fee( - self.chain_spec.base_fee_params_at_timestamp(attributes.timestamp), - ) - .unwrap_or_default(), - ), + basefee: self.chain_spec.next_block_base_fee(parent, attributes.timestamp)?, // calculate excess gas based on parent block's blob gas usage blob_excess_gas_and_price, }; @@ -179,11 +170,11 @@ impl ConfigureEvmEnv for OptimismEvmConfig { }; } - (cfg_with_handler_cfg, block_env) + Ok((cfg_with_handler_cfg, block_env)) } } -impl ConfigureEvm for OptimismEvmConfig { +impl ConfigureEvm for OpEvmConfig { type DefaultExternalContext<'a> = (); fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { @@ -209,25 +200,34 @@ impl ConfigureEvm for OptimismEvmConfig { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::constants::KECCAK_EMPTY; + use alloy_eips::eip7685::Requests; use alloy_genesis::Genesis; - use alloy_primitives::{B256, U256}; - use reth_chainspec::{Chain, ChainSpec}; + use alloy_primitives::{bytes, Address, LogData, B256, U256}; + use reth_chainspec::ChainSpec; use reth_evm::execute::ProviderError; + use reth_execution_types::{ + AccountRevertInit, BundleStateInit, Chain, ExecutionOutcome, RevertsInit, + }; use reth_optimism_chainspec::BASE_MAINNET; use reth_primitives::{ - revm_primitives::{BlockEnv, CfgEnv, SpecId}, - Header, KECCAK_EMPTY, + revm_primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId}, + Account, Header, Log, Receipt, Receipts, SealedBlockWithSenders, TxType, }; + use reth_revm::{ - db::{CacheDB, EmptyDBTyped}, + db::{BundleState, CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, JournaledState, }; use 
revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg}; - use std::{collections::HashSet, sync::Arc}; + use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + }; - fn test_evm_config() -> OptimismEvmConfig { - OptimismEvmConfig::new(BASE_MAINNET.clone()) + fn test_evm_config() -> OpEvmConfig { + OpEvmConfig::new(BASE_MAINNET.clone()) } #[test] @@ -244,7 +244,7 @@ mod tests { // Build the ChainSpec for Ethereum mainnet, activating London, Paris, and Shanghai // hardforks let chain_spec = ChainSpec::builder() - .chain(Chain::mainnet()) + .chain(0.into()) .genesis(Genesis::default()) .london_activated() .paris_activated() @@ -254,9 +254,9 @@ mod tests { // Define the total difficulty as zero (default) let total_difficulty = U256::ZERO; - // Use the `OptimismEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec, + // Use the `OpEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec, // Header, and total difficulty - OptimismEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) + OpEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) .fill_cfg_and_block_env(&mut cfg_env, &mut block_env, &header, total_difficulty); // Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the @@ -266,7 +266,7 @@ mod tests { #[test] fn test_evm_configure() { - // Create a default `OptimismEvmConfig` + // Create a default `OpEvmConfig` let evm_config = test_evm_config(); // Initialize an empty database wrapped in CacheDB @@ -547,4 +547,500 @@ mod tests { // Optimism in handler assert_eq!(evm.handler.cfg, HandlerCfg { spec_id: SpecId::ECOTONE, is_optimism: true }); } + + #[test] + fn receipts_by_block_hash() { + // Create a default SealedBlockWithSenders object + let block = SealedBlockWithSenders::default(); + + // Define block hashes for block1 and block2 + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + + // Clone the default block into block1 and block2 + let mut block1 = block.clone(); + let mut block2 = block; + + // Set the hashes of block1 and block2 + block1.block.header.set_block_number(10); + block1.block.header.set_hash(block1_hash); + + block2.block.header.set_block_number(11); + block2.block.header.set_hash(block2_hash); + + // Create a random receipt object, receipt1 + let receipt1 = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create another random receipt object, receipt2 + let receipt2 = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 1325345, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object with a vector of receipt vectors + let receipts = + Receipts { receipt_vec: vec![vec![Some(receipt1.clone())], vec![Some(receipt2)]] }; + + // Create an ExecutionOutcome object with the created bundle, receipts, an empty requests + // vector, and first_block set to 10 + let execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts, + requests: vec![], + first_block: 10, + snapshots: vec![], + }; + + // Create a Chain object with a BTreeMap of blocks mapped to their block numbers, + // including block1_hash and block2_hash, and the execution_outcome + let chain = Chain::new([block1, block2], execution_outcome.clone(), None); + + // Assert that the proper receipt vector is returned for block1_hash + 
assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1])); + + // Create an ExecutionOutcome object with a single receipt vector containing receipt1 + let execution_outcome1 = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { receipt_vec: vec![vec![Some(receipt1)]] }, + requests: vec![], + first_block: 10, + snapshots: vec![], + }; + + // Assert that the execution outcome at the first block contains only the first receipt + assert_eq!(chain.execution_outcome_at_block(10), Some(execution_outcome1)); + + // Assert that the execution outcome at the tip block contains the whole execution outcome + assert_eq!(chain.execution_outcome_at_block(11), Some(execution_outcome)); + } + + #[test] + fn test_initialisation() { + // Create a new BundleState object with initial data + let bundle = BundleState::new( + vec![(Address::new([2; 20]), None, Some(AccountInfo::default()), HashMap::default())], + vec![vec![(Address::new([2; 20]), None, vec![])]], + vec![], + ); + + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Create a Requests object with a vector of requests + let requests = vec![Requests::new(vec![bytes!("dead"), bytes!("beef"), bytes!("beebee")])]; + + // Define the first block number + let first_block = 123; + + // Create an ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: bundle.clone(), + receipts: receipts.clone(), + requests: requests.clone(), + first_block, + snapshots: vec![], + }; + + // Assert that creating a new ExecutionOutcome using the constructor matches exec_res + assert_eq!( + ExecutionOutcome::new(bundle, receipts.clone(), first_block, requests.clone()), + exec_res + ); + + // Create a BundleStateInit object and insert initial data + let mut state_init: BundleStateInit = HashMap::default(); + state_init + .insert(Address::new([2; 20]), (None, Some(Account::default()), HashMap::default())); + + // Create a HashMap for account reverts and insert initial data + let mut revert_inner: HashMap<Address, AccountRevertInit> = HashMap::default(); + revert_inner.insert(Address::new([2; 20]), (None, vec![])); + + // Create a RevertsInit object and insert the revert_inner data + let mut revert_init: RevertsInit = HashMap::default(); + revert_init.insert(123, revert_inner); + + // Assert that creating a new ExecutionOutcome using the new_init method matches + // exec_res + assert_eq!( + ExecutionOutcome::new_init( + state_init, + revert_init, + vec![], + receipts, + first_block, + requests, + ), + exec_res + ); + } + + #[test] + fn test_block_number_to_index() { + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Define the first block number + let first_block = 123; + + // Create an ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), + receipts, + requests: vec![], + first_block, + snapshots: vec![], + }; + + // Test before the first block + assert_eq!(exec_res.block_number_to_index(12), None); + + // Test after the first block but with an index larger than the receipts length + assert_eq!(exec_res.block_number_to_index(133), None); + + // Test after the first block + assert_eq!(exec_res.block_number_to_index(123), Some(0)); + } + + #[test] + fn test_get_logs() { + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![Log::<LogData>::default()], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Define the first block number + let first_block = 123; + + // Create an ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), + receipts, + requests: vec![], + first_block, + snapshots: vec![], + }; + + // Get logs for block number 123 + let logs: Vec<&Log> = exec_res.logs(123).unwrap().collect(); + + // Assert that the logs match the expected logs + assert_eq!(logs, vec![&Log::<LogData>::default()]); + } + + #[test] + fn test_receipts_by_block() { + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![Log::<LogData>::default()], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Define the first block number + let first_block = 123; + + // Create an ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), // Default value for bundle + receipts, // Include the created receipts + requests: vec![], // Empty vector for requests + first_block, // Set the first block number + snapshots: vec![], + }; + + // Get receipts for block number 123 and convert the result into a vector + let receipts_by_block: Vec<_> = exec_res.receipts_by_block(123).iter().collect(); + + // Assert that the receipts for block number 123 match the expected receipts + assert_eq!( + receipts_by_block, + vec![&Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![Log::<LogData>::default()], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })] + ); + } + + #[test] + fn test_receipts_len() { + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![Log::<LogData>::default()], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Create an empty Receipts object + let receipts_empty = Receipts { receipt_vec: vec![] }; + + // Define the first block number + let first_block = 123; + + // Create an ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), // Default value for bundle + receipts, // Include the created receipts + requests: vec![], // Empty vector for requests + first_block, // Set the first block number + snapshots: vec![], + }; + + // Assert that the length of receipts in exec_res is 1 + assert_eq!(exec_res.len(), 1); + + // Assert that exec_res is not empty + assert!(!exec_res.is_empty()); + + // Create an ExecutionOutcome object with an empty Receipts object + let exec_res_empty_receipts = ExecutionOutcome { + bundle: 
Default::default(), // Default value for bundle + receipts: receipts_empty, // Include the empty receipts + requests: vec![], // Empty vector for requests + first_block, // Set the first block number + snapshots: vec![], + }; + + // Assert that the length of receipts in exec_res_empty_receipts is 0 + assert_eq!(exec_res_empty_receipts.len(), 0); + + // Assert that exec_res_empty_receipts is empty + assert!(exec_res_empty_receipts.is_empty()); + } + + #[test] + fn test_revert_to() { + // Create a random receipt object + let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt.clone())]], + }; + + // Define the first block number + let first_block = 123; + + // Create a request. + let request = bytes!("deadbeef"); + + // Create a vector of Requests containing the request. + let requests = + vec![Requests::new(vec![request.clone()]), Requests::new(vec![request.clone()])]; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let mut exec_res = ExecutionOutcome { + bundle: Default::default(), + receipts, + requests, + first_block, + snapshots: vec![], + }; + + // Assert that the revert_to method returns true when reverting to the initial block number. + assert!(exec_res.revert_to(123)); + + // Assert that the receipts are properly cut after reverting to the initial block number. + assert_eq!(exec_res.receipts, Receipts { receipt_vec: vec![vec![Some(receipt)]] }); + + // Assert that the requests are properly cut after reverting to the initial block number. + assert_eq!(exec_res.requests, vec![Requests::new(vec![request])]); + + // Assert that the revert_to method returns false when attempting to revert to a block + // number greater than the initial block number. + assert!(!exec_res.revert_to(133)); + + // Assert that the revert_to method returns false when attempting to revert to a block + // number less than the initial block number. + assert!(!exec_res.revert_to(10)); + } + + #[test] + fn test_extend_execution_outcome() { + // Create a Receipt object with specific attributes. + let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object containing the receipt. + let receipts = Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }; + + // Create a request. + let request = bytes!("deadbeef"); + + // Create a vector of Requests containing the request. + let requests = vec![Requests::new(vec![request.clone()])]; + + // Define the initial block number. + let first_block = 123; + + // Create an ExecutionOutcome object. + let mut exec_res = ExecutionOutcome { + bundle: Default::default(), + receipts, + requests, + first_block, + snapshots: vec![], + }; + + // Extend the ExecutionOutcome object by itself. + exec_res.extend(exec_res.clone()); + + // Assert the extended ExecutionOutcome matches the expected outcome. 
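        // (Extending an outcome with itself appends a second block's worth of
        //  receipts and requests while `first_block` stays unchanged, which is
        //  exactly what the assertion below spells out.)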
+ assert_eq!( + exec_res, + ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]] + }, + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], + first_block: 123, + snapshots: vec![], + } + ); + } + + #[test] + fn test_split_at_execution_outcome() { + // Create a random receipt object + let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![ + vec![Some(receipt.clone())], + vec![Some(receipt.clone())], + vec![Some(receipt.clone())], + ], + }; + + // Define the first block number + let first_block = 123; + + // Create a request. + let request = bytes!("deadbeef"); + + // Create a vector of Requests containing the request. + let requests = vec![ + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + ]; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), + receipts, + requests, + first_block, + snapshots: vec![], + }; + + // Split the ExecutionOutcome at block number 124 + let result = exec_res.clone().split_at(124); + + // Define the expected lower ExecutionOutcome after splitting + let lower_execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }, + requests: vec![Requests::new(vec![request.clone()])], + first_block, + snapshots: vec![], + }; + + // Define the expected higher ExecutionOutcome after splitting + let higher_execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]], + }, + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], + first_block: 124, + snapshots: vec![], + }; + + // Assert that the split result matches the expected lower and higher outcomes + assert_eq!(result.0, Some(lower_execution_outcome)); + assert_eq!(result.1, higher_execution_outcome); + + // Assert that splitting at the first block number returns None for the lower outcome + assert_eq!(exec_res.clone().split_at(123), (None, exec_res)); + } } diff --git a/crates/optimism/hardforks/Cargo.toml b/crates/optimism/hardforks/Cargo.toml index 815d50c6bc..c30566a54e 100644 --- a/crates/optimism/hardforks/Cargo.toml +++ b/crates/optimism/hardforks/Cargo.toml @@ -27,5 +27,13 @@ once_cell.workspace = true [features] default = ["std"] -std = [] -serde = ["dep:serde"] \ No newline at end of file +std = [ + "alloy-primitives/std", + "once_cell/std", + "serde?/std" +] +serde = [ + "dep:serde", + "alloy-chains/serde", + "alloy-primitives/serde" +] diff --git a/crates/optimism/hardforks/src/dev.rs b/crates/optimism/hardforks/src/dev.rs index 07f88d3f82..328ef501c4 100644 --- a/crates/optimism/hardforks/src/dev.rs +++ b/crates/optimism/hardforks/src/dev.rs @@ -1,9 +1,13 @@ use alloy_primitives::U256; -use once_cell::sync::Lazy; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition}; +#[cfg(not(feature = "std"))] +use once_cell::sync::Lazy as LazyLock; +#[cfg(feature = "std")] +use std::sync::LazyLock; + /// Dev hardforks -pub static DEV_HARDFORKS: Lazy = 
Lazy::new(|| { +pub static DEV_HARDFORKS: LazyLock = LazyLock::new(|| { ChainHardforks::new(vec![ (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), diff --git a/crates/optimism/hardforks/src/hardfork.rs b/crates/optimism/hardforks/src/hardfork.rs index 3822a1da55..6fd1bad5f0 100644 --- a/crates/optimism/hardforks/src/hardfork.rs +++ b/crates/optimism/hardforks/src/hardfork.rs @@ -39,6 +39,8 @@ hardfork!( Fjord, /// Granite: Granite, + /// Holocene: + Holocene, } ); diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index 6e5dba4926..154c119cea 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -26,14 +26,19 @@ pub trait OptimismHardforks: EthereumHardforks { self.fork(OptimismHardfork::Bedrock).active_at_block(block_number) } + /// Returns `true` if [`Canyon`](OptimismHardfork::Canyon) is active at given block timestamp. + fn is_canyon_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp) + } + /// Returns `true` if [`Ecotone`](OptimismHardfork::Ecotone) is active at given block timestamp. fn is_ecotone_active_at_timestamp(&self, timestamp: u64) -> bool { self.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) } - /// Returns `true` if [`Ecotone`](OptimismHardfork::Ecotone) is active at given block timestamp. + /// Returns `true` if [`Fjord`](OptimismHardfork::Fjord) is active at given block timestamp. fn is_fjord_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) + self.fork(OptimismHardfork::Fjord).active_at_timestamp(timestamp) } /// Returns `true` if [`Granite`](OptimismHardfork::Granite) is active at given block timestamp. @@ -41,6 +46,18 @@ pub trait OptimismHardforks: EthereumHardforks { self.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) } + /// Returns `true` if [`Holocene`](OptimismHardfork::Holocene) is active at given block + /// timestamp. + fn is_holocene_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OptimismHardfork::Holocene).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Regolith`](OptimismHardfork::Regolith) is active at given block + /// timestamp. + fn is_regolith_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OptimismHardfork::Regolith).active_at_timestamp(timestamp) + } + /// Convenience method to check if [`OptimismHardfork::Wright`] is active at a given block /// number. 
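(All of these helpers reduce to the same `self.fork(..).active_at_timestamp(..)` one-liner; a usage sketch, with `chain_spec` any type implementing `OptimismHardforks` and `attrs.timestamp` standing in for whatever timestamp the caller has at hand:)

    // Gate Holocene-only behaviour on the block timestamp.
    if chain_spec.is_holocene_active_at_timestamp(attrs.timestamp) {
        // e.g. the EIP-1559 params validation the engine validator performs
        // further down in this diff
    }
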
fn is_wright_active_at_timestamp(&self, timestamp: u64) -> bool { diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 091da7c29b..ac656ea2d8 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-engine-local.workspace = true reth-primitives.workspace = true reth-payload-builder.workspace = true reth-auto-seal-consensus.workspace = true reth-basic-payload-builder.workspace = true reth-consensus.workspace = true -reth-rpc-types-compat.workspace = true reth-node-api.workspace = true reth-node-builder.workspace = true reth-tracing.workspace = true @@ -26,12 +26,9 @@ reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network.workspace = true reth-evm.workspace = true -reth-revm.workspace = true +reth-revm = { workspace = true, features = ["std"] } reth-beacon-consensus.workspace = true -reth-discv5.workspace = true -reth-rpc-eth-types.workspace = true -reth-rpc-eth-api.workspace = true -reth-rpc.workspace = true +reth-trie-db.workspace = true # op-reth reth-optimism-payload-builder.workspace = true @@ -41,6 +38,9 @@ reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-forks.workspace = true +# revm with required optimism features +revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } + # bsc reth-bsc-consensus.workspace = true @@ -50,21 +50,13 @@ alloy-primitives.workspace = true op-alloy-rpc-types-engine.workspace = true alloy-rpc-types-engine.workspace = true -# async -async-trait.workspace = true -reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } -tracing.workspace = true - # misc clap.workspace = true serde.workspace = true eyre.workspace = true parking_lot.workspace = true -thiserror.workspace = true # rpc -jsonrpsee.workspace = true -jsonrpsee-types.workspace = true serde_json.workspace = true [dev-dependencies] @@ -81,20 +73,42 @@ op-alloy-consensus.workspace = true [features] optimism = [ - "reth-chainspec/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-optimism-evm/optimism", - "reth-optimism-payload-builder/optimism", - "reth-beacon-consensus/optimism", - "reth-revm/optimism", - "reth-auto-seal-consensus/optimism", - "reth-optimism-rpc/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-optimism-evm/optimism", + "reth-optimism-payload-builder/optimism", + "reth-beacon-consensus/optimism", + "revm/optimism", + "reth-auto-seal-consensus/optimism", + "reth-optimism-rpc/optimism", + "reth-engine-local/optimism", + "reth-optimism-consensus/optimism", + "reth-db/optimism", + "reth-chainspec/optimism" +] +asm-keccak = [ + "reth-primitives/asm-keccak", + "reth/asm-keccak", + "alloy-primitives/asm-keccak", + "revm/asm-keccak" +] +test-utils = [ + "reth-node-builder/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-network/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils" ] opbnb = [ - "reth-primitives/opbnb", - "reth-optimism-evm/opbnb", - "reth-optimism-payload-builder/opbnb", + "reth-primitives/opbnb", + "reth-optimism-evm/opbnb", + "reth-optimism-payload-builder/opbnb", ] -asm-keccak = ["reth-primitives/asm-keccak"] 
-test-utils = ["reth-node-builder/test-utils"] diff --git a/crates/optimism/node/src/args.rs b/crates/optimism/node/src/args.rs index 2707748c34..ac6d7ea84e 100644 --- a/crates/optimism/node/src/args.rs +++ b/crates/optimism/node/src/args.rs @@ -38,16 +38,23 @@ pub struct RollupArgs { #[arg(long = "rollup.discovery.v4", default_value = "false")] pub discovery_v4: bool, - /// Enable the engine2 experimental features on op-reth binary + /// Enable the experimental engine features on reth binary + /// + /// DEPRECATED: experimental engine is default now, use --engine.legacy to enable the legacy + /// functionality #[arg(long = "engine.experimental", default_value = "true")] pub experimental: bool, + /// Enable the legacy engine on reth binary + #[arg(long = "engine.legacy", default_value = "false")] + pub legacy: bool, + /// Configure persistence threshold for engine experimental. - #[arg(long = "engine.persistence-threshold", requires = "experimental", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] + #[arg(long = "engine.persistence-threshold", conflicts_with = "legacy", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] pub persistence_threshold: u64, /// Configure the target number of blocks to keep in memory. - #[arg(long = "engine.memory-block-buffer-target", requires = "experimental", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] + #[arg(long = "engine.memory-block-buffer-target", conflicts_with = "legacy", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] pub memory_block_buffer_target: u64, } @@ -60,6 +67,7 @@ impl Default for RollupArgs { compute_pending_block: false, discovery_v4: false, experimental: true, + legacy: false, persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, } diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index a83f4c696a..eb356e86e1 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -14,53 +14,55 @@ use reth_node_api::{ validate_version_specific_fields, EngineTypes, EngineValidator, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OptimismHardfork; -use reth_optimism_payload_builder::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}; +use reth_optimism_forks::{OptimismHardfork, OptimismHardforks}; +use reth_optimism_payload_builder::{ + builder::decode_eip_1559_params, OpBuiltPayload, OpPayloadBuilderAttributes, +}; /// The types used in the optimism beacon consensus engine. 
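(Downstream note: the `EngineTypes` associated types gain an `Envelope` infix below, so code that named the old types has to be respelled; a sketch, assuming the default payload types parameter:)

    // Old: <OpEngineTypes as EngineTypes>::ExecutionPayloadV3
    type V3Envelope = <OpEngineTypes as EngineTypes>::ExecutionPayloadEnvelopeV3;
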
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] #[non_exhaustive] -pub struct OptimismEngineTypes { +pub struct OpEngineTypes { _marker: std::marker::PhantomData, } -impl PayloadTypes for OptimismEngineTypes { +impl PayloadTypes for OpEngineTypes { type BuiltPayload = T::BuiltPayload; type PayloadAttributes = T::PayloadAttributes; type PayloadBuilderAttributes = T::PayloadBuilderAttributes; } -impl EngineTypes for OptimismEngineTypes +impl EngineTypes for OpEngineTypes where T::BuiltPayload: TryInto + TryInto + TryInto + TryInto, { - type ExecutionPayloadV1 = ExecutionPayloadV1; - type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; - type ExecutionPayloadV3 = OpExecutionPayloadEnvelopeV3; - type ExecutionPayloadV4 = OpExecutionPayloadEnvelopeV4; + type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; + type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; + type ExecutionPayloadEnvelopeV3 = OpExecutionPayloadEnvelopeV3; + type ExecutionPayloadEnvelopeV4 = OpExecutionPayloadEnvelopeV4; } -/// A default payload type for [`OptimismEngineTypes`] +/// A default payload type for [`OpEngineTypes`] #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] #[non_exhaustive] -pub struct OptimismPayloadTypes; +pub struct OpPayloadTypes; -impl PayloadTypes for OptimismPayloadTypes { - type BuiltPayload = OptimismBuiltPayload; +impl PayloadTypes for OpPayloadTypes { + type BuiltPayload = OpBuiltPayload; type PayloadAttributes = OpPayloadAttributes; - type PayloadBuilderAttributes = OptimismPayloadBuilderAttributes; + type PayloadBuilderAttributes = OpPayloadBuilderAttributes; } /// Validator for Optimism engine API. #[derive(Debug, Clone)] -pub struct OptimismEngineValidator { +pub struct OpEngineValidator { chain_spec: Arc, } -impl OptimismEngineValidator { +impl OpEngineValidator { /// Instantiates a new validator. 
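(A worked reading of the Holocene `eip_1559_params` check added further down, inferred from the test vectors at the end of this file; the assumption is that `decode_eip_1559_params` splits the `B64` into a leading four-byte denominator and a trailing four-byte elasticity:)

    // 0x0000000000000008 -> denominator = 0, elasticity = 8 => rejected
    //                        (non-zero elasticity with a zero denominator)
    // 0x0000000800000008 -> denominator = 8, elasticity = 8 => accepted
    // 0x0000000000000000 -> both zero                       => accepted
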
pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } @@ -109,7 +111,7 @@ pub fn validate_withdrawals_presence( Ok(()) } -impl EngineValidator for OptimismEngineValidator +impl EngineValidator for OpEngineValidator where Types: EngineTypes, { @@ -147,6 +149,137 @@ where )) } + if self.chain_spec.is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp) + { + let Some(eip_1559_params) = attributes.eip_1559_params else { + return Err(EngineObjectValidationError::InvalidParams( + "MissingEip1559ParamsInPayloadAttributes".to_string().into(), + )) + }; + let (elasticity, denominator) = decode_eip_1559_params(eip_1559_params); + if elasticity != 0 && denominator == 0 { + return Err(EngineObjectValidationError::InvalidParams( + "Eip1559ParamsDenominatorZero".to_string().into(), + )) + } + } + Ok(()) } } + +#[cfg(test)] +mod test { + + use crate::engine; + use alloy_primitives::{b64, Address, B256, B64}; + use alloy_rpc_types_engine::PayloadAttributes; + use reth_chainspec::ForkCondition; + use reth_optimism_chainspec::BASE_SEPOLIA; + + use super::*; + + fn get_chainspec(is_holocene: bool) -> Arc { + let mut hardforks = OptimismHardfork::base_sepolia(); + if is_holocene { + hardforks + .insert(OptimismHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); + } + Arc::new(OpChainSpec { + inner: ChainSpec { + chain: BASE_SEPOLIA.inner.chain, + genesis: BASE_SEPOLIA.inner.genesis.clone(), + genesis_hash: BASE_SEPOLIA.inner.genesis_hash.clone(), + paris_block_and_final_difficulty: BASE_SEPOLIA + .inner + .paris_block_and_final_difficulty, + hardforks, + base_fee_params: BASE_SEPOLIA.inner.base_fee_params.clone(), + max_gas_limit: BASE_SEPOLIA.inner.max_gas_limit, + prune_delete_limit: 10000, + ..Default::default() + }, + }) + } + + const fn get_attributes(eip_1559_params: Option, timestamp: u64) -> OpPayloadAttributes { + OpPayloadAttributes { + gas_limit: Some(1000), + eip_1559_params, + transactions: None, + no_tx_pool: None, + payload_attributes: PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }, + } + } + + #[test] + fn test_well_formed_attributes_pre_holocene() { + let validator = OpEngineValidator::new(get_chainspec(false)); + let attributes = get_attributes(None, 1799999999); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(result.is_ok()); + } + + #[test] + fn test_well_formed_attributes_holocene_no_eip1559_params() { + let validator = OpEngineValidator::new(get_chainspec(true)); + let attributes = get_attributes(None, 1800000000); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(matches!(result, Err(EngineObjectValidationError::InvalidParams(_)))); + } + + #[test] + fn test_well_formed_attributes_holocene_eip1559_params_zero_denominator() { + let validator = OpEngineValidator::new(get_chainspec(true)); + let attributes = get_attributes(Some(b64!("0000000000000008")), 1800000000); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(matches!(result, Err(EngineObjectValidationError::InvalidParams(_)))); + } + + #[test] + fn test_well_formed_attributes_holocene_valid() { + let validator = OpEngineValidator::new(get_chainspec(true)); + let attributes = get_attributes(Some(b64!("0000000800000008")), 1800000000); + + let 
result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(result.is_ok()); + } + + #[test] + fn test_well_formed_attributes_holocene_valid_all_zero() { + let validator = OpEngineValidator::new(get_chainspec(true)); + let attributes = get_attributes(Some(b64!("0000000000000000")), 1800000000); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(result.is_ok()); + } +} diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index 768f4d94ef..f2870d0b83 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -15,7 +15,7 @@ pub mod args; /// Exports optimism-specific implementations of the [`EngineTypes`](reth_node_api::EngineTypes) /// trait. pub mod engine; -pub use engine::OptimismEngineTypes; +pub use engine::OpEngineTypes; pub mod node; pub use node::OptimismNode; @@ -23,7 +23,7 @@ pub use node::OptimismNode; pub mod txpool; pub use reth_optimism_payload_builder::{ - OptimismBuiltPayload, OptimismPayloadBuilder, OptimismPayloadBuilderAttributes, + OpBuiltPayload, OpPayloadBuilder, OpPayloadBuilderAttributes, }; pub use reth_optimism_evm::*; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 1a6456c767..e4f6f7c2c5 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -5,38 +5,49 @@ use std::sync::Arc; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_bsc_consensus::Parlia; use reth_chainspec::{EthChainSpec, Hardforks}; -use reth_evm::ConfigureEvm; -use reth_network::{NetworkConfig, NetworkHandle, NetworkManager}; -use reth_node_api::{EngineValidator, FullNodeComponents, NodeAddOns}; +use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; +use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; +use reth_node_api::{ + AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, +}; use reth_node_builder::{ components::{ - ComponentsBuilder, ConsensusBuilder, EngineValidatorBuilder, ExecutorBuilder, - NetworkBuilder, ParliaBuilder, PayloadServiceBuilder, PoolBuilder, - PoolBuilderConfigOverrides, + ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, ParliaBuilder, + PayloadServiceBuilder, PoolBuilder, PoolBuilderConfigOverrides, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - BuilderContext, Node, PayloadBuilderConfig, + rpc::{EngineValidatorBuilder, RethRpcAddOns, RpcAddOns, RpcHandle}, + BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_consensus::OptimismBeaconConsensus; -use reth_optimism_evm::{OpExecutorProvider, OptimismEvmConfig}; +use reth_optimism_consensus::OpBeaconConsensus; +use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::Header; +use reth_primitives::{Block, Header}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ blobstore::DiskFileBlobStore, CoinbaseTipOrdering, TransactionPool, TransactionValidationTaskExecutor, }; +use reth_trie_db::MerklePatriciaTrie; use crate::{ args::RollupArgs, - engine::OptimismEngineValidator, + engine::OpEngineValidator, txpool::{OpTransactionPool, OpTransactionValidator}, - 
OptimismEngineTypes, + OpEngineTypes, }; +/// Optimism primitive types. +#[derive(Debug)] +pub struct OpPrimitives; + +impl NodePrimitives for OpPrimitives { + type Block = Block; +} + /// Type configuration for a regular Optimism node. #[derive(Debug, Default, Clone)] #[non_exhaustive] @@ -56,53 +67,50 @@ impl OptimismNode { args: RollupArgs, ) -> ComponentsBuilder< Node, - OptimismPoolBuilder, - OptimismPayloadBuilder, - OptimismNetworkBuilder, - OptimismExecutorBuilder, - OptimismConsensusBuilder, - OptimismEngineValidatorBuilder, + OpPoolBuilder, + OpPayloadBuilder, + OpNetworkBuilder, + OpExecutorBuilder, + OpConsensusBuilder, OptimismParliaBuilder, > where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, >, { let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. } = args; ComponentsBuilder::default() .node_types::() - .pool(OptimismPoolBuilder::default()) - .payload(OptimismPayloadBuilder::new(compute_pending_block)) - .network(OptimismNetworkBuilder { + .pool(OpPoolBuilder::default()) + .payload(OpPayloadBuilder::new(compute_pending_block)) + .network(OpNetworkBuilder { disable_txpool_gossip, disable_discovery_v4: !discovery_v4, }) - .executor(OptimismExecutorBuilder::default()) - .consensus(OptimismConsensusBuilder::default()) - .engine_validator(OptimismEngineValidatorBuilder::default()) + .executor(OpExecutorBuilder::default()) + .consensus(OpConsensusBuilder::default()) .parlia(OptimismParliaBuilder::default()) } } impl Node for OptimismNode where - N: FullNodeTypes< - Types: NodeTypesWithEngine, - >, + N: FullNodeTypes>, { type ComponentsBuilder = ComponentsBuilder< N, - OptimismPoolBuilder, - OptimismPayloadBuilder, - OptimismNetworkBuilder, - OptimismExecutorBuilder, - OptimismConsensusBuilder, - OptimismEngineValidatorBuilder, + OpPoolBuilder, + OpPayloadBuilder, + OpNetworkBuilder, + OpExecutorBuilder, + OpConsensusBuilder, OptimismParliaBuilder, >; - type AddOns = OptimismAddOns; + type AddOns = OptimismAddOns< + NodeAdapter>::Components>, + >; fn components_builder(&self) -> Self::ComponentsBuilder { let Self { args } = self; @@ -115,54 +123,81 @@ where } impl NodeTypes for OptimismNode { - type Primitives = (); + type Primitives = OpPrimitives; type ChainSpec = OpChainSpec; + type StateCommitment = MerklePatriciaTrie; } impl NodeTypesWithEngine for OptimismNode { - type Engine = OptimismEngineTypes; + type Engine = OpEngineTypes; } /// Add-ons w.r.t. optimism. -#[derive(Debug, Clone)] -pub struct OptimismAddOns { - sequencer_http: Option, +#[derive(Debug)] +pub struct OptimismAddOns( + pub RpcAddOns, OptimismEngineValidatorBuilder>, +); + +impl Default for OptimismAddOns { + fn default() -> Self { + Self::new(None) + } } -impl OptimismAddOns { +impl OptimismAddOns { /// Create a new instance with the given `sequencer_http` URL. - pub const fn new(sequencer_http: Option) -> Self { - Self { sequencer_http } + pub fn new(sequencer_http: Option) -> Self { + Self(RpcAddOns::new(move |ctx| OpEthApi::new(ctx, sequencer_http), Default::default())) } +} - /// Returns the sequencer HTTP URL. 
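(With the accessor removed, the sequencer URL is handed over exactly once and captured by the closure that constructs `OpEthApi`; a construction sketch, where `N` stands for the node's concrete `FullNodeComponents` type and the URL is purely illustrative:)

    // `OptimismAddOns` now wraps `RpcAddOns`; there is no `sequencer_http()`
    // getter on it any more.
    let add_ons = OptimismAddOns::<N>::new(Some("http://localhost:9545".to_string()));
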
- pub fn sequencer_http(&self) -> Option<&str> { - self.sequencer_http.as_deref() +impl NodeAddOns for OptimismAddOns +where + N: FullNodeComponents>, + OpEngineValidator: EngineValidator<::Engine>, +{ + type Handle = RpcHandle>; + + async fn launch_add_ons( + self, + ctx: reth_node_api::AddOnsContext<'_, N>, + ) -> eyre::Result { + self.0.launch_add_ons(ctx).await } } -impl NodeAddOns for OptimismAddOns { +impl RethRpcAddOns for OptimismAddOns +where + N: FullNodeComponents>, + OpEngineValidator: EngineValidator<::Engine>, +{ type EthApi = OpEthApi; + + fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks { + self.0.hooks_mut() + } } /// A regular optimism evm and executor builder. #[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] -pub struct OptimismExecutorBuilder; +pub struct OpExecutorBuilder; -impl ExecutorBuilder for OptimismExecutorBuilder +impl ExecutorBuilder for OpExecutorBuilder where Node: FullNodeTypes>, { - type EVM = OptimismEvmConfig; - type Executor = OpExecutorProvider; + type EVM = OpEvmConfig; + type Executor = BasicBlockExecutorProvider; async fn build_evm( self, ctx: &BuilderContext, ) -> eyre::Result<(Self::EVM, Self::Executor)> { - let evm_config = OptimismEvmConfig::new(ctx.chain_spec()); - let executor = OpExecutorProvider::new(ctx.chain_spec(), evm_config.clone()); + let evm_config = OpEvmConfig::new(ctx.chain_spec()); + let strategy_factory = + OpExecutionStrategyFactory::new(ctx.chain_spec(), evm_config.clone()); + let executor = BasicBlockExecutorProvider::new(strategy_factory); Ok((evm_config, executor)) } @@ -173,12 +208,12 @@ where /// This contains various settings that can be configured and take precedence over the node's /// config. #[derive(Debug, Default, Clone)] -pub struct OptimismPoolBuilder { +pub struct OpPoolBuilder { /// Enforced overrides that are applied to the pool config. pub pool_config_overrides: PoolBuilderConfigOverrides, } -impl PoolBuilder for OptimismPoolBuilder +impl PoolBuilder for OpPoolBuilder where Node: FullNodeTypes>, { @@ -194,7 +229,11 @@ where )) .with_head_timestamp(ctx.head().timestamp) .kzg_settings(ctx.kzg_settings()?) - .with_additional_tasks(ctx.config().txpool.additional_validation_tasks) + .with_additional_tasks( + pool_config_overrides + .additional_validation_tasks + .unwrap_or_else(|| ctx.config().txpool.additional_validation_tasks), + ) .build_with_tasks(ctx.provider().clone(), ctx.task_executor().clone(), blob_store.clone()) .map(|validator| { OpTransactionValidator::new(validator) @@ -251,7 +290,7 @@ where /// A basic optimism payload service builder #[derive(Debug, Default, Clone)] -pub struct OptimismPayloadBuilder { +pub struct OpPayloadBuilder { /// By default the pending block equals the latest block /// to save resources and not leak txs from the tx-pool, /// this flag enables computing of the pending block @@ -263,7 +302,7 @@ pub struct OptimismPayloadBuilder { pub compute_pending_block: bool, } -impl OptimismPayloadBuilder { +impl OpPayloadBuilder { /// Create a new instance with the given `compute_pending_block` flag. pub const fn new(compute_pending_block: bool) -> Self { Self { compute_pending_block } @@ -275,17 +314,16 @@ impl OptimismPayloadBuilder { evm_config: Evm, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> + ) -> eyre::Result> where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, >, Pool: TransactionPool + Unpin + 'static, Evm: ConfigureEvm
, { - let payload_builder = - reth_optimism_payload_builder::OptimismPayloadBuilder::new(evm_config) - .set_compute_pending_block(self.compute_pending_block); + let payload_builder = reth_optimism_payload_builder::OpPayloadBuilder::new(evm_config) + .set_compute_pending_block(self.compute_pending_block); let conf = ctx.payload_builder_config(); let payload_job_config = BasicPayloadJobGeneratorConfig::default() @@ -311,35 +349,34 @@ impl OptimismPayloadBuilder { } } -impl PayloadServiceBuilder for OptimismPayloadBuilder +impl PayloadServiceBuilder for OpPayloadBuilder where - Node: FullNodeTypes< - Types: NodeTypesWithEngine, - >, + Node: + FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, { async fn spawn_payload_service( self, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> { - self.spawn(OptimismEvmConfig::new(ctx.chain_spec()), ctx, pool) + ) -> eyre::Result> { + self.spawn(OpEvmConfig::new(ctx.chain_spec()), ctx, pool) } } /// A basic optimism network builder. #[derive(Debug, Default, Clone)] -pub struct OptimismNetworkBuilder { +pub struct OpNetworkBuilder { /// Disable transaction pool gossip pub disable_txpool_gossip: bool, /// Disable discovery v4 pub disable_discovery_v4: bool, } -impl OptimismNetworkBuilder { +impl OpNetworkBuilder { /// Returns the [`NetworkConfig`] that contains the settings to launch the p2p network. /// - /// This applies the configured [`OptimismNetworkBuilder`] settings. + /// This applies the configured [`OpNetworkBuilder`] settings. pub fn network_config( &self, ctx: &BuilderContext, @@ -384,7 +421,7 @@ impl OptimismNetworkBuilder { } } -impl NetworkBuilder for OptimismNetworkBuilder +impl NetworkBuilder for OpNetworkBuilder where Node: FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, @@ -397,6 +434,7 @@ where let network_config = self.network_config(ctx)?; let network = NetworkManager::builder(network_config).await?; let handle = ctx.start_network(network, pool); + info!(target: "reth::cli", enode=%handle.local_node_record(), "P2P networking initialized"); Ok(handle) } @@ -405,9 +443,9 @@ where /// A basic optimism consensus builder. #[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct OptimismConsensusBuilder; +pub struct OpConsensusBuilder; -impl ConsensusBuilder for OptimismConsensusBuilder +impl ConsensusBuilder for OpConsensusBuilder where Node: FullNodeTypes>, { @@ -417,12 +455,12 @@ where if ctx.is_dev() { Ok(Arc::new(reth_auto_seal_consensus::AutoSealConsensus::new(ctx.chain_spec()))) } else { - Ok(Arc::new(OptimismBeaconConsensus::new(ctx.chain_spec()))) + Ok(Arc::new(OpBeaconConsensus::new(ctx.chain_spec()))) } } } -/// Builder for [`OptimismEngineValidator`]. +/// Builder for [`OpEngineValidator`]. 
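(Migration note for custom engine validator builders, matching the trait change applied below:)

    // Before: async fn build_validator(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Validator>
    // After:  async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result<Self::Validator>
    //
    // The builder is now generic over full node components rather than node
    // types, and the chain spec is reached through `ctx.config.chain` instead
    // of `ctx.chain_spec()`.
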
#[derive(Debug, Default, Clone)] #[non_exhaustive] pub struct OptimismEngineValidatorBuilder; @@ -430,13 +468,13 @@ pub struct OptimismEngineValidatorBuilder; impl EngineValidatorBuilder for OptimismEngineValidatorBuilder where Types: NodeTypesWithEngine, - Node: FullNodeTypes, - OptimismEngineValidator: EngineValidator, + Node: FullNodeComponents, + OpEngineValidator: EngineValidator, { - type Validator = OptimismEngineValidator; + type Validator = OpEngineValidator; - async fn build_validator(self, ctx: &BuilderContext) -> eyre::Result { - Ok(OptimismEngineValidator::new(ctx.chain_spec())) + async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { + Ok(OpEngineValidator::new(ctx.config.chain.clone())) } } diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 463f7635cf..e5d3272908 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -4,7 +4,7 @@ use alloy_primitives::U256; use parking_lot::RwLock; use reth_chainspec::ChainSpec; use reth_optimism_evm::RethL1BlockInfo; -use reth_optimism_forks::OptimismHardforks; +use reth_optimism_forks::OptimismHardfork; use reth_primitives::{Block, GotExpected, InvalidTransactionError, SealedBlock}; use reth_provider::{BlockReaderIdExt, StateProviderFactory}; use reth_revm::L1BlockInfo; @@ -76,7 +76,7 @@ where pub fn new(inner: EthTransactionValidator) -> Self { let this = Self::with_block_info(inner, OpL1BlockInfo::default()); if let Ok(Some(block)) = - this.inner.client().block_by_number_or_tag(reth_primitives::BlockNumberOrTag::Latest) + this.inner.client().block_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest) { // genesis block has no txs, so we can't extract L1 info, we set the block info to empty // so that we will accept txs into the pool before the first block @@ -101,7 +101,7 @@ where /// Update the L1 block info. 
fn update_l1_block_info(&self, block: &Block) { self.block_info.timestamp.store(block.timestamp, Ordering::Relaxed); - if let Ok(cost_addition) = reth_optimism_evm::extract_l1_info(block) { + if let Ok(cost_addition) = reth_optimism_evm::extract_l1_info(&block.body) { *self.block_info.l1_block_info.write() = cost_addition; } } @@ -146,7 +146,7 @@ where let cost_addition = if self .chain_spec() - .is_wright_active_at_timestamp(self.block_timestamp()) && + .is_fork_active_at_timestamp(OptimismHardfork::Wright, self.block_timestamp()) && valid_tx.transaction().priority_fee_or_price() == 0 { U256::from(0) @@ -242,9 +242,8 @@ pub struct OpL1BlockInfo { mod tests { use crate::txpool::OpTransactionValidator; use alloy_eips::eip2718::Encodable2718; - use alloy_primitives::{TxKind, U256}; + use alloy_primitives::{Signature, TxKind, U256}; use op_alloy_consensus::TxDeposit; - use reth::primitives::Signature; use reth_chainspec::MAINNET; use reth_primitives::{Transaction, TransactionSigned, TransactionSignedEcRecovered}; use reth_provider::test_utils::MockEthProvider; diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index ebd35cc8a5..30affa9baf 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -51,7 +51,6 @@ async fn can_sync() -> eyre::Result<()> { side_payload_chain[0].0.clone(), side_payload_chain[0].1.clone(), PayloadStatusEnum::Valid, - Default::default(), ) .await; @@ -81,7 +80,6 @@ async fn can_sync() -> eyre::Result<()> { } .to_string(), }, - Default::default(), ) .await; diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index 1e9ffa652f..16eb974914 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -1,32 +1,27 @@ -use std::sync::Arc; - use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; -use reth_chainspec::ChainSpecBuilder; -use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet, NodeHelperType}; -use reth_optimism_chainspec::{OpChainSpec, BASE_MAINNET}; +use reth_e2e_test_utils::{ + transaction::TransactionTestContext, wallet::Wallet, Adapter, NodeHelperType, +}; +use reth_optimism_chainspec::OpChainSpecBuilder; use reth_optimism_node::{ - node::OptimismAddOns, OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes, + node::OptimismAddOns, OpBuiltPayload, OpPayloadBuilderAttributes, OptimismNode, }; use reth_payload_builder::EthPayloadBuilderAttributes; +use std::sync::Arc; use tokio::sync::Mutex; /// Optimism Node Helper type -pub(crate) type OpNode = NodeHelperType; +pub(crate) type OpNode = NodeHelperType>>; pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); reth_e2e_test_utils::setup( num_nodes, - Arc::new(OpChainSpec::new( - ChainSpecBuilder::default() - .chain(BASE_MAINNET.chain) - .genesis(genesis) - .ecotone_activated() - .build(), - )), + Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build()), false, + optimism_payload_attributes, ) .await } @@ -36,29 +31,25 @@ pub(crate) async fn advance_chain( length: usize, node: &mut OpNode, wallet: Arc>, -) -> eyre::Result> { - node.advance( - length as u64, - |_| { - let wallet = wallet.clone(); - Box::pin(async move { - let mut wallet = wallet.lock().await; - let tx_fut = 
TransactionTestContext::optimism_l1_block_info_tx( - wallet.chain_id, - wallet.inner.clone(), - wallet.inner_nonce, - ); - wallet.inner_nonce += 1; - tx_fut.await - }) - }, - optimism_payload_attributes, - ) +) -> eyre::Result> { + node.advance(length as u64, |_| { + let wallet = wallet.clone(); + Box::pin(async move { + let mut wallet = wallet.lock().await; + let tx_fut = TransactionTestContext::optimism_l1_block_info_tx( + wallet.chain_id, + wallet.inner.clone(), + wallet.inner_nonce, + ); + wallet.inner_nonce += 1; + tx_fut.await + }) + }) .await } /// Helper function to create a new eth payload attributes -pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OptimismPayloadBuilderAttributes { +pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OpPayloadBuilderAttributes { let attributes = PayloadAttributes { timestamp, prev_randao: B256::ZERO, @@ -67,10 +58,11 @@ pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OptimismPayloadBuil parent_beacon_block_root: Some(B256::ZERO), }; - OptimismPayloadBuilderAttributes { + OpPayloadBuilderAttributes { payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), transactions: vec![], no_tx_pool: false, gas_limit: Some(30_000_000), + eip_1559_params: None, } } diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index b66f733ea7..6259361a41 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -39,8 +39,9 @@ alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true -revm-primitives.workspace = true +op-alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true # misc tracing.workspace = true @@ -49,11 +50,13 @@ sha2.workspace = true [features] optimism = [ - "reth-chainspec/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-optimism-evm/optimism", - "reth-revm/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-optimism-evm/optimism", + "revm/optimism", + "reth-execution-types/optimism", + "reth-optimism-consensus/optimism", + "reth-chainspec/optimism" ] opbnb = [ "reth-primitives/opbnb", diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 263f9aa092..a472b728ed 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -1,22 +1,22 @@ //! Optimism payload builder implementation. 
- use std::sync::Arc; -use alloy_primitives::U256; +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_eips::merge::BEACON_NONCE; +use alloy_primitives::{B64, U256}; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; -use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_chainspec::ChainSpecProvider; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; use reth_execution_types::ExecutionOutcome; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OptimismHardforks; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ - constants::BEACON_NONCE, proofs, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, Header, Receipt, TxType, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, Header, Receipt, TxType, }; use reth_provider::StateProviderFactory; use reth_revm::database::StateProviderDatabase; @@ -29,17 +29,17 @@ use revm::{ primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, DatabaseCommit, }; -use revm_primitives::calc_excess_blob_gas; use tracing::{debug, trace, warn}; use crate::{ error::OptimismPayloadBuilderError, - payload::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}, + payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, }; +use op_alloy_consensus::DepositTransaction; /// Optimism's payload builder #[derive(Debug, Clone, PartialEq, Eq)] -pub struct OptimismPayloadBuilder { +pub struct OpPayloadBuilder { /// The rollup's compute pending block configuration option. // TODO(clabby): Implement this feature. pub compute_pending_block: bool, @@ -47,8 +47,8 @@ pub struct OptimismPayloadBuilder { pub evm_config: EvmConfig, } -impl OptimismPayloadBuilder { - /// `OptimismPayloadBuilder` constructor. +impl OpPayloadBuilder { + /// `OpPayloadBuilder` constructor. pub const fn new(evm_config: EvmConfig) -> Self { Self { compute_pending_block: true, evm_config } } @@ -69,7 +69,7 @@ impl OptimismPayloadBuilder { self.compute_pending_block } } -impl OptimismPayloadBuilder +impl OpPayloadBuilder where EvmConfig: ConfigureEvmEnv
, { @@ -77,9 +77,9 @@ where /// (that has the `parent` as its parent). pub fn cfg_and_block_env( &self, - config: &PayloadConfig, + config: &PayloadConfig, parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { let next_attributes = NextBlockEnvAttributes { timestamp: config.attributes.timestamp(), suggested_fee_recipient: config.attributes.suggested_fee_recipient(), @@ -89,33 +89,29 @@ where } } -/// Implementation of the [`PayloadBuilder`] trait for [`OptimismPayloadBuilder`]. -impl PayloadBuilder for OptimismPayloadBuilder +/// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`]. +impl PayloadBuilder for OpPayloadBuilder where Client: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, EvmConfig: ConfigureEvm
, { - type Attributes = OptimismPayloadBuilderAttributes; - type BuiltPayload = OptimismBuiltPayload; + type Attributes = OpPayloadBuilderAttributes; + type BuiltPayload = OpBuiltPayload; fn try_build( &self, - args: BuildArguments, - ) -> Result, PayloadBuilderError> { - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - optimism_payload( - self.evm_config.clone(), - args, - cfg_env, - block_env, - self.compute_pending_block, - ) + args: BuildArguments, + ) -> Result, PayloadBuilderError> { + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; + optimism_payload(&self.evm_config, args, cfg_env, block_env, self.compute_pending_block) } fn on_missing_payload( &self, - _args: BuildArguments, + _args: BuildArguments, ) -> MissingPayloadBehaviour { // we want to await the job that's already in progress because that should be returned as // is, there's no benefit in racing another job @@ -128,7 +124,7 @@ where &self, client: &Client, config: PayloadConfig, - ) -> Result { + ) -> Result { let args = BuildArguments { client, config, @@ -138,29 +134,31 @@ where cancel: Default::default(), best_payload: None, }; - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - optimism_payload(self.evm_config.clone(), args, cfg_env, block_env, false)? + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; + optimism_payload(&self.evm_config, args, cfg_env, block_env, false)? .into_payload() .ok_or_else(|| PayloadBuilderError::MissingPayload) } } -/// Constructs an Ethereum transaction payload from the transactions sent through the +/// Constructs an Optimism transaction payload from the transactions sent through the /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in /// the payload attributes, the transaction pool will be ignored and the only transactions /// included in the payload will be those sent through the attributes. /// -/// Given build arguments including an Ethereum client, transaction pool, +/// Given build arguments including an Optimism client, transaction pool, /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. #[inline] pub(crate) fn optimism_payload( - evm_config: EvmConfig, - args: BuildArguments, + evm_config: &EvmConfig, + args: BuildArguments, initialized_cfg: CfgEnvWithHandlerCfg, initialized_block_env: BlockEnv, _compute_pending_block: bool, -) -> Result, PayloadBuilderError> +) -> Result, PayloadBuilderError> where EvmConfig: ConfigureEvm
, Client: StateProviderFactory + ChainSpecProvider, @@ -169,13 +167,13 @@ where let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let chain_spec = client.chain_spec(); - let state_provider = client.state_by_block_hash(config.parent_block.hash())?; + let state_provider = client.state_by_block_hash(config.parent_header.hash())?; let state = StateProviderDatabase::new(state_provider); let mut db = - State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); - let PayloadConfig { parent_block, attributes, extra_data } = config; + State::builder().with_database(cached_reads.as_db_mut(state)).with_bundle_update().build(); + let PayloadConfig { parent_header, attributes, mut extra_data } = config; - debug!(target: "payload_builder", id=%attributes.payload_attributes.payload_id(), parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); + debug!(target: "payload_builder", id=%attributes.payload_attributes.payload_id(), parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; let block_gas_limit: u64 = attributes.gas_limit.unwrap_or_else(|| { @@ -195,13 +193,11 @@ where let block_number = initialized_block_env.number.to::(); - let is_regolith = chain_spec.is_fork_active_at_timestamp( - OptimismHardfork::Regolith, - attributes.payload_attributes.timestamp, - ); + let is_regolith = + chain_spec.is_regolith_active_at_timestamp(attributes.payload_attributes.timestamp); // apply eip-4788 pre block contract call - let mut system_caller = SystemCaller::new(&evm_config, &chain_spec); + let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); system_caller .pre_block_beacon_root_contract_call( @@ -212,7 +208,7 @@ where ) .map_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_header=%parent_header.hash(), %err, "failed to apply beacon root contract call for payload" ); @@ -275,7 +271,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env( initialized_cfg.clone(), initialized_block_env.clone(), - evm_config.tx_env(&sequencer_tx), + evm_config.tx_env(sequencer_tx.as_signed(), sequencer_tx.signer()), ); let mut evm = evm_config.evm_with_env(&mut db, env); @@ -317,10 +313,7 @@ where // receipt hashes should be computed when set. The state transition process // ensures this is only set for post-Canyon deposit transactions. deposit_receipt_version: chain_spec - .is_fork_active_at_timestamp( - OptimismHardfork::Canyon, - attributes.payload_attributes.timestamp, - ) + .is_canyon_active_at_timestamp(attributes.payload_attributes.timestamp) .then_some(1), })); @@ -356,7 +349,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env( initialized_cfg.clone(), initialized_block_env.clone(), - evm_config.tx_env(&tx), + evm_config.tx_env(tx.as_signed(), tx.signer()), ); // Configure the environment for the block. 
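
The hunks above thread a fresh `EnvWithHandlerCfg` through `evm_config.evm_with_env` for every transaction and commit the resulting state into the bundle-tracking `State`. Below is a minimal, self-contained sketch of that execute-then-commit pattern, written against plain revm rather than reth's `ConfigureEvm` wrappers; the `InMemoryDB`, account values, and builder calls are illustrative and assume the `Evm::builder` API of the revm version this workspace pins.

use revm::{
    db::InMemoryDB,
    primitives::{AccountInfo, Address, ExecutionResult, TransactTo, U256},
    Evm,
};

fn main() {
    // Seed a caller account so the transfer below can pay for itself.
    let mut db = InMemoryDB::default();
    let caller = Address::with_last_byte(1);
    db.insert_account_info(
        caller,
        AccountInfo { balance: U256::from(1_000_000_000_u64), ..Default::default() },
    );

    // Layer the cfg/block/tx environments, as the builder does per transaction.
    let mut evm = Evm::builder()
        .with_db(db)
        .modify_tx_env(|tx| {
            tx.caller = caller;
            tx.transact_to = TransactTo::Call(Address::with_last_byte(2));
            tx.value = U256::from(1);
            tx.gas_price = U256::ZERO;
        })
        .build();

    // Execute and commit the state changes to the database in one step; the
    // payload builder does the equivalent for each included transaction, then
    // tracks cumulative gas and receipts.
    let result: ExecutionResult = evm.transact_commit().expect("execution succeeds");
    assert!(result.is_success());
}

The real loop additionally builds receipts, enforces the block gas limit, and surfaces errors such as the `BlobTransactionRejected` variant declared in `error.rs` below.
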
@@ -419,8 +412,8 @@ where } } - // check if we have a better block - if !is_better_payload(best_payload.as_ref(), total_fees) { + // check if we have a better block, but only if we included transactions from the pool + if !attributes.no_tx_pool && !is_better_payload(best_payload.as_ref(), total_fees) { // can skip building the block return Ok(BuildOutcome::Aborted { fees: total_fees, cached_reads }) } @@ -429,7 +422,7 @@ &mut db, &chain_spec, attributes.payload_attributes.timestamp, - attributes.clone().payload_attributes.withdrawals, + attributes.payload_attributes.withdrawals.clone(), )?; // merge all transitions into bundle state, this would apply the withdrawal balance changes @@ -452,10 +445,9 @@ // calculate the state root let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); let (state_root, trie_output) = { - let state_provider = db.database.0.inner.borrow_mut(); - state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { + db.database.inner().state_root_with_updates(hashed_state.clone()).inspect_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_header=%parent_header.hash(), %err, "failed to calculate state root for payload" ); @@ -465,28 +457,29 @@ // create the block header let transactions_root = proofs::calculate_transaction_root(&executed_txs); - // initialize empty blob sidecars. There are no blob transactions on L2. - let blob_sidecars = Vec::new(); - let mut excess_blob_gas = None; - let mut blob_gas_used = None; - - // only determine cancun fields when active - if chain_spec.is_cancun_active_at_timestamp(attributes.payload_attributes.timestamp) { - excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { - let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); - let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); - Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) + // OP doesn't support blobs/EIP-4844. + // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions + // The fields must be `Some(0)` or `None` depending on the hardfork, so the computed block hash matches. 
+ let (excess_blob_gas, blob_gas_used) = + if chain_spec.is_ecotone_active_at_timestamp(attributes.payload_attributes.timestamp) { + (Some(0), Some(0)) } else { - // for the first post-fork block, both parent.blob_gas_used and - // parent.excess_blob_gas are evaluated as 0 - Some(calc_excess_blob_gas(0, 0)) + (None, None) }; - blob_gas_used = Some(0); + let is_holocene = + chain_spec.is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp); + + if is_holocene { + extra_data = attributes + .get_holocene_extra_data( + chain_spec.base_fee_params_at_timestamp(attributes.payload_attributes.timestamp), + ) + .map_err(PayloadBuilderError::other)?; } let header = Header { - parent_hash: parent_block.hash(), + parent_hash: parent_header.hash(), ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: initialized_block_env.coinbase, state_root, @@ -498,27 +491,21 @@ where mix_hash: attributes.payload_attributes.prev_randao, nonce: BEACON_NONCE.into(), base_fee_per_gas: Some(base_fee), - number: parent_block.number + 1, + number: parent_header.number + 1, gas_limit: block_gas_limit, difficulty: U256::ZERO, gas_used: cumulative_gas_used, extra_data, parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, blob_gas_used, - excess_blob_gas: excess_blob_gas.map(Into::into), - requests_root: None, + excess_blob_gas, + requests_hash: None, }; // seal the block let block = Block { header, - body: BlockBody { - transactions: executed_txs, - ommers: vec![], - withdrawals, - sidecars: None, - requests: None, - }, + body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals, sidecars: None }, }; let sealed_block = block.seal_slow(); @@ -533,7 +520,9 @@ where trie: Arc::new(trie_output), }; - let mut payload = OptimismBuiltPayload::new( + let no_tx_pool = attributes.no_tx_pool; + + let payload = OpBuiltPayload::new( attributes.payload_attributes.id, sealed_block, total_fees, @@ -542,8 +531,21 @@ where Some(executed), ); - // extend the payload with the blob sidecars from the executed txs - payload.extend_sidecars(blob_sidecars); + if no_tx_pool { + // if `no_tx_pool` is set only transactions from the payload attributes will be included in + // the payload. In other words, the payload is deterministic and we can freeze it once we've + // successfully built it. 
+ Ok(BuildOutcome::Freeze(payload)) + } else { + Ok(BuildOutcome::Better { payload, cached_reads }) + } +} + +/// Extracts the Holocene 1559 parameters from the encoded form: +/// +pub fn decode_eip_1559_params(eip_1559_params: B64) -> (u32, u32) { + let denominator: [u8; 4] = eip_1559_params.0[..4].try_into().expect("sufficient length"); + let elasticity: [u8; 4] = eip_1559_params.0[4..8].try_into().expect("sufficient length"); - Ok(BuildOutcome::Better { payload, cached_reads }) + (u32::from_be_bytes(elasticity), u32::from_be_bytes(denominator)) } diff --git a/crates/optimism/payload/src/error.rs b/crates/optimism/payload/src/error.rs index 2016fdc6dd..ce5f584a1c 100644 --- a/crates/optimism/payload/src/error.rs +++ b/crates/optimism/payload/src/error.rs @@ -21,3 +21,17 @@ pub enum OptimismPayloadBuilderError { #[error("blob transaction included in sequencer block")] BlobTransactionRejected, } + +/// Error type for EIP-1559 parameters +#[derive(Debug, thiserror::Error)] +pub enum EIP1559ParamError { + /// No EIP-1559 parameters provided + #[error("No EIP-1559 parameters provided")] + NoEIP1559Params, + /// Denominator overflow + #[error("Denominator overflow")] + DenominatorOverflow, + /// Elasticity overflow + #[error("Elasticity overflow")] + ElasticityOverflow, +} diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index c06b49c537..8447026d78 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -12,7 +12,7 @@ #![cfg(feature = "optimism")] pub mod builder; -pub use builder::OptimismPayloadBuilder; +pub use builder::OpPayloadBuilder; pub mod error; pub mod payload; -pub use payload::{OpPayloadAttributes, OptimismBuiltPayload, OptimismPayloadBuilderAttributes}; +pub use payload::{OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes}; diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 122c2fde52..3a7d87acc4 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -2,8 +2,9 @@ //! Optimism builder support -use alloy_eips::eip2718::Decodable2718; -use alloy_primitives::{Address, B256, U256}; +use crate::{builder::decode_eip_1559_params, error::EIP1559ParamError}; +use alloy_eips::{eip1559::BaseFeeParams, eip2718::Decodable2718, eip7685::Requests}; +use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; /// Re-export for use in downstream arguments. 
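
`decode_eip_1559_params` above and `get_holocene_extra_data` in the next hunk share one byte layout: the `B64` packs the base-fee max-change denominator into bytes 0..4 and the elasticity multiplier into bytes 4..8, both big-endian, and the Holocene header `extra_data` prepends a zero version byte. A self-contained round-trip sketch; `encode_eip_1559_params` is a hypothetical helper, not part of this diff:

use alloy_primitives::{Bytes, B64};

/// Hypothetical inverse of `decode_eip_1559_params`: pack the denominator and
/// then the elasticity, big-endian, into a `B64`.
fn encode_eip_1559_params(denominator: u32, elasticity: u32) -> B64 {
    let mut buf = [0u8; 8];
    buf[..4].copy_from_slice(&denominator.to_be_bytes());
    buf[4..].copy_from_slice(&elasticity.to_be_bytes());
    B64::from(buf)
}

fn main() {
    let params = encode_eip_1559_params(80, 60);

    // Mirrors `decode_eip_1559_params`, which returns (elasticity, denominator).
    let denominator = u32::from_be_bytes(params.0[..4].try_into().unwrap());
    let elasticity = u32::from_be_bytes(params.0[4..8].try_into().unwrap());
    assert_eq!((elasticity, denominator), (60, 80));

    // Holocene `extra_data` is a zero version byte followed by the packed
    // params, matching `get_holocene_extra_data` and its tests further down.
    let mut extra_data = [0u8; 9];
    extra_data[1..].copy_from_slice(params.as_slice());
    assert_eq!(Bytes::copy_from_slice(&extra_data).as_ref(), &[0u8, 0, 0, 0, 80, 0, 0, 0, 60][..]);
}
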
@@ -18,14 +19,13 @@ use reth_primitives::{ transaction::WithEncoded, BlobTransactionSidecar, SealedBlock, TransactionSigned, Withdrawals, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, - convert_block_to_payload_field_v2, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; use std::sync::Arc; /// Optimism Payload Builder Attributes -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct OptimismPayloadBuilderAttributes { +#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub struct OpPayloadBuilderAttributes { /// Inner ethereum payload builder attributes pub payload_attributes: EthPayloadBuilderAttributes, /// `NoTxPool` option for the generated payload @@ -35,17 +35,57 @@ pub struct OptimismPayloadBuilderAttributes { pub transactions: Vec<WithEncoded<TransactionSigned>>, /// The gas limit for the generated payload pub gas_limit: Option<u64>, + /// EIP-1559 parameters for the generated payload + pub eip_1559_params: Option<B64>, } -impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { +impl OpPayloadBuilderAttributes { + /// Encodes the `eip1559` parameters into the payload's Holocene `extra_data` bytes. + pub fn get_holocene_extra_data( + &self, + default_base_fee_params: BaseFeeParams, + ) -> Result<Bytes, EIP1559ParamError> { + let eip_1559_params = self.eip_1559_params.ok_or(EIP1559ParamError::NoEIP1559Params)?; + + let mut extra_data = [0u8; 9]; + // If the EIP-1559 params are zero, fall back to the default (Canyon) base fee + // params; otherwise use the values encoded in the attributes + if eip_1559_params.is_zero() { + // Try casting max_change_denominator to u32 + let max_change_denominator: u32 = (default_base_fee_params.max_change_denominator) + .try_into() + .map_err(|_| EIP1559ParamError::DenominatorOverflow)?; + + // Try casting elasticity_multiplier to u32 + let elasticity_multiplier: u32 = (default_base_fee_params.elasticity_multiplier) + .try_into() + .map_err(|_| EIP1559ParamError::ElasticityOverflow)?; + + // Write the denominator and then the elasticity after the version byte + extra_data[1..5].copy_from_slice(&max_change_denominator.to_be_bytes()); + extra_data[5..9].copy_from_slice(&elasticity_multiplier.to_be_bytes()); + } else { + let (elasticity, denominator) = decode_eip_1559_params(eip_1559_params); + extra_data[1..5].copy_from_slice(&denominator.to_be_bytes()); + extra_data[5..9].copy_from_slice(&elasticity.to_be_bytes()); + } + Ok(Bytes::copy_from_slice(&extra_data)) + } +} + +impl PayloadBuilderAttributes for OpPayloadBuilderAttributes { type RpcPayloadAttributes = OpPayloadAttributes; type Error = alloy_rlp::Error; /// Creates a new payload builder for the given parent block and the attributes. /// /// Derives the unique [`PayloadId`] for the given parent and attributes - fn try_new(parent: B256, attributes: OpPayloadAttributes) -> Result<Self, Self::Error> { - let id = payload_id_optimism(&parent, &attributes); + fn try_new( + parent: B256, + attributes: OpPayloadAttributes, + version: u8, + ) -> Result<Self, Self::Error> { + let id = payload_id_optimism(&parent, &attributes, version); let transactions = attributes .transactions @@ -79,6 +119,7 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { no_tx_pool: attributes.no_tx_pool.unwrap_or_default(), transactions, gas_limit: attributes.gas_limit, + eip_1559_params: attributes.eip_1559_params, }) } @@ -113,7 +154,7 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { /// Contains the built payload. 
#[derive(Debug, Clone)] -pub struct OptimismBuiltPayload { +pub struct OpBuiltPayload { /// Identifier of the payload pub(crate) id: PayloadId, /// The built block @@ -128,19 +169,19 @@ pub struct OptimismBuiltPayload { /// The rollup's chainspec. pub(crate) chain_spec: Arc, /// The payload attributes. - pub(crate) attributes: OptimismPayloadBuilderAttributes, + pub(crate) attributes: OpPayloadBuilderAttributes, } // === impl BuiltPayload === -impl OptimismBuiltPayload { +impl OpBuiltPayload { /// Initializes the payload with the given initial block. pub const fn new( id: PayloadId, block: SealedBlock, fees: U256, chain_spec: Arc, - attributes: OptimismPayloadBuilderAttributes, + attributes: OpPayloadBuilderAttributes, executed_block: Option, ) -> Self { Self { id, block, executed_block, fees, sidecars: Vec::new(), chain_spec, attributes } @@ -167,7 +208,7 @@ impl OptimismBuiltPayload { } } -impl BuiltPayload for OptimismBuiltPayload { +impl BuiltPayload for OpBuiltPayload { fn block(&self) -> &SealedBlock { &self.block } @@ -179,9 +220,13 @@ impl BuiltPayload for OptimismBuiltPayload { fn executed_block(&self) -> Option { self.executed_block.clone() } + + fn requests(&self) -> Option { + None + } } -impl BuiltPayload for &OptimismBuiltPayload { +impl BuiltPayload for &OpBuiltPayload { fn block(&self) -> &SealedBlock { (**self).block() } @@ -193,27 +238,31 @@ impl BuiltPayload for &OptimismBuiltPayload { fn executed_block(&self) -> Option { self.executed_block.clone() } + + fn requests(&self) -> Option { + None + } } // V1 engine_getPayloadV1 response -impl From for ExecutionPayloadV1 { - fn from(value: OptimismBuiltPayload) -> Self { +impl From for ExecutionPayloadV1 { + fn from(value: OpBuiltPayload) -> Self { block_to_payload_v1(value.block) } } // V2 engine_getPayloadV2 response -impl From for ExecutionPayloadEnvelopeV2 { - fn from(value: OptimismBuiltPayload) -> Self { - let OptimismBuiltPayload { block, fees, .. } = value; +impl From for ExecutionPayloadEnvelopeV2 { + fn from(value: OpBuiltPayload) -> Self { + let OpBuiltPayload { block, fees, .. } = value; Self { block_value: fees, execution_payload: convert_block_to_payload_field_v2(block) } } } -impl From for OpExecutionPayloadEnvelopeV3 { - fn from(value: OptimismBuiltPayload) -> Self { - let OptimismBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; +impl From for OpExecutionPayloadEnvelopeV3 { + fn from(value: OpBuiltPayload) -> Self { + let OpBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; let parent_beacon_block_root = if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp()) { @@ -238,9 +287,9 @@ impl From for OpExecutionPayloadEnvelopeV3 { } } } -impl From for OpExecutionPayloadEnvelopeV4 { - fn from(value: OptimismBuiltPayload) -> Self { - let OptimismBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; +impl From for OpExecutionPayloadEnvelopeV4 { + fn from(value: OpBuiltPayload) -> Self { + let OpBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. 
} = value; let parent_beacon_block_root = if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp()) { @@ -249,7 +298,7 @@ B256::ZERO }; Self { - execution_payload: block_to_payload_v4(block), + execution_payload: block_to_payload_v3(block), block_value: fees, // From the engine API spec: // @@ -262,6 +311,7 @@ should_override_builder: false, blobs_bundle: sidecars.into_iter().map(Into::into).collect::<Vec<_>>().into(), parent_beacon_block_root, + execution_requests: vec![], } } } @@ -269,7 +319,11 @@ /// Generates the payload id for the configured payload from the [`OpPayloadAttributes`]. /// /// Returns an 8-byte identifier by hashing the payload components with sha256. -pub(crate) fn payload_id_optimism(parent: &B256, attributes: &OpPayloadAttributes) -> PayloadId { +pub(crate) fn payload_id_optimism( + parent: &B256, + attributes: &OpPayloadAttributes, + payload_version: u8, +) -> PayloadId { use sha2::Digest; let mut hasher = sha2::Sha256::new(); hasher.update(parent.as_slice()); @@ -287,15 +341,89 @@ } let no_tx_pool = attributes.no_tx_pool.unwrap_or_default(); - hasher.update([no_tx_pool as u8]); - if let Some(txs) = &attributes.transactions { - txs.iter().for_each(|tx| hasher.update(tx)); + if no_tx_pool || attributes.transactions.as_ref().is_some_and(|txs| !txs.is_empty()) { + hasher.update([no_tx_pool as u8]); + let txs_len = attributes.transactions.as_ref().map(|txs| txs.len()).unwrap_or_default(); + hasher.update(&txs_len.to_be_bytes()[..]); + if let Some(txs) = &attributes.transactions { + for tx in txs { + // hash the keccak digest of the raw bytes; decoding the transactions + // here just to recompute their hashes isn't worth it + let tx_hash = keccak256(tx); + hasher.update(tx_hash) + } + } } if let Some(gas_limit) = attributes.gas_limit { hasher.update(gas_limit.to_be_bytes()); } - let out = hasher.finalize(); + if let Some(eip_1559_params) = attributes.eip_1559_params { + hasher.update(eip_1559_params.as_slice()); + } + + let mut out = hasher.finalize(); + out[0] = payload_version; PayloadId::new(out.as_slice()[..8].try_into().expect("sufficient length")) } + +#[cfg(test)] +mod tests { + use super::*; + use crate::OpPayloadAttributes; + use alloy_primitives::{address, b256, bytes, FixedBytes}; + use alloy_rpc_types_engine::PayloadAttributes; + use reth_payload_primitives::EngineApiMessageVersion; + use std::str::FromStr; + + #[test] + fn test_payload_id_parity_op_geth() { + // INFO rollup_boost::server:received fork_choice_updated_v3 from builder and l2_client + // payload_id_builder="0x6ef26ca02318dcf9" payload_id_l2="0x03d2dae446d2a86a" + let expected = + PayloadId::new(FixedBytes::<8>::from_str("0x03d2dae446d2a86a").unwrap().into()); + let attrs = OpPayloadAttributes { + payload_attributes: PayloadAttributes { + timestamp: 1728933301, + prev_randao: b256!("9158595abbdab2c90635087619aa7042bbebe47642dfab3c9bfb934f6b082765"), + suggested_fee_recipient: address!("4200000000000000000000000000000000000011"), + withdrawals: Some([].into()), + parent_beacon_block_root: b256!("8fe0193b9bf83cb7e5a08538e494fecc23046aab9a497af3704f4afdae3250ff").into() + }, + transactions: 
Some([bytes!("7ef8f8a0dc19cfa777d90980e4875d0a548a881baaa3f83f14d1bc0d3038bc329350e54194deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000f424000000000000000000000000300000000670d6d890000000000000125000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000014bf9181db6e381d4384bbf69c48b0ee0eed23c6ca26143c6d2544f9d39997a590000000000000000000000007f83d659683caf2767fd3c720981d51f5bc365bc")].into()), + no_tx_pool: None, + gas_limit: Some(30000000), + eip_1559_params: None, + }; + + // Reth's `PayloadId` should match op-geth's `PayloadId`. This fails + assert_eq!( + expected, + payload_id_optimism( + &b256!("3533bf30edaf9505d0810bf475cbe4e5f4b9889904b9845e83efdeab4e92eb1e"), + &attrs, + EngineApiMessageVersion::V3 as u8 + ) + ); + } + + #[test] + fn test_get_extra_data_post_holocene() { + let attributes = OpPayloadBuilderAttributes { + eip_1559_params: Some(B64::from_str("0x0000000800000008").unwrap()), + ..Default::default() + }; + let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60)); + assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 8, 0, 0, 0, 8])); + } + + #[test] + fn test_get_extra_data_post_holocene_default() { + let attributes = + OpPayloadBuilderAttributes { eip_1559_params: Some(B64::ZERO), ..Default::default() }; + let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60)); + assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 80, 0, 0, 0, 60])); + } +} diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 73a3bab1e4..a2d4c20a8b 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -13,5 +13,5 @@ workspace = true [dependencies] reth-primitives.workspace = true -reth-primitives-traits.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true diff --git a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs index 4ece12ad67..7153ae3155 100644 --- a/crates/optimism/primitives/src/bedrock.rs +++ b/crates/optimism/primitives/src/bedrock.rs @@ -1,8 +1,8 @@ //! OP mainnet bedrock related data. 
+use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, B256, B64, U256}; use reth_primitives::Header; -use reth_primitives_traits::constants::EMPTY_OMMER_ROOT_HASH; /// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985, /// replayed in blocks: @@ -73,10 +73,10 @@ pub const BEDROCK_HEADER: Header = Header { nonce: B64::ZERO, number: 105235063, parent_hash: b256!("21a168dfa5e727926063a28ba16fd5ee84c814e847c81a699c7a0ea551e4ca50"), - receipts_root: b256!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + receipts_root: EMPTY_ROOT_HASH, state_root: b256!("920314c198da844a041d63bf6cbe8b59583165fd2229d1b3f599da812fd424cb"), timestamp: 1686068903, - transactions_root: b256!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + transactions_root: EMPTY_ROOT_HASH, ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: address!("4200000000000000000000000000000000000011"), withdrawals_root: None, @@ -85,7 +85,7 @@ pub const BEDROCK_HEADER: Header = Header { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None, + requests_hash: None, }; /// Bedrock total difficulty on Optimism Mainnet. diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index a0165e357f..d9409c52a6 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -38,6 +38,7 @@ alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types.workspace = true +alloy-consensus.workspace = true op-alloy-network.workspace = true op-alloy-rpc-types.workspace = true op-alloy-consensus.workspace = true @@ -55,17 +56,19 @@ serde_json.workspace = true # misc thiserror.workspace = true tracing.workspace = true -derive_more.workspace = true +derive_more = { workspace = true, features = ["constructor", "deref"] } [dev-dependencies] reth-optimism-chainspec.workspace = true [features] optimism = [ - "reth-optimism-evm/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "revm/optimism", + "reth-optimism-evm/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "revm/optimism", + "reth-optimism-consensus/optimism", + "reth-chainspec/optimism" ] opbnb = [ diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index d5066be0c6..85f36570f2 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -4,15 +4,13 @@ use alloy_rpc_types::BlockId; use op_alloy_network::Network; use op_alloy_rpc_types::OpTransactionReceipt; use reth_chainspec::ChainSpecProvider; -use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_chainspec::OpChainSpec; use reth_primitives::TransactionMeta; -use reth_provider::{BlockReaderIdExt, HeaderProvider}; +use reth_provider::HeaderProvider; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, - RpcReceipt, + RpcNodeCore, RpcReceipt, }; -use reth_rpc_eth_types::EthStateCache; use crate::{OpEthApi, OpEthApiError, OpReceiptBuilder}; @@ -22,13 +20,8 @@ where Error = OpEthApiError, NetworkTypes: Network, >, - N: FullNodeComponents>, + N: RpcNodeCore + HeaderProvider>, { - #[inline] - fn provider(&self) -> impl HeaderProvider { - self.inner.provider() - } - async fn block_receipts( &self, block_id: BlockId, @@ -45,7 +38,7 @@ where let block = block.unseal(); let l1_block_info = - 
reth_optimism_evm::extract_l1_info(&block).map_err(OpEthApiError::from)?; + reth_optimism_evm::extract_l1_info(&block.body).map_err(OpEthApiError::from)?; return block .body @@ -85,15 +78,6 @@ where impl LoadBlock for OpEthApi where Self: LoadPendingBlock + SpawnBlocking, - N: FullNodeComponents, + N: RpcNodeCore, { - #[inline] - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } } diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index f1c10e6f17..9ddf7b3855 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,15 +1,13 @@ use alloy_primitives::{Bytes, TxKind, U256}; use alloy_rpc_types_eth::transaction::TransactionRequest; -use reth_chainspec::EthereumHardforks; use reth_evm::ConfigureEvm; -use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_primitives::{ revm_primitives::{BlockEnv, OptimismFields, TxEnv}, Header, }; use reth_rpc_eth_api::{ - helpers::{Call, EthCall, LoadState, SpawnBlocking}, - FromEthApiError, IntoEthApiError, + helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, + FromEthApiError, IntoEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{revm_utils::CallFees, RpcInvalidTransactionError}; @@ -17,16 +15,16 @@ use crate::{OpEthApi, OpEthApiError}; impl EthCall for OpEthApi where - Self: Call, - N: FullNodeComponents>, + Self: Call + LoadPendingBlock, + N: RpcNodeCore, { } impl Call for OpEthApi where - Self: LoadState + SpawnBlocking, + Self: LoadState> + SpawnBlocking, Self::Error: From, - N: FullNodeComponents, + N: RpcNodeCore, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -38,11 +36,6 @@ where self.inner.max_simulate_blocks() } - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } - fn create_txn_env( &self, block_env: &BlockEnv, diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 57ce44100f..dc6e8e59fa 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -14,14 +14,13 @@ use std::{fmt, sync::Arc}; use alloy_primitives::U256; use derive_more::Deref; use op_alloy_network::Optimism; -use reth_chainspec::EthereumHardforks; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_node_api::{BuilderProvider, FullNodeComponents, FullNodeTypes, NodeTypes}; use reth_node_builder::EthApiBuilderCtx; use reth_primitives::Header; use reth_provider::{ - BlockIdReader, BlockNumReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, + BlockNumReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, StageCheckpointReader, StateProviderFactory, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; @@ -30,7 +29,7 @@ use reth_rpc_eth_api::{ AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadBlock, LoadFee, LoadState, SpawnBlocking, Trace, }, - EthApiTypes, + EthApiTypes, RpcNodeCore, RpcNodeCoreExt, }; use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; use reth_tasks::{ @@ -38,16 +37,15 @@ use reth_tasks::{ TaskSpawner, }; use reth_transaction_pool::TransactionPool; -use tokio::sync::OnceCell; -use crate::{OpEthApiError, OpTxBuilder, SequencerClient}; +use crate::{OpEthApiError, SequencerClient}; /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. pub type EthApiNodeBackend = EthApiInner< - ::Provider, - ::Pool, - ::Network, - ::Evm, + ::Provider, + ::Pool, + ::Network, + ::Evm, >; /// OP-Reth `Eth` API implementation. @@ -60,20 +58,24 @@ pub type EthApiNodeBackend = EthApiInner< /// /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. -#[derive(Clone, Deref)] -pub struct OpEthApi { +#[derive(Deref, Clone)] +pub struct OpEthApi { /// Gateway to node's core components. #[deref] inner: Arc>, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP /// network. - sequencer_client: Arc>, + sequencer_client: Option, } -impl OpEthApi { +impl OpEthApi +where + N: RpcNodeCore< + Provider: BlockReaderIdExt + ChainSpecProvider + CanonStateSubscriptions + Clone + 'static, + >, +{ /// Creates a new instance for given context. 
- #[allow(clippy::type_complexity)] - pub fn with_spawner(ctx: &EthApiBuilderCtx) -> Self { + pub fn new(ctx: &EthApiBuilderCtx, sequencer_http: Option) -> Self { let blocking_task_pool = BlockingTaskPool::build().expect("failed to build blocking task pool"); @@ -93,38 +95,73 @@ impl OpEthApi { ctx.config.proof_permits, ); - Self { inner: Arc::new(inner), sequencer_client: Arc::new(OnceCell::new()) } + Self { inner: Arc::new(inner), sequencer_client: sequencer_http.map(SequencerClient::new) } } } impl EthApiTypes for OpEthApi where Self: Send + Sync, - N: FullNodeComponents, + N: RpcNodeCore, { type Error = OpEthApiError; type NetworkTypes = Optimism; - type TransactionCompat = OpTxBuilder; + type TransactionCompat = Self; + + fn tx_resp_builder(&self) -> &Self::TransactionCompat { + self + } } -impl EthApiSpec for OpEthApi +impl RpcNodeCore for OpEthApi where - Self: Send + Sync, - N: FullNodeComponents>, + N: RpcNodeCore, { + type Provider = N::Provider; + type Pool = N::Pool; + type Evm = ::Evm; + type Network = ::Network; + #[inline] - fn provider( - &self, - ) -> impl ChainSpecProvider + BlockNumReader + StageCheckpointReader - { - self.inner.provider() + fn pool(&self) -> &Self::Pool { + self.inner.pool() } #[inline] - fn network(&self) -> impl NetworkInfo { + fn evm_config(&self) -> &Self::Evm { + self.inner.evm_config() + } + + #[inline] + fn network(&self) -> &Self::Network { self.inner.network() } + #[inline] + fn provider(&self) -> &Self::Provider { + self.inner.provider() + } +} + +impl RpcNodeCoreExt for OpEthApi +where + N: RpcNodeCore, +{ + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } +} + +impl EthApiSpec for OpEthApi +where + N: RpcNodeCore< + Provider: ChainSpecProvider + + BlockNumReader + + StageCheckpointReader, + Network: NetworkInfo, + >, +{ #[inline] fn starting_block(&self) -> U256 { self.inner.starting_block() @@ -139,7 +176,7 @@ where impl SpawnBlocking for OpEthApi where Self: Send + Sync + Clone + 'static, - N: FullNodeComponents, + N: RpcNodeCore, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -159,23 +196,16 @@ where impl LoadFee for OpEthApi where - Self: LoadBlock, - N: FullNodeComponents>, + Self: LoadBlock, + N: RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, + >, { #[inline] - fn provider( - &self, - ) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn gas_oracle(&self) -> &GasPriceOracle { + fn gas_oracle(&self) -> &GasPriceOracle { self.inner.gas_oracle() } @@ -185,33 +215,18 @@ where } } -impl LoadState for OpEthApi -where - Self: Send + Sync + Clone, - N: FullNodeComponents>, +impl LoadState for OpEthApi where + N: RpcNodeCore< + Provider: StateProviderFactory + ChainSpecProvider, + Pool: TransactionPool, + > { - #[inline] - fn provider( - &self, - ) -> impl StateProviderFactory + ChainSpecProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } } impl EthState for OpEthApi where Self: LoadState + SpawnBlocking, - N: FullNodeComponents, + N: RpcNodeCore, { #[inline] fn max_proof_window(&self) -> u64 { @@ -222,43 +237,27 @@ where impl EthFees for OpEthApi where Self: LoadFee, - N: FullNodeComponents, + N: RpcNodeCore, { } impl Trace for OpEthApi where - Self: LoadState, - N: 
FullNodeComponents, + Self: LoadState>, + N: RpcNodeCore, { - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } } impl AddDevSigners for OpEthApi where - N: FullNodeComponents>, + N: RpcNodeCore, { fn with_dev_accounts(&self) { - *self.signers().write() = DevSigner::random_signers(20) - } -} - -impl BuilderProvider for OpEthApi -where - Self: Send, - N: FullNodeComponents, -{ - type Ctx<'a> = &'a EthApiBuilderCtx; - - fn builder() -> Box Fn(Self::Ctx<'a>) -> Self + Send> { - Box::new(Self::with_spawner) + *self.inner.signers().write() = DevSigner::random_signers(20) } } -impl fmt::Debug for OpEthApi { +impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 5b716f3932..c90b3f7b79 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -1,20 +1,18 @@ //! Loads OP pending block for a RPC response. +use alloy_eips::BlockNumberOrTag; use alloy_primitives::{BlockNumber, B256}; -use reth_chainspec::EthereumHardforks; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; -use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_primitives::{ - revm_primitives::BlockEnv, BlockNumberOrTag, Header, Receipt, SealedBlockWithSenders, -}; +use reth_primitives::{revm_primitives::BlockEnv, Header, Receipt, SealedBlockWithSenders}; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, ReceiptProvider, StateProviderFactory, }; use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, - FromEthApiError, + FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_transaction_pool::TransactionPool; @@ -24,33 +22,20 @@ use crate::OpEthApi; impl LoadPendingBlock for OpEthApi where Self: SpawnBlocking, - N: FullNodeComponents>, + N: RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, + Pool: TransactionPool, + Evm: ConfigureEvm
, + >, { - #[inline] - fn provider( - &self, - ) -> impl BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider - + StateProviderFactory { - self.inner.provider() - } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } - #[inline] fn pending_block(&self) -> &tokio::sync::Mutex> { self.inner.pending_block() } - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } - /// Returns the locally built pending block async fn local_pending_block( &self, diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index c8cd66c69a..5b188907ac 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -1,10 +1,11 @@ //! Loads and formats OP receipt RPC response. use alloy_eips::eip2718::Encodable2718; -use alloy_rpc_types::{AnyReceiptEnvelope, Log, TransactionReceipt}; -use op_alloy_consensus::{OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope}; +use alloy_rpc_types::{Log, TransactionReceipt}; +use op_alloy_consensus::{ + DepositTransaction, OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope, +}; use op_alloy_rpc_types::{receipt::L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; -use reth_chainspec::ChainSpec; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::RethL1BlockInfo; @@ -12,7 +13,7 @@ use reth_optimism_forks::OptimismHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; use reth_provider::ChainSpecProvider; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; use crate::{OpEthApi, OpEthApiError}; @@ -21,18 +22,14 @@ where Self: Send + Sync, N: FullNodeComponents>, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - async fn build_transaction_receipt( &self, tx: TransactionSigned, meta: TransactionMeta, receipt: Receipt, ) -> Result, Self::Error> { - let (block, receipts) = LoadReceipt::cache(self) + let (block, receipts) = self + .cache() .get_block_and_receipts(meta.block_hash) .await .map_err(Self::Error::from_eth_err)? @@ -40,9 +37,8 @@ where meta.block_hash.into(), )))?; - let block = block.unseal(); let l1_block_info = - reth_optimism_evm::extract_l1_info(&block).map_err(OpEthApiError::from)?; + reth_optimism_evm::extract_l1_info(&block.body).map_err(OpEthApiError::from)?; Ok(OpReceiptBuilder::new( &self.inner.provider().chain_spec(), @@ -56,25 +52,6 @@ where } } -impl OpEthApi -where - N: FullNodeComponents>, -{ - /// Builds a receipt w.r.t. chain spec. - pub fn build_op_receipt_meta( - &self, - tx: &TransactionSigned, - l1_block_info: revm::L1BlockInfo, - receipt: &Receipt, - ) -> Result { - Ok(OpReceiptFieldsBuilder::default() - .l1_block_info(&self.inner.provider().chain_spec(), tx, l1_block_info)? - .deposit_nonce(receipt.deposit_nonce) - .deposit_version(receipt.deposit_receipt_version) - .build()) - } -} - /// L1 fee and data gas for a non-deposit transaction, or deposit nonce and receipt version for a /// deposit transaction. #[derive(Debug, Default, Clone)] @@ -114,7 +91,7 @@ impl OpReceiptFieldsBuilder { /// Applies [`L1BlockInfo`](revm::L1BlockInfo). pub fn l1_block_info( mut self, - chain_spec: &ChainSpec, + chain_spec: &OpChainSpec, tx: &TransactionSigned, l1_block_info: revm::L1BlockInfo, ) -> Result { @@ -195,9 +172,7 @@ impl OpReceiptFieldsBuilder { #[derive(Debug)] pub struct OpReceiptBuilder { /// Core receipt, has all the fields of an L1 receipt and is the basis for the OP receipt. - pub core_receipt: TransactionReceipt>, - /// Transaction type. - pub tx_type: TxType, + pub core_receipt: TransactionReceipt>, /// Additional OP receipt fields. 
pub op_receipt_fields: OpTransactionReceiptFields, } @@ -212,11 +187,29 @@ impl OpReceiptBuilder { all_receipts: &[Receipt], l1_block_info: revm::L1BlockInfo, ) -> Result<Self, OpEthApiError> { - let ReceiptBuilder { base: core_receipt, .. } = - ReceiptBuilder::new(transaction, meta, receipt, all_receipts) - .map_err(OpEthApiError::Eth)?; - - let tx_type = transaction.tx_type(); + let core_receipt = + build_receipt(transaction, meta, receipt, all_receipts, |receipt_with_bloom| { + match receipt.tx_type { + TxType::Legacy => OpReceiptEnvelope::<Log>::Legacy(receipt_with_bloom), + TxType::Eip2930 => OpReceiptEnvelope::<Log>::Eip2930(receipt_with_bloom), + TxType::Eip1559 => OpReceiptEnvelope::<Log>::Eip1559(receipt_with_bloom), + TxType::Eip4844 => { + // TODO: unreachable + OpReceiptEnvelope::<Log>::Eip1559(receipt_with_bloom) + } + TxType::Eip7702 => OpReceiptEnvelope::<Log>::Eip7702(receipt_with_bloom), + TxType::Deposit => { + OpReceiptEnvelope::<Log>::Deposit(OpDepositReceiptWithBloom::<Log> { + receipt: OpDepositReceipt::<Log> { + inner: receipt_with_bloom.receipt, + deposit_nonce: receipt.deposit_nonce, + deposit_receipt_version: receipt.deposit_receipt_version, + }, + logs_bloom: receipt_with_bloom.logs_bloom, + }) + } + } + })?; let mut op_receipt_fields = OpReceiptFieldsBuilder::default() .l1_block_info(chain_spec, transaction, l1_block_info)? @@ -230,71 +223,15 @@ op_receipt_fields.l1_block_info.l1_fee = Some(0); } - Ok(Self { core_receipt, tx_type, op_receipt_fields }) + Ok(Self { core_receipt, op_receipt_fields }) } /// Builds [`OpTransactionReceipt`] by combining core (l1) receipt fields and additional OP /// receipt fields. pub fn build(self) -> OpTransactionReceipt { - let Self { core_receipt, tx_type, op_receipt_fields } = self; - - let OpTransactionReceiptFields { l1_block_info, deposit_nonce, deposit_receipt_version } = - op_receipt_fields; - - let TransactionReceipt { - inner: AnyReceiptEnvelope { inner: receipt_with_bloom, .. }, - transaction_hash, - transaction_index, - block_hash, - block_number, - gas_used, - effective_gas_price, - blob_gas_used, - blob_gas_price, - from, - to, - contract_address, - state_root, - authorization_list, - } = core_receipt; - - let inner = match tx_type { - TxType::Legacy => OpReceiptEnvelope::<Log>::Legacy(receipt_with_bloom), - TxType::Eip2930 => OpReceiptEnvelope::<Log>::Eip2930(receipt_with_bloom), - TxType::Eip1559 => OpReceiptEnvelope::<Log>::Eip1559(receipt_with_bloom), - TxType::Eip4844 => { - // TODO: unreachable - OpReceiptEnvelope::<Log>::Eip1559(receipt_with_bloom) - } - TxType::Eip7702 => OpReceiptEnvelope::<Log>::Eip7702(receipt_with_bloom), - TxType::Deposit => { - OpReceiptEnvelope::<Log>::Deposit(OpDepositReceiptWithBloom::<Log> { - receipt: OpDepositReceipt::<Log> { - inner: receipt_with_bloom.receipt, - deposit_nonce, - deposit_receipt_version, - }, - logs_bloom: receipt_with_bloom.logs_bloom, - }) - } - }; + let Self { core_receipt: inner, op_receipt_fields } = self; - let inner = TransactionReceipt::<OpReceiptEnvelope<Log>> { - inner, - transaction_hash, - transaction_index, - block_hash, - block_number, - gas_used, - effective_gas_price, - blob_gas_used, - blob_gas_price, - from, - to, - contract_address, - state_root, - authorization_list, - }; + let OpTransactionReceiptFields { l1_block_info, .. 
} = op_receipt_fields; OpTransactionReceipt { inner, l1_block_info } } @@ -359,7 +296,7 @@ mod test { }; let l1_block_info = - reth_optimism_evm::extract_l1_info(&block).expect("should extract l1 info"); + reth_optimism_evm::extract_l1_info(&block.body).expect("should extract l1 info"); // test assert!(OP_MAINNET.is_fjord_active_at_timestamp(BLOCK_124665056_TIMESTAMP)); diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index ab7525016a..6b5954391d 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,30 +1,28 @@ //! Loads and formats OP transaction RPC response. +use alloy_consensus::Transaction as _; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types::TransactionInfo; +use op_alloy_consensus::DepositTransaction; use op_alloy_rpc_types::Transaction; use reth_node_api::FullNodeComponents; use reth_primitives::TransactionSignedEcRecovered; -use reth_provider::{BlockReaderIdExt, TransactionsProvider}; +use reth_provider::{BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; use reth_rpc::eth::EthTxBuilder; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FromEthApiError, FullEthApiTypes, TransactionCompat, + FromEthApiError, FullEthApiTypes, RpcNodeCore, TransactionCompat, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthStateCache}; +use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; use crate::{OpEthApi, SequencerClient}; impl EthTransactions for OpEthApi where - Self: LoadTransaction, - N: FullNodeComponents, + Self: LoadTransaction, + N: RpcNodeCore, { - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - fn signers(&self) -> &parking_lot::RwLock>> { self.inner.signers() } @@ -40,7 +38,7 @@ where // On optimism, transactions are forwarded directly to the sequencer to be included in // blocks that it builds. if let Some(client) = self.raw_tx_forwarder().as_ref() { - tracing::debug!( target: "rpc::eth", "forwarding raw transaction to"); + tracing::debug!(target: "rpc::eth", hash = %pool_transaction.hash(), "forwarding raw transaction to sequencer"); let _ = client.forward_raw_transaction(&tx).await.inspect_err(|err| { tracing::debug!(target: "rpc::eth", %err, hash=% *pool_transaction.hash(), "failed to forward raw transaction"); }); @@ -60,57 +58,48 @@ where impl LoadTransaction for OpEthApi where Self: SpawnBlocking + FullEthApiTypes, - N: FullNodeComponents, + N: RpcNodeCore, { - type Pool = N::Pool; - - fn provider(&self) -> impl TransactionsProvider { - self.inner.provider() - } - - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - fn pool(&self) -> &Self::Pool { - self.inner.pool() - } } impl OpEthApi where - N: FullNodeComponents, + N: RpcNodeCore, { - /// Sets a [`SequencerClient`] for `eth_sendRawTransaction` to forward transactions to. - pub fn set_sequencer_client( - &self, - sequencer_client: SequencerClient, - ) -> Result<(), tokio::sync::SetError> { - self.sequencer_client.set(sequencer_client) - } - /// Returns the [`SequencerClient`] if one is set. pub fn raw_tx_forwarder(&self) -> Option { - self.sequencer_client.get().cloned() + self.sequencer_client.clone() } } -/// Builds OP transaction response type. 
-#[derive(Clone, Debug, Copy)] -pub struct OpTxBuilder; - -impl TransactionCompat for OpTxBuilder { +impl TransactionCompat for OpEthApi +where + N: FullNodeComponents, +{ type Transaction = Transaction; - fn fill(tx: TransactionSignedEcRecovered, tx_info: TransactionInfo) -> Self::Transaction { + fn fill( + &self, + tx: TransactionSignedEcRecovered, + tx_info: TransactionInfo, + ) -> Self::Transaction { let signed_tx = tx.clone().into_signed(); + let hash = tx.hash; - let mut inner = EthTxBuilder::fill(tx, tx_info).inner; + let mut inner = EthTxBuilder.fill(tx, tx_info); if signed_tx.is_deposit() { inner.gas_price = Some(signed_tx.max_fee_per_gas()) } + let deposit_receipt_version = self + .inner + .provider() + .receipt_by_hash(hash) + .ok() // todo: change sig to return result + .flatten() + .and_then(|receipt| receipt.deposit_receipt_version); + Transaction { inner, source_hash: signed_tx.source_hash(), @@ -118,7 +107,7 @@ impl TransactionCompat for OpTxBuilder { // only include is_system_tx if true: is_system_tx: (signed_tx.is_deposit() && signed_tx.is_system_transaction()) .then_some(true), - deposit_receipt_version: None, // todo: how to fill this field? + deposit_receipt_version, } } diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index e3fef7adb5..0ff1451d05 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -15,5 +15,5 @@ pub mod eth; pub mod sequencer; pub use error::{OpEthApiError, OptimismInvalidTransactionError, SequencerClientError}; -pub use eth::{transaction::OpTxBuilder, OpEthApi, OpReceiptBuilder}; +pub use eth::{OpEthApi, OpReceiptBuilder}; pub use sequencer::SequencerClient; diff --git a/crates/optimism/storage/Cargo.toml b/crates/optimism/storage/Cargo.toml index 107b64db3d..2b18897d94 100644 --- a/crates/optimism/storage/Cargo.toml +++ b/crates/optimism/storage/Cargo.toml @@ -20,4 +20,8 @@ reth-prune-types.workspace = true reth-stages-types.workspace = true [features] -optimism = ["reth-primitives/optimism"] \ No newline at end of file +optimism = [ + "reth-primitives/optimism", + "reth-codecs/optimism", + "reth-db-api/optimism" +] diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs index d435ed1d88..347b690c5c 100644 --- a/crates/optimism/storage/src/lib.rs +++ b/crates/optimism/storage/src/lib.rs @@ -16,7 +16,7 @@ mod tests { CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, }; - use reth_primitives::{Account, Receipt, ReceiptWithBloom, Requests, Withdrawals}; + use reth_primitives::{Account, Receipt, ReceiptWithBloom, Withdrawals}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, @@ -74,6 +74,5 @@ mod tests { validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(Withdrawals, UnusedBits::Zero); - validate_bitflag_backwards_compat!(Requests, UnusedBits::Zero); } } diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 939eb5b54b..74dea45d10 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -15,17 +15,20 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives.workspace = true -reth-revm.workspace = true reth-transaction-pool.workspace = true 
reth-provider.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true +reth-evm.workspace = true +reth-revm.workspace=true # ethereum alloy-rlp.workspace = true alloy-primitives.workspace = true revm.workspace = true +alloy-consensus.workspace = true +alloy-eips.workspace = true # async tokio = { workspace = true, features = ["sync", "time"] } diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index f9487ec784..4eaf7da842 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -9,22 +9,22 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use crate::metrics::PayloadBuilderMetrics; +use alloy_consensus::constants::EMPTY_WITHDRAWALS; +use alloy_eips::{merge::SLOT_DURATION, BlockNumberOrTag}; use alloy_primitives::{Bytes, B256, U256}; use futures_core::ready; use futures_util::FutureExt; use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_payload_builder::{ - database::CachedReads, KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator, -}; -use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError}; -use reth_primitives::{ - constants::{EMPTY_WITHDRAWALS, RETH_CLIENT_VERSION, SLOT_DURATION}, - proofs, BlockNumberOrTag, SealedBlock, Withdrawals, +use reth_evm::state_change::post_block_withdrawals_balance_increments; +use reth_payload_builder::{KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator}; +use reth_payload_primitives::{ + BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, }; +use reth_primitives::{constants::RETH_CLIENT_VERSION, proofs, SealedHeader, Withdrawals}; use reth_provider::{ BlockReaderIdExt, BlockSource, CanonStateNotification, ProviderError, StateProviderFactory, }; -use reth_revm::state_change::post_block_withdrawals_balance_increments; +use reth_revm::cached::CachedReads; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; use revm::{Database, State}; @@ -92,10 +92,11 @@ impl BasicPayloadJobGenerator Client software SHOULD stop the updating process when either a call to engine_getPayload - // > with the build process's payloadId is made or SECONDS_PER_SLOT (12s in the Mainnet - // > configuration) have passed since the point in time identified by the timestamp parameter. - // See also + /// > Client software SHOULD stop the updating process when either a call to engine_getPayload + /// > with the build process's payloadId is made or SECONDS_PER_SLOT (12s in the Mainnet + /// > configuration) have passed since the point in time identified by the timestamp parameter. 
+ /// + /// See also #[inline] fn max_job_duration(&self, unix_timestamp: u64) -> Duration { let duration_until_timestamp = duration_until(unix_timestamp); @@ -118,7 +119,7 @@ impl BasicPayloadJobGenerator Option { self.pre_cached.as_ref().filter(|pc| pc.block == parent).map(|pc| pc.cached.clone()) @@ -159,13 +160,17 @@ where block.seal(attributes.parent()) }; + let hash = parent_block.hash(); + let parent_header = parent_block.header(); + let header = SealedHeader::new(parent_header.clone(), hash); + let config = - PayloadConfig::new(Arc::new(parent_block), self.config.extradata.clone(), attributes); + PayloadConfig::new(Arc::new(header), self.config.extradata.clone(), attributes); let until = self.job_deadline(config.attributes.timestamp()); let deadline = Box::pin(tokio::time::sleep_until(until)); - let cached_reads = self.maybe_pre_cached(config.parent_block.hash()); + let cached_reads = self.maybe_pre_cached(hash); let mut job = BasicPayloadJob { config, @@ -175,7 +180,7 @@ where deadline, // ticks immediately interval: tokio::time::interval(self.config.interval), - best_payload: None, + best_payload: PayloadState::Missing, pending_block: None, cached_reads, payload_task_guard: self.payload_task_guard.clone(), @@ -321,8 +326,8 @@ where deadline: Pin>, /// The interval at which the job should build a new payload after the last. interval: Interval, - /// The best payload so far. - best_payload: Option, + /// The best payload so far and its state. + best_payload: PayloadState, /// Receiver for the block that is currently being built. pending_block: Option>, /// Restricts how many generator tasks can be executed at once. @@ -359,7 +364,7 @@ where let _cancel = cancel.clone(); let guard = self.payload_task_guard.clone(); let payload_config = self.config.clone(); - let best_payload = self.best_payload.clone(); + let best_payload = self.best_payload.payload().cloned(); self.metrics.inc_initiated_payload_builds(); let cached_reads = self.cached_reads.take().unwrap_or_default(); let builder = self.builder.clone(); @@ -404,8 +409,9 @@ where // check if the interval is reached while this.interval.poll_tick(cx).is_ready() { - // start a new job if there is no pending block and we haven't reached the deadline - if this.pending_block.is_none() { + // start a new job if there is no pending block, we haven't reached the deadline, + // and the payload isn't frozen + if this.pending_block.is_none() && !this.best_payload.is_frozen() { this.spawn_build_job(); } } @@ -417,7 +423,11 @@ where BuildOutcome::Better { payload, cached_reads } => { this.cached_reads = Some(cached_reads); debug!(target: "payload_builder", value = %payload.fees(), "built better payload"); - this.best_payload = Some(payload); + this.best_payload = PayloadState::Best(payload); + } + BuildOutcome::Freeze(payload) => { + debug!(target: "payload_builder", "payload frozen, no further building will occur"); + this.best_payload = PayloadState::Frozen(payload); } BuildOutcome::Aborted { fees, cached_reads } => { this.cached_reads = Some(cached_reads); @@ -456,26 +466,29 @@ where type BuiltPayload = Builder::BuiltPayload; fn best_payload(&self) -> Result { - if let Some(ref payload) = self.best_payload { - return Ok(payload.clone()) + if let Some(payload) = self.best_payload.payload() { + Ok(payload.clone()) + } else { + // No payload has been built yet, but we need to return something that the CL then + // can deliver, so we need to return an empty payload. 
+ // + // Note: it is assumed that this is unlikely to happen, as the payload job is + // started right away and the first full block should have been + // built by the time CL is requesting the payload. + self.metrics.inc_requested_empty_payload(); + self.builder.build_empty_payload(&self.client, self.config.clone()) } - // No payload has been built yet, but we need to return something that the CL then can - // deliver, so we need to return an empty payload. - // - // Note: it is assumed that this is unlikely to happen, as the payload job is started right - // away and the first full block should have been built by the time CL is requesting the - // payload. - self.metrics.inc_requested_empty_payload(); - self.builder.build_empty_payload(&self.client, self.config.clone()) } fn payload_attributes(&self) -> Result { Ok(self.config.attributes.clone()) } - fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { - let best_payload = self.best_payload.take(); - + fn resolve_kind( + &mut self, + kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + let best_payload = self.best_payload.payload().cloned(); if best_payload.is_none() && self.pending_block.is_none() { // ensure we have a job scheduled if we don't have a best payload yet and none is active self.spawn_build_job(); @@ -529,12 +542,44 @@ where }; } - let fut = ResolveBestPayload { best_payload, maybe_better, empty_payload }; + let fut = ResolveBestPayload { + best_payload, + maybe_better, + empty_payload: empty_payload.filter(|_| kind != PayloadKind::WaitForPending), + }; (fut, KeepPayloadJobAlive::No) } } +/// Represents the current state of a payload being built. +#[derive(Debug, Clone)] +pub enum PayloadState

<P> {
+    /// No payload has been built yet.
+    Missing,
+    /// The best payload built so far, which may still be improved upon.
+    Best(P),
+    /// The payload is frozen and no further building should occur.
+    ///
+    /// Contains the final payload `P` that should be used.
+    Frozen(P),
+}
+
+impl<P> PayloadState<P>

{ + /// Checks if the payload is frozen. + pub const fn is_frozen(&self) -> bool { + matches!(self, Self::Frozen(_)) + } + + /// Returns the payload if it exists (either Best or Frozen). + pub const fn payload(&self) -> Option<&P> { + match self { + Self::Missing => None, + Self::Best(p) | Self::Frozen(p) => Some(p), + } + } +} + /// The future that returns the best payload to be served to the consensus layer. /// /// This returns the payload that's supposed to be sent to the CL. @@ -573,7 +618,9 @@ where if let Some(fut) = Pin::new(&mut this.maybe_better).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { this.maybe_better = None; - if let Ok(BuildOutcome::Better { payload, .. }) = res { + if let Ok(Some(payload)) = res.map(|out| out.into_payload()) + .inspect_err(|err| warn!(target: "payload_builder", %err, "failed to resolve pending payload")) + { debug!(target: "payload_builder", "resolving better payload"); return Poll::Ready(Ok(payload)) } @@ -662,8 +709,8 @@ impl Drop for Cancelled { /// Static config for how to build a payload. #[derive(Clone, Debug)] pub struct PayloadConfig { - /// The parent block. - pub parent_block: Arc, + /// The parent header. + pub parent_header: Arc, /// Block extra data. pub extra_data: Bytes, /// Requested attributes for the payload. @@ -683,11 +730,11 @@ where { /// Create new payload config. pub const fn new( - parent_block: Arc, + parent_header: Arc, extra_data: Bytes, attributes: Attributes, ) -> Self { - Self { parent_block, extra_data, attributes } + Self { parent_header, extra_data, attributes } } /// Returns the payload id. @@ -715,13 +762,16 @@ pub enum BuildOutcome { }, /// Build job was cancelled Cancelled, + + /// The payload is final and no further building should occur + Freeze(Payload), } impl BuildOutcome { /// Consumes the type and returns the payload if the outcome is `Better`. pub fn into_payload(self) -> Option { match self { - Self::Better { payload, .. } => Some(payload), + Self::Better { payload, .. } | Self::Freeze(payload) => Some(payload), _ => None, } } @@ -789,6 +839,21 @@ impl BuildArguments(self, f: F) -> BuildArguments + where + F: FnOnce(Pool) -> P, + { + BuildArguments { + client: self.client, + pool: f(self.pool), + cached_reads: self.cached_reads, + config: self.config, + cancel: self.cancel, + best_payload: self.best_payload, + } + } } /// A trait for building payloads that encapsulate Ethereum transactions. 
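The `PayloadState` type added above replaces the previous `Option<Payload>` used for `best_payload`: `Missing` means no build has completed yet, `Best` holds a payload that later jobs may still improve, and `Frozen` is terminal. A minimal sketch of how a job loop folds a `BuildOutcome` into this state, mirroring the `poll` logic above; the `on_outcome` helper is illustrative only and assumes the `PayloadState` and `BuildOutcome` definitions from this diff:

    // Illustrative only: maps a finished build's outcome onto the job's state.
    fn on_outcome<P>(state: &mut PayloadState<P>, outcome: BuildOutcome<P>) {
        match outcome {
            // A better payload becomes the new best but may still be replaced.
            BuildOutcome::Better { payload, .. } => *state = PayloadState::Best(payload),
            // A frozen payload is final; `is_frozen()` then suppresses new build jobs.
            BuildOutcome::Freeze(payload) => *state = PayloadState::Frozen(payload),
            // `Aborted` and `Cancelled` leave the current state unchanged.
            _ => {}
        }
    }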
diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 71f63ce34c..08399b6f9c 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -13,15 +13,15 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-primitives = { workspace = true, optional = true } reth-provider.workspace = true reth-payload-primitives.workspace = true reth-ethereum-engine-primitives.workspace = true reth-chain-state = { workspace = true, optional = true } # alloy +alloy-primitives = { workspace = true, optional = true } alloy-rpc-types = { workspace = true, features = ["engine"] } -alloy-primitives.workspace = true # async async-trait.workspace = true @@ -37,7 +37,17 @@ metrics.workspace = true tracing.workspace = true [dev-dependencies] +reth-primitives.workspace = true +reth-chain-state.workspace = true +alloy-primitives.workspace = true revm.workspace = true [features] -test-utils = ["reth-chain-state"] +test-utils = [ + "alloy-primitives", + "reth-chain-state", + "reth-chain-state/test-utils", + "reth-primitives/test-utils", + "reth-provider/test-utils", + "revm/test-utils" +] diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 70b4296da4..2c46a4a9e1 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -28,7 +28,7 @@ //! use std::pin::Pin; //! use std::task::{Context, Poll}; //! use alloy_primitives::U256; -//! use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator}; +//! use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator, PayloadKind}; //! use reth_primitives::{Block, Header}; //! //! /// The generator type that creates new jobs that builds empty blocks. @@ -65,7 +65,7 @@ //! }, //! ..Default::default() //! }; -//! let payload = EthBuiltPayload::new(self.attributes.id, payload.seal_slow(), U256::ZERO, None); +//! let payload = EthBuiltPayload::new(self.attributes.id, payload.seal_slow(), U256::ZERO, None, None); //! Ok(payload) //! } //! @@ -73,7 +73,7 @@ //! Ok(self.attributes.clone()) //! } //! -//! fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { +//! fn resolve_kind(&mut self, _kind: PayloadKind) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { //! let payload = self.best_payload(); //! (futures_util::future::ready(payload), KeepPayloadJobAlive::No) //! 
} @@ -101,7 +101,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub mod database; mod metrics; mod service; mod traits; @@ -112,7 +111,7 @@ pub mod noop; pub mod test_utils; pub use alloy_rpc_types::engine::PayloadId; -pub use reth_payload_primitives::PayloadBuilderError; +pub use reth_payload_primitives::{PayloadBuilderError, PayloadKind}; pub use service::{ PayloadBuilderHandle, PayloadBuilderService, PayloadServiceCommand, PayloadStore, }; diff --git a/crates/payload/builder/src/noop.rs b/crates/payload/builder/src/noop.rs index 06da7dcfad..cbf21f1ceb 100644 --- a/crates/payload/builder/src/noop.rs +++ b/crates/payload/builder/src/noop.rs @@ -51,7 +51,7 @@ where } PayloadServiceCommand::BestPayload(_, tx) => tx.send(None).ok(), PayloadServiceCommand::PayloadAttributes(_, tx) => tx.send(None).ok(), - PayloadServiceCommand::Resolve(_, tx) => tx.send(None).ok(), + PayloadServiceCommand::Resolve(_, _, tx) => tx.send(None).ok(), PayloadServiceCommand::Subscribe(_) => None, }; } diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 1ebf6770c9..853c69e90d 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -11,7 +11,7 @@ use alloy_rpc_types::engine::PayloadId; use futures_util::{future::FutureExt, Stream, StreamExt}; use reth_payload_primitives::{ BuiltPayload, Events, PayloadBuilder, PayloadBuilderAttributes, PayloadBuilderError, - PayloadEvents, PayloadTypes, + PayloadEvents, PayloadKind, PayloadTypes, }; use reth_provider::CanonStateNotification; use std::{ @@ -45,11 +45,20 @@ where /// /// Note: depending on the installed [`PayloadJobGenerator`], this may or may not terminate the /// job, See [`PayloadJob::resolve`]. + pub async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option> { + self.inner.resolve_kind(id, kind).await + } + + /// Resolves the payload job and returns the best payload that has been built so far. pub async fn resolve( &self, id: PayloadId, ) -> Option> { - self.inner.resolve(id).await + self.resolve_kind(id, PayloadKind::Earliest).await } /// Returns the best payload for the given identifier. @@ -110,16 +119,13 @@ where type PayloadType = T; type Error = PayloadBuilderError; - async fn send_and_resolve_payload( + fn send_new_payload( &self, attr: ::PayloadBuilderAttributes, - ) -> Result::BuiltPayload>, Self::Error> { - let rx = self.send_new_payload(attr); - let id = rx.await??; - + ) -> Receiver> { let (tx, rx) = oneshot::channel(); - let _ = self.to_service.send(PayloadServiceCommand::Resolve(id, tx)); - rx.await?.ok_or(PayloadBuilderError::MissingPayload) + let _ = self.to_service.send(PayloadServiceCommand::BuildNewPayload(attr, tx)); + rx } /// Note: this does not resolve the job if it's still in progress. @@ -132,21 +138,17 @@ where rx.await.ok()? } - fn send_new_payload( + async fn resolve_kind( &self, - attr: ::PayloadBuilderAttributes, - ) -> Receiver> { + id: PayloadId, + kind: PayloadKind, + ) -> Option> { let (tx, rx) = oneshot::channel(); - let _ = self.to_service.send(PayloadServiceCommand::BuildNewPayload(attr, tx)); - rx - } - - /// Note: if there's already payload in progress with same identifier, it will be returned. - async fn new_payload( - &self, - attr: ::PayloadBuilderAttributes, - ) -> Result { - self.send_new_payload(attr).await? + self.to_service.send(PayloadServiceCommand::Resolve(id, kind, tx)).ok()?; + match rx.await.transpose()? 
{ + Ok(fut) => Some(fut.await), + Err(e) => Some(Err(e.into())), + } } async fn subscribe(&self) -> Result, Self::Error> { @@ -168,19 +170,6 @@ where Self { to_service } } - /// Resolves the payload job and returns the best payload that has been built so far. - /// - /// Note: depending on the installed [`PayloadJobGenerator`], this may or may not terminate the - /// job, See [`PayloadJob::resolve`]. - async fn resolve(&self, id: PayloadId) -> Option> { - let (tx, rx) = oneshot::channel(); - self.to_service.send(PayloadServiceCommand::Resolve(id, tx)).ok()?; - match rx.await.transpose()? { - Ok(fut) => Some(fut.await), - Err(e) => Some(Err(e.into())), - } - } - /// Returns the payload attributes associated with the given identifier. /// /// Note: this returns the attributes of the payload and does not resolve the job. @@ -296,11 +285,15 @@ where /// Returns the best payload for the given identifier that has been built so far and terminates /// the job if requested. - fn resolve(&mut self, id: PayloadId) -> Option> { + fn resolve( + &mut self, + id: PayloadId, + kind: PayloadKind, + ) -> Option> { trace!(%id, "resolving payload job"); let job = self.payload_jobs.iter().position(|(_, job_id)| *job_id == id)?; - let (fut, keep_alive) = self.payload_jobs[job].0.resolve(); + let (fut, keep_alive) = self.payload_jobs[job].0.resolve_kind(kind); if keep_alive == KeepPayloadJobAlive::No { let (_, id) = self.payload_jobs.swap_remove(job); @@ -437,8 +430,8 @@ where let attributes = this.payload_attributes(id); let _ = tx.send(attributes); } - PayloadServiceCommand::Resolve(id, tx) => { - let _ = tx.send(this.resolve(id)); + PayloadServiceCommand::Resolve(id, strategy, tx) => { + let _ = tx.send(this.resolve(id, strategy)); } PayloadServiceCommand::Subscribe(tx) => { let new_rx = this.payload_events.subscribe(); @@ -469,7 +462,11 @@ pub enum PayloadServiceCommand { oneshot::Sender>>, ), /// Resolve the payload and return the payload - Resolve(PayloadId, oneshot::Sender>>), + Resolve( + PayloadId, + /* kind: */ PayloadKind, + oneshot::Sender>>, + ), /// Payload service events Subscribe(oneshot::Sender>>), } @@ -489,7 +486,7 @@ where Self::PayloadAttributes(f0, f1) => { f.debug_tuple("PayloadAttributes").field(&f0).field(&f1).finish() } - Self::Resolve(f0, _f1) => f.debug_tuple("Resolve").field(&f0).finish(), + Self::Resolve(f0, f1, _f2) => f.debug_tuple("Resolve").field(&f0).field(&f1).finish(), Self::Subscribe(f0) => f.debug_tuple("Subscribe").field(&f0).finish(), } } diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 55b9b84f45..676e60d912 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -7,7 +7,7 @@ use crate::{ use alloy_primitives::U256; use reth_chain_state::ExecutedBlock; -use reth_payload_primitives::{PayloadBuilderError, PayloadTypes}; +use reth_payload_primitives::{PayloadBuilderError, PayloadKind, PayloadTypes}; use reth_primitives::Block; use reth_provider::CanonStateNotification; use std::{ @@ -89,6 +89,7 @@ impl PayloadJob for TestPayloadJob { Block::default().seal_slow(), U256::ZERO, Some(ExecutedBlock::default()), + Some(Default::default()), )) } @@ -96,7 +97,10 @@ impl PayloadJob for TestPayloadJob { Ok(self.attr.clone()) } - fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + fn resolve_kind( + &mut self, + _kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { let fut = futures_util::future::ready(self.best_payload()); (fut, 
KeepPayloadJobAlive::No) } diff --git a/crates/payload/builder/src/traits.rs b/crates/payload/builder/src/traits.rs index 8d448eeff5..62dadeb45d 100644 --- a/crates/payload/builder/src/traits.rs +++ b/crates/payload/builder/src/traits.rs @@ -1,6 +1,8 @@ //! Trait abstractions used by the payload crate. -use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError}; +use reth_payload_primitives::{ + BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, +}; use reth_provider::CanonStateNotification; use std::future::Future; @@ -53,7 +55,21 @@ pub trait PayloadJob: Future> + Send + /// If this returns [`KeepPayloadJobAlive::Yes`], then the [`PayloadJob`] will be polled /// once more. If this returns [`KeepPayloadJobAlive::No`] then the [`PayloadJob`] will be /// dropped after this call. - fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive); + /// + /// The [`PayloadKind`] determines how the payload should be resolved in the + /// `ResolvePayloadFuture`. [`PayloadKind::Earliest`] should return the earliest available + /// payload (as fast as possible), e.g. racing an empty payload job against a pending job if + /// there's no payload available yet. [`PayloadKind::WaitForPending`] is allowed to wait + /// until a built payload is available. + fn resolve_kind( + &mut self, + kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive); + + /// Resolves the payload as fast as possible. + fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + self.resolve_kind(PayloadKind::Earliest) + } } /// Whether the payload job should be kept alive or terminated after the payload was requested by diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index 27418ccd89..ad8ce63a7e 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -20,6 +20,7 @@ reth-transaction-pool.workspace = true reth-chain-state.workspace = true # alloy +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } op-alloy-rpc-types-engine.workspace = true diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 5d10040513..7013d9fd91 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -26,7 +26,7 @@ pub use traits::{ mod payload; pub use payload::PayloadOrAttributes; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_chainspec::EthereumHardforks; /// The types that are used by the engine API. pub trait PayloadTypes: Send + Sync + Unpin + core::fmt::Debug + Clone + 'static { /// The built payload type. @@ -125,8 +125,8 @@ pub fn validate_payload_timestamp( /// Validates the presence of the `withdrawals` field according to the payload timestamp. /// After Shanghai, withdrawals field must be [Some]. /// Before Shanghai, withdrawals field must be [None]; -pub fn validate_withdrawals_presence( - chain_spec: &ChainSpec, +pub fn validate_withdrawals_presence( + chain_spec: &T, version: EngineApiMessageVersion, message_validation_kind: MessageValidationKind, timestamp: u64, @@ -210,8 +210,8 @@ pub fn validate_withdrawals_presence( /// `MessageValidationKind::Payload`, then the error code will be `-32602: Invalid params`. If the /// parameter is `MessageValidationKind::PayloadAttributes`, then the error code will be `-38003: /// Invalid payload attributes`. 
-pub fn validate_parent_beacon_block_root_presence( - chain_spec: &ChainSpec, +pub fn validate_parent_beacon_block_root_presence( + chain_spec: &T, version: EngineApiMessageVersion, validation_kind: MessageValidationKind, timestamp: u64, @@ -298,13 +298,14 @@ impl MessageValidationKind { /// either an execution payload, or payload attributes. /// /// The version is provided by the [`EngineApiMessageVersion`] argument. -pub fn validate_version_specific_fields( - chain_spec: &ChainSpec, +pub fn validate_version_specific_fields( + chain_spec: &T, version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, Type>, ) -> Result<(), EngineObjectValidationError> where Type: PayloadAttributes, + T: EthereumHardforks, { validate_withdrawals_presence( chain_spec, @@ -323,22 +324,45 @@ where } /// The version of Engine API message. -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)] pub enum EngineApiMessageVersion { /// Version 1 - V1, + V1 = 1, /// Version 2 /// /// Added in the Shanghai hardfork. - V2, + V2 = 2, /// Version 3 /// /// Added in the Cancun hardfork. - V3, + #[default] + V3 = 3, /// Version 4 /// /// Added in the Prague hardfork. - V4, + V4 = 4, +} + +/// Determines how we should choose the payload to return. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum PayloadKind { + /// Returns the next best available payload (the earliest available payload). + /// This does not wait for a real for pending job to finish if there's no best payload yet and + /// is allowed to race various payload jobs (empty, pending best) against each other and + /// returns whichever job finishes faster. + /// + /// This should be used when it's more important to return a valid payload as fast as possible. + /// For example, the engine API timeout for `engine_getPayload` is 1s and clients should rather + /// return an empty payload than indefinitely waiting for the pending payload job to finish and + /// risk missing the deadline. + #[default] + Earliest, + /// Only returns once we have at least one built payload. + /// + /// Compared to [`PayloadKind::Earliest`] this does not race an empty payload job against the + /// already in progress one, and returns the best available built payload or awaits the job in + /// progress. + WaitForPending, } #[cfg(test)] diff --git a/crates/payload/primitives/src/payload.rs b/crates/payload/primitives/src/payload.rs index 41c2ef1efc..fc685559e0 100644 --- a/crates/payload/primitives/src/payload.rs +++ b/crates/payload/primitives/src/payload.rs @@ -3,8 +3,10 @@ use alloy_primitives::B256; use alloy_rpc_types::engine::ExecutionPayload; /// Either an [`ExecutionPayload`] or a types that implements the [`PayloadAttributes`] trait. +/// +/// This is a helper type to unify pre-validation of version specific fields of the engine API. #[derive(Debug)] -pub enum PayloadOrAttributes<'a, AttributesType> { +pub enum PayloadOrAttributes<'a, Attributes> { /// An [`ExecutionPayload`] and optional parent beacon block root. ExecutionPayload { /// The inner execution payload @@ -13,13 +15,10 @@ pub enum PayloadOrAttributes<'a, AttributesType> { parent_beacon_block_root: Option, }, /// A payload attributes type. 
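The `PayloadKind` strategy defined above is threaded through `PayloadJob::resolve_kind` and the service handle introduced earlier in this diff. A hedged usage sketch against that handle API; the `get_payload_under_deadline` wrapper is hypothetical and exists only to contrast the two strategies:

    // Illustrative only: `resolve(id)` is now shorthand for
    // `resolve_kind(id, PayloadKind::Earliest)`, which may race an empty payload
    // against the in-progress job; `WaitForPending` waits for a built payload.
    async fn get_payload_under_deadline<T: PayloadTypes>(
        handle: &PayloadBuilderHandle<T>,
        id: PayloadId,
        strict_deadline: bool,
    ) -> Option<Result<T::BuiltPayload, PayloadBuilderError>> {
        let kind =
            if strict_deadline { PayloadKind::Earliest } else { PayloadKind::WaitForPending };
        handle.resolve_kind(id, kind).await
    }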
- PayloadAttributes(&'a AttributesType), + PayloadAttributes(&'a Attributes), } -impl<'a, AttributesType> PayloadOrAttributes<'a, AttributesType> -where - AttributesType: PayloadAttributes, -{ +impl<'a, Attributes> PayloadOrAttributes<'a, Attributes> { /// Construct a [`PayloadOrAttributes`] from an [`ExecutionPayload`] and optional parent beacon /// block root. pub const fn from_execution_payload( @@ -29,6 +28,16 @@ where Self::ExecutionPayload { payload, parent_beacon_block_root } } + /// Construct a [`PayloadOrAttributes::PayloadAttributes`] variant + pub const fn from_attributes(attributes: &'a Attributes) -> Self { + Self::PayloadAttributes(attributes) + } +} + +impl PayloadOrAttributes<'_, Attributes> +where + Attributes: PayloadAttributes, +{ /// Return the withdrawals for the payload or attributes. pub fn withdrawals(&self) -> Option<&Vec> { match self { diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 6ae6361fdb..a78dc8c132 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,4 +1,5 @@ -use crate::{PayloadBuilderError, PayloadEvents, PayloadTypes}; +use crate::{PayloadEvents, PayloadKind, PayloadTypes}; +use alloy_eips::eip7685::Requests; use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types::{ engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}, @@ -7,12 +8,8 @@ use alloy_rpc_types::{ use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_chain_state::ExecutedBlock; use reth_primitives::{SealedBlock, Withdrawals}; -use std::{future::Future, pin::Pin}; use tokio::sync::oneshot; -pub(crate) type PayloadFuture
<P>
= - Pin> + Send + Sync>>; - /// A type that can request, subscribe to and resolve payloads. #[async_trait::async_trait] pub trait PayloadBuilder: Send + Unpin { @@ -21,12 +18,13 @@ pub trait PayloadBuilder: Send + Unpin { /// The error type returned by the builder. type Error; - /// Sends a message to the service to start building a new payload for the given payload - /// attributes and returns a future that resolves to the payload. - async fn send_and_resolve_payload( + /// Sends a message to the service to start building a new payload for the given payload. + /// + /// Returns a receiver that will receive the payload id. + fn send_new_payload( &self, attr: ::PayloadBuilderAttributes, - ) -> Result::BuiltPayload>, Self::Error>; + ) -> oneshot::Receiver>; /// Returns the best payload for the given identifier. async fn best_payload( @@ -34,22 +32,21 @@ pub trait PayloadBuilder: Send + Unpin { id: PayloadId, ) -> Option::BuiltPayload, Self::Error>>; - /// Sends a message to the service to start building a new payload for the given payload. - /// - /// This is the same as [`PayloadBuilder::new_payload`] but does not wait for the result - /// and returns the receiver instead - fn send_new_payload( + /// Resolves the payload job and returns the best payload that has been built so far. + async fn resolve_kind( &self, - attr: ::PayloadBuilderAttributes, - ) -> oneshot::Receiver>; + id: PayloadId, + kind: PayloadKind, + ) -> Option::BuiltPayload, Self::Error>>; - /// Starts building a new payload for the given payload attributes. - /// - /// Returns the identifier of the payload. - async fn new_payload( + /// Resolves the payload job as fast and possible and returns the best payload that has been + /// built so far. + async fn resolve( &self, - attr: ::PayloadBuilderAttributes, - ) -> Result; + id: PayloadId, + ) -> Option::BuiltPayload, Self::Error>> { + self.resolve_kind(id, PayloadKind::Earliest).await + } /// Sends a message to the service to subscribe to payload events. /// Returns a receiver that will receive them. @@ -69,6 +66,9 @@ pub trait BuiltPayload: Send + Sync + std::fmt::Debug { fn executed_block(&self) -> Option { None } + + /// Returns the EIP-7865 requests for the payload if any. + fn requests(&self) -> Option; } /// This can be implemented by types that describe a currently running payload job. @@ -84,10 +84,11 @@ pub trait PayloadBuilderAttributes: Send + Sync + std::fmt::Debug { /// Creates a new payload builder for the given parent block and the attributes. /// - /// Derives the unique [`PayloadId`] for the given parent and attributes + /// Derives the unique [`PayloadId`] for the given parent, attributes and version. fn try_new( parent: B256, rpc_payload_attributes: Self::RpcPayloadAttributes, + version: u8, ) -> Result where Self: Sized; @@ -160,12 +161,7 @@ impl PayloadAttributes for OpPayloadAttributes { } /// A builder that can return the current payload attribute. -pub trait PayloadAttributesBuilder: std::fmt::Debug + Send + Sync + 'static { - /// The payload attributes type returned by the builder. - type PayloadAttributes: PayloadAttributes; - /// The error type returned by [`PayloadAttributesBuilder::build`]. - type Error: core::error::Error + Send + Sync; - +pub trait PayloadAttributesBuilder: Send + Sync + 'static { /// Return a new payload attribute from the builder. 
- fn build(&self) -> Result; + fn build(&self, timestamp: u64) -> Attributes; } diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 55002b0a98..e74b5f48d4 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -8,7 +8,9 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use alloy_rpc_types::engine::{ExecutionPayload, MaybeCancunPayloadFields, PayloadError}; +use alloy_rpc_types::engine::{ + ExecutionPayload, ExecutionPayloadSidecar, MaybeCancunPayloadFields, PayloadError, +}; use reth_chainspec::EthereumHardforks; use reth_primitives::SealedBlock; use reth_rpc_types_compat::engine::payload::try_into_block; @@ -21,7 +23,7 @@ pub struct ExecutionPayloadValidator { chain_spec: Arc, } -impl ExecutionPayloadValidator { +impl ExecutionPayloadValidator { /// Create a new validator. pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } @@ -29,10 +31,12 @@ impl ExecutionPayloadValidator { /// Returns the chain spec used by the validator. #[inline] - pub fn chain_spec(&self) -> &ChainSpec { + pub const fn chain_spec(&self) -> &Arc { &self.chain_spec } +} +impl ExecutionPayloadValidator { /// Returns true if the Cancun hardfork is active at the given timestamp. #[inline] fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { @@ -111,13 +115,12 @@ impl ExecutionPayloadValidator { pub fn ensure_well_formed_payload( &self, payload: ExecutionPayload, - cancun_fields: MaybeCancunPayloadFields, + sidecar: ExecutionPayloadSidecar, ) -> Result { let expected_hash = payload.block_hash(); // First parse the block - let sealed_block = - try_into_block(payload, cancun_fields.parent_beacon_block_root())?.seal_slow(); + let sealed_block = try_into_block(payload, &sidecar)?.seal_slow(); // Ensure the hash included in the payload matches the block hash if expected_hash != sealed_block.hash() { @@ -136,7 +139,7 @@ impl ExecutionPayloadValidator { // cancun active but excess blob gas not present return Err(PayloadError::PostCancunBlockWithoutExcessBlobGas) } - if cancun_fields.as_ref().is_none() { + if sidecar.cancun().is_none() { // cancun active but cancun fields not present return Err(PayloadError::PostCancunWithoutCancunFields) } @@ -153,7 +156,7 @@ impl ExecutionPayloadValidator { // cancun not active but excess blob gas present return Err(PayloadError::PreCancunBlockWithExcessBlobGas) } - if cancun_fields.as_ref().is_some() { + if sidecar.cancun().is_some() { // cancun not active but cancun fields present return Err(PayloadError::PreCancunWithCancunFields) } @@ -162,7 +165,7 @@ impl ExecutionPayloadValidator { let shanghai_active = self.is_shanghai_active_at_timestamp(sealed_block.timestamp); if !shanghai_active && sealed_block.body.withdrawals.is_some() { // shanghai not active but withdrawals present - return Err(PayloadError::PreShanghaiBlockWithWitdrawals) + return Err(PayloadError::PreShanghaiBlockWithWithdrawals) } if !self.is_prague_active_at_timestamp(sealed_block.timestamp) && @@ -172,7 +175,10 @@ impl ExecutionPayloadValidator { } // EIP-4844 checks - self.ensure_matching_blob_versioned_hashes(&sealed_block, &cancun_fields)?; + self.ensure_matching_blob_versioned_hashes( + &sealed_block, + &sidecar.cancun().cloned().into(), + )?; Ok(sealed_block) } diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index d595c4ff93..38728cc2a2 100644 --- a/crates/primitives-traits/Cargo.toml +++ 
b/crates/primitives-traits/Cargo.toml @@ -18,7 +18,7 @@ alloy-consensus = { workspace = true, features = ["serde"] } alloy-eips.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true -alloy-rlp.workspace = true +alloy-rlp = { workspace = true, features = ["derive"] } revm-primitives = { workspace = true, features = ["serde"] } @@ -44,7 +44,6 @@ reth-testing-utils.workspace = true alloy-primitives = { workspace = true, features = ["arbitrary", "rand", "rlp"] } alloy-consensus = { workspace = true, features = ["arbitrary"] } -arbitrary = { workspace = true, features = ["derive"] } bincode.workspace = true proptest-arbitrary-interop.workspace = true proptest.workspace = true @@ -54,16 +53,32 @@ test-fuzz.workspace = true [features] default = ["std"] -std = [] -test-utils = ["arbitrary"] +std = [ + "alloy-consensus/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "revm-primitives/std", + "serde/std" +] +test-utils = [ + "arbitrary", + "reth-codecs/test-utils" +] arbitrary = [ - "std", - "alloy-consensus/arbitrary", - "alloy-primitives/arbitrary", - "dep:arbitrary", - "dep:proptest", - "dep:proptest-arbitrary-interop", + "std", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "dep:arbitrary", + "dep:proptest", + "dep:proptest-arbitrary-interop", + "alloy-eips/arbitrary", + "revm-primitives/arbitrary", + "reth-codecs/arbitrary" +] +serde-bincode-compat = [ + "serde_with", + "alloy-consensus/serde-bincode-compat", + "alloy-eips/serde-bincode-compat" ] -serde-bincode-compat = ["serde_with", "alloy-consensus/serde-bincode-compat"] - bsc = [] \ No newline at end of file diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index 063504b2a0..ae58973edd 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -256,4 +256,50 @@ mod tests { assert_eq!(decoded, bytecode); assert!(remainder.is_empty()); } + + #[test] + fn test_account_has_bytecode() { + // Account with no bytecode (None) + let acc_no_bytecode = Account { nonce: 1, balance: U256::from(1000), bytecode_hash: None }; + assert!(!acc_no_bytecode.has_bytecode(), "Account should not have bytecode"); + + // Account with bytecode hash set to KECCAK_EMPTY (should have bytecode) + let acc_empty_bytecode = + Account { nonce: 1, balance: U256::from(1000), bytecode_hash: Some(KECCAK_EMPTY) }; + assert!(acc_empty_bytecode.has_bytecode(), "Account should have bytecode"); + + // Account with a non-empty bytecode hash + let acc_with_bytecode = Account { + nonce: 1, + balance: U256::from(1000), + bytecode_hash: Some(B256::from_slice(&[0x11u8; 32])), + }; + assert!(acc_with_bytecode.has_bytecode(), "Account should have bytecode"); + } + + #[test] + fn test_account_get_bytecode_hash() { + // Account with no bytecode (should return KECCAK_EMPTY) + let acc_no_bytecode = Account { nonce: 0, balance: U256::ZERO, bytecode_hash: None }; + assert_eq!(acc_no_bytecode.get_bytecode_hash(), KECCAK_EMPTY, "Should return KECCAK_EMPTY"); + + // Account with bytecode hash set to KECCAK_EMPTY + let acc_empty_bytecode = + Account { nonce: 1, balance: U256::from(1000), bytecode_hash: Some(KECCAK_EMPTY) }; + assert_eq!( + acc_empty_bytecode.get_bytecode_hash(), + KECCAK_EMPTY, + "Should return KECCAK_EMPTY" + ); + + // Account with a valid bytecode hash + let bytecode_hash = B256::from_slice(&[0x11u8; 32]); + let acc_with_bytecode = + Account { nonce: 1, balance: U256::from(1000), bytecode_hash: Some(bytecode_hash) }; + 
assert_eq!( + acc_with_bytecode.get_bytecode_hash(), + bytecode_hash, + "Should return the bytecode hash" + ); + } } diff --git a/crates/primitives-traits/src/blob_sidecar.rs b/crates/primitives-traits/src/blob_sidecar.rs index af11850a1c..a3bfe1f55e 100644 --- a/crates/primitives-traits/src/blob_sidecar.rs +++ b/crates/primitives-traits/src/blob_sidecar.rs @@ -74,7 +74,7 @@ impl BlobSidecars { /// check for errors because we assume that `BlobSidecars` will only ever contain valid /// sidecars pub fn encode_index(&self, out: &mut dyn BufMut, index: usize) { - let header = alloy_rlp::Header { list: true, payload_length: self.0[index].length() }; + let header = alloy_rlp::Header { list: false, payload_length: self.0[index].length() }; header.encode(out); self.0[index].encode(out); } @@ -82,16 +82,24 @@ impl BlobSidecars { impl Encodable for BlobSidecar { fn encode(&self, out: &mut dyn BufMut) { - let list_header_self = alloy_rlp::Header { list: true, payload_length: self.length() }; + let payload_length = self.blob_transaction_sidecar.length() + + self.block_number.length() + + self.block_hash.length() + + self.tx_index.length() + + self.tx_hash.length(); + + let list_header_self = alloy_rlp::Header { list: false, payload_length }; list_header_self.encode(out); let list_header_tx_sidecar = alloy_rlp::Header { - list: true, + list: false, payload_length: self.blob_transaction_sidecar.length(), }; list_header_tx_sidecar.encode(out); - self.blob_transaction_sidecar.encode(out); + self.blob_transaction_sidecar.blobs.encode(out); + self.blob_transaction_sidecar.commitments.encode(out); + self.blob_transaction_sidecar.proofs.encode(out); self.block_number.encode(out); self.block_hash.encode(out); self.tx_index.encode(out); @@ -99,8 +107,24 @@ impl Encodable for BlobSidecar { } fn length(&self) -> usize { - self.blob_transaction_sidecar.length() + - self.blob_transaction_sidecar.length().length() + + let payload_length = self.blob_transaction_sidecar.length() + + self.block_number.length() + + self.block_hash.length() + + self.tx_index.length() + + self.tx_hash.length(); + + let list_header_self = alloy_rlp::Header { list: false, payload_length }; + let list_header_self_length = list_header_self.length(); + + let list_header_tx_sidecar = alloy_rlp::Header { + list: false, + payload_length: self.blob_transaction_sidecar.length(), + }; + let header_length = list_header_tx_sidecar.length(); + + list_header_self_length + + header_length + + self.blob_transaction_sidecar.length() + self.block_number.length() + self.block_hash.length() + self.tx_index.length() + @@ -226,7 +250,7 @@ mod tests { fn test_blob_sidecar_rlp() { let blob_sidecar = BlobSidecar { blob_transaction_sidecar: BlobTransactionSidecar { - blobs: vec![], + blobs: vec![Default::default()], commitments: vec![Default::default()], proofs: vec![Default::default()], }, diff --git a/crates/primitives/src/traits/block/body.rs b/crates/primitives-traits/src/block/body.rs similarity index 54% rename from crates/primitives/src/traits/block/body.rs rename to crates/primitives-traits/src/block/body.rs index ff8f71b761..c9b673ec72 100644 --- a/crates/primitives/src/traits/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,12 +1,12 @@ //! Block body abstraction. 
-use alloc::fmt; -use core::ops; +use alloc::{fmt, vec::Vec}; use alloy_consensus::{BlockHeader, Transaction, TxType}; +use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; use alloy_primitives::{Address, B256}; -use crate::{proofs, traits::Block, Requests, Withdrawals}; +use crate::Block; /// Abstraction for block's body. pub trait BlockBody: @@ -27,17 +27,20 @@ pub trait BlockBody: /// Header type (uncle blocks). type Header: BlockHeader; + /// Withdrawals in block. + type Withdrawals: Iterator; + /// Returns reference to transactions in block. fn transactions(&self) -> &[Self::SignedTransaction]; - /// Returns [`Withdrawals`] in the block, if any. + /// Returns `Withdrawals` in the block, if any. // todo: branch out into extension trait - fn withdrawals(&self) -> Option<&Withdrawals>; + fn withdrawals(&self) -> Option<&Self::Withdrawals>; /// Returns reference to uncle block headers. fn ommers(&self) -> &[Self::Header]; - /// Returns [`Request`] in block, if any. + /// Returns [`Requests`] in block, if any. fn requests(&self) -> Option<&Requests>; /// Create a [`Block`] from the body and its header. @@ -53,32 +56,26 @@ pub trait BlockBody: /// Calculate the withdrawals root for the block body, if withdrawals exist. If there are no /// withdrawals, this will return `None`. - fn calculate_withdrawals_root(&self) -> Option { - Some(proofs::calculate_withdrawals_root(self.withdrawals()?)) - } - - /// Calculate the requests root for the block body, if requests exist. If there are no - /// requests, this will return `None`. - fn calculate_requests_root(&self) -> Option { - Some(proofs::calculate_requests_root(self.requests()?)) - } + // todo: can be default impl if `calculate_withdrawals_root` made into a method on + // `Withdrawals` and `Withdrawals` moved to alloy + fn calculate_withdrawals_root(&self) -> Option; /// Recover signer addresses for all transactions in the block body. fn recover_signers(&self) -> Option>; /// Returns whether or not the block body contains any blob transactions. fn has_blob_transactions(&self) -> bool { - self.transactions().iter().any(|tx| tx.ty() as u8 == TxType::Eip4844 as u8) + self.transactions().iter().any(|tx| tx.ty() == TxType::Eip4844 as u8) } /// Returns whether or not the block body contains any EIP-7702 transactions. fn has_eip7702_transactions(&self) -> bool { - self.transactions().iter().any(|tx| tx.ty() as u8 == TxType::Eip7702 as u8) + self.transactions().iter().any(|tx| tx.ty() == TxType::Eip7702 as u8) } /// Returns an iterator over all blob transactions of the block fn blob_transactions_iter(&self) -> impl Iterator + '_ { - self.transactions().iter().filter(|tx| tx.ty() as u8 == TxType::Eip4844 as u8) + self.transactions().iter().filter(|tx| tx.ty() == TxType::Eip4844 as u8) } /// Returns only the blob transactions, if any, from the block body. @@ -97,56 +94,3 @@ pub trait BlockBody: /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. 
fn size(&self) -> usize; } - -impl BlockBody for T -where - T: ops::Deref - + Clone - + fmt::Debug - + PartialEq - + Eq - + Default - + serde::Serialize - + for<'de> serde::Deserialize<'de> - + alloy_rlp::Encodable - + alloy_rlp::Decodable, -{ - type Header = ::Header; - type SignedTransaction = ::SignedTransaction; - - fn transactions(&self) -> &Vec { - self.deref().transactions() - } - - fn withdrawals(&self) -> Option<&Withdrawals> { - self.deref().withdrawals() - } - - fn ommers(&self) -> &Vec { - self.deref().ommers() - } - - fn requests(&self) -> Option<&Requests> { - self.deref().requests() - } - - fn calculate_tx_root(&self) -> B256 { - self.deref().calculate_tx_root() - } - - fn calculate_ommers_root(&self) -> B256 { - self.deref().calculate_ommers_root() - } - - fn recover_signers(&self) -> Option> { - self.deref().recover_signers() - } - - fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { - self.deref().blob_versioned_hashes_iter() - } - - fn size(&self) -> usize { - self.deref().size() - } -} diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs new file mode 100644 index 0000000000..395cf61df1 --- /dev/null +++ b/crates/primitives-traits/src/block/mod.rs @@ -0,0 +1,106 @@ +//! Block abstraction. + +pub mod body; + +use alloc::{fmt, vec::Vec}; + +use alloy_consensus::BlockHeader; +use alloy_primitives::{Address, Sealable, B256}; + +use crate::BlockBody; + +/// Helper trait, unifies behaviour required of a block header. +pub trait Header: BlockHeader + Sealable {} + +impl Header for T where T: BlockHeader + Sealable {} + +/// Abstraction of block data type. +// todo: make sealable super-trait, depends on +// todo: make with senders extension trait, so block can be impl by block type already containing +// senders +pub trait Block: + fmt::Debug + + Clone + + PartialEq + + Eq + + Default + + serde::Serialize + + for<'a> serde::Deserialize<'a> + + From<(Self::Header, Self::Body)> + + Into<(Self::Header, Self::Body)> +{ + /// Header part of the block. + type Header: Header; + + /// The block's body contains the transactions in the block. + type Body: BlockBody; + + /// A block and block hash. + type SealedBlock; + + /// A block and addresses of senders of transactions in it. + type BlockWithSenders; + + /// Returns reference to [`BlockHeader`] type. + fn header(&self) -> &Self::Header; + + /// Returns reference to [`BlockBody`] type. + fn body(&self) -> &Self::Body; + + /// Calculate the header hash and seal the block so that it can't be changed. + // todo: can be default impl if sealed block type is made generic over header and body and + // migrated to alloy + fn seal_slow(self) -> Self::SealedBlock; + + /// Seal the block with a known hash. + /// + /// WARNING: This method does not perform validation whether the hash is correct. + // todo: can be default impl if sealed block type is made generic over header and body and + // migrated to alloy + fn seal(self, hash: B256) -> Self::SealedBlock; + + /// Expensive operation that recovers transaction signer. See + /// `SealedBlockWithSenders`. + fn senders(&self) -> Option> { + self.body().recover_signers() + } + + /// Transform into a `BlockWithSenders`. + /// + /// # Panics + /// + /// If the number of senders does not match the number of transactions in the block + /// and the signer recovery for one of the transactions fails. + /// + /// Note: this is expected to be called with blocks read from disk. + #[track_caller] + fn with_senders_unchecked(self, senders: Vec

<Address>) -> Self::BlockWithSenders {
+        self.try_with_senders_unchecked(senders).expect("stored block is valid")
+    }
+
+    /// Transform into a `BlockWithSenders` using the given senders.
+    ///
+    /// If the number of senders does not match the number of transactions in the block, this falls
+    /// back to manual recovery, but _without ensuring that the signature has a low `s` value_.
+    /// See also `SignedTransaction::recover_signer_unchecked`.
+    ///
+    /// Returns an error if a signature is invalid.
+    // todo: can be default impl if block with senders type is made generic over block and migrated
+    // to alloy
+    #[track_caller]
+    fn try_with_senders_unchecked(
+        self,
+        senders: Vec<Address>
, + ) -> Result, Self>; + + /// **Expensive**. Transform into a `BlockWithSenders` by recovering senders in the contained + /// transactions. + /// + /// Returns `None` if a transaction is invalid. + // todo: can be default impl if sealed block type is made generic over header and body and + // migrated to alloy + fn with_recovered_senders(self) -> Option>; + + /// Calculates a heuristic for the in-memory size of the [`Block`]. + fn size(&self) -> usize; +} diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index fb29114aa0..25bfd1446a 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -1,7 +1,6 @@ //! Ethereum protocol-related constants -use alloy_primitives::{address, b256, Address, B256, U256}; -use core::time::Duration; +use alloy_primitives::{b256, B256}; /// Gas units, for example [`GIGAGAS`]. pub mod gas_units; @@ -10,9 +9,6 @@ pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; /// The client version: `reth/v{major}.{minor}.{patch}` pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION")); -/// The first four bytes of the call data for a function call specifies the function to be called. -pub const SELECTOR_LEN: usize = 4; - /// Maximum extra data size in a block after genesis #[cfg(not(feature = "bsc"))] pub const MAXIMUM_EXTRA_DATA_SIZE: usize = 32; @@ -21,38 +17,6 @@ pub const MAXIMUM_EXTRA_DATA_SIZE: usize = 32; #[cfg(feature = "bsc")] pub const MAXIMUM_EXTRA_DATA_SIZE: usize = 1024 * 1024; -/// An EPOCH is a series of 32 slots. -pub const EPOCH_SLOTS: u64 = 32; - -/// The duration of a slot in seconds. -/// -/// This is the time period of 12 seconds in which a randomly chosen validator has time to propose a -/// block. -pub const SLOT_DURATION: Duration = Duration::from_secs(12); - -/// An EPOCH is a series of 32 slots (~6.4min). -pub const EPOCH_DURATION: Duration = Duration::from_secs(12 * EPOCH_SLOTS); - -/// The default block nonce in the beacon consensus -pub const BEACON_NONCE: u64 = 0u64; - -/// The default Ethereum block gas limit. -pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000; - -/// The minimum tx fee below which the txpool will reject the transaction. -/// -/// Configured to `7` WEI which is the lowest possible value of base fee under mainnet EIP-1559 -/// parameters. `BASE_FEE_MAX_CHANGE_DENOMINATOR` -/// is `8`, or 12.5%. Once the base fee has dropped to `7` WEI it cannot decrease further because -/// 12.5% of 7 is less than 1. -/// -/// Note that min base fee under different 1559 parameterizations may differ, but there's no -/// significant harm in leaving this setting as is. -pub const MIN_PROTOCOL_BASE_FEE: u64 = 7; - -/// Same as [`MIN_PROTOCOL_BASE_FEE`] but as a U256. 
-pub const MIN_PROTOCOL_BASE_FEE_U256: U256 = U256::from_limbs([7u64, 0, 0, 0]); - /// Initial base fee as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) #[cfg(not(feature = "bsc"))] pub const EIP1559_INITIAL_BASE_FEE: u64 = 1_000_000_000; @@ -61,96 +25,17 @@ pub const EIP1559_INITIAL_BASE_FEE: u64 = 1_000_000_000; #[cfg(feature = "bsc")] pub const EIP1559_INITIAL_BASE_FEE: u64 = 0; -/// Base fee max change denominator as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) -pub const EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 8; - -/// Elasticity multiplier as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) -pub const EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u64 = 2; - /// Minimum gas limit allowed for transactions. pub const MINIMUM_GAS_LIMIT: u64 = 5000; -/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50; - -/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism Canyon -/// hardfork. -pub const OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250; - -/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6; - -/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50; - -/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism Canyon -/// hardfork. -pub const OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250; - -/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6; - -/// Base fee max change denominator for Base Sepolia as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 10; - -/// Multiplier for converting gwei to wei. -pub const GWEI_TO_WEI: u64 = 1_000_000_000; - -/// Multiplier for converting finney (milliether) to wei. -pub const FINNEY_TO_WEI: u128 = (GWEI_TO_WEI as u128) * 1_000_000; - -/// Multiplier for converting ether to wei. 
-pub const ETH_TO_WEI: u128 = FINNEY_TO_WEI * 1000; - -/// The Ethereum mainnet genesis hash: -/// `0x0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3` -pub const MAINNET_GENESIS_HASH: B256 = - b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); - -/// Sepolia genesis hash: `0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9` -pub const SEPOLIA_GENESIS_HASH: B256 = - b256!("25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9"); - /// Holesky genesis hash: `0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4` pub const HOLESKY_GENESIS_HASH: B256 = b256!("b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"); -/// Testnet genesis hash: `0x2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c` -pub const DEV_GENESIS_HASH: B256 = - b256!("2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c"); - -/// Keccak256 over empty array: `0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470` -pub const KECCAK_EMPTY: B256 = - b256!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); - /// Ommer root of empty list: `0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347` pub const EMPTY_OMMER_ROOT_HASH: B256 = b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"); -/// Root hash of an empty trie: `0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421` -pub const EMPTY_ROOT_HASH: B256 = - b256!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"); - -/// From address from Optimism system txs: `0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001` -pub const OP_SYSTEM_TX_FROM_ADDR: Address = address!("deaddeaddeaddeaddeaddeaddeaddeaddead0001"); - -/// To address from Optimism system txs: `0x4200000000000000000000000000000000000015` -pub const OP_SYSTEM_TX_TO_ADDR: Address = address!("4200000000000000000000000000000000000015"); - -/// Transactions root of empty receipts set. -pub const EMPTY_RECEIPTS: B256 = EMPTY_ROOT_HASH; - -/// Transactions root of empty transactions set. -pub const EMPTY_TRANSACTIONS: B256 = EMPTY_ROOT_HASH; - -/// Withdrawals root of empty withdrawals set. -pub const EMPTY_WITHDRAWALS: B256 = EMPTY_ROOT_HASH; - /// Empty mix hash pub const EMPTY_MIX_HASH: B256 = b256!("0000000000000000000000000000000000000000000000000000000000000000"); @@ -164,22 +49,3 @@ pub const EMPTY_MIX_HASH: B256 = /// Unwind depth of `3` blocks significantly reduces the chance that the reorged block is kept in /// the database. pub const BEACON_CONSENSUS_REORG_UNWIND_DEPTH: u64 = 3; - -/// Max seconds from current time allowed for blocks, before they're considered future blocks. -/// -/// This is only used when checking whether or not the timestamp for pre-merge blocks is in the -/// future. 
-/// -/// See: -/// -pub const ALLOWED_FUTURE_BLOCK_TIME_SECONDS: u64 = 15; - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn min_protocol_sanity() { - assert_eq!(MIN_PROTOCOL_BASE_FEE_U256.to::(), MIN_PROTOCOL_BASE_FEE); - } -} diff --git a/crates/primitives-traits/src/header/test_utils.rs b/crates/primitives-traits/src/header/test_utils.rs index ef5c0d0253..c5f6e86b9d 100644 --- a/crates/primitives-traits/src/header/test_utils.rs +++ b/crates/primitives-traits/src/header/test_utils.rs @@ -37,7 +37,7 @@ pub const fn generate_valid_header( } // Placeholder for future EIP adjustments - header.requests_root = None; + header.requests_hash = None; header } diff --git a/crates/primitives-traits/src/integer_list.rs b/crates/primitives-traits/src/integer_list.rs index 570c96c9fd..682fa0cf82 100644 --- a/crates/primitives-traits/src/integer_list.rs +++ b/crates/primitives-traits/src/integer_list.rs @@ -9,8 +9,16 @@ use serde::{ Deserialize, Deserializer, Serialize, Serializer, }; -/// Uses Roaring Bitmaps to hold a list of integers. It provides really good compression with the -/// capability to access its elements without decoding it. +/// A data structure that uses Roaring Bitmaps to efficiently store a list of integers. +/// +/// This structure provides excellent compression while allowing direct access to individual +/// elements without the need for full decompression. +/// +/// Key features: +/// - Efficient compression: the underlying Roaring Bitmaps significantly reduce memory usage. +/// - Direct access: elements can be accessed or queried without needing to decode the entire list. +/// - [`RoaringTreemap`] backing: internally backed by [`RoaringTreemap`], which supports 64-bit +/// integers. #[derive(Clone, PartialEq, Default, Deref)] pub struct IntegerList(pub RoaringTreemap); @@ -22,12 +30,12 @@ impl fmt::Debug for IntegerList { } impl IntegerList { - /// Creates a new empty `IntegerList`. + /// Creates a new empty [`IntegerList`]. pub fn empty() -> Self { Self(RoaringTreemap::new()) } - /// Creates an `IntegerList` from a list of integers. + /// Creates an [`IntegerList`] from a list of integers. /// /// Returns an error if the list is not pre-sorted. pub fn new(list: impl IntoIterator) -> Result { @@ -36,7 +44,7 @@ impl IntegerList { .map_err(|_| IntegerListError::UnsortedInput) } - // Creates an IntegerList from a pre-sorted list of integers. + /// Creates an [`IntegerList`] from a pre-sorted list of integers. /// /// # Panics /// @@ -54,11 +62,7 @@ impl IntegerList { /// Pushes a new integer to the list. pub fn push(&mut self, value: u64) -> Result<(), IntegerListError> { - if self.0.push(value) { - Ok(()) - } else { - Err(IntegerListError::UnsortedInput) - } + self.0.push(value).then_some(()).ok_or(IntegerListError::UnsortedInput) } /// Clears the list. @@ -80,10 +84,9 @@ impl IntegerList { /// Deserializes a sequence of bytes into a proper [`IntegerList`]. pub fn from_bytes(data: &[u8]) -> Result { - Ok(Self( - RoaringTreemap::deserialize_from(data) - .map_err(|_| IntegerListError::FailedToDeserialize)?, - )) + RoaringTreemap::deserialize_from(data) + .map(Self) + .map_err(|_| IntegerListError::FailedToDeserialize) } } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 0cb90f9f9a..e808881970 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -14,30 +14,41 @@ extern crate alloc; /// Common constants. 
pub mod constants; + pub use constants::gas_units::{format_gas, format_gas_throughput}; /// Minimal account pub mod account; pub use account::{Account, Bytecode}; +pub mod receipt; +pub use receipt::Receipt; + +pub mod transaction; +pub use transaction::{signed::SignedTransaction, FullTransaction, Transaction}; + mod integer_list; pub use integer_list::{IntegerList, IntegerListError}; -pub mod request; -pub use request::{Request, Requests}; +pub mod block; +pub use block::{body::BlockBody, Block}; mod withdrawal; -pub use withdrawal::{Withdrawal, Withdrawals}; +pub use withdrawal::Withdrawals; mod error; pub use error::{GotExpected, GotExpectedBoxed}; mod log; -pub use log::{logs_bloom, Log, LogData}; +pub use alloy_primitives::{logs_bloom, Log, LogData}; mod storage; pub use storage::StorageEntry; +/// Transaction types +pub mod tx_type; +pub use tx_type::TxType; + /// Common header types pub mod header; diff --git a/crates/primitives-traits/src/log.rs b/crates/primitives-traits/src/log.rs index 6e6b473351..0b445aeeba 100644 --- a/crates/primitives-traits/src/log.rs +++ b/crates/primitives-traits/src/log.rs @@ -1,18 +1,3 @@ -use alloy_primitives::Bloom; -pub use alloy_primitives::{Log, LogData}; - -/// Calculate receipt logs bloom. -pub fn logs_bloom<'a>(logs: impl IntoIterator) -> Bloom { - let mut bloom = Bloom::ZERO; - for log in logs { - bloom.m3_2048(log.address.as_slice()); - for topic in log.topics() { - bloom.m3_2048(topic.as_slice()); - } - } - bloom -} - #[cfg(test)] mod tests { use alloy_primitives::{Address, Bytes, Log as AlloyLog, B256}; diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs new file mode 100644 index 0000000000..5c317dc49a --- /dev/null +++ b/crates/primitives-traits/src/receipt.rs @@ -0,0 +1,23 @@ +//! Receipt abstraction + +use alloy_consensus::TxReceipt; +use reth_codecs::Compact; +use serde::{Deserialize, Serialize}; + +/// Helper trait that unifies all behaviour required by receipt to support full node operations. +pub trait FullReceipt: Receipt + Compact {} + +impl FullReceipt for T where T: Receipt + Compact {} + +/// Abstraction of a receipt. +pub trait Receipt: + TxReceipt + + Default + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + Serialize + + for<'de> Deserialize<'de> +{ + /// Returns transaction type. + fn tx_type(&self) -> u8; +} diff --git a/crates/primitives-traits/src/request.rs b/crates/primitives-traits/src/request.rs deleted file mode 100644 index c08af3fd62..0000000000 --- a/crates/primitives-traits/src/request.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! EIP-7685 requests. - -use alloc::vec::Vec; -pub use alloy_consensus::Request; -use alloy_eips::eip7685::{Decodable7685, Encodable7685}; -use alloy_rlp::{Decodable, Encodable}; -use derive_more::{Deref, DerefMut, From, IntoIterator}; -use reth_codecs::{add_arbitrary_tests, Compact}; -use revm_primitives::Bytes; -use serde::{Deserialize, Serialize}; - -/// A list of EIP-7685 requests. 
-#[derive( - Debug, - Clone, - PartialEq, - Eq, - Default, - Hash, - Deref, - DerefMut, - From, - IntoIterator, - Serialize, - Deserialize, - Compact, -)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] -pub struct Requests(pub Vec); - -impl Encodable for Requests { - fn encode(&self, out: &mut dyn bytes::BufMut) { - let mut h = alloy_rlp::Header { list: true, payload_length: 0 }; - - let mut encoded = Vec::new(); - for req in &self.0 { - let encoded_req = req.encoded_7685(); - h.payload_length += encoded_req.len(); - encoded.push(Bytes::from(encoded_req)); - } - - h.encode(out); - for req in encoded { - req.encode(out); - } - } -} - -impl Decodable for Requests { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok( as Decodable>::decode(buf)? - .into_iter() - .map(|bytes| Request::decode_7685(&mut bytes.as_ref())) - .collect::, alloy_eips::eip7685::Eip7685Error>>() - .map(Self)?) - } -} diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs new file mode 100644 index 0000000000..a1ad81ab32 --- /dev/null +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -0,0 +1,61 @@ +//! Transaction abstraction + +use core::{fmt::Debug, hash::Hash}; + +use alloy_primitives::{TxKind, B256}; + +use reth_codecs::Compact; +use serde::{Deserialize, Serialize}; + +pub mod signed; + +#[allow(dead_code)] +/// Abstraction of a transaction. +pub trait Transaction: + Debug + + Default + + Clone + + Eq + + PartialEq + + Hash + + Serialize + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + for<'de> Deserialize<'de> + + alloy_consensus::Transaction + + MaybeArbitrary +{ + /// Heavy operation that return signature hash over rlp encoded transaction. + /// It is only for signature signing or signer recovery. + fn signature_hash(&self) -> B256; + + /// Gets the transaction's [`TxKind`], which is the address of the recipient or + /// [`TxKind::Create`] if the transaction is a contract creation. + fn kind(&self) -> TxKind; + + /// Returns true if the tx supports dynamic fees + fn is_dynamic_fee(&self) -> bool; + + /// Returns the effective gas price for the given base fee. + fn effective_gas_price(&self, base_fee: Option) -> u128; + + /// This encodes the transaction _without_ the signature, and is only suitable for creating a + /// hash intended for signing. + fn encode_without_signature(&self, out: &mut dyn bytes::BufMut); + + /// Calculates a heuristic for the in-memory size of the [Transaction]. + fn size(&self) -> usize; +} + +#[cfg(not(feature = "arbitrary"))] +/// Helper trait that requires arbitrary implementation if the feature is enabled. +pub trait MaybeArbitrary {} + +#[cfg(feature = "arbitrary")] +/// Helper trait that requires arbitrary implementation if the feature is enabled. +pub trait MaybeArbitrary: for<'a> arbitrary::Arbitrary<'a> {} + +/// Helper trait that unifies all behaviour required by transaction to support full node operations. +pub trait FullTransaction: Transaction + Compact {} + +impl FullTransaction for T where T: Transaction + Compact {} diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs new file mode 100644 index 0000000000..4c12437212 --- /dev/null +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -0,0 +1,71 @@ +//! API of a signed transaction. 
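// A minimal sketch (illustrative only, not part of this diff) of the concrete
// shape the `SignedTransaction` trait below abstracts over: a cached hash, the
// signature, and the unsigned payload. The struct name is hypothetical.
#[allow(dead_code)]
struct ExampleSignedTx<T> {
    hash: alloy_primitives::TxHash,  // cached keccak256 of the EIP-2718 encoding
    signature: alloy_primitives::Signature,
    transaction: T,                  // the unsigned transaction payload
}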
+ +use alloc::fmt; +use core::hash::Hash; + +use alloy_consensus::Transaction; +use alloy_eips::eip2718::{Decodable2718, Encodable2718}; +use alloy_primitives::{keccak256, Address, Signature, TxHash, B256}; +use revm_primitives::TxEnv; + +/// A signed transaction. +pub trait SignedTransaction: + fmt::Debug + + Clone + + PartialEq + + Eq + + Hash + + Send + + Sync + + serde::Serialize + + for<'a> serde::Deserialize<'a> + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + Encodable2718 + + Decodable2718 +{ + /// Transaction type that is signed. + type Transaction: Transaction; + + /// Returns reference to transaction hash. + fn tx_hash(&self) -> &TxHash; + + /// Returns reference to transaction. + fn transaction(&self) -> &Self::Transaction; + + /// Returns reference to signature. + fn signature(&self) -> &Signature; + + /// Recover signer from signature and hash. + /// + /// Returns `None` if the transaction's signature is invalid following [EIP-2](https://eips.ethereum.org/EIPS/eip-2), see also `reth_primitives::transaction::recover_signer`. + /// + /// Note: + /// + /// This can fail for some early ethereum mainnet transactions pre EIP-2, use + /// [`Self::recover_signer_unchecked`] if you want to recover the signer without ensuring that + /// the signature has a low `s` value. + fn recover_signer(&self) -> Option
<Address>; + + /// Recover signer from signature and hash _without ensuring that the signature has a low `s` + /// value_. + /// + /// Returns `None` if the transaction's signature is invalid, see also + /// `reth_primitives::transaction::recover_signer_unchecked`. + fn recover_signer_unchecked(&self) -> Option<Address>
; + + /// Create a new signed transaction from a transaction and its signature. + /// + /// This will also calculate the transaction hash using its encoding. + fn from_transaction_and_signature(transaction: Self::Transaction, signature: Signature) + -> Self; + + /// Calculate the transaction hash: an EIP-2718 encoded transaction does not contain an RLP + /// header and starts with the tx type. + fn recalculate_hash(&self) -> B256 { + keccak256(self.encoded_2718()) + } + + /// Fills [`TxEnv`] with an [`Address`] and transaction. + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); +} diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/tx_type.rs new file mode 100644 index 0000000000..aebf7584fe --- /dev/null +++ b/crates/primitives-traits/src/tx_type.rs @@ -0,0 +1,28 @@ +use alloy_eips::eip2718::Eip2718Error; +use alloy_primitives::{U64, U8}; +use alloy_rlp::{Decodable, Encodable}; +use core::fmt::{Debug, Display}; + +/// Trait representing the behavior of a transaction type. +pub trait TxType: + Into + + Into + + PartialEq + + Eq + + PartialEq + + TryFrom + + TryFrom + + TryFrom + + From + + Debug + + Display + + Clone + + Copy + + Default + + Encodable + + Decodable + + Send + + Sync + + 'static +{ +} diff --git a/crates/primitives-traits/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs index 21b0df2b3c..860281b7cb 100644 --- a/crates/primitives-traits/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -1,13 +1,10 @@ //! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types. use alloc::vec::Vec; +use alloy_eips::eip4895::Withdrawal; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use derive_more::{AsRef, Deref, DerefMut, From, IntoIterator}; use reth_codecs::{add_arbitrary_tests, Compact}; - -/// Re-export from `alloy_eips`. -#[doc(inline)] -pub use alloy_eips::eip4895::Withdrawal; use serde::{Deserialize, Serialize}; /// Represents a collection of Withdrawals. 
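// A minimal sketch (illustrative only, not part of this diff) of the kind of
// type the new `TxType` trait above abstracts over: a copyable, field-less
// enum that converts to its EIP-2718 type byte. The enum and impl are
// hypothetical.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
enum ExampleTxType {
    #[default]
    Legacy = 0,
    Eip2930 = 1,
    Eip1559 = 2,
}

impl From<ExampleTxType> for u8 {
    fn from(t: ExampleTxType) -> Self {
        // field-less enums with explicit discriminants cast directly to u8
        t as u8
    }
}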
diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 90996f1857..29f8598349 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -20,10 +20,6 @@ reth-trie-common.workspace = true revm = { workspace = true, optional = true } revm-primitives = { workspace = true, features = ["serde"] } reth-codecs = { workspace = true, optional = true } -reth-chainspec = { workspace = true, optional = true } - -# op-reth -reth-optimism-chainspec = { workspace = true, optional = true } # ethereum alloy-consensus.workspace = true @@ -36,14 +32,15 @@ alloy-eips = { workspace = true, features = ["serde"] } # optimism op-alloy-rpc-types = { workspace = true, optional = true } op-alloy-consensus = { workspace = true, features = [ - "arbitrary", + "arbitrary", + "serde", ], optional = true } # crypto secp256k1 = { workspace = true, features = [ - "global-context", - "recovery", - "rand", + "global-context", + "recovery", + "rand", ], optional = true } k256.workspace = true # for eip-4844 @@ -62,12 +59,11 @@ zstd = { workspace = true, features = ["experimental"], optional = true } # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } -proptest = { workspace = true, optional = true } [dev-dependencies] # eth reth-chainspec.workspace = true -reth-codecs.workspace = true +reth-codecs = { workspace = true, features = ["test-utils"] } reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-testing-utils.workspace = true revm-primitives = { workspace = true, features = ["arbitrary"] } @@ -78,7 +74,6 @@ alloy-genesis.workspace = true arbitrary = { workspace = true, features = ["derive"] } assert_matches.workspace = true bincode.workspace = true -modular-bitfield.workspace = true proptest-arbitrary-interop.workspace = true proptest.workspace = true rand.workspace = true @@ -87,39 +82,66 @@ test-fuzz.workspace = true criterion.workspace = true pprof = { workspace = true, features = [ - "flamegraph", - "frame-pointer", - "criterion", + "flamegraph", + "frame-pointer", + "criterion", ] } [features] default = ["c-kzg", "alloy-compat", "std", "reth-codec", "secp256k1"] -std = ["reth-primitives-traits/std"] +std = [ + "reth-primitives-traits/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "alloy-serde?/std", + "k256/std", + "once_cell/std", + "revm-primitives/std", + "secp256k1?/std", + "serde/std", + "revm?/std" +] reth-codec = ["dep:reth-codecs", "dep:zstd", "dep:modular-bitfield", "std"] -asm-keccak = ["alloy-primitives/asm-keccak"] +asm-keccak = [ + "alloy-primitives/asm-keccak", + "revm-primitives/asm-keccak", + "revm?/asm-keccak" +] arbitrary = [ - "dep:arbitrary", - "dep:proptest", - "alloy-eips/arbitrary", - "rand", - "reth-codec", - "reth-ethereum-forks/arbitrary", - "reth-primitives-traits/arbitrary", - "revm-primitives/arbitrary", - "secp256k1", + "dep:arbitrary", + "alloy-eips/arbitrary", + "rand", + "reth-codec", + "reth-ethereum-forks/arbitrary", + "reth-primitives-traits/arbitrary", + "revm-primitives/arbitrary", + "secp256k1", + "reth-chainspec/arbitrary", + "reth-trie-common/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "alloy-rpc-types?/arbitrary", + "alloy-serde?/arbitrary", + "op-alloy-consensus?/arbitrary", + "op-alloy-rpc-types?/arbitrary", + "reth-codecs?/arbitrary", + "revm?/arbitrary" ] secp256k1 = ["dep:secp256k1"] c-kzg = [ - "dep:c-kzg", - "alloy-consensus/kzg", - "alloy-eips/kzg", - "revm-primitives/c-kzg", + 
"dep:c-kzg", + "alloy-consensus/kzg", + "alloy-eips/kzg", + "revm-primitives/c-kzg", ] optimism = [ - "dep:op-alloy-consensus", - "dep:reth-optimism-chainspec", - "reth-codecs?/optimism", - "revm-primitives/optimism", + "dep:op-alloy-consensus", + "reth-codecs?/optimism", + "revm-primitives/optimism", + "reth-chainspec/optimism", + "revm?/optimism" ] opbnb = [ "revm/opbnb", @@ -130,16 +152,23 @@ bsc = [ "revm/bsc", ] alloy-compat = [ - "dep:alloy-rpc-types", - "dep:alloy-serde", - "dep:op-alloy-rpc-types", + "dep:alloy-rpc-types", + "dep:alloy-serde", + "dep:op-alloy-rpc-types", +] +test-utils = [ + "reth-primitives-traits/test-utils", + "reth-chainspec/test-utils", + "reth-codecs?/test-utils", + "reth-trie-common/test-utils", + "revm?/test-utils" ] -test-utils = ["reth-primitives-traits/test-utils"] serde-bincode-compat = [ - "alloy-consensus/serde-bincode-compat", - "op-alloy-consensus?/serde-bincode-compat", - "reth-primitives-traits/serde-bincode-compat", - "serde_with", + "alloy-consensus/serde-bincode-compat", + "op-alloy-consensus?/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", + "serde_with", + "alloy-eips/serde-bincode-compat", ] [[bench]] diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 09267a615f..8c28139cb7 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -1,12 +1,14 @@ //! Common conversions from alloy types. use crate::{ - constants::EMPTY_TRANSACTIONS, transaction::extract_chain_id, Block, BlockBody, Signature, - Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, + transaction::extract_chain_id, Block, BlockBody, Transaction, TransactionSigned, + TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, }; use alloc::{string::ToString, vec::Vec}; -use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}; -use alloy_primitives::{Parity, TxKind}; +use alloy_consensus::{ + constants::EMPTY_TRANSACTIONS, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxLegacy, +}; +use alloy_primitives::{Parity, Signature, TxKind}; use alloy_rlp::Error as RlpError; use alloy_serde::WithOtherFields; use op_alloy_rpc_types as _; @@ -49,7 +51,6 @@ impl TryFrom Option { - None - } -} - /// Ethereum full block. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. @@ -123,7 +107,6 @@ mod block_rlp { transactions: Vec, ommers: Vec
, withdrawals: Option, - requests: Option, sidecars: Option, } @@ -134,22 +117,18 @@ mod block_rlp { transactions: &'a Vec, ommers: &'a Vec
, withdrawals: Option<&'a Withdrawals>, - requests: Option<&'a Requests>, sidecars: Option<&'a BlobSidecars>, } impl<'a> From<&'a Block> for HelperRef<'a, Header> { fn from(block: &'a Block) -> Self { - let Block { - header, - body: BlockBody { transactions, ommers, withdrawals, requests, sidecars }, - } = block; + let Block { header, body: BlockBody { transactions, ommers, withdrawals, sidecars } } = + block; Self { header, transactions, ommers, withdrawals: withdrawals.as_ref(), - requests: requests.as_ref(), sidecars: sidecars.as_ref(), } } @@ -159,14 +138,13 @@ mod block_rlp { fn from(block: &'a SealedBlock) -> Self { let SealedBlock { header, - body: BlockBody { transactions, ommers, withdrawals, requests, sidecars }, + body: BlockBody { transactions, ommers, withdrawals, sidecars }, } = block; Self { header, transactions, ommers, withdrawals: withdrawals.as_ref(), - requests: requests.as_ref(), sidecars: sidecars.as_ref(), } } @@ -174,48 +152,40 @@ mod block_rlp { impl Decodable for Block { fn decode(b: &mut &[u8]) -> alloy_rlp::Result { - let Helper { header, transactions, ommers, withdrawals, requests, sidecars } = - Helper::decode(b)?; - Ok(Self { - header, - body: BlockBody { transactions, ommers, withdrawals, requests, sidecars }, - }) + let Helper { header, transactions, ommers, withdrawals, sidecars } = Helper::decode(b)?; + Ok(Self { header, body: BlockBody { transactions, ommers, withdrawals, sidecars } }) } } impl Decodable for SealedBlock { fn decode(b: &mut &[u8]) -> alloy_rlp::Result { - let Helper { header, transactions, ommers, withdrawals, requests, sidecars } = - Helper::decode(b)?; - Ok(Self { - header, - body: BlockBody { transactions, ommers, withdrawals, requests, sidecars }, - }) + let Helper { header, transactions, ommers, withdrawals, sidecars } = Helper::decode(b)?; + Ok(Self { header, body: BlockBody { transactions, ommers, withdrawals, sidecars } }) } } impl Encodable for Block { - fn length(&self) -> usize { - let helper: HelperRef<'_, _> = self.into(); - helper.length() - } - fn encode(&self, out: &mut dyn bytes::BufMut) { let helper: HelperRef<'_, _> = self.into(); helper.encode(out) } - } - impl Encodable for SealedBlock { fn length(&self) -> usize { let helper: HelperRef<'_, _> = self.into(); helper.length() } + } + impl Encodable for SealedBlock { fn encode(&self, out: &mut dyn bytes::BufMut) { let helper: HelperRef<'_, _> = self.into(); helper.encode(out) } + + fn length(&self) -> usize { + let helper: HelperRef<'_, _> = self.into(); + helper.length() + } } } @@ -232,14 +202,7 @@ impl<'a> arbitrary::Arbitrary<'a> for Block { Ok(Self { header: u.arbitrary()?, - body: BlockBody { - transactions, - ommers, - // for now just generate empty requests, see HACK above - requests: u.arbitrary()?, - withdrawals: u.arbitrary()?, - sidecars: None, - }, + body: BlockBody { transactions, ommers, withdrawals: u.arbitrary()?, sidecars: None }, }) } } @@ -591,8 +554,6 @@ pub struct BlockBody { // only for bsc /// Tx sidecars for the block. pub sidecars: Option, - /// Requests in the block. - pub requests: Option, } impl BlockBody { @@ -617,12 +578,6 @@ impl BlockBody { self.withdrawals.as_ref().map(|w| crate::proofs::calculate_withdrawals_root(w)) } - /// Calculate the requests root for the block body, if requests exist. If there are no - /// requests, this will return `None`. 
- pub fn calculate_requests_root(&self) -> Option { - self.requests.as_ref().map(|r| crate::proofs::calculate_requests_root(&r.0)) - } - /// Recover signer addresses for all transactions in the block body. pub fn recover_signers(&self) -> Option> { TransactionSigned::recover_signers(&self.transactions, self.transactions.len()) @@ -695,7 +650,6 @@ impl From for BlockBody { ommers: block.body.ommers, withdrawals: block.body.withdrawals, sidecars: block.body.sidecars, - requests: block.body.requests, } } } @@ -717,14 +671,7 @@ impl<'a> arbitrary::Arbitrary<'a> for BlockBody { }) .collect::>>()?; - // for now just generate empty requests, see HACK above - Ok(Self { - transactions, - ommers, - sidecars: None, - requests: None, - withdrawals: u.arbitrary()?, - }) + Ok(Self { transactions, ommers, sidecars: None, withdrawals: u.arbitrary()? }) } } @@ -734,9 +681,7 @@ pub(super) mod serde_bincode_compat { use alloc::{borrow::Cow, vec::Vec}; use alloy_consensus::serde_bincode_compat::Header; use alloy_primitives::Address; - use reth_primitives_traits::{ - serde_bincode_compat::SealedHeader, BlobSidecars, Requests, Withdrawals, - }; + use reth_primitives_traits::{serde_bincode_compat::SealedHeader, BlobSidecars, Withdrawals}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -762,7 +707,6 @@ pub(super) mod serde_bincode_compat { transactions: Vec>, ommers: Vec>, withdrawals: Cow<'a, Option>, - requests: Cow<'a, Option>, sidecars: Cow<'a, Option>, } @@ -772,7 +716,6 @@ pub(super) mod serde_bincode_compat { transactions: value.transactions.iter().map(Into::into).collect(), ommers: value.ommers.iter().map(Into::into).collect(), withdrawals: Cow::Borrowed(&value.withdrawals), - requests: Cow::Borrowed(&value.requests), sidecars: Cow::Borrowed(&value.sidecars), } } @@ -784,7 +727,6 @@ pub(super) mod serde_bincode_compat { transactions: value.transactions.into_iter().map(Into::into).collect(), ommers: value.ommers.into_iter().map(Into::into).collect(), withdrawals: value.withdrawals.into_owned(), - requests: value.requests.into_owned(), sidecars: value.sidecars.into_owned(), } } @@ -989,8 +931,11 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { - use super::{BlockNumberOrTag::*, *}; - use alloy_eips::eip1898::HexStringMissingPrefixError; + use super::*; + use alloy_eips::{ + eip1898::HexStringMissingPrefixError, BlockId, BlockNumberOrTag, BlockNumberOrTag::*, + RpcBlockHash, + }; use alloy_primitives::hex_literal::hex; use alloy_rlp::{Decodable, Encodable}; use std::str::FromStr; @@ -1182,4 +1127,13 @@ mod tests { let block = block.seal_slow(); assert_eq!(sealed, block.hash()); } + + #[test] + fn empty_block_rlp() { + let body = BlockBody::default(); + let mut buf = Vec::new(); + body.encode(&mut buf); + let decoded = BlockBody::decode(&mut buf.as_slice()).unwrap(); + assert_eq!(body, decoded); + } } diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs deleted file mode 100644 index 14e892adfb..0000000000 --- a/crates/primitives/src/constants/eip4844.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) protocol constants and utils for shard Blob Transactions. 
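// Quick arithmetic sketch for the blob-gas helpers that now live in
// `alloy_eips::eip4844` (the local re-exports are removed below): blob gas
// used is the number of blobs times DATA_GAS_PER_BLOB, i.e. 131_072 (2^17)
// per blob. The function name here is illustrative.
use alloy_eips::eip4844::DATA_GAS_PER_BLOB;

fn example_blob_gas_used(num_blobs: u64) -> u64 {
    num_blobs * DATA_GAS_PER_BLOB
}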
- -pub use alloy_eips::eip4844::{ - BLOB_GASPRICE_UPDATE_FRACTION, BLOB_TX_MIN_BLOB_GASPRICE, DATA_GAS_PER_BLOB, - FIELD_ELEMENTS_PER_BLOB, FIELD_ELEMENT_BYTES, MAX_BLOBS_PER_BLOCK, MAX_DATA_GAS_PER_BLOCK, - TARGET_BLOBS_PER_BLOCK, TARGET_DATA_GAS_PER_BLOCK, VERSIONED_HASH_VERSION_KZG, -}; diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs index fd1dc15862..09c488cc25 100644 --- a/crates/primitives/src/constants/mod.rs +++ b/crates/primitives/src/constants/mod.rs @@ -1,6 +1,3 @@ //! Ethereum protocol-related constants pub use reth_primitives_traits::constants::*; - -/// [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) constants. -pub mod eip4844; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 1e067fed47..0d8fdda581 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -34,23 +34,16 @@ pub mod parlia; pub mod transaction; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; -pub use block::{ - Block, BlockBody, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, BlockWithSenders, - ForkBlock, RpcBlockHash, SealedBlock, SealedBlockWithSenders, -}; +pub use block::{Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockWithSenders}; #[cfg(feature = "reth-codec")] pub use compression::*; -pub use constants::{ - DEV_GENESIS_HASH, EMPTY_OMMER_ROOT_HASH, HOLESKY_GENESIS_HASH, KECCAK_EMPTY, - MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, -}; +pub use constants::HOLESKY_GENESIS_HASH; pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; pub use reth_primitives_traits::{ logs_bloom, Account, BlobSidecar, BlobSidecars, Bytecode, GotExpected, GotExpectedBoxed, - Header, HeaderError, Log, LogData, Request, Requests, SealedHeader, StorageEntry, Withdrawal, - Withdrawals, + Header, HeaderError, Log, LogData, SealedHeader, StorageEntry, Withdrawals, }; pub use static_file::StaticFileSegment; @@ -59,15 +52,10 @@ pub use transaction::{ PooledTransactionsElementEcRecovered, }; -#[cfg(feature = "c-kzg")] -pub use transaction::BlobTransactionValidationError; - pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, - InvalidTransactionError, Signature, Transaction, TransactionMeta, TransactionSigned, + InvalidTransactionError, Transaction, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxHashOrNumber, TxType, - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, }; // Re-exports @@ -96,7 +84,5 @@ pub mod serde_bincode_compat { } // to make lint happy -#[cfg(feature = "bsc")] -use reth_chainspec as _; #[cfg(any(feature = "bsc", feature = "opbnb"))] use revm as _; diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index a12a5d6be8..000244d2c5 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,11 +1,9 @@ //! Helper function for calculating Merkle proofs and hashes. 
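// Sketch of the pattern this module's root helpers follow (mirrors the
// `calculate_transaction_root`/`calculate_receipt_root` bodies below; the
// function name here is hypothetical): items are keyed by their index in an
// ordered trie and each one is serialized by a caller-supplied encoder
// closure.
use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::B256;
use reth_trie_common::root::ordered_trie_root_with_encoder;

fn example_2718_root<T: Encodable2718>(items: &[T]) -> B256 {
    ordered_trie_root_with_encoder(items, |item, buf| item.encode_2718(buf))
}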
-use crate::{ - constants::EMPTY_OMMER_ROOT_HASH, Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, - Request, TransactionSigned, Withdrawal, -}; +use crate::{Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, TransactionSigned}; use alloc::vec::Vec; -use alloy_eips::{eip2718::Encodable2718, eip7685::Encodable7685}; +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawal}; use alloy_primitives::{keccak256, B256}; use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; @@ -29,13 +27,6 @@ pub fn calculate_receipt_root(receipts: &[ReceiptWithBloom]) -> B256 { ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) } -/// Calculate [EIP-7685](https://eips.ethereum.org/EIPS/eip-7685) requests root. -/// -/// NOTE: The requests are encoded as `id + request` -pub fn calculate_requests_root(requests: &[Request]) -> B256 { - ordered_trie_root_with_encoder(requests, |item, buf| item.encode_7685(buf)) -} - /// Calculates the receipt root for a header. pub fn calculate_receipt_root_ref(receipts: &[ReceiptWithBloomRef<'_>]) -> B256 { ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) @@ -65,7 +56,8 @@ pub fn calculate_ommers_root(ommers: &[Header]) -> B256 { #[cfg(test)] mod tests { use super::*; - use crate::{constants::EMPTY_ROOT_HASH, Block}; + use crate::Block; + use alloy_consensus::EMPTY_ROOT_HASH; use alloy_genesis::GenesisAccount; use alloy_primitives::{b256, hex_literal::hex, Address, U256}; use alloy_rlp::Decodable; diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index cfd831ed0f..e60bddb9d7 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,24 +1,23 @@ #[cfg(feature = "reth-codec")] use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; -use crate::{ - logs_bloom, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, - EIP7702_TX_TYPE_ID, -}; +use crate::TxType; use alloc::{vec, vec::Vec}; -use alloy_primitives::{Bloom, Bytes, Log, B256}; +use alloy_consensus::constants::{ + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, +}; +use alloy_eips::eip2718::Encodable2718; +use alloy_primitives::{Bloom, Log, B256}; use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; use core::{cmp::Ordering, ops::Deref}; use derive_more::{DerefMut, From, IntoIterator}; -#[cfg(feature = "reth-codec")] -use reth_codecs::{Compact, CompactZstd}; use serde::{Deserialize, Serialize}; /// Receipt containing result of transaction execution. #[derive( Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable, Serialize, Deserialize, )] -#[cfg_attr(any(test, feature = "reth-codec"), derive(CompactZstd))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::CompactZstd))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests)] #[rlp(trailing)] pub struct Receipt { @@ -49,7 +48,7 @@ impl Receipt { /// Calculates [`Log`]'s bloom filter. this is slow operation and [`ReceiptWithBloom`] can /// be used to cache this value. pub fn bloom_slow(&self) -> Bloom { - logs_bloom(self.logs.iter()) + alloy_primitives::logs_bloom(self.logs.iter()) } /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloom`] container @@ -90,7 +89,7 @@ impl Receipts { self.receipt_vec.len() } - /// Returns `true` if the `Receipts` vector is empty. 
+ /// Returns true if the `Receipts` vector is empty. pub fn is_empty(&self) -> bool { self.receipt_vec.is_empty() } @@ -130,7 +129,7 @@ impl From for ReceiptWithBloom { /// [`Receipt`] with calculated bloom filter. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[cfg_attr(any(test, feature = "reth-codec"), derive(Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct ReceiptWithBloom { /// Bloom filter build from logs. @@ -204,14 +203,20 @@ impl<'a> arbitrary::Arbitrary<'a> for Receipt { } } -impl ReceiptWithBloom { - /// Returns the enveloped encoded receipt. - /// - /// See also [`ReceiptWithBloom::encode_enveloped`] - pub fn envelope_encoded(&self) -> Bytes { - let mut buf = Vec::new(); - self.encode_enveloped(&mut buf); - buf.into() +impl Encodable2718 for ReceiptWithBloom { + fn type_flag(&self) -> Option { + match self.receipt.tx_type { + TxType::Legacy => None, + tx_type => Some(tx_type as u8), + } + } + + fn encode_2718_len(&self) -> usize { + let encoder = self.as_encoder(); + match self.receipt.tx_type { + TxType::Legacy => encoder.receipt_length(), + _ => 1 + encoder.receipt_length(), // 1 byte for the type prefix + } } /// Encodes the receipt into its "raw" format. @@ -223,10 +228,18 @@ impl ReceiptWithBloom { /// of the receipt: /// - EIP-1559, 2930 and 4844 transactions: `tx-type || rlp([status, cumulativeGasUsed, /// logsBloom, logs])` - pub fn encode_enveloped(&self, out: &mut dyn bytes::BufMut) { + fn encode_2718(&self, out: &mut dyn BufMut) { self.encode_inner(out, false) } + fn encoded_2718(&self) -> Vec { + let mut out = vec![]; + self.encode_2718(&mut out); + out + } +} + +impl ReceiptWithBloom { /// Encode receipt with or without the header data. 
pub fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { self.as_encoder().encode_inner(out, with_header) @@ -501,7 +514,21 @@ impl Encodable for ReceiptWithBloomEncoder<'_> { #[cfg(test)] mod tests { use super::*; + use crate::revm_primitives::Bytes; use alloy_primitives::{address, b256, bytes, hex_literal::hex}; + use reth_codecs::Compact; + + #[test] + fn test_decode_receipt() { + #[cfg(not(feature = "optimism"))] + reth_codecs::test_utils::test_decode::(&hex!( + "c428b52ffd23fc42696156b10200f034792b6a94c3850215c2fef7aea361a0c31b79d9a32652eefc0d4e2e730036061cff7344b6fc6132b50cda0ed810a991ae58ef013150c12b2522533cb3b3a8b19b7786a8b5ff1d3cdc84225e22b02def168c8858df" + )); + #[cfg(feature = "optimism")] + reth_codecs::test_utils::test_decode::(&hex!( + "c30328b52ffd23fc426961a00105007eb0042307705a97e503562eacf2b95060cce9de6de68386b6c155b73a9650021a49e2f8baad17f30faff5899d785c4c0873e45bc268bcf07560106424570d11f9a59e8f3db1efa4ceec680123712275f10d92c3411e1caaa11c7c5d591bc11487168e09934a9986848136da1b583babf3a7188e3aed007a1520f1cf4c1ca7d3482c6c28d37c298613c70a76940008816c4c95644579fd08471dc34732fd0f24" + )); + } // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 #[test] @@ -649,4 +676,50 @@ mod tests { let (decoded, _) = Receipt::from_compact(&data[..], data.len()); assert_eq!(decoded, receipt); } + + #[test] + fn test_encode_2718_length() { + let receipt = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 21000, + logs: vec![], + #[cfg(feature = "optimism")] + deposit_nonce: None, + #[cfg(feature = "optimism")] + deposit_receipt_version: None, + }, + bloom: Bloom::default(), + }; + + let encoded = receipt.encoded_2718(); + assert_eq!( + encoded.len(), + receipt.encode_2718_len(), + "Encoded length should match the actual encoded data length" + ); + + // Test for legacy receipt as well + let legacy_receipt = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Legacy, + success: true, + cumulative_gas_used: 21000, + logs: vec![], + #[cfg(feature = "optimism")] + deposit_nonce: None, + #[cfg(feature = "optimism")] + deposit_receipt_version: None, + }, + bloom: Bloom::default(), + }; + + let legacy_encoded = legacy_receipt.encoded_2718(); + assert_eq!( + legacy_encoded.len(), + legacy_receipt.encode_2718_len(), + "Encoded length for legacy receipt should match the actual encoded data length" + ); + } } diff --git a/crates/primitives/src/traits/block/mod.rs b/crates/primitives/src/traits/block/mod.rs deleted file mode 100644 index 451a54c345..0000000000 --- a/crates/primitives/src/traits/block/mod.rs +++ /dev/null @@ -1,137 +0,0 @@ -//! Block abstraction. - -pub mod body; - -use alloc::fmt; -use core::ops; - -use alloy_consensus::BlockHeader; -use alloy_primitives::{Address, Sealable, B256}; - -use crate::{traits::BlockBody, BlockWithSenders, SealedBlock, SealedHeader}; - -/// Abstraction of block data type. -pub trait Block: - fmt::Debug - + Clone - + PartialEq - + Eq - + Default - + serde::Serialize - + for<'a> serde::Deserialize<'a> - + From<(Self::Header, Self::Body)> - + Into<(Self::Header, Self::Body)> -{ - /// Header part of the block. - type Header: BlockHeader + Sealable; - - /// The block's body contains the transactions in the block. - type Body: BlockBody; - - /// Returns reference to [`BlockHeader`] type. - fn header(&self) -> &Self::Header; - - /// Returns reference to [`BlockBody`] type. 
- fn body(&self) -> &Self::Body; - - /// Calculate the header hash and seal the block so that it can't be changed. - fn seal_slow(self) -> SealedBlock { - let (header, body) = self.into(); - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedBlock { header: SealedHeader::new(header, seal), body } - } - - /// Seal the block with a known hash. - /// - /// WARNING: This method does not perform validation whether the hash is correct. - fn seal(self, hash: B256) -> SealedBlock { - let (header, body) = self.into(); - SealedBlock { header: SealedHeader::new(header, hash), body } - } - - /// Expensive operation that recovers transaction signer. See - /// [`SealedBlockWithSenders`](reth_primitives::SealedBlockWithSenders). - fn senders(&self) -> Option> { - self.body().recover_signers() - } - - /// Transform into a [`BlockWithSenders`]. - /// - /// # Panics - /// - /// If the number of senders does not match the number of transactions in the block - /// and the signer recovery for one of the transactions fails. - /// - /// Note: this is expected to be called with blocks read from disk. - #[track_caller] - fn with_senders_unchecked(self, senders: Vec
) -> BlockWithSenders { - self.try_with_senders_unchecked(senders).expect("stored block is valid") - } - - /// Transform into a [`BlockWithSenders`] using the given senders. - /// - /// If the number of senders does not match the number of transactions in the block, this falls - /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// See also [`TransactionSigned::recover_signer_unchecked`] - /// - /// Returns an error if a signature is invalid. - #[track_caller] - fn try_with_senders_unchecked( - self, - senders: Vec
, - ) -> Result, Self> { - let senders = if self.body().transactions().len() == senders.len() { - senders - } else { - let Some(senders) = self.body().recover_signers() else { return Err(self) }; - senders - }; - - Ok(BlockWithSenders { block: self, senders }) - } - - /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained - /// transactions. - /// - /// Returns `None` if a transaction is invalid. - fn with_recovered_senders(self) -> Option> { - let senders = self.senders()?; - Some(BlockWithSenders { block: self, senders }) - } - - /// Calculates a heuristic for the in-memory size of the [`Block`]. - fn size(&self) -> usize; -} - -impl Block for T -where - T: ops::Deref - + fmt::Debug - + Clone - + PartialEq - + Eq - + Default - + serde::Serialize - + for<'a> serde::Deserialize<'a> - + From<(::Header, ::Body)> - + Into<(::Header, ::Body)>, -{ - type Header = ::Header; - type Body = ::Body; - - #[inline] - fn header(&self) -> &Self::Header { - self.deref().header() - } - - #[inline] - fn body(&self) -> &Self::Body { - self.deref().body() - } - - #[inline] - fn size(&self) -> usize { - self.deref().size() - } -} diff --git a/crates/primitives/src/traits/mod.rs b/crates/primitives/src/traits/mod.rs index 8c84c67297..49fb73ea55 100644 --- a/crates/primitives/src/traits/mod.rs +++ b/crates/primitives/src/traits/mod.rs @@ -1,7 +1,9 @@ //! Abstractions of primitive data types pub mod block; +pub mod transaction; pub use block::{body::BlockBody, Block}; +pub use transaction::signed::SignedTransaction; pub use alloy_consensus::BlockHeader; diff --git a/crates/primitives/src/transaction/compat.rs b/crates/primitives/src/transaction/compat.rs index 81281186f6..883c89c45f 100644 --- a/crates/primitives/src/transaction/compat.rs +++ b/crates/primitives/src/transaction/compat.rs @@ -1,5 +1,7 @@ use crate::{Transaction, TransactionSigned}; use alloy_primitives::{Address, TxKind, U256}; +#[cfg(feature = "optimism")] +use op_alloy_consensus::DepositTransaction; use revm_primitives::{AuthorizationList, TxEnv}; /// Implements behaviour to fill a [`TxEnv`] from another transaction. diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 075bdb8fb3..b3c8260fac 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,42 +1,41 @@ //! Transaction types. 
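// Context sketch for the `LazyLock` change below: on `std` builds,
// `once_cell::sync::Lazy` is swapped for `std::sync::LazyLock` (stable since
// Rust 1.80) with identical lazy-initialization semantics. This mirrors the
// `PARALLEL_SENDER_RECOVERY_THRESHOLD` static in this file; the static name
// here is illustrative.
use std::sync::LazyLock;

static EXAMPLE_THRESHOLD: LazyLock<usize> =
    LazyLock::new(|| match rayon::current_num_threads() {
        0..=1 => usize::MAX, // effectively disables the parallel path
        2..=8 => 10,
        _ => 5,
    });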
-use crate::BlockHashOrNumber; -use alloy_eips::eip7702::SignedAuthorization; -use alloy_primitives::{keccak256, Address, TxKind, B256, U256}; - -use alloy_consensus::{SignableTransaction, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; +#[cfg(any(test, feature = "reth-codec"))] +use alloy_consensus::constants::{EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}; +use alloy_consensus::{ + SignableTransaction, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, +}; use alloy_eips::{ + eip1898::BlockHashOrNumber, eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, eip2930::AccessList, + eip7702::SignedAuthorization, }; -use alloy_primitives::{Bytes, TxHash}; +use alloy_primitives::{keccak256, Address, Bytes, ChainId, Signature, TxHash, TxKind, B256, U256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use core::mem; use derive_more::{AsRef, Deref}; -use once_cell::sync::Lazy; +use once_cell as _; +#[cfg(not(feature = "std"))] +use once_cell::sync::Lazy as LazyLock; +#[cfg(feature = "optimism")] +use op_alloy_consensus::DepositTransaction; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use serde::{Deserialize, Serialize}; use signature::{decode_with_eip155_chain_id, with_eip155_parity}; +#[cfg(feature = "std")] +use std::sync::LazyLock; pub use error::{ InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError, }; pub use meta::TransactionMeta; pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; -#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] -pub use sidecar::generate_blob_sidecar; -#[cfg(feature = "c-kzg")] -pub use sidecar::BlobTransactionValidationError; pub use sidecar::{BlobTransaction, BlobTransactionSidecar}; pub use compat::FillTxEnv; -pub use signature::{ - extract_chain_id, legacy_parity, recover_signer, recover_signer_unchecked, Signature, -}; -pub use tx_type::{ - TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, -}; +pub use signature::{extract_chain_id, legacy_parity, recover_signer, recover_signer_unchecked}; +pub use tx_type::TxType; pub use variant::TransactionSignedVariant; pub(crate) mod access_list; @@ -53,8 +52,6 @@ mod variant; #[cfg(feature = "optimism")] use op_alloy_consensus::TxDeposit; #[cfg(feature = "optimism")] -use reth_optimism_chainspec::optimism_deposit_tx_signature; -#[cfg(feature = "optimism")] pub use tx_type::DEPOSIT_TX_TYPE_ID; #[cfg(any(test, feature = "reth-codec"))] use tx_type::{ @@ -62,18 +59,17 @@ use tx_type::{ COMPACT_IDENTIFIER_LEGACY, }; -#[cfg(test)] -use reth_codecs::Compact; - use alloc::vec::Vec; +use reth_primitives_traits::SignedTransaction; +use revm_primitives::{AuthorizationList, TxEnv}; /// Either a transaction hash or number. pub type TxHashOrNumber = BlockHashOrNumber; // Expected number of transactions where we can expect a speed-up by recovering the senders in // parallel. 
-pub(crate) static PARALLEL_SENDER_RECOVERY_THRESHOLD: Lazy = - Lazy::new(|| match rayon::current_num_threads() { +pub(crate) static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock = + LazyLock::new(|| match rayon::current_num_threads() { 0..=1 => usize::MAX, 2..=8 => 10, _ => 5, @@ -135,6 +131,31 @@ pub enum Transaction { Deposit(TxDeposit), } +#[cfg(feature = "optimism")] +impl DepositTransaction for Transaction { + fn source_hash(&self) -> Option { + match self { + Self::Deposit(tx) => tx.source_hash(), + _ => None, + } + } + fn mint(&self) -> Option { + match self { + Self::Deposit(tx) => tx.mint(), + _ => None, + } + } + fn is_system_transaction(&self) -> bool { + match self { + Self::Deposit(tx) => tx.is_system_transaction(), + _ => false, + } + } + fn is_deposit(&self) -> bool { + matches!(self, Self::Deposit(_)) + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for Transaction { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { @@ -193,19 +214,6 @@ impl Transaction { } } - /// Get `chain_id`. - pub const fn chain_id(&self) -> Option { - match self { - Self::Legacy(TxLegacy { chain_id, .. }) => *chain_id, - Self::Eip2930(TxEip2930 { chain_id, .. }) | - Self::Eip1559(TxEip1559 { chain_id, .. }) | - Self::Eip4844(TxEip4844 { chain_id, .. }) | - Self::Eip7702(TxEip7702 { chain_id, .. }) => Some(*chain_id), - #[cfg(feature = "optimism")] - Self::Deposit(_) => None, - } - } - /// Sets the transaction's chain id to the provided value. pub fn set_chain_id(&mut self, chain_id: u64) { match self { @@ -255,33 +263,6 @@ impl Transaction { } } - /// Gets the transaction's value field. - pub const fn value(&self) -> U256 { - *match self { - Self::Legacy(TxLegacy { value, .. }) | - Self::Eip2930(TxEip2930 { value, .. }) | - Self::Eip1559(TxEip1559 { value, .. }) | - Self::Eip4844(TxEip4844 { value, .. }) | - Self::Eip7702(TxEip7702 { value, .. }) => value, - #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { value, .. }) => value, - } - } - - /// Get the transaction's nonce. - pub const fn nonce(&self) -> u64 { - match self { - Self::Legacy(TxLegacy { nonce, .. }) | - Self::Eip2930(TxEip2930 { nonce, .. }) | - Self::Eip1559(TxEip1559 { nonce, .. }) | - Self::Eip4844(TxEip4844 { nonce, .. }) | - Self::Eip7702(TxEip7702 { nonce, .. }) => *nonce, - // Deposit transactions do not have nonces. - #[cfg(feature = "optimism")] - Self::Deposit(_) => 0, - } - } - /// Returns the [`AccessList`] of the transaction. /// /// Returns `None` for legacy transactions. @@ -307,19 +288,6 @@ impl Transaction { } } - /// Get the gas limit of the transaction. - pub const fn gas_limit(&self) -> u64 { - match self { - Self::Legacy(TxLegacy { gas_limit, .. }) | - Self::Eip1559(TxEip1559 { gas_limit, .. }) | - Self::Eip4844(TxEip4844 { gas_limit, .. }) | - Self::Eip7702(TxEip7702 { gas_limit, .. }) | - Self::Eip2930(TxEip2930 { gas_limit, .. }) => *gas_limit, - #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { gas_limit, .. }) => *gas_limit, - } - } - /// Returns true if the tx supports dynamic fees pub const fn is_dynamic_fee(&self) -> bool { match self { @@ -330,40 +298,6 @@ impl Transaction { } } - /// Max fee per gas for eip1559 transaction, for legacy transactions this is `gas_price`. - /// - /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). - pub const fn max_fee_per_gas(&self) -> u128 { - match self { - Self::Legacy(TxLegacy { gas_price, .. }) | - Self::Eip2930(TxEip2930 { gas_price, .. 
}) => *gas_price, - Self::Eip1559(TxEip1559 { max_fee_per_gas, .. }) | - Self::Eip4844(TxEip4844 { max_fee_per_gas, .. }) | - Self::Eip7702(TxEip7702 { max_fee_per_gas, .. }) => *max_fee_per_gas, - // Deposit transactions buy their L2 gas on L1 and, as such, the L2 gas is not - // refundable. - #[cfg(feature = "optimism")] - Self::Deposit(_) => 0, - } - } - - /// Max priority fee per gas for eip1559 transaction, for legacy and eip2930 transactions this - /// is `None` - /// - /// This is also commonly referred to as the "Gas Tip Cap" (`GasTipCap`). - pub const fn max_priority_fee_per_gas(&self) -> Option { - match self { - Self::Legacy(_) | Self::Eip2930(_) => None, - Self::Eip1559(TxEip1559 { max_priority_fee_per_gas, .. }) | - Self::Eip4844(TxEip4844 { max_priority_fee_per_gas, .. }) | - Self::Eip7702(TxEip7702 { max_priority_fee_per_gas, .. }) => { - Some(*max_priority_fee_per_gas) - } - #[cfg(feature = "optimism")] - Self::Deposit(_) => None, - } - } - /// Blob versioned hashes for eip4844 transaction, for legacy, eip1559, eip2930 and eip7702 /// transactions this is `None` /// @@ -379,46 +313,15 @@ impl Transaction { } } - /// Max fee per blob gas for eip4844 transaction [`TxEip4844`]. - /// - /// Returns `None` for non-eip4844 transactions. - /// - /// This is also commonly referred to as the "Blob Gas Fee Cap" (`BlobGasFeeCap`). - pub const fn max_fee_per_blob_gas(&self) -> Option { - match self { - Self::Eip4844(TxEip4844 { max_fee_per_blob_gas, .. }) => Some(*max_fee_per_blob_gas), - _ => None, - } - } - /// Returns the blob gas used for all blobs of the EIP-4844 transaction if it is an EIP-4844 /// transaction. /// /// This is the number of blobs times the - /// [`DATA_GAS_PER_BLOB`](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. + /// [`DATA_GAS_PER_BLOB`](alloy_eips::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. pub fn blob_gas_used(&self) -> Option { self.as_eip4844().map(TxEip4844::blob_gas) } - /// Return the max priority fee per gas if the transaction is an EIP-1559 transaction, and - /// otherwise return the gas price. - /// - /// # Warning - /// - /// This is different than the `max_priority_fee_per_gas` method, which returns `None` for - /// non-EIP-1559 transactions. - pub const fn priority_fee_or_price(&self) -> u128 { - match self { - Self::Legacy(TxLegacy { gas_price, .. }) | - Self::Eip2930(TxEip2930 { gas_price, .. }) => *gas_price, - Self::Eip1559(TxEip1559 { max_priority_fee_per_gas, .. }) | - Self::Eip4844(TxEip4844 { max_priority_fee_per_gas, .. }) | - Self::Eip7702(TxEip7702 { max_priority_fee_per_gas, .. }) => *max_priority_fee_per_gas, - #[cfg(feature = "optimism")] - Self::Deposit(_) => 0, - } - } - /// Returns the effective gas price for the given base fee. /// /// If the transaction is a legacy or EIP2930 transaction, the gas price is returned. @@ -478,42 +381,6 @@ impl Transaction { } } - /// Returns the source hash of the transaction, which uniquely identifies its source. - /// If not a deposit transaction, this will always return `None`. - #[cfg(feature = "optimism")] - pub const fn source_hash(&self) -> Option { - match self { - Self::Deposit(TxDeposit { source_hash, .. }) => Some(*source_hash), - _ => None, - } - } - - /// Returns the amount of ETH locked up on L1 that will be minted on L2. If the transaction - /// is not a deposit transaction, this will always return `None`. - #[cfg(feature = "optimism")] - pub const fn mint(&self) -> Option { - match self { - Self::Deposit(TxDeposit { mint, .. 
}) => *mint, - _ => None, - } - } - - /// Returns whether or not the transaction is a system transaction. If the transaction - /// is not a deposit transaction, this will always return `false`. - #[cfg(feature = "optimism")] - pub const fn is_system_transaction(&self) -> bool { - match self { - Self::Deposit(TxDeposit { is_system_transaction, .. }) => *is_system_transaction, - _ => false, - } - } - - /// Returns whether or not the transaction is an Optimism Deposited transaction. - #[cfg(feature = "optimism")] - pub const fn is_deposit(&self) -> bool { - matches!(self, Self::Deposit(_)) - } - /// This encodes the transaction _without_ the signature, and is only suitable for creating a /// hash intended for signing. pub fn encode_without_signature(&self, out: &mut dyn bytes::BufMut) { @@ -766,10 +633,12 @@ impl reth_codecs::Compact for Transaction { let (tx, buf) = TxDeposit::from_compact(buf, buf.len()); (Self::Deposit(tx), buf) } - _ => unreachable!("Junk data in database: unknown Transaction variant"), + _ => unreachable!( + "Junk data in database: unknown Transaction variant: {identifier}" + ), } } - _ => unreachable!("Junk data in database: unknown Transaction variant"), + _ => unreachable!("Junk data in database: unknown Transaction variant: {identifier}"), } } } @@ -820,6 +689,188 @@ impl Encodable for Transaction { } } +impl alloy_consensus::Transaction for Transaction { + fn chain_id(&self) -> Option<ChainId> { + match self { + Self::Legacy(tx) => tx.chain_id(), + Self::Eip2930(tx) => tx.chain_id(), + Self::Eip1559(tx) => tx.chain_id(), + Self::Eip4844(tx) => tx.chain_id(), + Self::Eip7702(tx) => tx.chain_id(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.chain_id(), + } + } + + fn nonce(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.nonce(), + Self::Eip2930(tx) => tx.nonce(), + Self::Eip1559(tx) => tx.nonce(), + Self::Eip4844(tx) => tx.nonce(), + Self::Eip7702(tx) => tx.nonce(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.nonce(), + } + } + + fn gas_limit(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.gas_limit(), + Self::Eip2930(tx) => tx.gas_limit(), + Self::Eip1559(tx) => tx.gas_limit(), + Self::Eip4844(tx) => tx.gas_limit(), + Self::Eip7702(tx) => tx.gas_limit(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.gas_limit(), + } + } + + fn gas_price(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.gas_price(), + Self::Eip2930(tx) => tx.gas_price(), + Self::Eip1559(tx) => tx.gas_price(), + Self::Eip4844(tx) => tx.gas_price(), + Self::Eip7702(tx) => tx.gas_price(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.gas_price(), + } + } + + fn max_fee_per_gas(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.max_fee_per_gas(), + Self::Eip2930(tx) => tx.max_fee_per_gas(), + Self::Eip1559(tx) => tx.max_fee_per_gas(), + Self::Eip4844(tx) => tx.max_fee_per_gas(), + Self::Eip7702(tx) => tx.max_fee_per_gas(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.max_fee_per_gas(), + } + } + + fn max_priority_fee_per_gas(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.max_priority_fee_per_gas(), + Self::Eip2930(tx) => tx.max_priority_fee_per_gas(), + Self::Eip1559(tx) => tx.max_priority_fee_per_gas(), + Self::Eip4844(tx) => tx.max_priority_fee_per_gas(), + Self::Eip7702(tx) => tx.max_priority_fee_per_gas(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.max_priority_fee_per_gas(), + } + } + + fn max_fee_per_blob_gas(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.max_fee_per_blob_gas(),
Self::Eip2930(tx) => tx.max_fee_per_blob_gas(), + Self::Eip1559(tx) => tx.max_fee_per_blob_gas(), + Self::Eip4844(tx) => tx.max_fee_per_blob_gas(), + Self::Eip7702(tx) => tx.max_fee_per_blob_gas(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.max_fee_per_blob_gas(), + } + } + + fn priority_fee_or_price(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.priority_fee_or_price(), + Self::Eip2930(tx) => tx.priority_fee_or_price(), + Self::Eip1559(tx) => tx.priority_fee_or_price(), + Self::Eip4844(tx) => tx.priority_fee_or_price(), + Self::Eip7702(tx) => tx.priority_fee_or_price(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.priority_fee_or_price(), + } + } + + fn value(&self) -> U256 { + match self { + Self::Legacy(tx) => tx.value(), + Self::Eip2930(tx) => tx.value(), + Self::Eip1559(tx) => tx.value(), + Self::Eip4844(tx) => tx.value(), + Self::Eip7702(tx) => tx.value(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.value(), + } + } + + fn input(&self) -> &Bytes { + match self { + Self::Legacy(tx) => tx.input(), + Self::Eip2930(tx) => tx.input(), + Self::Eip1559(tx) => tx.input(), + Self::Eip4844(tx) => tx.input(), + Self::Eip7702(tx) => tx.input(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.input(), + } + } + + fn ty(&self) -> u8 { + match self { + Self::Legacy(tx) => tx.ty(), + Self::Eip2930(tx) => tx.ty(), + Self::Eip1559(tx) => tx.ty(), + Self::Eip4844(tx) => tx.ty(), + Self::Eip7702(tx) => tx.ty(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.ty(), + } + } + + fn access_list(&self) -> Option<&AccessList> { + match self { + Self::Legacy(tx) => tx.access_list(), + Self::Eip2930(tx) => tx.access_list(), + Self::Eip1559(tx) => tx.access_list(), + Self::Eip4844(tx) => tx.access_list(), + Self::Eip7702(tx) => tx.access_list(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.access_list(), + } + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + match self { + Self::Legacy(tx) => tx.blob_versioned_hashes(), + Self::Eip2930(tx) => tx.blob_versioned_hashes(), + Self::Eip1559(tx) => tx.blob_versioned_hashes(), + Self::Eip4844(tx) => tx.blob_versioned_hashes(), + Self::Eip7702(tx) => tx.blob_versioned_hashes(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.blob_versioned_hashes(), + } + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + match self { + Self::Legacy(tx) => tx.authorization_list(), + Self::Eip2930(tx) => tx.authorization_list(), + Self::Eip1559(tx) => tx.authorization_list(), + Self::Eip4844(tx) => tx.authorization_list(), + Self::Eip7702(tx) => tx.authorization_list(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.authorization_list(), + } + } + + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.kind(), + Self::Eip2930(tx) => tx.kind(), + Self::Eip1559(tx) => tx.kind(), + Self::Eip4844(tx) => tx.kind(), + Self::Eip7702(tx) => tx.kind(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.kind(), + } + } +} + /// Signed transaction without its Hash. Used type for inserting into the DB. /// /// This can be converted to [`TransactionSigned`] by calling [`TransactionSignedNoHash::hash`].
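The trait impl above lets downstream code depend on `alloy_consensus::Transaction` instead of the inherent accessors this diff removes. A minimal sketch of that migration (the helper and the fee math are illustrative, not part of this diff):

```rust
use alloy_consensus::Transaction;

// Hypothetical helper: generic over anything implementing the trait,
// e.g. reth's `Transaction` or `TransactionSigned` after this change.
fn effective_tip(tx: &impl Transaction, base_fee: u128) -> Option<u128> {
    // `max_fee_per_gas` falls back to `gas_price` for legacy transactions,
    // so one code path covers every variant.
    let cap = tx.max_fee_per_gas().checked_sub(base_fee)?;
    Some(cap.min(tx.priority_fee_or_price()))
}
```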
@@ -888,7 +939,7 @@ impl TransactionSignedNoHash { // transactions with an empty signature // // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if self.is_legacy() && self.signature == optimism_deposit_tx_signature() { + if self.is_legacy() && self.signature == TxDeposit::signature() { return Some(Address::ZERO) } } @@ -1070,6 +1121,11 @@ impl TransactionSigned { &self.signature } + /// Transaction + pub const fn transaction(&self) -> &Transaction { + &self.transaction + } + /// Transaction hash. Used to identify transaction. pub const fn hash(&self) -> TxHash { self.hash @@ -1283,6 +1339,178 @@ impl TransactionSigned { } } +impl alloy_consensus::Transaction for TransactionSigned { + fn chain_id(&self) -> Option<ChainId> { + self.deref().chain_id() + } + + fn nonce(&self) -> u64 { + self.deref().nonce() + } + + fn gas_limit(&self) -> u64 { + self.deref().gas_limit() + } + + fn gas_price(&self) -> Option<u128> { + self.deref().gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.deref().max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option<u128> { + self.deref().max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option<u128> { + self.deref().max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.deref().priority_fee_or_price() + } + + fn value(&self) -> U256 { + self.deref().value() + } + + fn input(&self) -> &Bytes { + self.deref().input() + } + + fn ty(&self) -> u8 { + self.deref().ty() + } + + fn access_list(&self) -> Option<&AccessList> { + self.deref().access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + alloy_consensus::Transaction::blob_versioned_hashes(self.deref()) + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.deref().authorization_list() + } + + fn kind(&self) -> TxKind { + self.deref().kind() + } +} + +impl SignedTransaction for TransactionSigned { + type Transaction = Transaction; + + fn tx_hash(&self) -> &TxHash { + Self::hash_ref(self) + } + + fn transaction(&self) -> &Self::Transaction { + Self::transaction(self) + } + + fn signature(&self) -> &Signature { + Self::signature(self) + } + + fn recover_signer(&self) -> Option<Address>
{ + Self::recover_signer(self) + } + + fn recover_signer_unchecked(&self) -> Option<Address>
{ + Self::recover_signer_unchecked(self) + } + + fn from_transaction_and_signature( + transaction: Self::Transaction, + signature: Signature, + ) -> Self { + Self::from_transaction_and_signature(transaction, signature) + } + + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { + tx_env.caller = sender; + match self.as_ref() { + Transaction::Legacy(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = tx.chain_id; + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clear(); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip2930(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip1559(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip4844(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = TxKind::Call(tx.to); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clone_from(&tx.blob_versioned_hashes); + tx_env.max_fee_per_blob_gas = Some(U256::from(tx.max_fee_per_blob_gas)); + tx_env.authorization_list = None; + } + Transaction::Eip7702(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to.into(); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = + Some(AuthorizationList::Signed(tx.authorization_list.clone())); + } + #[cfg(feature = "optimism")] + Transaction::Deposit(_) => {} + } + } +} + impl From for TransactionSigned { fn from(recovered: TransactionSignedEcRecovered) -> Self { recovered.signed_transaction @@ -1401,7 +1629,7 @@ impl Decodable2718 for TransactionSigned { #[cfg(feature = "optimism")] TxType::Deposit => Ok(Self::from_transaction_and_signature( Transaction::Deposit(TxDeposit::decode(buf)?), - optimism_deposit_tx_signature(), + TxDeposit::signature(), )), } } @@ -1446,8 +1674,7 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { } #[cfg(feature = "optimism")] - let signature = - if 
transaction.is_deposit() { optimism_deposit_tx_signature() } else { signature }; + let signature = if transaction.is_deposit() { TxDeposit::signature() } else { signature }; Ok(Self::from_transaction_and_signature(transaction, signature)) } @@ -1472,6 +1699,11 @@ impl TransactionSignedEcRecovered { self.signer } + /// Returns a reference to [`TransactionSigned`] + pub const fn as_signed(&self) -> &TransactionSigned { + &self.signed_transaction + } + /// Transform back to [`TransactionSigned`] pub fn into_signed(self) -> TransactionSigned { self.signed_transaction @@ -1780,11 +2012,14 @@ pub mod serde_bincode_compat { #[cfg(test)] mod tests { use crate::{ - transaction::{signature::Signature, TxEip1559, TxKind, TxLegacy}, + transaction::{TxEip1559, TxKind, TxLegacy}, Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, }; + use alloy_consensus::Transaction as _; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; - use alloy_primitives::{address, b256, bytes, hex, Address, Bytes, Parity, B256, U256}; + use alloy_primitives::{ + address, b256, bytes, hex, Address, Bytes, Parity, Signature, B256, U256, + }; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_codecs::Compact; diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index ec49f44a68..11da5d8385 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -7,15 +7,16 @@ use super::{ TxEip7702, }; use crate::{ - BlobTransaction, BlobTransactionSidecar, Signature, Transaction, TransactionSigned, - TransactionSignedEcRecovered, EIP4844_TX_TYPE_ID, + BlobTransaction, BlobTransactionSidecar, Transaction, TransactionSigned, + TransactionSignedEcRecovered, }; use alloy_consensus::{ + constants::EIP4844_TX_TYPE_ID, transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, SignableTransaction, TxEip4844WithSidecar, }; use alloy_eips::eip2718::{Decodable2718, Eip2718Result, Encodable2718}; -use alloy_primitives::{Address, TxHash, B256}; +use alloy_primitives::{Address, Signature, TxHash, B256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; use derive_more::{AsRef, Deref}; @@ -263,7 +264,7 @@ impl PooledTransactionsElement { /// transaction. /// /// This is the number of blobs times the - /// [`DATA_GAS_PER_BLOB`](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. + /// [`DATA_GAS_PER_BLOB`](alloy_eips::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. 
pub fn blob_gas_used(&self) -> Option<u64> { self.as_eip4844().map(TxEip4844::blob_gas) } diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 87b8c1fbf3..e7ff7a9b50 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,19 +1,14 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] -use crate::{Signature, Transaction, TransactionSigned, EIP4844_TX_TYPE_ID}; -use alloy_consensus::{transaction::TxEip4844, TxEip4844WithSidecar}; -use alloy_primitives::{keccak256, TxHash}; -use alloy_rlp::{Decodable, Error as RlpError, Header}; +use crate::{Transaction, TransactionSigned}; +use alloy_consensus::{constants::EIP4844_TX_TYPE_ID, TxEip4844WithSidecar}; +use alloy_primitives::{Signature, TxHash}; +use alloy_rlp::Header; use serde::{Deserialize, Serialize}; #[doc(inline)] pub use alloy_eips::eip4844::BlobTransactionSidecar; -#[cfg(feature = "c-kzg")] -pub use alloy_eips::eip4844::BlobTransactionValidationError; - -use alloc::vec::Vec; - /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their /// corresponding proofs. /// @@ -34,7 +29,7 @@ impl BlobTransaction { /// Constructs a new [`BlobTransaction`] from a [`TransactionSigned`] and a /// [`BlobTransactionSidecar`]. /// - /// Returns an error if the signed transaction is not [`TxEip4844`] + /// Returns an error if the signed transaction is not [`Transaction::Eip4844`] pub fn try_from_signed( tx: TransactionSigned, sidecar: BlobTransactionSidecar, @@ -55,12 +50,12 @@ impl BlobTransaction { /// Verifies that the transaction's blob data, commitments, and proofs are all valid. /// - /// See also [`TxEip4844::validate_blob`] + /// See also [`alloy_consensus::TxEip4844::validate_blob`] #[cfg(feature = "c-kzg")] pub fn validate( &self, proof_settings: &c_kzg::KzgSettings, - ) -> Result<(), BlobTransactionValidationError> { + ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { self.transaction.validate_blob(proof_settings) } @@ -161,7 +156,7 @@ impl BlobTransaction { // The payload length is the length of the `transaction_payload_body` list, plus the // length of the blobs, commitments, and proofs. - let payload_length = tx_length + self.transaction.sidecar.fields_len(); + let payload_length = tx_length + self.transaction.sidecar.rlp_encoded_fields_length(); // We use the calculated payload len to construct the first list header, which encompasses // everything in the tx - the length of the second, inner list header is part of @@ -186,109 +181,23 @@ impl BlobTransaction { /// Note: this should be used only when implementing other RLP decoding methods, and does not /// represent the full RLP decoding of the `PooledTransactionsElement` type.
pub(crate) fn decode_inner(data: &mut &[u8]) -> alloy_rlp::Result<Self> { - // decode the _first_ list header for the rest of the transaction - let outer_header = Header::decode(data)?; - if !outer_header.list { - return Err(RlpError::Custom("PooledTransactions blob tx must be encoded as a list")) - } - - let outer_remaining_len = data.len(); - - // Now we need to decode the inner 4844 transaction and its signature: - // - // `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]` - let inner_header = Header::decode(data)?; - if !inner_header.list { - return Err(RlpError::Custom( - "PooledTransactions inner blob tx must be encoded as a list", - )) - } - - let inner_remaining_len = data.len(); - - // inner transaction - let transaction = TxEip4844::decode_fields(data)?; - - // signature - let signature = Signature::decode_rlp_vrs(data)?; - - // the inner header only decodes the transaction and signature, so we check the length here - let inner_consumed = inner_remaining_len - data.len(); - if inner_consumed != inner_header.payload_length { - return Err(RlpError::UnexpectedLength) - } - - // All that's left are the blobs, commitments, and proofs - let sidecar = BlobTransactionSidecar::decode(data)?; + let (transaction, signature, hash) = + TxEip4844WithSidecar::decode_signed_fields(data)?.into_parts(); - // # Calculating the hash - // - // The full encoding of the `PooledTransaction` response is: - // `tx_type (0x03) || rlp([tx_payload_body, blobs, commitments, proofs])` - // - // The transaction hash however, is: - // `keccak256(tx_type (0x03) || rlp(tx_payload_body))` - // - // Note that this is `tx_payload_body`, not `[tx_payload_body]`, which would be - // `[[chain_id, nonce, max_priority_fee_per_gas, ...]]`, i.e. a list within a list. - // - // Because the pooled transaction encoding is different than the hash encoding for - // EIP-4844 transactions, we do not use the original buffer to calculate the hash. - // - // Instead, we use `encode_with_signature`, which RLP encodes the transaction with a - // signature for hashing without a header. We then hash the result. - let mut buf = Vec::new(); - transaction.encode_with_signature(&signature, &mut buf, false); - let hash = keccak256(&buf); - - // the outer header is for the entire transaction, so we check the length here - let outer_consumed = outer_remaining_len - data.len(); - if outer_consumed != outer_header.payload_length { - return Err(RlpError::UnexpectedLength) - } - - Ok(Self { transaction: TxEip4844WithSidecar { tx: transaction, sidecar }, hash, signature }) + Ok(Self { transaction, hash, signature }) } }
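With `decode_inner` delegating to `TxEip4844WithSidecar::decode_signed_fields`, the hand-rolled sidecar generator below is removed as well; callers build sidecars through alloy instead, as the updated tests further down show. A hedged sketch of the replacement (error handling is illustrative; `try_from_blobs` computes the KZG commitments and proofs that `generate_blob_sidecar` assembled by hand):

```rust
use alloy_eips::eip4844::BlobTransactionSidecar;
use c_kzg::Blob;

// Sketch: fails on an invalid blob instead of panicking mid-iteration.
fn build_sidecar(blobs: Vec<Blob>) -> BlobTransactionSidecar {
    BlobTransactionSidecar::try_from_blobs(blobs).expect("valid blobs")
}
```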
-/// Generates a [`BlobTransactionSidecar`] structure containing blobs, commitments, and proofs. -#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] -pub fn generate_blob_sidecar(blobs: Vec<Blob>) -> BlobTransactionSidecar { - use alloy_eips::eip4844::env_settings::EnvKzgSettings; - use c_kzg::{KzgCommitment, KzgProof}; - - let kzg_settings = EnvKzgSettings::Default; - - let commitments: Vec<Bytes48> = blobs - .iter() - .map(|blob| { - KzgCommitment::blob_to_kzg_commitment(&blob.clone(), kzg_settings.get()).unwrap() - }) - .map(|commitment| commitment.to_bytes()) - .collect(); - - let proofs: Vec<Bytes48> = blobs - .iter() - .zip(commitments.iter()) - .map(|(blob, commitment)| { - KzgProof::compute_blob_kzg_proof(blob, commitment, kzg_settings.get()).unwrap() - }) - .map(|proof| proof.to_bytes()) - .collect(); - - BlobTransactionSidecar::from_kzg(blobs, commitments, proofs) -} - #[cfg(all(test, feature = "c-kzg"))] mod tests { use super::*; use crate::{kzg::Blob, PooledTransactionsElement}; + use alloc::vec::Vec; use alloy_eips::{ eip2718::{Decodable2718, Encodable2718}, eip4844::Bytes48, }; use alloy_primitives::hex; - use alloy_rlp::Encodable; use std::{fs, path::PathBuf, str::FromStr}; #[test] @@ -310,7 +219,7 @@ mod tests { .unwrap()]; // Generate a BlobTransactionSidecar from the blobs - let sidecar = generate_blob_sidecar(blobs); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Assert commitment equality assert_eq!( @@ -359,7 +268,7 @@ mod tests { } // Generate a BlobTransactionSidecar from the blobs - let sidecar = generate_blob_sidecar(blobs.clone()); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Assert sidecar size assert_eq!(sidecar.size(), 524672); @@ -384,13 +293,13 @@ mod tests { .unwrap()]; // Generate a BlobTransactionSidecar from the blobs - let sidecar = generate_blob_sidecar(blobs); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Create a vector to store the encoded RLP let mut encoded_rlp = Vec::new(); // Encode the inner data of the BlobTransactionSidecar into RLP - sidecar.encode(&mut encoded_rlp); + sidecar.rlp_encode_fields(&mut encoded_rlp); // Assert the equality between the expected RLP from the JSON and the encoded RLP assert_eq!(json_value.get("rlp").unwrap().as_str().unwrap(), hex::encode(&encoded_rlp)); @@ -415,16 +324,17 @@ mod tests { .unwrap()]; // Generate a BlobTransactionSidecar from the blobs - let sidecar = generate_blob_sidecar(blobs); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Create a vector to store the encoded RLP let mut encoded_rlp = Vec::new(); // Encode the inner data of the BlobTransactionSidecar into RLP - sidecar.encode(&mut encoded_rlp); + sidecar.rlp_encode_fields(&mut encoded_rlp); // Decode the RLP-encoded data back into a BlobTransactionSidecar - let decoded_sidecar = BlobTransactionSidecar::decode(&mut encoded_rlp.as_slice()).unwrap(); + let decoded_sidecar = + BlobTransactionSidecar::rlp_decode_fields(&mut encoded_rlp.as_slice()).unwrap(); // Assert the equality between the original BlobTransactionSidecar and the decoded one assert_eq!(sidecar, decoded_sidecar); diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 39c0f92fda..b73206e6e7 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -1,12 +1,7 @@ use crate::transaction::util::secp256k1; -use alloy_primitives::{Address, Parity, B256, U256}; +use alloy_primitives::{Address, Parity, Signature, B256, U256}; use alloy_rlp::{Decodable, Error as
RlpError}; -pub use alloy_primitives::Signature; - -#[cfg(feature = "optimism")] -use reth_optimism_chainspec::optimism_deposit_tx_signature; - /// The order of the secp256k1 curve, divided by two. Signatures that should be checked according /// to EIP-2 should have an S value less than or equal to this. /// @@ -82,7 +77,7 @@ pub fn legacy_parity(signature: &Signature, chain_id: Option<u64>) -> Parity { // transactions with an empty signature // // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if *signature == optimism_deposit_tx_signature() { + if *signature == op_alloy_consensus::TxDeposit::signature() { return Parity::Parity(false) } Parity::NonEip155(signature.v().y_parity()) @@ -114,14 +109,11 @@ pub const fn extract_chain_id(v: u64) -> alloy_rlp::Result<(bool, Option<u64>)> #[cfg(test)] mod tests { - use crate::{ - transaction::signature::{ - legacy_parity, recover_signer, recover_signer_unchecked, SECP256K1N_HALF, - }, - Signature, + use crate::transaction::signature::{ + legacy_parity, recover_signer, recover_signer_unchecked, SECP256K1N_HALF, }; use alloy_eips::eip2718::Decodable2718; - use alloy_primitives::{hex, Address, Parity, B256, U256}; + use alloy_primitives::{hex, Address, Parity, Signature, B256, U256}; use std::str::FromStr; #[test] diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index c55e0d3c61..eff1c17a71 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -1,10 +1,11 @@ +use alloy_consensus::constants::{ + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, +}; use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable}; use serde::{Deserialize, Serialize}; -#[cfg(test)] -use reth_codecs::Compact; - /// Identifier parameter for legacy transaction #[cfg(any(test, feature = "reth-codec"))] pub(crate) const COMPACT_IDENTIFIER_LEGACY: usize = 0; @@ -23,22 +24,6 @@ pub(crate) const COMPACT_IDENTIFIER_EIP1559: usize = 2; #[cfg(any(test, feature = "reth-codec"))] pub(crate) const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; -/// Identifier for legacy transaction, however [`TxLegacy`](alloy_consensus::TxLegacy) is -/// technically not typed. -pub const LEGACY_TX_TYPE_ID: u8 = 0; - -/// Identifier for [`TxEip2930`](alloy_consensus::TxEip2930) transaction. -pub const EIP2930_TX_TYPE_ID: u8 = 1; - -/// Identifier for [`TxEip1559`](alloy_consensus::TxEip1559) transaction. -pub const EIP1559_TX_TYPE_ID: u8 = 2; - -/// Identifier for [`TxEip4844`](alloy_consensus::TxEip4844) transaction. -pub const EIP4844_TX_TYPE_ID: u8 = 3; - -/// Identifier for [`TxEip7702`](alloy_consensus::TxEip7702) transaction. -pub const EIP7702_TX_TYPE_ID: u8 = 4; - /// Identifier for [`TxDeposit`](op_alloy_consensus::TxDeposit) transaction.
#[cfg(feature = "optimism")] pub const DEPOSIT_TX_TYPE_ID: u8 = 126; diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs index 7569400e94..ff2c2e0dab 100644 --- a/crates/primitives/src/transaction/util.rs +++ b/crates/primitives/src/transaction/util.rs @@ -1,5 +1,4 @@ -use crate::Signature; -use alloy_primitives::Address; +use alloy_primitives::{Address, Signature}; #[cfg(feature = "secp256k1")] pub(crate) mod secp256k1 { diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index 80c2f5393b..e793c4201c 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -23,9 +23,9 @@ pub use user::{ /// A segment represents a pruning of some portion of the data. /// -/// Segments are called from [Pruner](crate::Pruner) with the following lifecycle: +/// Segments are called from [`Pruner`](crate::Pruner) with the following lifecycle: /// 1. Call [`Segment::prune`] with `delete_limit` of [`PruneInput`]. -/// 2. If [`Segment::prune`] returned a [Some] in `checkpoint` of [`SegmentOutput`], call +/// 2. If [`Segment::prune`] returned a [`Some`] in `checkpoint` of [`SegmentOutput`], call /// [`Segment::save_checkpoint`]. /// 3. Subtract `pruned` of [`SegmentOutput`] from `delete_limit` of next [`PruneInput`]. pub trait Segment: Debug + Send + Sync { @@ -88,7 +88,7 @@ impl PruneInput { }, }) // No checkpoint exists, prune from genesis - .unwrap_or(0); + .unwrap_or_default(); let to_tx_number = match provider.block_body_indices(self.to_block)? { Some(body) => { @@ -143,3 +143,206 @@ impl PruneInput { .unwrap_or(0) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + use reth_provider::{ + providers::BlockchainProvider2, + test_utils::{create_test_provider_factory, MockEthProvider}, + }; + use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; + + #[test] + fn test_prune_input_get_next_tx_num_range_no_to_block() { + let input = PruneInput { + previous_checkpoint: None, + to_block: 10, + limiter: PruneLimiter::default(), + }; + + // Default provider with no block corresponding to block 10 + let provider = MockEthProvider::default(); + + // No block body for block 10, expected None + let range = input.get_next_tx_num_range(&provider).expect("Expected range"); + assert!(range.is_none()); + } + + #[test] + fn test_prune_input_get_next_tx_num_range_no_tx() { + let input = PruneInput { + previous_checkpoint: None, + to_block: 10, + limiter: PruneLimiter::default(), + }; + + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks with no transactions + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Since there are no transactions, expected None + let range = input.get_next_tx_num_range(&provider).expect("Expected range"); + assert!(range.is_none()); + } + + #[test] + fn test_prune_input_get_next_tx_num_range_valid() { + // 
+ + #[test] + fn test_prune_input_get_next_tx_num_range_valid() { + // Create a new prune input + let input = PruneInput { + previous_checkpoint: None, + to_block: 10, + limiter: PruneLimiter::default(), + }; + + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks with some transactions + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Get the next tx number range + let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap(); + + // Calculate the total number of transactions + let num_txs = + blocks.iter().map(|block| block.body.transactions().count() as u64).sum::<u64>(); + + assert_eq!(range, 0..=num_txs - 1); + } + + #[test] + fn test_prune_input_get_next_tx_checkpoint_without_tx_number() { + // Create a prune input with a previous checkpoint without a tx number (unexpected) + let input = PruneInput { + previous_checkpoint: Some(PruneCheckpoint { + block_number: Some(5), + tx_number: None, + prune_mode: PruneMode::Full, + }), + to_block: 10, + limiter: PruneLimiter::default(), + }; + + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Fetch the range and check if it is correct + let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap(); + + // Calculate the total number of transactions + let num_txs = + blocks.iter().map(|block| block.body.transactions().count() as u64).sum::<u64>(); + + assert_eq!(range, 0..=num_txs - 1); + } + + #[test] + fn test_prune_input_get_next_tx_empty_range() { + // Create a new provider via factory + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Get the last tx number + // Calculate the total number of transactions + let num_txs =
blocks.iter().map(|block| block.body.transactions().count() as u64).sum::<u64>(); + let max_range = num_txs - 1; + + // Create a prune input with a previous checkpoint that is the last tx number + let input = PruneInput { + previous_checkpoint: Some(PruneCheckpoint { + block_number: Some(5), + tx_number: Some(max_range), + prune_mode: PruneMode::Full, + }), + to_block: 10, + limiter: PruneLimiter::default(), + }; + + // We expect an empty range since the previous checkpoint is the last tx number + let range = input.get_next_tx_num_range(&provider).expect("Expected range"); + assert!(range.is_none()); + } +} diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index 05482d6595..c081bf88c7 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -109,12 +109,14 @@ mod tests { let mut receipts = Vec::new(); for block in &blocks { + receipts.reserve_exact(block.body.transactions.len()); for transaction in &block.body.transactions { receipts .push((receipts.len() as u64, random_receipt(&mut rng, transaction, Some(0)))); } } - db.insert_receipts(receipts.clone()).expect("insert receipts"); + let receipts_len = receipts.len(); + db.insert_receipts(receipts).expect("insert receipts"); assert_eq!( db.table::<tables::Receipts>().unwrap().len(), @@ -194,7 +196,7 @@ mod tests { assert_eq!( db.table::<tables::Receipts>().unwrap().len(), - receipts.len() - (last_pruned_tx_number + 1) + receipts_len - (last_pruned_tx_number + 1) ); assert_eq!( db.factory diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index af7f479bbc..e93322ff27 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -11,7 +11,7 @@ use reth_prune_types::PruneModes; use super::{StaticFileHeaders, StaticFileReceipts, StaticFileTransactions}; -/// Collection of [Segment]. Thread-safe, allocated on the heap. +/// Collection of [`Segment`]. Thread-safe, allocated on the heap. #[derive(Debug)] pub struct SegmentSet<Provider> { inner: Vec<Box<dyn Segment<Provider>>>, } @@ -23,7 +23,7 @@ impl<Provider> SegmentSet<Provider> { Self::default() } - /// Adds new [Segment] to collection. + /// Adds new [`Segment`] to collection.
pub fn segment<S: Segment<Provider> + 'static>(mut self, segment: S) -> Self { self.inner.push(Box::new(segment)); self diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index 489df7e722..ee2accee1b 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -263,15 +263,12 @@ mod tests { let (deposit_contract_addr, _) = random_eoa_account(&mut rng); for block in &blocks { + receipts.reserve_exact(block.body.size()); for (txi, transaction) in block.body.transactions.iter().enumerate() { let mut receipt = random_receipt(&mut rng, transaction, Some(1)); receipt.logs.push(random_log( &mut rng, - if txi == (block.body.transactions.len() - 1) { - Some(deposit_contract_addr) - } else { - None - }, + (txi == (block.body.transactions.len() - 1)).then_some(deposit_contract_addr), Some(1), )); receipts.push((receipts.len() as u64, receipt)); diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index bd86f3e652..f189e6c36a 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -110,6 +110,7 @@ mod tests { let mut transaction_senders = Vec::new(); for block in &blocks { + transaction_senders.reserve_exact(block.body.transactions.len()); for transaction in &block.body.transactions { transaction_senders.push(( transaction_senders.len() as u64, @@ -117,8 +118,8 @@ mod tests { )); } } - db.insert_transaction_senders(transaction_senders.clone()) - .expect("insert transaction senders"); + let transaction_senders_len = transaction_senders.len(); + db.insert_transaction_senders(transaction_senders).expect("insert transaction senders"); assert_eq!( db.table::<tables::TransactionSenders>().unwrap().len(), @@ -202,7 +203,7 @@ mod tests { assert_eq!( db.table::<tables::TransactionSenders>().unwrap().len(), - transaction_senders.len() - (last_pruned_tx_number + 1) + transaction_senders_len - (last_pruned_tx_number + 1) ); assert_eq!( db.factory diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index bb8196cdb0..2df8cccf30 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -140,11 +140,13 @@ mod tests { let mut tx_hash_numbers = Vec::new(); for block in &blocks { + tx_hash_numbers.reserve_exact(block.body.transactions.len()); for transaction in &block.body.transactions { tx_hash_numbers.push((transaction.hash, tx_hash_numbers.len() as u64)); } } - db.insert_tx_hash_numbers(tx_hash_numbers.clone()).expect("insert tx hash numbers"); + let tx_hash_numbers_len = tx_hash_numbers.len(); + db.insert_tx_hash_numbers(tx_hash_numbers).expect("insert tx hash numbers"); assert_eq!( db.table::<tables::TransactionHashNumbers>().unwrap().len(), @@ -228,7 +230,7 @@ mod tests { assert_eq!( db.table::<tables::TransactionHashNumbers>().unwrap().len(), - tx_hash_numbers.len() - (last_pruned_tx_number + 1) + tx_hash_numbers_len - (last_pruned_tx_number + 1) ); assert_eq!( db.factory diff --git a/crates/prune/types/Cargo.toml b/crates/prune/types/Cargo.toml index 13def8eaa8..5446d6f76f 100644 --- a/crates/prune/types/Cargo.toml +++ b/crates/prune/types/Cargo.toml @@ -20,6 +20,7 @@ derive_more.workspace = true modular-bitfield.workspace = true serde.workspace = true thiserror.workspace = true +arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] arbitrary = {
workspace = true, features = ["derive"] } @@ -29,3 +30,13 @@ proptest-arbitrary-interop.workspace = true serde_json.workspace = true test-fuzz.workspace = true toml.workspace = true + +[features] +test-utils = [ + "dep:arbitrary", + "reth-codecs/test-utils" +] +arbitrary = [ + "alloy-primitives/arbitrary", + "reth-codecs/arbitrary" +] diff --git a/crates/prune/types/src/checkpoint.rs b/crates/prune/types/src/checkpoint.rs index f654fba7df..e0397c5afc 100644 --- a/crates/prune/types/src/checkpoint.rs +++ b/crates/prune/types/src/checkpoint.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; /// Saves the pruning progress of a stage. #[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(Default, arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(Default, arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct PruneCheckpoint { /// Highest pruned block number. If it's [None], the pruning for block `0` is not finished yet. diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 6e06d6fc5d..8483b7b737 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -27,6 +27,7 @@ use std::collections::BTreeMap; pub use target::{PruneModes, MINIMUM_PRUNING_DISTANCE}; use alloy_primitives::{Address, BlockNumber}; +use std::ops::Deref; /// Configuration for pruning receipts not associated with logs emitted by the specified contracts. #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] @@ -59,7 +60,7 @@ impl ReceiptsLogPruneConfig { pruned_block: Option<BlockNumber>, ) -> Result<BTreeMap<BlockNumber, Vec<&Address>>, PruneSegmentError> { let mut map = BTreeMap::new(); - let pruned_block = pruned_block.unwrap_or_default(); + let base_block = pruned_block.unwrap_or_default() + 1; for (address, mode) in &self.0 { // Getting `None` means that there is nothing to prune yet, so we need to include it in // // Reminder, that we increment because the [`BlockNumber`] key of the new map should be // viewed as `PruneMode::Before(block)` - let block = (pruned_block + 1).max( + let block = base_block.max( mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)? .map(|(block, _)| block) .unwrap_or_default() + @@ -90,8 +91,8 @@ let pruned_block = pruned_block.unwrap_or_default(); let mut lowest = None; - for mode in self.0.values() { - if let PruneMode::Distance(_) = mode { + for mode in self.values() { + if mode.is_distance() { if let Some((block, _)) = mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)?
{ @@ -103,3 +104,224 @@ impl ReceiptsLogPruneConfig { Ok(lowest.map(|lowest| lowest.max(pruned_block))) } } + +impl Deref for ReceiptsLogPruneConfig { + type Target = BTreeMap<Address, PruneMode>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_group_by_block_empty_config() { + let config = ReceiptsLogPruneConfig(BTreeMap::new()); + let tip = 1000; + let pruned_block = None; + + let result = config.group_by_block(tip, pruned_block).unwrap(); + assert!(result.is_empty(), "The result should be empty when the config is empty"); + } + + #[test] + fn test_group_by_block_single_entry() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Before(500); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + // Big tip to have something to prune for the target block + let tip = 3000000; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect one entry with block 500 and the corresponding address + assert_eq!(result.len(), 1); + assert_eq!(result[&500], vec![&address], "Address should be grouped under block 500"); + + // Tip smaller than the target block, so that we have nothing to prune for the block + let tip = 300; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect one entry with block 401 (pruned block + 1) and the corresponding address + assert_eq!(result.len(), 1); + assert_eq!(result[&401], vec![&address], "Address should be grouped under block 401"); + } + + #[test] + fn test_group_by_block_multiple_entries() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Before(600); + let prune_mode2 = PruneMode::Before(800); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 900000; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect two entries: one for block 600 and another for block 800 + assert_eq!(result.len(), 2); + assert_eq!(result[&600], vec![&address1], "Address1 should be grouped under block 600"); + assert_eq!(result[&800], vec![&address2], "Address2 should be grouped under block 800"); + } + + #[test] + fn test_group_by_block_with_distance_prune_mode() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Distance(100000); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 100100; + // Pruned block is smaller than the target block + let pruned_block = Some(50); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect the entry to be grouped under block 101 (tip - distance + 1) + assert_eq!(result.len(), 1); + assert_eq!(result[&101], vec![&address], "Address should be grouped under block 101"); + + let tip = 100100; + // Pruned block is larger than the target block + let pruned_block = Some(800); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect the entry to be grouped under block 801 (pruned block + 1), which is larger than tip - distance + assert_eq!(result.len(), 1); + assert_eq!(result[&801], vec![&address], "Address should be grouped under block 801"); + } + + #[test] + fn test_lowest_block_with_distance_empty_config() { + let
config = ReceiptsLogPruneConfig(BTreeMap::new()); + let tip = 1000; + let pruned_block = None; + + let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); + assert_eq!(result, None, "The result should be None when the config is empty"); + } + + #[test] + fn test_lowest_block_with_distance_no_distance_mode() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Before(500); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 1000; + let pruned_block = None; + + let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); + assert_eq!(result, None, "The result should be None when there are no Distance modes"); + } + + #[test] + fn test_lowest_block_with_distance_single_entry() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Distance(100000); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + + let tip = 100100; + let pruned_block = Some(400); + + // Expect the lowest block to be 400 as 400 > 100100 - 100000 (tip - distance) + assert_eq!( + config.lowest_block_with_distance(tip, pruned_block).unwrap(), + Some(400), + "The lowest block should be 400" + ); + + let tip = 100100; + let pruned_block = Some(50); + + // Expect the lowest block to be 100 as 100 > 50 (pruned block) + assert_eq!( + config.lowest_block_with_distance(tip, pruned_block).unwrap(), + Some(100), + "The lowest block should be 100" + ); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_last() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100100); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100); + + // The lowest block should be 200300 - 100300 = 100000: + // - First iteration will return 100200 => 200300 - 100100 = 100200 + // - Second iteration will return 100000 => 200300 - 100300 = 100000 < 100200 + // - Final result is 100000 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_first() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100400); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100); + + // The lowest block should be 200300 - 100400 = 99900: + // - First iteration, lowest block is 200300 - 100400 = 99900 + // - Second iteration, lowest block is still 99900 < 200300 - 100300 = 100000 + // - Final result is 99900 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(99900)); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_pruned_block() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100400); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, 
prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100000); + + // The lowest block should be 100000 because: + // - Lowest is 200300 - 100400 = 99900 < 200300 - 100300 = 100000 + // - Lowest is compared to the pruned block 100000: 100000 > 99900 + // - Finally the lowest block is 100000 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); + } +} diff --git a/crates/prune/types/src/mode.rs b/crates/prune/types/src/mode.rs index 3465882993..de9b9e6dc0 100644 --- a/crates/prune/types/src/mode.rs +++ b/crates/prune/types/src/mode.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; /// Prune mode. #[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] #[serde(rename_all = "lowercase")] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub enum PruneMode { /// Prune all blocks. @@ -17,6 +17,13 @@ pub enum PruneMode { Before(BlockNumber), } +#[cfg(any(test, feature = "test-utils"))] +impl Default for PruneMode { + fn default() -> Self { + Self::Full + } +} + impl PruneMode { /// Prune blocks up to the specified block number. The specified block number is also pruned. /// @@ -67,12 +74,10 @@ impl PruneMode { pub const fn is_full(&self) -> bool { matches!(self, Self::Full) } -} -#[cfg(test)] -impl Default for PruneMode { - fn default() -> Self { - Self::Full + /// Returns true if the prune mode is [`PruneMode::Distance`]. + pub const fn is_distance(&self) -> bool { + matches!(self, Self::Distance(_)) } } diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 9e4501f627..3ee6801010 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -13,14 +13,15 @@ workspace = true [dependencies] # reth -reth-chainspec.workspace = true reth-primitives.workspace = true reth-storage-errors.workspace = true reth-execution-errors.workspace = true -reth-consensus-common.workspace = true reth-prune-types.workspace = true reth-storage-api.workspace = true reth-trie = { workspace = true, optional = true } + +# alloy +alloy-eips.workspace = true alloy-primitives.workspace = true # revm @@ -30,11 +31,28 @@ revm.workspace = true reth-trie.workspace = true reth-ethereum-forks.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true [features] -default = ["std", "c-kzg"] -std = [] -c-kzg = ["revm/c-kzg"] -test-utils = ["dep:reth-trie"] -optimism = ["revm/optimism"] -serde = ["revm/serde"] +default = ["std"] +std = [ + "reth-primitives/std", + "alloy-primitives/std", + "revm/std", + "alloy-eips/std", + "alloy-consensus/std", +] +test-utils = [ + "dep:reth-trie", + "reth-primitives/test-utils", + "reth-trie?/test-utils", + "revm/test-utils", + "reth-prune-types/test-utils" +] +serde = [ + "revm/serde", + "reth-trie?/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", +] diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index a63681aa13..be3ef0a378 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -1,9 +1,10 @@ //! Helper for handling execution of multiple blocks. 
use alloc::vec::Vec; +use alloy_eips::eip7685::Requests; use alloy_primitives::{map::HashSet, Address, BlockNumber}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; -use reth_primitives::{Receipt, Receipts, Request, Requests}; +use reth_primitives::{Receipt, Receipts}; use reth_prune_types::{PruneMode, PruneModes, PruneSegmentError, MINIMUM_PRUNING_DISTANCE}; use revm::db::states::bundle_state::BundleRetention; @@ -170,8 +171,8 @@ impl BlockBatchRecord { } /// Save EIP-7685 requests to the executor. - pub fn save_requests(&mut self, requests: Vec<Request>) { - self.requests.push(requests.into()); + pub fn save_requests(&mut self, requests: Requests) { + self.requests.push(requests); } } diff --git a/crates/payload/builder/src/database.rs b/crates/revm/src/cached.rs similarity index 60% rename from crates/payload/builder/src/database.rs rename to crates/revm/src/cached.rs index d63f7322de..88a41e1d89 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/revm/src/cached.rs @@ -1,13 +1,13 @@ //! Database adapters for payload building. -use alloy_primitives::{Address, B256, U256}; +use alloy_primitives::{ + map::{Entry, HashMap}, + Address, B256, U256, +}; +use core::cell::RefCell; use reth_primitives::revm_primitives::{ db::{Database, DatabaseRef}, AccountInfo, Bytecode, }; -use std::{ - cell::RefCell, - collections::{hash_map::Entry, HashMap}, -}; /// A container type that caches reads from an underlying [`DatabaseRef`]. /// @@ -17,15 +17,15 @@ use std::{ /// # Example /// /// ``` -/// use reth_payload_builder::database::CachedReads; +/// use reth_revm::cached::CachedReads; /// use revm::db::{DatabaseRef, State}; /// /// fn build_payload<DB: DatabaseRef>(db: DB) { /// let mut cached_reads = CachedReads::default(); -/// let db_ref = cached_reads.as_db(db); -/// // this is `Database` and can be used to build a payload, it never writes to `CachedReads` or the underlying database, but all reads from the underlying database are cached in `CachedReads`. +/// let db = cached_reads.as_db_mut(db); +/// // this is `Database` and can be used to build a payload, it never commits to `CachedReads` or the underlying database, but all reads from the underlying database are cached in `CachedReads`. /// // Subsequent payload build attempts can use cached reads and avoid hitting the underlying database. -/// let db = State::builder().with_database_ref(db_ref).build(); +/// let state = State::builder().with_database(db).build(); /// } /// ``` #[derive(Debug, Clone, Default)] @@ -40,10 +40,11 @@ pub struct CachedReads { impl CachedReads { /// Gets a [`DatabaseRef`] that will cache reads from the given database. pub fn as_db<DB: DatabaseRef>(&mut self, db: DB) -> CachedReadsDBRef<'_, DB> { - CachedReadsDBRef { inner: RefCell::new(self.as_db_mut(db)) } + self.as_db_mut(db).into_db() } - fn as_db_mut<DB: DatabaseRef>(&mut self, db: DB) -> CachedReadsDbMut<'_, DB> { + /// Gets a mutable [`Database`] that will cache reads from the underlying database. + pub fn as_db_mut<DB: DatabaseRef>(&mut self, db: DB) -> CachedReadsDbMut<'_, DB> { CachedReadsDbMut { cached: self, db } } @@ -56,6 +57,15 @@ impl CachedReads { ) { self.accounts.insert(address, CachedAccount { info: Some(info), storage }); } + + /// Extends current cache with entries from another [`CachedReads`] instance. + /// + /// Note: It is expected that both instances are based on the exact same state.
+ pub fn extend(&mut self, other: Self) { + self.accounts.extend(other.accounts); + self.contracts.extend(other.contracts); + self.block_hashes.extend(other.block_hashes); + } } /// A [Database] that caches reads inside [`CachedReads`]. @@ -67,6 +77,28 @@ pub struct CachedReadsDbMut<'a, DB> { pub db: DB, } +impl<'a, DB> CachedReadsDbMut<'a, DB> { + /// Converts this [`Database`] implementation into a [`DatabaseRef`] that will still cache + /// reads. + pub const fn into_db(self) -> CachedReadsDBRef<'a, DB> { + CachedReadsDBRef { inner: RefCell::new(self) } + } + + /// Returns access to wrapped [`DatabaseRef`]. + pub const fn inner(&self) -> &DB { + &self.db + } +} + +impl<DB, T> AsRef<T> for CachedReadsDbMut<'_, DB> +where + DB: AsRef<T>, +{ + fn as_ref(&self) -> &T { + self.inner().as_ref() + } +} + impl<DB: DatabaseRef> Database for CachedReadsDbMut<'_, DB> { type Error = <DB as DatabaseRef>::Error; @@ -161,3 +193,57 @@ impl CachedAccount { Self { info, storage: HashMap::default() } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_extend_with_two_cached_reads() { + // Setup test data + let hash1 = B256::from_slice(&[1u8; 32]); + let hash2 = B256::from_slice(&[2u8; 32]); + let address1 = Address::from_slice(&[1u8; 20]); + let address2 = Address::from_slice(&[2u8; 20]); + + // Create primary cache + let mut primary = { + let mut cache = CachedReads::default(); + cache.accounts.insert(address1, CachedAccount::new(Some(AccountInfo::default()))); + cache.contracts.insert(hash1, Bytecode::default()); + cache.block_hashes.insert(1, hash1); + cache + }; + + // Create additional cache + let additional = { + let mut cache = CachedReads::default(); + cache.accounts.insert(address2, CachedAccount::new(Some(AccountInfo::default()))); + cache.contracts.insert(hash2, Bytecode::default()); + cache.block_hashes.insert(2, hash2); + cache + }; + + // Extending primary with additional cache + primary.extend(additional); + + // Verify the combined state + assert!( + primary.accounts.len() == 2 && + primary.contracts.len() == 2 && + primary.block_hashes.len() == 2, + "All maps should contain 2 entries" + ); + + // Verify specific entries + assert!( + primary.accounts.contains_key(&address1) && + primary.accounts.contains_key(&address2) && + primary.contracts.contains_key(&hash1) && + primary.contracts.contains_key(&hash2) && + primary.block_hashes.get(&1) == Some(&hash1) && + primary.block_hashes.get(&2) == Some(&hash2), + "All expected entries should be present" + ); + } +}
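The new `extend` allows merging the read caches of two payload build attempts that executed against the same state, e.g. when attempts run on separate tasks. A hedged sketch (the helper function is illustrative; only `extend` comes from this diff):

```rust
use reth_revm::cached::CachedReads;

// Merge the cache produced by a second build attempt into the primary one.
// As noted in the docs above, both caches must come from the same state.
fn merge_attempt_caches(primary: &mut CachedReads, second_attempt: CachedReads) {
    primary.extend(second_attempt);
}
```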
/// /// Returns `Ok` with the storage value, or the default value if not found. fn storage(&mut self, address: Address, index: U256) -> Result { - DatabaseRef::storage_ref(self, address, index) + self.storage_ref(address, index) } /// Retrieves the block hash for a given block number. @@ -123,7 +129,7 @@ impl Database for StateProviderDatabase { /// Returns `Ok` with the block hash if found, or the default hash otherwise. /// Note: It safely casts the `number` to `u64`. fn block_hash(&mut self, number: u64) -> Result { - DatabaseRef::block_hash_ref(self, number) + self.block_hash_ref(number) } } diff --git a/crates/revm/src/either.rs b/crates/revm/src/either.rs new file mode 100644 index 0000000000..e93ba3a8d0 --- /dev/null +++ b/crates/revm/src/either.rs @@ -0,0 +1,52 @@ +use alloy_primitives::{Address, B256, U256}; +use revm::{ + primitives::{AccountInfo, Bytecode}, + Database, +}; + +/// An enum type that can hold either of two different [`Database`] implementations. +/// +/// This allows flexible usage of different [`Database`] types in the same context. +#[derive(Debug, Clone)] +pub enum Either { + /// A value of type `L`. + Left(L), + /// A value of type `R`. + Right(R), +} + +impl Database for Either +where + L: Database, + R: Database, +{ + type Error = L::Error; + + fn basic(&mut self, address: Address) -> Result, Self::Error> { + match self { + Self::Left(db) => db.basic(address), + Self::Right(db) => db.basic(address), + } + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result { + match self { + Self::Left(db) => db.code_by_hash(code_hash), + Self::Right(db) => db.code_by_hash(code_hash), + } + } + + fn storage(&mut self, address: Address, index: U256) -> Result { + match self { + Self::Left(db) => db.storage(address, index), + Self::Right(db) => db.storage(address, index), + } + } + + fn block_hash(&mut self, number: u64) -> Result { + match self { + Self::Left(db) => db.block_hash(number), + Self::Right(db) => db.block_hash(number), + } + } +} diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 5515357d0d..b06ee816f8 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -11,13 +11,14 @@ extern crate alloc; -/// Contains glue code for integrating reth database into revm's [Database]. -pub mod database; - pub mod batch; -/// State changes that are not related to transactions. -pub mod state_change; +/// Cache database that reads from an underlying [`DatabaseRef`]. +/// Database adapters for payload building. +pub mod cached; + +/// Contains glue code for integrating reth database into revm's [Database]. +pub mod database; /// Common test helpers #[cfg(any(test, feature = "test-utils"))] @@ -25,3 +26,6 @@ pub mod test_utils; // Convenience re-exports. pub use revm::{self, *}; + +/// Either type for flexible usage of different database types in the same context. 
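Before the module declaration that follows, a hedged usage sketch for the new `Either` adapter: revm's `CacheDB` and `EmptyDB` stand in for two arbitrary databases (both report the same error type, which the `Database` impl above relies on):

```
use reth_revm::either::Either;
use revm::db::{CacheDB, EmptyDB};

// Select between two database implementations at runtime while exposing a
// single concrete `Database` type to the caller.
fn select_db(use_cache: bool) -> Either<CacheDB<EmptyDB>, EmptyDB> {
    if use_cache {
        Either::Left(CacheDB::new(EmptyDB::default()))
    } else {
        Either::Right(EmptyDB::default())
    }
}
```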
+pub mod either; diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 6e9e469ec4..75c06a2554 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true reth-rpc-eth-api.workspace = true reth-engine-primitives.workspace = true reth-network-peers.workspace = true @@ -36,13 +35,12 @@ alloy-rpc-types-engine.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } - -[dev-dependencies] -serde_json.workspace = true +serde.workspace = true +serde_with.workspace = true [features] client = [ "jsonrpsee/client", "jsonrpsee/async-client", - "reth-rpc-eth-api/client" + "reth-rpc-eth-api/client", ] diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 3e03210f1f..d1837787d5 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,3 +1,4 @@ +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256}; use alloy_rpc_types::{Block, Bundle, StateContext}; use alloy_rpc_types_debug::ExecutionWitness; @@ -6,7 +7,6 @@ use alloy_rpc_types_trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{BlockId, BlockNumberOrTag}; /// Debug rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))] diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 50181d23a7..d92173112e 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -3,16 +3,16 @@ //! This contains the `engine_` namespace and the subset of the `eth_` namespace that is exposed to //! the consensus client. -use alloy_eips::{eip4844::BlobAndProofV1, BlockId, BlockNumberOrTag}; +use alloy_eips::{eip4844::BlobAndProofV1, eip7685::Requests, BlockId, BlockNumberOrTag}; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, BlockHash, Bytes, B256, U256, U64}; use alloy_rpc_types::{ state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, }; use alloy_rpc_types_engine::{ - ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadBodiesV2, ExecutionPayloadInputV2, - ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, - PayloadId, PayloadStatus, TransitionConfiguration, + ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, + ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, + TransitionConfiguration, }; use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_serde::JsonStorageKey; @@ -54,9 +54,10 @@ pub trait EngineApi { #[method(name = "newPayloadV4")] async fn new_payload_v4( &self, - payload: ExecutionPayloadV4, + payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, + execution_requests: Requests, ) -> RpcResult; /// See also @@ -109,7 +110,10 @@ pub trait EngineApi { /// Note: /// > Provider software MAY stop the corresponding build process after serving this call. #[method(name = "getPayloadV1")] - async fn get_payload_v1(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v1( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// See also /// @@ -117,7 +121,10 @@ pub trait EngineApi { /// payload build process at the time of receiving this call. 
Note: /// > Provider software MAY stop the corresponding build process after serving this call. #[method(name = "getPayloadV2")] - async fn get_payload_v2(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v2( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// Post Cancun payload handler which also returns a blobs bundle. /// @@ -127,7 +134,10 @@ pub trait EngineApi { /// payload build process at the time of receiving this call. Note: /// > Provider software MAY stop the corresponding build process after serving this call. #[method(name = "getPayloadV3")] - async fn get_payload_v3(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v3( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// Post Prague payload handler. /// @@ -137,7 +147,10 @@ pub trait EngineApi { /// payload build process at the time of receiving this call. Note: /// > Provider software MAY stop the corresponding build process after serving this call. #[method(name = "getPayloadV4")] - async fn get_payload_v4(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v4( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// See also #[method(name = "getPayloadBodiesByHashV1")] @@ -146,13 +159,6 @@ pub trait EngineApi { block_hashes: Vec, ) -> RpcResult; - /// See also - #[method(name = "getPayloadBodiesByHashV2")] - async fn get_payload_bodies_by_hash_v2( - &self, - block_hashes: Vec, - ) -> RpcResult; - /// See also /// /// Returns the execution payload bodies by the range starting at `start`, containing `count` @@ -172,16 +178,6 @@ pub trait EngineApi { count: U64, ) -> RpcResult; - /// See also - /// - /// Similar to `getPayloadBodiesByRangeV1`, but returns [`ExecutionPayloadBodiesV2`] - #[method(name = "getPayloadBodiesByRangeV2")] - async fn get_payload_bodies_by_range_v2( - &self, - start: U64, - count: U64, - ) -> RpcResult; - /// See also /// /// Note: This method will be deprecated after the cancun hardfork: diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 73775112dc..0a4fa9f660 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -46,7 +46,10 @@ pub mod servers { rpc::RpcApiServer, trace::TraceApiServer, txpool::TxPoolApiServer, - validation::BlockSubmissionValidationApiServer, + validation::{ + BlockSubmissionValidationApiServer, BuilderBlockValidationRequestV3, + BuilderBlockValidationRequestV4, + }, web3::Web3ApiServer, }; pub use reth_rpc_eth_api::{ diff --git a/crates/rpc/rpc-api/src/otterscan.rs b/crates/rpc/rpc-api/src/otterscan.rs index ee805b482c..d3e61c0310 100644 --- a/crates/rpc/rpc-api/src/otterscan.rs +++ b/crates/rpc/rpc-api/src/otterscan.rs @@ -1,3 +1,4 @@ +use alloy_eips::BlockId; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types::Header; @@ -6,7 +7,6 @@ use alloy_rpc_types_trace::otterscan::{ TransactionsWithReceipts, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockId; /// Otterscan rpc interface. 
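A recurring change across these RPC trait files is swapping `reth_primitives::{BlockId, BlockNumberOrTag}` for the `alloy_eips` equivalents. Construction is unchanged; a small reference sketch:

```
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_primitives::B256;

// The same id type covers tags, explicit numbers, and hashes.
fn example_ids() -> [BlockId; 3] {
    [
        BlockId::Number(BlockNumberOrTag::Latest),
        BlockId::Number(BlockNumberOrTag::Number(17_000_000)),
        BlockId::from(B256::ZERO),
    ]
}
```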
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "ots"))] diff --git a/crates/rpc/rpc-api/src/reth.rs b/crates/rpc/rpc-api/src/reth.rs index 98c31b78f9..0589ffc00c 100644 --- a/crates/rpc/rpc-api/src/reth.rs +++ b/crates/rpc/rpc-api/src/reth.rs @@ -1,6 +1,6 @@ +use alloy_eips::BlockId; use alloy_primitives::{Address, U256}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockId; use std::collections::HashMap; /// Reth API namespace for reth-specific methods diff --git a/crates/rpc/rpc-api/src/trace.rs b/crates/rpc/rpc-api/src/trace.rs index 58dda422ab..45059284a2 100644 --- a/crates/rpc/rpc-api/src/trace.rs +++ b/crates/rpc/rpc-api/src/trace.rs @@ -1,3 +1,4 @@ +use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256}; use alloy_rpc_types::{state::StateOverride, BlockOverrides, Index}; use alloy_rpc_types_eth::transaction::TransactionRequest; @@ -7,7 +8,6 @@ use alloy_rpc_types_trace::{ parity::*, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockId; /// Ethereum trace API #[cfg_attr(not(feature = "client"), rpc(server, namespace = "trace"))] diff --git a/crates/rpc/rpc-api/src/validation.rs b/crates/rpc/rpc-api/src/validation.rs index bbfa673d25..797eee7ae5 100644 --- a/crates/rpc/rpc-api/src/validation.rs +++ b/crates/rpc/rpc-api/src/validation.rs @@ -1,9 +1,45 @@ //! API for block submission validation. +use alloy_primitives::B256; use alloy_rpc_types_beacon::relay::{ - BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, + BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, SignedBidSubmissionV3, + SignedBidSubmissionV4, }; use jsonrpsee::proc_macros::rpc; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DisplayFromStr}; + +/// A Request to validate a [`SignedBidSubmissionV3`] +/// +/// +#[serde_as] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct BuilderBlockValidationRequestV3 { + /// The request to be validated. + #[serde(flatten)] + pub request: SignedBidSubmissionV3, + /// The registered gas limit for the validation request. + #[serde_as(as = "DisplayFromStr")] + pub registered_gas_limit: u64, + /// The parent beacon block root for the validation request. + pub parent_beacon_block_root: B256, +} + +/// A Request to validate a [`SignedBidSubmissionV4`] +/// +/// +#[serde_as] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct BuilderBlockValidationRequestV4 { + /// The request to be validated. + #[serde(flatten)] + pub request: SignedBidSubmissionV4, + /// The registered gas limit for the validation request. + #[serde_as(as = "DisplayFromStr")] + pub registered_gas_limit: u64, + /// The parent beacon block root for the validation request. + pub parent_beacon_block_root: B256, +} /// Block validation rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "flashbots"))] @@ -22,4 +58,18 @@ pub trait BlockSubmissionValidationApi { &self, request: BuilderBlockValidationRequestV2, ) -> jsonrpsee::core::RpcResult<()>; + + /// A Request to validate a block submission. + #[method(name = "validateBuilderSubmissionV3")] + async fn validate_builder_submission_v3( + &self, + request: BuilderBlockValidationRequestV3, + ) -> jsonrpsee::core::RpcResult<()>; + + /// A Request to validate a block submission. 
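The `registered_gas_limit` fields above use `serde_with`'s `DisplayFromStr`, so the gas limit travels as a decimal JSON string rather than a number, matching the relay wire format. A standalone sketch of that encoding:

```
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DisplayFromStr};

#[serde_as]
#[derive(Serialize, Deserialize)]
struct GasLimit {
    #[serde_as(as = "DisplayFromStr")]
    registered_gas_limit: u64,
}

fn main() {
    let v = GasLimit { registered_gas_limit: 30_000_000 };
    // Prints {"registered_gas_limit":"30000000"}: a string, not a JSON number.
    println!("{}", serde_json::to_string(&v).unwrap());
}
```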
+ #[method(name = "validateBuilderSubmissionV4")] + async fn validate_builder_submission_v4( + &self, + request: BuilderBlockValidationRequestV4, + ) -> jsonrpsee::core::RpcResult<()>; } diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 78776dc1c3..b993054419 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-ipc.workspace = true reth-chainspec.workspace = true +reth-consensus.workspace = true reth-network-api.workspace = true reth-node-core.workspace = true reth-provider.workspace = true @@ -33,11 +34,6 @@ reth-primitives.workspace = true # bsc reth-bsc-consensus.workspace = true -# ethereum -alloy-network.workspace = true -alloy-rpc-types.workspace = true -alloy-serde.workspace = true - # rpc/net jsonrpsee = { workspace = true, features = ["server"] } tower-http = { workspace = true, features = ["full"] } @@ -53,6 +49,8 @@ metrics.workspace = true serde = { workspace = true, features = ["derive"] } thiserror.workspace = true tracing.workspace = true +tokio-util = { workspace = true } +tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } [dev-dependencies] reth-chainspec.workspace = true @@ -67,14 +65,13 @@ reth-rpc-api = { workspace = true, features = ["client"] } reth-rpc-engine-api.workspace = true reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } -reth-tokio-util.workspace = true -reth-node-api.workspace = true reth-rpc-types-compat.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types-trace.workspace = true alloy-rpc-types-engine.workspace = true +alloy-eips.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } serde_json.workspace = true diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 4ff98ae8d5..daff81fa2a 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -2,6 +2,7 @@ use std::{net::SocketAddr, path::PathBuf}; use jsonrpsee::server::ServerBuilder; use reth_node_core::{args::RpcServerArgs, utils::get_or_create_jwt_secret_from_path}; +use reth_rpc::ValidationApiConfig; use reth_rpc_eth_types::{EthConfig, EthStateCacheConfig, GasPriceOracleConfig}; use reth_rpc_layer::{JwtError, JwtSecret}; use reth_rpc_server_types::RpcModuleSelection; @@ -27,6 +28,9 @@ pub trait RethRpcServerConfig { /// The configured ethereum RPC settings. fn eth_config(&self) -> EthConfig; + /// The configured flashbots block-validation settings. + fn flashbots_config(&self) -> ValidationApiConfig; + /// Returns state cache configuration.
fn state_cache_config(&self) -> EthStateCacheConfig; @@ -101,6 +105,10 @@ impl RethRpcServerConfig for RpcServerArgs { .proof_permits(self.rpc_proof_permits) } + fn flashbots_config(&self) -> ValidationApiConfig { + ValidationApiConfig { disallow: self.builder_disallow.clone().unwrap_or_default() } + } + fn state_cache_config(&self) -> EthStateCacheConfig { EthStateCacheConfig { max_blocks: self.rpc_state_cache.max_blocks, @@ -124,7 +132,7 @@ impl RethRpcServerConfig for RpcServerArgs { fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig { let mut config = TransportRpcModuleConfig::default() - .with_config(RpcModuleConfig::new(self.eth_config())); + .with_config(RpcModuleConfig::new(self.eth_config(), self.flashbots_config())); if self.http { config = config.with_http( diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 4e706e9159..edf0342ff6 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,5 +1,3 @@ -use std::marker::PhantomData; - use reth_bsc_consensus::BscTraceHelper; use reth_evm::ConfigureEvm; use reth_primitives::Header; @@ -12,9 +10,8 @@ use reth_rpc_eth_types::{ use reth_tasks::TaskSpawner; /// Alias for `eth` namespace API builder. -pub type DynEthApiBuilder = Box< - dyn Fn(&EthApiBuilderCtx) -> EthApi, ->; +pub type DynEthApiBuilder = + Box) -> EthApi>; /// Handlers for core, filter and pubsub `eth` namespace APIs. #[derive(Debug, Clone)] @@ -26,7 +23,7 @@ pub struct EthHandlers { /// Polling based filter handler available on all transports pub filter: EthFilter, /// Handler for subscriptions only available for transports that support it (ws, ipc) - pub pubsub: EthPubSub, + pub pubsub: EthPubSub, } impl EthHandlers @@ -90,7 +87,6 @@ where events, cache, bsc_trace_helper, - _rpc_ty_builders: PhantomData, }; let api = eth_api_builder(&ctx); @@ -101,6 +97,7 @@ where ctx.cache.clone(), ctx.config.filter_config(), Box::new(ctx.executor.clone()), + api.tx_resp_builder().clone(), ); let pubsub = EthPubSub::with_spawner( @@ -109,6 +106,7 @@ where ctx.events.clone(), ctx.network.clone(), Box::new(ctx.executor.clone()), + api.tx_resp_builder().clone(), ); Self { api, cache: ctx.cache, filter, pubsub } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 480f2a4e6c..5cf31b9771 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -27,20 +27,22 @@ //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::TransactionPool; //! -//! pub async fn launch( +//! pub async fn launch( //! provider: Provider, //! pool: Pool, //! network: Network, //! events: Events, //! evm_config: EvmConfig, //! block_executor: BlockExecutor, +//! consensus: Consensus, //! ) where //! Provider: FullRpcProvider + AccountReader + ChangeSetReader, -//! Pool: TransactionPool + 'static, +//! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, +//! Consensus: reth_consensus::Consensus + Clone + 'static, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -57,6 +59,7 @@ //! events, //! evm_config, //! block_executor, +//! consensus, //! ) //! .build(transports, Box::new(EthApi::with_spawner)); //! let handle = RpcServerConfig::default() @@ -85,6 +88,7 @@ //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::TransactionPool; //! use tokio::try_join; +//! //! pub async fn launch< //! Provider, //! Pool, @@ -94,6 +98,7 @@ //! EngineT, //! EvmConfig, //! BlockExecutor, +//! Consensus, //! >( //! provider: Provider, //! pool: Pool, @@ -102,15 +107,17 @@ //! engine_api: EngineApi, //! evm_config: EvmConfig, //! block_executor: BlockExecutor, +//! consensus: Consensus, //! ) where //! Provider: FullRpcProvider + AccountReader + ChangeSetReader, -//! Pool: TransactionPool + 'static, +//! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, //! EngineT: EngineTypes, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, +//! Consensus: reth_consensus::Consensus + Clone + 'static, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -127,6 +134,7 @@ //! events, //! evm_config, //! block_executor, +//! consensus, //! ); //! //! // configure the server modules @@ -152,7 +160,9 @@ use std::{ collections::HashMap, + fmt::Debug, net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + sync::Arc, time::{Duration, SystemTime, UNIX_EPOCH}, }; @@ -169,6 +179,7 @@ use jsonrpsee::{ }; use reth_bsc_consensus::BscTraceHelper; use reth_chainspec::EthereumHardforks; +use reth_consensus::Consensus; use reth_engine_primitives::EngineTypes; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; @@ -179,7 +190,7 @@ use reth_provider::{ }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, - TxPoolApi, Web3Api, + TxPoolApi, ValidationApi, ValidationApiConfig, Web3Api, }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ @@ -226,6 +237,9 @@ pub use eth::EthHandlers; mod metrics; pub use metrics::{MeteredRequestFuture, RpcRequestMetricsService}; +// Rpc rate limiter +pub mod rate_limiter; + /// Convenience function for starting a server in one step. #[allow(clippy::too_many_arguments)] pub async fn launch( @@ -239,6 +253,7 @@ pub async fn launch, block_executor: BlockExecutor, + consensus: Arc, ) -> Result where Provider: FullRpcProvider + AccountReader + ChangeSetReader, @@ -262,6 +277,7 @@ where events, evm_config, block_executor, + consensus, ) .build(module_config, eth), ) @@ -272,7 +288,16 @@ where /// /// This is the main entrypoint and the easiest way to configure an RPC server. #[derive(Debug, Clone)] -pub struct RpcModuleBuilder { +pub struct RpcModuleBuilder< + Provider, + Pool, + Network, + Tasks, + Events, + EvmConfig, + BlockExecutor, + Consensus, +> { /// The Provider type to when creating all rpc handlers provider: Provider, /// The Pool type to when creating all rpc handlers @@ -287,16 +312,19 @@ pub struct RpcModuleBuilder, } // === impl RpcBuilder === -impl - RpcModuleBuilder +impl + RpcModuleBuilder { /// Create a new instance of the builder + #[allow(clippy::too_many_arguments)] pub const fn new( provider: Provider, pool: Pool, @@ -305,6 +333,7 @@ impl events: Events, evm_config: EvmConfig, block_executor: BlockExecutor, + consensus: Consensus, ) -> Self { Self { provider, @@ -314,6 +343,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper: None, } } @@ -322,7 +352,7 @@ impl pub fn with_provider
<P>
( self, provider: P, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where P: BlockReader + StateProviderFactory + EvmEnvProvider + 'static, { @@ -333,6 +363,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, .. } = self; @@ -344,6 +375,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, } } @@ -352,7 +384,7 @@ impl pub fn with_pool
<P>
( self, pool: P, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where P: TransactionPool + 'static, { @@ -363,6 +395,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, .. } = self; @@ -374,6 +407,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, } } @@ -393,6 +427,7 @@ impl Events, EvmConfig, BlockExecutor, + Consensus, > { let Self { provider, @@ -401,6 +436,7 @@ impl network, evm_config, block_executor, + consensus, bsc_trace_helper, .. } = self; @@ -412,6 +448,7 @@ impl evm_config, block_executor, pool: NoopTransactionPool::default(), + consensus, bsc_trace_helper, } } @@ -420,7 +457,7 @@ impl pub fn with_network( self, network: N, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where N: NetworkInfo + Peers + 'static, { @@ -431,6 +468,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, .. } = self; @@ -442,6 +480,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, } } @@ -453,8 +492,16 @@ impl /// [`EthApi`](reth_rpc::eth::EthApi) which requires a [`NetworkInfo`] implementation. pub fn with_noop_network( self, - ) -> RpcModuleBuilder - { + ) -> RpcModuleBuilder< + Provider, + Pool, + NoopNetwork, + Tasks, + Events, + EvmConfig, + BlockExecutor, + Consensus, + > { let Self { provider, pool, @@ -462,6 +509,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, .. } = self; @@ -473,6 +521,7 @@ impl network: NoopNetwork::default(), evm_config, block_executor, + consensus, bsc_trace_helper, } } @@ -481,7 +530,7 @@ impl pub fn with_executor( self, executor: T, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where T: TaskSpawner + 'static, { @@ -492,6 +541,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, .. } = self; @@ -503,6 +553,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, } } @@ -521,6 +572,7 @@ impl Events, EvmConfig, BlockExecutor, + Consensus, > { let Self { pool, @@ -529,6 +581,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, .. } = self; @@ -540,6 +593,7 @@ impl executor: TokioTaskExecutor::default(), evm_config, block_executor, + consensus, bsc_trace_helper, } } @@ -548,7 +602,7 @@ impl pub fn with_events( self, events: E, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where E: CanonStateSubscriptions + 'static, { @@ -559,6 +613,7 @@ impl network, evm_config, block_executor, + consensus, bsc_trace_helper, .. } = self; @@ -570,6 +625,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, } } @@ -578,7 +634,7 @@ impl pub fn with_evm_config( self, evm_config: E, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where E: ConfigureEvm + 'static, { @@ -589,6 +645,7 @@ impl network, events, block_executor, + consensus, bsc_trace_helper, .. } = self; @@ -600,6 +657,7 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, } } @@ -608,12 +666,20 @@ impl pub fn with_block_executor( self, block_executor: BE, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where BE: BlockExecutorProvider, { let Self { - provider, network, pool, executor, events, evm_config, bsc_trace_helper, .. + provider, + network, + pool, + executor, + events, + evm_config, + consensus, + bsc_trace_helper, + .. 
} = self; RpcModuleBuilder { provider, @@ -623,15 +689,59 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, } } - /// Configure the bsc trace helper - pub fn with_bsc_trace_helper(self, bsc_trace_helper: Option) -> Self { - let Self { provider, network, pool, executor, events, evm_config, block_executor, .. } = - self; - Self { + /// Configure the consensus implementation. + pub fn with_consensus( + self, + consensus: C, + ) -> RpcModuleBuilder { + let Self { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + bsc_trace_helper, + .. + } = self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + bsc_trace_helper, + } + } + + /// Configure the `bsc_trace_helper` implementation. + #[allow(clippy::use_self)] + pub fn with_bsc_trace_helper( + self, + bsc_trace_helper: Option, + ) -> RpcModuleBuilder + { + let Self { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + .. + } = self; + RpcModuleBuilder { provider, network, pool, @@ -639,13 +749,14 @@ impl events, evm_config, block_executor, + consensus, bsc_trace_helper, } } } -impl - RpcModuleBuilder +impl + RpcModuleBuilder where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Pool: TransactionPool + 'static, @@ -654,6 +765,7 @@ where Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
<Header = Header>
, BlockExecutor: BlockExecutorProvider, + Consensus: reth_consensus::Consensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). @@ -670,7 +782,7 @@ where ) -> ( TransportRpcModules, AuthRpcModule, - RpcRegistryInner, + RpcRegistryInner, ) where EngineT: EngineTypes, @@ -685,6 +797,7 @@ where events, evm_config, block_executor, + consensus, bsc_trace_helper, } = self; @@ -696,6 +809,7 @@ where network, executor, events, + consensus, config, evm_config, eth, @@ -718,6 +832,7 @@ where /// # Example /// /// ```no_run + /// use reth_consensus::noop::NoopConsensus; /// use reth_evm::ConfigureEvm; /// use reth_evm_ethereum::execute::EthExecutorProvider; /// use reth_network_api::noop::NoopNetwork; @@ -737,6 +852,7 @@ where /// .with_events(TestCanonStateSubscriptions::default()) /// .with_evm_config(evm) /// .with_block_executor(EthExecutorProvider::mainnet()) + /// .with_consensus(NoopConsensus::default()) /// .into_registry(Default::default(), Box::new(EthApi::with_spawner)); /// /// let eth_api = registry.eth_api(); @@ -746,7 +862,7 @@ where self, config: RpcModuleConfig, eth: DynEthApiBuilder, - ) -> RpcRegistryInner + ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, { @@ -758,6 +874,7 @@ where events, evm_config, block_executor, + consensus, bsc_trace_helper, } = self; RpcRegistryInner::new( @@ -766,6 +883,7 @@ where network, executor, events, + consensus, config, evm_config, eth, @@ -794,6 +912,7 @@ where events, evm_config, block_executor, + consensus, bsc_trace_helper, } = self; @@ -806,6 +925,7 @@ where network, executor, events, + consensus, config.unwrap_or_default(), evm_config, eth, @@ -823,9 +943,9 @@ where } } -impl Default for RpcModuleBuilder<(), (), (), (), (), (), ()> { +impl Default for RpcModuleBuilder<(), (), (), (), (), (), (), ()> { fn default() -> Self { - Self::new((), (), (), (), (), (), ()) + Self::new((), (), (), (), (), (), (), ()) } } @@ -834,6 +954,8 @@ impl Default for RpcModuleBuilder<(), (), (), (), (), (), ()> { pub struct RpcModuleConfig { /// `eth` namespace settings eth: EthConfig, + /// `flashbots` namespace settings + flashbots: ValidationApiConfig, } // === impl RpcModuleConfig === @@ -845,8 +967,8 @@ impl RpcModuleConfig { } /// Returns a new RPC module config given the eth namespace config - pub const fn new(eth: EthConfig) -> Self { - Self { eth } + pub const fn new(eth: EthConfig, flashbots: ValidationApiConfig) -> Self { + Self { eth, flashbots } } /// Get a reference to the eth namespace config @@ -864,6 +986,7 @@ impl RpcModuleConfig { #[derive(Clone, Debug, Default)] pub struct RpcModuleConfigBuilder { eth: Option, + flashbots: Option, } // === impl RpcModuleConfigBuilder === @@ -875,15 +998,21 @@ impl RpcModuleConfigBuilder { self } + /// Configures a custom flashbots namespace config + pub fn flashbots(mut self, flashbots: ValidationApiConfig) -> Self { + self.flashbots = Some(flashbots); + self + } + /// Consumes the type and creates the [`RpcModuleConfig`] pub fn build(self) -> RpcModuleConfig { - let Self { eth } = self; - RpcModuleConfig { eth: eth.unwrap_or_default() } + let Self { eth, flashbots } = self; + RpcModuleConfig { eth: eth.unwrap_or_default(), flashbots: flashbots.unwrap_or_default() } } /// Get a reference to the eth namespace config, if any - pub const fn get_eth(&self) -> &Option { - &self.eth + pub const fn get_eth(&self) -> Option<&EthConfig> { + self.eth.as_ref() } /// Get a mutable reference to the 
eth namespace config, if any @@ -907,6 +1036,7 @@ pub struct RpcRegistryInner< Events, EthApi: EthApiTypes, BlockExecutor, + Consensus, > { provider: Provider, pool: Pool, @@ -914,6 +1044,9 @@ pub struct RpcRegistryInner< executor: Tasks, events: Events, block_executor: BlockExecutor, + consensus: Consensus, + /// Holds the configuration for the RPC modules + config: RpcModuleConfig, /// Holds a all `eth_` namespace handlers eth: EthHandlers, /// to put trace calls behind semaphore @@ -926,8 +1059,8 @@ pub struct RpcRegistryInner< // === impl RpcRegistryInner === -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, Pool: Send + Sync + Clone + 'static, @@ -945,6 +1078,7 @@ where network: Network, executor: Tasks, events: Events, + consensus: Consensus, config: RpcModuleConfig, evm_config: EvmConfig, eth_api_builder: DynEthApiBuilder< @@ -982,6 +1116,8 @@ where network, eth, executor, + consensus, + config, modules: Default::default(), blocking_pool_guard, events, @@ -991,8 +1127,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where EthApi: EthApiTypes, { @@ -1049,8 +1185,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Network: NetworkInfo + Clone + 'static, EthApi: EthApiTypes, @@ -1088,8 +1224,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Network: NetworkInfo + Peers + Clone + 'static, @@ -1119,14 +1255,7 @@ where /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn register_ots(&mut self) -> &mut Self where - EthApi: TraceExt - + EthTransactions< - NetworkTypes: alloy_network::Network< - TransactionResponse = alloy_serde::WithOtherFields< - alloy_rpc_types::Transaction, - >, - >, - >, + EthApi: TraceExt + EthTransactions, { let otterscan_api = self.otterscan_api(); self.modules.insert(RethRpcModule::Ots, otterscan_api.into_rpc().into()); @@ -1201,8 +1330,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Network: NetworkInfo + Peers + Clone + 'static, @@ -1275,10 +1404,23 @@ where pub fn reth_api(&self) -> RethApi { RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) } + + /// Instantiates `ValidationApi` + pub fn validation_api(&self) -> ValidationApi + where + Consensus: reth_consensus::Consensus + Clone + 'static, + { + ValidationApi::new( + self.provider.clone(), + Arc::new(self.consensus.clone()), + self.block_executor.clone(), + self.config.flashbots.clone(), + ) + } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Pool: TransactionPool + 'static, @@ -1287,6 +1429,7 @@ where Events: CanonStateSubscriptions + Clone + 'static, EthApi: FullEthApiServer, BlockExecutor: BlockExecutorProvider, + Consensus: reth_consensus::Consensus + Clone + 'static, { /// Configures the auth module that includes the /// * `engine_` namespace @@ -1412,9 +1555,12 @@ where .into_rpc() .into(), RethRpcModule::Web3 => Web3Api::new(self.network.clone()).into_rpc().into(), - RethRpcModule::Txpool => { - TxPoolApi::<_, EthApi>::new(self.pool.clone()).into_rpc().into() - } + RethRpcModule::Txpool => TxPoolApi::new( + self.pool.clone(), + self.eth.api.tx_resp_builder().clone(), + ) + .into_rpc() + .into(), RethRpcModule::Rpc => RPCApi::new( namespaces .iter() @@ -1429,6 +1575,14 @@ where .into_rpc() .into() } + 
RethRpcModule::Flashbots => ValidationApi::new( + self.provider.clone(), + Arc::new(self.consensus.clone()), + self.block_executor.clone(), + self.config.flashbots.clone(), + ) + .into_rpc() + .into(), }) .clone() }) @@ -1886,7 +2040,7 @@ impl TransportRpcModuleConfig { } /// Sets a custom [`RpcModuleConfig`] for the configured modules. - pub const fn with_config(mut self, config: RpcModuleConfig) -> Self { + pub fn with_config(mut self, config: RpcModuleConfig) -> Self { self.config = Some(config); self } @@ -2017,7 +2171,7 @@ impl TransportRpcModules { Ok(false) } - /// Merge the given [Methods] in all configured methods. + /// Merge the given [`Methods`] in all configured methods. /// /// Fails if any of the methods in other is present already. pub fn merge_configured( @@ -2108,7 +2262,22 @@ impl TransportRpcModules { http_removed || ws_removed || ipc_removed } - /// Replace the given [Methods] in the configured http methods. + /// Renames a method in all configured transports by: + /// 1. Removing the old method name. + /// 2. Adding the new method. + pub fn rename( + &mut self, + old_name: &'static str, + new_method: impl Into, + ) -> Result<(), RegisterMethodError> { + // Remove the old method from all configured transports + self.remove_method_from_configured(old_name); + + // Merge the new method into the configured transports + self.merge_configured(new_method) + } + + /// Replace the given [`Methods`] in the configured http methods. /// /// Fails if any of the methods in other is present already or if the method being removed is /// not present @@ -2480,6 +2649,43 @@ mod tests { assert!(modules.ipc.as_ref().unwrap().method("anything").is_none()); } + #[test] + fn test_transport_rpc_module_rename() { + let mut modules = TransportRpcModules { + http: Some(create_test_module()), + ws: Some(create_test_module()), + ipc: Some(create_test_module()), + ..Default::default() + }; + + // Verify that the old method we want to rename exists at the start + assert!(modules.http.as_ref().unwrap().method("anything").is_some()); + assert!(modules.ws.as_ref().unwrap().method("anything").is_some()); + assert!(modules.ipc.as_ref().unwrap().method("anything").is_some()); + + // Verify that the new method does not exist at the start + assert!(modules.http.as_ref().unwrap().method("something").is_none()); + assert!(modules.ws.as_ref().unwrap().method("something").is_none()); + assert!(modules.ipc.as_ref().unwrap().method("something").is_none()); + + // Create another module + let mut other_module = RpcModule::new(()); + other_module.register_method("something", |_, _, _| "fails").unwrap(); + + // Rename the method + modules.rename("anything", other_module).expect("rename failed"); + + // Verify that the old method was removed from all transports + assert!(modules.http.as_ref().unwrap().method("anything").is_none()); + assert!(modules.ws.as_ref().unwrap().method("anything").is_none()); + assert!(modules.ipc.as_ref().unwrap().method("anything").is_none()); + + // Verify that the new method was added to all transports + assert!(modules.http.as_ref().unwrap().method("something").is_some()); + assert!(modules.ws.as_ref().unwrap().method("something").is_some()); + assert!(modules.ipc.as_ref().unwrap().method("something").is_some()); + } + #[test] fn test_replace_http_method() { let mut modules = diff --git a/crates/rpc/rpc-builder/src/metrics.rs b/crates/rpc/rpc-builder/src/metrics.rs index 08fd388985..57283ded37 100644 --- a/crates/rpc/rpc-builder/src/metrics.rs +++ b/crates/rpc/rpc-builder/src/metrics.rs @@ -30,9
+30,12 @@ impl RpcRequestMetrics { Self { inner: Arc::new(RpcServerMetricsInner { connection_metrics: transport.connection_metrics(), - call_metrics: HashMap::from_iter(module.method_names().map(|method| { - (method, RpcServerCallMetrics::new_with_labels(&[("method", method)])) - })), + call_metrics: module + .method_names() + .map(|method| { + (method, RpcServerCallMetrics::new_with_labels(&[("method", method)])) + }) + .collect(), }), } } diff --git a/crates/rpc/rpc-builder/src/rate_limiter.rs b/crates/rpc/rpc-builder/src/rate_limiter.rs new file mode 100644 index 0000000000..85df0eee61 --- /dev/null +++ b/crates/rpc/rpc-builder/src/rate_limiter.rs @@ -0,0 +1,116 @@ +//! [`jsonrpsee`] helper layer for rate limiting certain methods. + +use jsonrpsee::{server::middleware::rpc::RpcServiceT, types::Request, MethodResponse}; +use std::{ + future::Future, + pin::Pin, + sync::Arc, + task::{ready, Context, Poll}, +}; +use tokio::sync::{OwnedSemaphorePermit, Semaphore}; +use tokio_util::sync::PollSemaphore; +use tower::Layer; + +/// Rate limiter for the RPC server. +/// +/// Rate limits expensive calls such as debug_ and trace_. +#[derive(Debug, Clone)] +pub struct RpcRequestRateLimiter { + inner: Arc, +} + +impl RpcRequestRateLimiter { + /// Create a new rate limit layer with the given number of permits. + pub fn new(rate_limit: usize) -> Self { + Self { + inner: Arc::new(RpcRequestRateLimiterInner { + call_guard: PollSemaphore::new(Arc::new(Semaphore::new(rate_limit))), + }), + } + } +} + +impl Layer for RpcRequestRateLimiter { + type Service = RpcRequestRateLimitingService; + + fn layer(&self, inner: S) -> Self::Service { + RpcRequestRateLimitingService::new(inner, self.clone()) + } +} + +/// Rate Limiter for the RPC server +#[derive(Debug, Clone)] +struct RpcRequestRateLimiterInner { + /// Semaphore to rate limit calls + call_guard: PollSemaphore, +} + +/// A [`RpcServiceT`] middleware that rate limits RPC calls to the server. +#[derive(Debug, Clone)] +pub struct RpcRequestRateLimitingService { + /// The rate limiter for RPC requests + rate_limiter: RpcRequestRateLimiter, + /// The inner service being wrapped + inner: S, +} + +impl RpcRequestRateLimitingService { + /// Create a new rate limited service. + pub const fn new(service: S, rate_limiter: RpcRequestRateLimiter) -> Self { + Self { inner: service, rate_limiter } + } +} + +impl<'a, S> RpcServiceT<'a> for RpcRequestRateLimitingService +where + S: RpcServiceT<'a> + Send + Sync + Clone + 'static, +{ + type Future = RateLimitingRequestFuture; + + fn call(&self, req: Request<'a>) -> Self::Future { + let method_name = req.method_name(); + if method_name.starts_with("trace_") || method_name.starts_with("debug_") { + RateLimitingRequestFuture { + fut: self.inner.call(req), + guard: Some(self.rate_limiter.inner.call_guard.clone()), + permit: None, + } + } else { + // if we don't need to rate limit, then there + // is no need to get a semaphore permit + RateLimitingRequestFuture { fut: self.inner.call(req), guard: None, permit: None } + } + } +} + +/// Response future. 
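Before the response future defined next, a hedged wiring sketch for this limiter via jsonrpsee's tower-style RPC middleware (the builder and method names follow jsonrpsee's API at the time and should be treated as an assumption):

```
use jsonrpsee::server::{middleware::rpc::RpcServiceBuilder, ServerBuilder};
use reth_rpc_builder::rate_limiter::RpcRequestRateLimiter;

async fn build_rate_limited_server() -> Result<(), Box<dyn std::error::Error>> {
    // Allow at most two concurrent trace_/debug_ calls; all other methods
    // bypass the semaphore, as in `RpcRequestRateLimitingService::call`.
    let rpc_middleware = RpcServiceBuilder::new().layer(RpcRequestRateLimiter::new(2));
    let _server = ServerBuilder::default()
        .set_rpc_middleware(rpc_middleware)
        .build("127.0.0.1:8545")
        .await?;
    Ok(())
}
```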
+#[pin_project::pin_project] +pub struct RateLimitingRequestFuture { + #[pin] + fut: F, + guard: Option, + permit: Option, +} + +impl std::fmt::Debug for RateLimitingRequestFuture { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("RateLimitingRequestFuture") + } +} + +impl> Future for RateLimitingRequestFuture { + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + if let Some(guard) = this.guard.as_mut() { + *this.permit = ready!(guard.poll_acquire(cx)); + *this.guard = None; + } + let res = this.fut.poll(cx); + if res.is_ready() { + *this.permit = None; + } + res + } +} diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 7a8093c506..b5faa71cc5 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -2,12 +2,12 @@ //! Standalone http tests use crate::utils::{launch_http, launch_http_ws, launch_ws}; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{hex_literal::hex, Address, Bytes, TxHash, B256, B64, U256, U64}; -use alloy_rpc_types::{ - Block, FeeHistory, Filter, Index, Log, PendingTransactionFilterKind, SyncStatus, Transaction, - TransactionReceipt, +use alloy_rpc_types_eth::{ + transaction::TransactionRequest, Block, FeeHistory, Filter, Index, Log, + PendingTransactionFilterKind, SyncStatus, Transaction, TransactionReceipt, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_rpc_types_trace::filter::TraceFilter; use jsonrpsee::{ core::{ @@ -19,7 +19,7 @@ use jsonrpsee::{ types::error::ErrorCode, }; use reth_network_peers::NodeRecord; -use reth_primitives::{BlockId, BlockNumberOrTag, Receipt}; +use reth_primitives::Receipt; use reth_rpc_api::{ clients::{AdminApiClient, EthApiClient}, DebugApiClient, EthFilterApiClient, NetApiClient, OtterscanClient, TraceApiClient, diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index bcc26dcad8..0e2186e56e 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -1,5 +1,5 @@ use crate::utils::{test_address, test_rpc_builder}; -use alloy_rpc_types::{Block, Receipt, Transaction}; +use alloy_rpc_types_eth::{Block, Receipt, Transaction}; use jsonrpsee::{ server::{middleware::rpc::RpcServiceT, RpcServiceBuilder}, types::Request, diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 847de99564..175992c0f1 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -3,8 +3,10 @@ use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::MAINNET; +use reth_consensus::noop::NoopConsensus; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; -use reth_evm_ethereum::{execute::EthExecutorProvider, EthEvmConfig}; +use reth_evm::execute::BasicBlockExecutorProvider; +use reth_evm_ethereum::{execute::EthExecutionStrategyFactory, EthEvmConfig}; use reth_network_api::noop::NoopNetwork; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; @@ -124,7 +126,8 @@ pub fn test_rpc_builder() -> RpcModuleBuilder< TokioTaskExecutor, TestCanonStateSubscriptions, EthEvmConfig, - EthExecutorProvider, + 
BasicBlockExecutorProvider, + NoopConsensus, > { RpcModuleBuilder::default() .with_provider(NoopProvider::default()) @@ -133,5 +136,8 @@ pub fn test_rpc_builder() -> RpcModuleBuilder< .with_executor(TokioTaskExecutor::default()) .with_events(TestCanonStateSubscriptions::default()) .with_evm_config(EthEvmConfig::new(MAINNET.clone())) - .with_block_executor(EthExecutorProvider::ethereum(MAINNET.clone())) + .with_block_executor( + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::mainnet()), + ) + .with_consensus(NoopConsensus::default()) } diff --git a/crates/rpc/rpc-engine-api/src/capabilities.rs b/crates/rpc/rpc-engine-api/src/capabilities.rs index de4d962315..af0609b0d1 100644 --- a/crates/rpc/rpc-engine-api/src/capabilities.rs +++ b/crates/rpc/rpc-engine-api/src/capabilities.rs @@ -17,8 +17,6 @@ pub const CAPABILITIES: &[&str] = &[ "engine_newPayloadV4", "engine_getPayloadBodiesByHashV1", "engine_getPayloadBodiesByRangeV1", - "engine_getPayloadBodiesByHashV2", - "engine_getPayloadBodiesByRangeV2", "engine_getBlobsV1", ]; diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 252808c14a..20eeb390ac 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1,13 +1,12 @@ use crate::{ capabilities::EngineCapabilities, metrics::EngineApiMetrics, EngineApiError, EngineApiResult, }; -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::{eip1898::BlockHashOrNumber, eip4844::BlobAndProofV1, eip7685::Requests}; use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, - ExecutionPayloadBodiesV2, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, - ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, - TransitionConfiguration, + ExecutionPayloadInputV2, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV3, + ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, }; use async_trait::async_trait; use jsonrpsee_core::RpcResult; @@ -20,10 +19,10 @@ use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_primitives::{Block, BlockHashOrNumber, EthereumHardfork}; +use reth_primitives::{Block, EthereumHardfork}; use reth_rpc_api::EngineApiServer; use reth_rpc_types_compat::engine::payload::{ - convert_payload_input_v2_to_payload, convert_to_payload_body_v1, convert_to_payload_body_v2, + convert_payload_input_v2_to_payload, convert_to_payload_body_v1, }; use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; use reth_tasks::TaskSpawner; @@ -141,7 +140,11 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V1, payload_or_attrs)?; - Ok(self.inner.beacon_consensus.new_payload(payload, None).await?) + Ok(self + .inner + .beacon_consensus + .new_payload(payload, ExecutionPayloadSidecar::none()) + .await?) } /// See also @@ -157,7 +160,11 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V2, payload_or_attrs)?; - Ok(self.inner.beacon_consensus.new_payload(payload, None).await?) + Ok(self + .inner + .beacon_consensus + .new_payload(payload, ExecutionPayloadSidecar::none()) + .await?) 
} /// See also @@ -177,17 +184,26 @@ where .validator .validate_version_specific_fields(EngineApiMessageVersion::V3, payload_or_attrs)?; - let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; - - Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) + Ok(self + .inner + .beacon_consensus + .new_payload( + payload, + ExecutionPayloadSidecar::v3(CancunPayloadFields { + versioned_hashes, + parent_beacon_block_root, + }), + ) + .await?) } /// See also pub async fn new_payload_v4( &self, - payload: ExecutionPayloadV4, + payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, + execution_requests: Requests, ) -> EngineApiResult { let payload = ExecutionPayload::from(payload); let payload_or_attrs = @@ -199,9 +215,17 @@ where .validator .validate_version_specific_fields(EngineApiMessageVersion::V4, payload_or_attrs)?; - let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; - - Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) + Ok(self + .inner + .beacon_consensus + .new_payload( + payload, + ExecutionPayloadSidecar::v4( + CancunPayloadFields { versioned_hashes, parent_beacon_block_root }, + execution_requests, + ), + ) + .await?) } /// Sends a message to the beacon consensus engine to update the fork choice _without_ @@ -257,7 +281,7 @@ where pub async fn get_payload_v1( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { self.inner .payload_store .resolve(payload_id) @@ -281,7 +305,7 @@ where pub async fn get_payload_v2( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { // First we fetch the payload attributes to check the timestamp let attributes = self.get_payload_attributes(payload_id).await?; @@ -316,7 +340,7 @@ where pub async fn get_payload_v3( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { // First we fetch the payload attributes to check the timestamp let attributes = self.get_payload_attributes(payload_id).await?; @@ -351,7 +375,7 @@ where pub async fn get_payload_v4( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { // First we fetch the payload attributes to check the timestamp let attributes = self.get_payload_attributes(payload_id).await?; @@ -371,7 +395,7 @@ where .map_err(|_| EngineApiError::UnknownPayload)? .try_into() .map_err(|_| { - warn!("could not transform built payload into ExecutionPayloadV4"); + warn!("could not transform built payload into ExecutionPayloadV3"); EngineApiError::UnknownPayload }) } @@ -451,18 +475,6 @@ where self.get_payload_bodies_by_range_with(start, count, convert_to_payload_body_v1).await } - /// Returns the execution payload bodies by the range starting at `start`, containing `count` - /// blocks. - /// - /// Same as [`Self::get_payload_bodies_by_range_v1`] but as [`ExecutionPayloadBodiesV2`]. - pub async fn get_payload_bodies_by_range_v2( - &self, - start: BlockNumber, - count: u64, - ) -> EngineApiResult { - self.get_payload_bodies_by_range_with(start, count, convert_to_payload_body_v2).await - } - /// Called to retrieve execution payload bodies by hashes. async fn get_payload_bodies_by_hash_with( &self, @@ -509,16 +521,6 @@ where self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v1).await } - /// Called to retrieve execution payload bodies by hashes. - /// - /// Same as [`Self::get_payload_bodies_by_hash_v1`] but as [`ExecutionPayloadBodiesV2`]. 
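Stepping back to the sidecar changes earlier in this hunk: the V4 assembly isolated for readability, mirroring the `ExecutionPayloadSidecar::v4` call in `new_payload_v4` (inputs are placeholders):

```
use alloy_eips::eip7685::Requests;
use alloy_primitives::B256;
use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar};

// Bundle the Cancun fields plus the EIP-7685 execution requests that ride
// alongside an ExecutionPayloadV3 in engine_newPayloadV4.
fn v4_sidecar(
    versioned_hashes: Vec<B256>,
    parent_beacon_block_root: B256,
    execution_requests: Requests,
) -> ExecutionPayloadSidecar {
    ExecutionPayloadSidecar::v4(
        CancunPayloadFields { versioned_hashes, parent_beacon_block_root },
        execution_requests,
    )
}
```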
- pub async fn get_payload_bodies_by_hash_v2( - &self, - hashes: Vec, - ) -> EngineApiResult { - self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v2).await - } - /// Called to verify network configuration parameters and ensure that Consensus and Execution /// layers are using the latest configuration. pub fn exchange_transition_configuration( @@ -614,7 +616,8 @@ where // To do this, we set the payload attrs to `None` if attribute validation failed, but // we still apply the forkchoice update. if let Err(err) = attr_validation_res { - let fcu_res = self.inner.beacon_consensus.fork_choice_updated(state, None).await?; + let fcu_res = + self.inner.beacon_consensus.fork_choice_updated(state, None, version).await?; // TODO: decide if we want this branch - the FCU INVALID response might be more // useful than the payload attributes INVALID response if fcu_res.is_invalid() { @@ -624,7 +627,7 @@ where } } - Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) + Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs, version).await?) } } @@ -688,15 +691,22 @@ where /// See also async fn new_payload_v4( &self, - payload: ExecutionPayloadV4, + payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, + execution_requests: Requests, ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV4"); let start = Instant::now(); - let gas_used = payload.payload_inner.payload_inner.payload_inner.gas_used; - let res = - Self::new_payload_v4(self, payload, versioned_hashes, parent_beacon_block_root).await; + let gas_used = payload.payload_inner.payload_inner.gas_used; + let res = Self::new_payload_v4( + self, + payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests, + ) + .await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v4.record(elapsed); self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); @@ -765,7 +775,7 @@ where async fn get_payload_v1( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV1"); let start = Instant::now(); let res = Self::get_payload_v1(self, payload_id).await; @@ -785,7 +795,7 @@ where async fn get_payload_v2( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV2"); let start = Instant::now(); let res = Self::get_payload_v2(self, payload_id).await; @@ -805,7 +815,7 @@ where async fn get_payload_v3( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV3"); let start = Instant::now(); let res = Self::get_payload_v3(self, payload_id).await; @@ -825,7 +835,7 @@ where async fn get_payload_v4( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV4"); let start = Instant::now(); let res = Self::get_payload_v4(self, payload_id).await; @@ -846,17 +856,6 @@ where Ok(res.await?) } - async fn get_payload_bodies_by_hash_v2( - &self, - block_hashes: Vec, - ) -> RpcResult { - trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByHashV2"); - let start = Instant::now(); - let res = Self::get_payload_bodies_by_hash_v2(self, block_hashes); - self.inner.metrics.latency.get_payload_bodies_by_hash_v2.record(start.elapsed()); - Ok(res.await?) 
- } - /// Handler for `engine_getPayloadBodiesByRangeV1` /// /// See also @@ -885,18 +884,6 @@ where Ok(res?) } - async fn get_payload_bodies_by_range_v2( - &self, - start: U64, - count: U64, - ) -> RpcResult { - trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV2"); - let start_time = Instant::now(); - let res = Self::get_payload_bodies_by_range_v2(self, start.to(), count.to()).await; - self.inner.metrics.latency.get_payload_bodies_by_range_v2.record(start_time.elapsed()); - Ok(res?) - } - /// Handler for `engine_exchangeTransitionConfigurationV1` /// See also async fn exchange_transition_configuration( diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 2c4216664a..8d0106f9dd 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -44,12 +44,8 @@ pub(crate) struct EngineApiLatencyMetrics { pub(crate) get_payload_v4: Histogram, /// Latency for `engine_getPayloadBodiesByRangeV1` pub(crate) get_payload_bodies_by_range_v1: Histogram, - /// Latency for `engine_getPayloadBodiesByRangeV2` - pub(crate) get_payload_bodies_by_range_v2: Histogram, /// Latency for `engine_getPayloadBodiesByHashV1` pub(crate) get_payload_bodies_by_hash_v1: Histogram, - /// Latency for `engine_getPayloadBodiesByHashV2` - pub(crate) get_payload_bodies_by_hash_v2: Histogram, /// Latency for `engine_exchangeTransitionConfigurationV1` pub(crate) exchange_transition_configuration: Histogram, } diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 1ac5c3d6df..59b63ab3c9 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -3,7 +3,8 @@ use alloy_primitives::{Bytes, Sealable, U256}; use alloy_rlp::{Decodable, Error as RlpError}; use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError, + ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadSidecar, ExecutionPayloadV1, + PayloadError, }; use assert_matches::assert_matches; use reth_primitives::{proofs, Block, SealedBlock, SealedHeader, TransactionSigned, Withdrawals}; @@ -75,7 +76,10 @@ fn payload_validation() { b }); - assert_matches!(try_into_sealed_block(block_with_valid_extra_data, None), Ok(_)); + assert_matches!( + try_into_sealed_block(block_with_valid_extra_data, &ExecutionPayloadSidecar::none()), + Ok(_) + ); // Invalid extra data let block_with_invalid_extra_data = Bytes::from_static(&[0; 33]); @@ -84,22 +88,19 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(invalid_extra_data_block, None), + try_into_sealed_block(invalid_extra_data_block, &ExecutionPayloadSidecar::none()), Err(PayloadError::ExtraData(data)) if data == block_with_invalid_extra_data ); - // // Zero base fee - // #[cfg(not(feature = "optimism"))] - // { - // let block_with_zero_base_fee = transform_block(block.clone(), |mut b| { - // b.header.base_fee_per_gas = Some(0); - // b - // }); - // assert_matches!( - // try_into_sealed_block(block_with_zero_base_fee, None), - // Err(PayloadError::BaseFee(val)) if val.is_zero() - // ); - // } + // Zero base fee + // let block_with_zero_base_fee = transform_block(block.clone(), |mut b| { + // b.header.base_fee_per_gas = Some(0); + // b + // }); + // assert_matches!( + // try_into_sealed_block(block_with_zero_base_fee, &ExecutionPayloadSidecar::none()), + // Err(PayloadError::BaseFee(val)) if val.is_zero() + // ); // Invalid encoded transactions 
let mut payload_with_invalid_txs: ExecutionPayloadV1 = block_to_payload_v1(block.clone()); @@ -116,8 +117,7 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_ommers.clone(),None), - + try_into_sealed_block(block_with_ommers.clone(), &ExecutionPayloadSidecar::none()), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_ommers.block_hash() ); @@ -128,9 +128,8 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_difficulty.clone(),None), + try_into_sealed_block(block_with_difficulty.clone(), &ExecutionPayloadSidecar::none()), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_difficulty.block_hash() - ); // Non-zero nonce @@ -139,9 +138,8 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_nonce.clone(),None), + try_into_sealed_block(block_with_nonce.clone(), &ExecutionPayloadSidecar::none()), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_nonce.block_hash() - ); // Valid block diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 8452601f9b..d63872906c 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -30,6 +30,7 @@ reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-api.workspace = true reth-trie.workspace = true +reth-node-api.workspace = true # bsc-reth reth-bsc-primitives = { workspace = true, optional = true } @@ -43,6 +44,7 @@ alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types.workspace = true alloy-rpc-types-mev.workspace = true +alloy-consensus.workspace = true # rpc jsonrpsee = { workspace = true, features = ["server", "macros"] } diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 7033caa64a..03a4930e4b 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -1,7 +1,7 @@ //! Implementation of the [`jsonrpsee`] generated [`EthApiServer`] trait. Handles RPC requests for //! the `eth_` namespace. use alloy_dyn_abi::TypedData; -use alloy_eips::eip2930::AccessListResult; +use alloy_eips::{eip2930::AccessListResult, BlockId, BlockNumberOrTag}; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, B256, B64, U256, U64}; use alloy_rpc_types::{ @@ -13,7 +13,6 @@ use alloy_rpc_types::{ }; use alloy_rpc_types_eth::transaction::TransactionRequest; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{BlockId, BlockNumberOrTag}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use tracing::trace; @@ -510,7 +509,7 @@ where trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash"); Ok(EthTransactions::transaction_by_hash(self, hash) .await?
- .map(|tx| tx.into_transaction::())) + .map(|tx| tx.into_transaction(self.tx_resp_builder()))) } /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 00ad7cced2..0ce9262ef2 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -2,25 +2,25 @@ use std::sync::Arc; +use alloy_eips::BlockId; use alloy_rpc_types::{BlockSidecar, Header, Index}; use futures::Future; -use reth_primitives::{BlockId, Receipt, SealedBlock, SealedBlockWithSenders}; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders}; use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; -use reth_rpc_eth_types::EthStateCache; use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; -use crate::{FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; +use crate::{node::RpcNodeCoreExt, FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; +/// Result type of the fetched block receipts. +pub type BlockReceiptsResult = Result>>, E>; +/// Result type of the fetched block and its receipts. +pub type BlockAndReceiptsResult = Result>)>, E>; + /// Block related functions for the [`EthApiServer`](crate::EthApiServer) trait in the /// `eth_` namespace. pub trait EthBlocks: LoadBlock { - /// Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider(&self) -> impl HeaderProvider; - /// Returns the block header for the given block id. fn rpc_block_header( &self, @@ -47,22 +47,23 @@ pub trait EthBlocks: LoadBlock { async move { let Some(block) = self.block_with_senders(block_id).await? else { return Ok(None) }; let block_hash = block.hash(); - let mut total_difficulty = EthBlocks::provider(self) + let mut total_difficulty = self + .provider() .header_td_by_number(block.number) .map_err(Self::Error::from_eth_err)?; if total_difficulty.is_none() { // if we failed to find td after we successfully loaded the block, try again using // the hash this only matters if the chain is currently transitioning the merge block and there's a reorg: - total_difficulty = EthBlocks::provider(self) - .header_td(&block.hash()) - .map_err(Self::Error::from_eth_err)?; + total_difficulty = + self.provider().header_td(&block.hash()).map_err(Self::Error::from_eth_err)?; } - let block = from_block::( - block.unseal(), + let block = from_block( + (*block).clone().unseal(), total_difficulty.unwrap_or_default(), full.into(), Some(block_hash), + self.tx_resp_builder(), ) .map_err(Self::Error::from_eth_err)?; Ok(Some(block)) @@ -79,13 +80,15 @@ pub trait EthBlocks: LoadBlock { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - return Ok(LoadBlock::provider(self) + return Ok(self + .provider() .pending_block() .map_err(Self::Error::from_eth_err)? .map(|block| block.body.transactions.len())) } - let block_hash = match LoadBlock::provider(self) + let block_hash = match self + .provider() .block_hash_for_id(block_id) .map_err(Self::Error::from_eth_err)? { @@ -95,28 +98,30 @@ pub trait EthBlocks: LoadBlock { Ok(self .cache() - .get_block_transactions(block_hash) + .get_sealed_block_with_senders(block_hash) .await .map_err(Self::Error::from_eth_err)? - .map(|txs| txs.len())) + .map(|b| b.body.transactions.len())) } } /// Helper function for `eth_getBlockReceipts`. 
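The new `BlockReceiptsResult` and `BlockAndReceiptsResult` aliases introduced above exist to keep the trait method signatures readable and to appease `clippy::type_complexity`. A minimal sketch of the pattern with placeholder domain types (the real aliases are generic over the network's receipt type, which was lost in this rendering of the diff):

```rust
use std::sync::Arc;

// Placeholder domain types standing in for reth's real ones.
struct SealedBlockWithSenders;
struct TxReceipt;

// Without an alias, the nested generic return type is hard to read and trips
// clippy's `type_complexity` lint at every use site.
type BlockReceiptsResult<E> = Result<Option<Vec<TxReceipt>>, E>;
type BlockAndReceiptsResult<E> = Result<Option<(SealedBlockWithSenders, Arc<Vec<TxReceipt>>)>, E>;

fn block_receipts() -> BlockReceiptsResult<String> {
    Ok(Some(vec![TxReceipt]))
}

fn load_block_and_receipts() -> BlockAndReceiptsResult<String> {
    // Receipts stay behind an `Arc` so the cached vector is shared, not copied.
    Ok(Some((SealedBlockWithSenders, Arc::new(vec![TxReceipt]))))
}

fn main() {
    assert!(block_receipts().is_ok());
    assert!(load_block_and_receipts().is_ok());
}
```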
/// /// Returns all transaction receipts in block, or `None` if block wasn't found. + #[allow(clippy::type_complexity)] fn block_receipts( &self, block_id: BlockId, - ) -> impl Future>>, Self::Error>> + Send + ) -> impl Future> + Send where Self: LoadReceipt; /// Helper method that loads a bock and all its receipts. + #[allow(clippy::type_complexity)] fn load_block_and_receipts( &self, block_id: BlockId, - ) -> impl Future>)>, Self::Error>> + Send + ) -> impl Future> + Send where Self: LoadReceipt, { @@ -124,7 +129,8 @@ pub trait EthBlocks: LoadBlock { if block_id.is_pending() { // First, try to get the pending block from the provider, in case we already // received the actual pending block from the CL. - if let Some((block, receipts)) = LoadBlock::provider(self) + if let Some((block, receipts)) = self + .provider() .pending_block_and_receipts() .map_err(Self::Error::from_eth_err)? { @@ -137,14 +143,15 @@ pub trait EthBlocks: LoadBlock { } } - if let Some(block_hash) = LoadBlock::provider(self) - .block_hash_for_id(block_id) - .map_err(Self::Error::from_eth_err)? + if let Some(block_hash) = + self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)? { - return LoadReceipt::cache(self) + return self + .cache() .get_block_and_receipts(block_hash) .await .map_err(Self::Error::from_eth_err) + .map(|b| b.map(|(b, r)| (b.block.clone(), r))) } Ok(None) @@ -158,7 +165,7 @@ pub trait EthBlocks: LoadBlock { &self, block_id: BlockId, ) -> Result>, Self::Error> { - LoadBlock::provider(self).ommers_by_id(block_id).map_err(Self::Error::from_eth_err) + self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err) } /// Returns uncle block at given index in given block. @@ -173,14 +180,12 @@ pub trait EthBlocks: LoadBlock { async move { let uncles = if block_id.is_pending() { // Pending block can be fetched directly without need for caching - LoadBlock::provider(self) + self.provider() .pending_block() .map_err(Self::Error::from_eth_err)? .map(|block| block.body.ommers) } else { - LoadBlock::provider(self) - .ommers_by_id(block_id) - .map_err(Self::Error::from_eth_err)? + self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err)? } .unwrap_or_default(); @@ -201,9 +206,8 @@ pub trait EthBlocks: LoadBlock { return Ok(None); } - let sidecars = if let Some(block_hash) = LoadBlock::provider(self) - .block_hash_for_id(block_id) - .map_err(Self::Error::from_eth_err)? + let sidecars = if let Some(block_hash) = + self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)? { self.cache().get_sidecars(block_hash).await.map_err(Self::Error::from_eth_err)? } else { @@ -229,52 +233,32 @@ pub trait EthBlocks: LoadBlock { /// Loads a block from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. -pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { - // Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider(&self) -> impl BlockReaderIdExt; - - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - - /// Returns the block object for the given block id. 
- fn block( - &self, - block_id: BlockId, - ) -> impl Future, Self::Error>> + Send { - async move { - self.block_with_senders(block_id) - .await - .map(|maybe_block| maybe_block.map(|block| block.block)) - } - } - +pub trait LoadBlock: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt { /// Returns the block object for the given block id. fn block_with_senders( &self, block_id: BlockId, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future>, Self::Error>> + Send { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - let maybe_pending = LoadPendingBlock::provider(self) + if let Some(pending_block) = self + .provider() .pending_block_with_senders() - .map_err(Self::Error::from_eth_err)?; - return if maybe_pending.is_some() { - Ok(maybe_pending) - } else { - // If no pending block from provider, try to get local pending block - return match self.local_pending_block().await? { - Some((block, _)) => Ok(Some(block)), - None => Ok(None), - }; + .map_err(Self::Error::from_eth_err)? + { + return Ok(Some(Arc::new(pending_block))); + } + + // If no pending block from provider, try to get local pending block + return match self.local_pending_block().await? { + Some((block, _)) => Ok(Some(Arc::new(block))), + None => Ok(None), }; } - let block_hash = match LoadPendingBlock::provider(self) + let block_hash = match self + .provider() .block_hash_for_id(block_id) .map_err(Self::Error::from_eth_err)? { diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index c90995eb7b..d1ad6b8e28 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -4,9 +4,10 @@ use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace}; use crate::{ AsEthApiError, FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, RpcBlock, + RpcNodeCore, }; use alloy_eips::{eip1559::calc_next_block_base_fee, eip2930::AccessListResult}; -use alloy_primitives::{Bytes, TxKind, B256, U256}; +use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use alloy_rpc_types::{ simulate::{SimBlock, SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, @@ -23,7 +24,7 @@ use reth_primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, HaltReason, ResultAndState, TransactTo, TxEnv, }, - Header, TransactionSignedEcRecovered, + Header, TransactionSigned, }; use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider, StateProvider}; use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; @@ -42,6 +43,9 @@ use revm::{Database, DatabaseCommit, GetInspector}; use revm_inspectors::{access_list::AccessListInspector, transfer::TransferInspector}; use tracing::trace; +/// Result type for `eth_simulateV1` RPC method. +pub type SimulatedBlocksResult = Result>>, E>; + /// Execution related functions for the [`EthApiServer`](crate::EthApiServer) trait in /// the `eth_` namespace. pub trait EthCall: Call + LoadPendingBlock { @@ -59,12 +63,12 @@ pub trait EthCall: Call + LoadPendingBlock { /// The transactions are packed into individual blocks. Overrides can be provided. 
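`block_with_senders` now hands out `Arc<SealedBlockWithSenders>` straight from the cache, so every consumer shares one allocation instead of deep-cloning block bodies. A standalone sketch of why that matters; the cache type here is illustrative, not reth's `EthStateCache`:

```rust
use std::sync::Arc;

// Stand-in for a fully recovered block; the real type owns the header,
// transactions and sender addresses, so deep clones are expensive.
struct SealedBlockWithSenders {
    number: u64,
    senders: Vec<[u8; 20]>,
}

// A cache handing out `Arc`s: every hit is a reference-count bump, not a copy.
struct BlockCache {
    latest: Arc<SealedBlockWithSenders>,
}

impl BlockCache {
    fn get_sealed_block_with_senders(&self) -> Option<Arc<SealedBlockWithSenders>> {
        Some(Arc::clone(&self.latest))
    }
}

fn main() {
    let cache = BlockCache {
        latest: Arc::new(SealedBlockWithSenders { number: 100, senders: vec![[0u8; 20]] }),
    };
    // Two consumers (say, `eth_getBlockByNumber` and a tracer) share the block.
    let a = cache.get_sealed_block_with_senders().unwrap();
    let b = cache.get_sealed_block_with_senders().unwrap();
    assert_eq!(Arc::strong_count(&a), 3); // cache + two handles
    println!("block {} shared with {} sender(s), no deep copies", a.number, b.senders.len());
}
```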
/// /// See also: + #[allow(clippy::type_complexity)] fn simulate_v1( &self, payload: SimulatePayload, block: Option, - ) -> impl Future>>, Self::Error>> - + Send + ) -> impl Future> + Send where Self: LoadBlock + FullEthApiTypes, { @@ -91,9 +95,10 @@ pub trait EthCall: Call + LoadPendingBlock { // Gas cap for entire operation let total_gas_limit = self.call_gas_limit(); - let base_block = self.block(block).await?.ok_or(EthApiError::HeaderNotFound(block))?; + let base_block = + self.block_with_senders(block).await?.ok_or(EthApiError::HeaderNotFound(block))?; let mut parent_hash = base_block.header.hash(); - let total_difficulty = LoadPendingBlock::provider(self) + let total_difficulty = RpcNodeCore::provider(self) .header_td_by_number(block_env.number.to()) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(block))?; @@ -115,7 +120,7 @@ pub trait EthCall: Call + LoadPendingBlock { block_env.timestamp += U256::from(1); if validation { - let chain_spec = LoadPendingBlock::provider(&this).chain_spec(); + let chain_spec = RpcNodeCore::provider(&this).chain_spec(); let base_fee_params = chain_spec.base_fee_params_at_timestamp(block_env.timestamp.to()); let base_fee = if let Some(latest) = blocks.last() { @@ -188,7 +193,7 @@ pub trait EthCall: Call + LoadPendingBlock { results.push((env.tx.caller, res.result)); } - let block = simulate::build_block::( + let block = simulate::build_block( results, transactions, &block_env, @@ -196,6 +201,7 @@ pub trait EthCall: Call + LoadPendingBlock { total_difficulty, return_full_transactions, &db, + this.tx_resp_builder(), )?; parent_hash = block.inner.header.hash; @@ -254,7 +260,8 @@ pub trait EthCall: Call + LoadPendingBlock { // if it's not pending, we should always use block_hash over block_number to ensure that // different provider calls query data related to the same block. if !is_block_target_pending { - target_block = LoadBlock::provider(self) + target_block = self + .provider() .block_hash_for_id(target_block) .map_err(|_| EthApiError::HeaderNotFound(target_block))? .ok_or_else(|| EthApiError::HeaderNotFound(target_block))? @@ -291,12 +298,12 @@ pub trait EthCall: Call + LoadPendingBlock { if replay_block_txs { // only need to replay the transactions in the block if not all transactions are // to be replayed - let transactions = block.into_transactions_ecrecovered().take(num_txs); - for tx in transactions { + let transactions = block.transactions_with_sender().take(num_txs); + for (signer, tx) in transactions { let env = EnvWithHandlerCfg::new_with_cfg_env( cfg.clone(), block_env.clone(), - Call::evm_config(&this).tx_env(&tx), + RpcNodeCore::evm_config(&this).tx_env(tx, *signer), ); let (res, _) = this.transact(&mut db, env)?; db.commit(res.state); @@ -448,7 +455,7 @@ pub trait EthCall: Call + LoadPendingBlock { } /// Executes code on state. -pub trait Call: LoadState + SpawnBlocking { +pub trait Call: LoadState> + SpawnBlocking { /// Returns default gas limit to use for `eth_call` and tracing RPC methods. /// /// Data access in default trait method implementations. @@ -457,11 +464,6 @@ pub trait Call: LoadState + SpawnBlocking { /// Returns the maximum number of blocks accepted for `eth_simulateV1`. fn max_simulate_blocks(&self) -> u64; - /// Returns a handle for reading evm config. - /// - /// Data access in default (L1) trait method implementations. - fn evm_config(&self) -> &impl ConfigureEvm
; - /// Executes the closure with the state that corresponds to the given [`BlockId`]. fn with_state_at_block(&self, at: BlockId, f: F) -> Result where @@ -541,6 +543,16 @@ pub trait Call: LoadState + SpawnBlocking { /// /// This returns the configured [`EnvWithHandlerCfg`] for the given [`TransactionRequest`] at /// the given [`BlockId`] and with configured call settings: `prepare_call_env`. + /// + /// This is primarily used by `eth_call`. + /// + /// # Blocking behaviour + /// + /// This assumes executing the call is relatively more expensive on IO than CPU because it + /// transacts a single transaction on an empty in memory database. Because `eth_call`s are + /// usually allowed to consume a lot of gas, this also allows a lot of memory operations so + /// we assume this is not primarily CPU bound and instead spawn the call on a regular tokio task + /// instead, where blocking IO is less problematic. fn spawn_with_call_at( &self, request: TransactionRequest, @@ -558,7 +570,7 @@ pub trait Call: LoadState + SpawnBlocking { async move { let (cfg, block_env, at) = self.evm_env_at(at).await?; let this = self.clone(); - self.spawn_tracing(move |_| { + self.spawn_blocking_io(move |_| { let state = this.state_at_block_id(at)?; let mut db = CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); @@ -604,9 +616,8 @@ pub trait Call: LoadState + SpawnBlocking { // we need to get the state of the parent block because we're essentially replaying the // block the transaction is included in let parent_block = block.parent_hash; - let block_txs = block.into_transactions_ecrecovered(); let parent_timestamp = self - .block(parent_block.into()) + .block_with_senders(parent_block.into()) .await? .map(|block| block.timestamp) .ok_or(EthApiError::UnknownParentBlock)?; @@ -614,6 +625,7 @@ pub trait Call: LoadState + SpawnBlocking { let this = self.clone(); self.spawn_with_state_at_block(parent_block.into(), move |state| { let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let block_txs = block.transactions_with_sender(); // replay all transactions prior to the targeted transaction this.replay_transactions_until( @@ -625,7 +637,7 @@ pub trait Call: LoadState + SpawnBlocking { parent_timestamp, )?; - let tx_env = Call::evm_config(&this).tx_env(&tx); + let tx_env = RpcNodeCore::evm_config(&this).tx_env(tx.as_signed(), tx.signer()); #[cfg(feature = "bsc")] let tx_env = { let mut tx_env = tx_env; @@ -653,32 +665,32 @@ pub trait Call: LoadState + SpawnBlocking { /// Note: This assumes the target transaction is in the given iterator. /// Returns the index of the target transaction in the given iterator. 
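The doc comment added to `spawn_with_call_at` above argues that an `eth_call` is IO-bound rather than CPU-bound, which is why it now goes through `spawn_blocking_io` instead of the tracing pool. A toy illustration of the two strategies, assuming a tokio runtime (`rt-multi-thread` and `macros` features); none of this is reth's actual task-spawner API:

```rust
use std::thread;

#[tokio::main]
async fn main() {
    // `eth_call`: assumed IO-bound (state reads dominate), so tokio's blocking
    // pool fits: tasks may block on IO without starving the async reactor.
    let call = tokio::task::spawn_blocking(|| {
        // stand-in for transacting one tx against an in-memory CacheDB
        std::fs::metadata(".").map(|m| m.is_dir()).unwrap_or(false)
    });

    // CPU-heavy tracing is kept off that pool; reth routes it to a dedicated,
    // bounded pool instead, modeled here with a plain OS thread.
    let trace = thread::spawn(|| (0..1_000_000u64).sum::<u64>());

    println!("call ok: {}", call.await.unwrap());
    println!("trace sum: {}", trace.join().unwrap());
}
```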
#[allow(unused_variables)] - fn replay_transactions_until( + fn replay_transactions_until<'a, DB, I>( &self, - db: &mut CacheDB, + db: &mut DB, cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, - transactions: impl IntoIterator, + transactions: I, target_tx_hash: B256, _parent_timestamp: u64, ) -> Result where - DB: DatabaseRef, + DB: Database + DatabaseCommit, EthApiError: From, + I: IntoIterator, { #[allow(clippy::redundant_clone)] let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env.clone(), Default::default()); let mut evm = self.evm_config().evm_with_env(db, env); let mut index = 0; - for tx in transactions { + for (sender, tx) in transactions { if tx.hash() == target_tx_hash { // reached the target transaction break } - let sender = tx.signer(); - self.evm_config().fill_tx_env(evm.tx_mut(), &tx.into_signed(), sender); + self.evm_config().fill_tx_env(evm.tx_mut(), tx, *sender); evm.transact_commit().map_err(Self::Error::from_evm_err)?; index += 1; } @@ -867,7 +879,7 @@ pub trait Call: LoadState + SpawnBlocking { // Update the gas used based on the new result. gas_used = res.result.gas_used(); // Update the gas limit estimates (highest and lowest) based on the execution result. - self.update_estimated_gas_range( + update_estimated_gas_range( res.result, optimistic_gas_limit, &mut highest_gas_limit, @@ -900,7 +912,11 @@ pub trait Call: LoadState + SpawnBlocking { // Execute transaction and handle potential gas errors, adjusting limits accordingly. match self.transact(&mut db, env.clone()) { Err(err) if err.is_gas_too_high() => { - // Increase the lowest gas limit if gas is too high + // Decrease the highest gas limit if gas is too high + highest_gas_limit = mid_gas_limit; + } + Err(err) if err.is_gas_too_low() => { + // Increase the lowest gas limit if gas is too low lowest_gas_limit = mid_gas_limit; } // Handle other cases, including successful transactions. @@ -908,7 +924,7 @@ pub trait Call: LoadState + SpawnBlocking { // Unpack the result and environment if the transaction was successful. (res, env) = ethres?; // Update the estimated gas range based on the transaction result. - self.update_estimated_gas_range( + update_estimated_gas_range( res.result, mid_gas_limit, &mut highest_gas_limit, @@ -924,66 +940,18 @@ pub trait Call: LoadState + SpawnBlocking { Ok(U256::from(highest_gas_limit)) } - /// Updates the highest and lowest gas limits for binary search based on the execution result. - /// - /// This function refines the gas limit estimates used in a binary search to find the optimal - /// gas limit for a transaction. It adjusts the highest or lowest gas limits depending on - /// whether the execution succeeded, reverted, or halted due to specific reasons. - #[inline] - fn update_estimated_gas_range( - &self, - result: ExecutionResult, - tx_gas_limit: u64, - highest_gas_limit: &mut u64, - lowest_gas_limit: &mut u64, - ) -> Result<(), Self::Error> { - match result { - ExecutionResult::Success { .. } => { - // Cap the highest gas limit with the succeeding gas limit. - *highest_gas_limit = tx_gas_limit; - } - ExecutionResult::Revert { .. } => { - // Increase the lowest gas limit. - *lowest_gas_limit = tx_gas_limit; - } - ExecutionResult::Halt { reason, .. } => { - match reason { - HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => { - // Both `OutOfGas` and `InvalidEFOpcode` can occur dynamically if the gas - // left is too low. Treat this as an out of gas - // condition, knowing that the call succeeds with a - // higher gas limit. 
- // - // Common usage of invalid opcode in OpenZeppelin: - // - - // Increase the lowest gas limit. - *lowest_gas_limit = tx_gas_limit; - } - err => { - // These cases should be unreachable because we know the transaction - // succeeds, but if they occur, treat them as an - // error. - return Err(RpcInvalidTransactionError::EvmHalt(err).into_eth_err()) - } - } - } - }; - - Ok(()) - } - /// Executes the requests again after an out of gas error to check if the error is gas related /// or not #[inline] - fn map_out_of_gas_err( + fn map_out_of_gas_err( &self, env_gas_limit: U256, mut env: EnvWithHandlerCfg, - db: &mut CacheDB>, + db: &mut DB, ) -> Self::Error where - S: StateProvider, + DB: Database, + EthApiError: From, { let req_gas_limit = env.tx.gas_limit; env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); @@ -1171,3 +1139,51 @@ pub trait Call: LoadState + SpawnBlocking { Ok(env) } } + +/// Updates the highest and lowest gas limits for binary search based on the execution result. +/// +/// This function refines the gas limit estimates used in a binary search to find the optimal +/// gas limit for a transaction. It adjusts the highest or lowest gas limits depending on +/// whether the execution succeeded, reverted, or halted due to specific reasons. +#[inline] +fn update_estimated_gas_range( + result: ExecutionResult, + tx_gas_limit: u64, + highest_gas_limit: &mut u64, + lowest_gas_limit: &mut u64, +) -> Result<(), EthApiError> { + match result { + ExecutionResult::Success { .. } => { + // Cap the highest gas limit with the succeeding gas limit. + *highest_gas_limit = tx_gas_limit; + } + ExecutionResult::Revert { .. } => { + // Increase the lowest gas limit. + *lowest_gas_limit = tx_gas_limit; + } + ExecutionResult::Halt { reason, .. } => { + match reason { + HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => { + // Both `OutOfGas` and `InvalidEFOpcode` can occur dynamically if the gas + // left is too low. Treat this as an out of gas + // condition, knowing that the call succeeds with a + // higher gas limit. + // + // Common usage of invalid opcode in OpenZeppelin: + // + + // Increase the lowest gas limit. + *lowest_gas_limit = tx_gas_limit; + } + err => { + // These cases should be unreachable because we know the transaction + // succeeds, but if they occur, treat them as an + // error. + return Err(RpcInvalidTransactionError::EvmHalt(err).into_eth_err()) + } + } + } + }; + + Ok(()) +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/error.rs b/crates/rpc/rpc-eth-api/src/helpers/error.rs index 041a019052..1d991b8e65 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/error.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/error.rs @@ -59,6 +59,16 @@ pub trait AsEthApiError { false } + + /// Returns `true` if error is + /// [`RpcInvalidTransactionError::GasTooLow`](reth_rpc_eth_types::RpcInvalidTransactionError::GasTooLow). 
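Taken together, the changes above adjust the gas-estimation binary search: `update_estimated_gas_range` becomes a free function, and the corrected `is_gas_too_high` branch plus the new `is_gas_too_low` branch move the right bound. A compressed, self-contained model of that search; the halt handling and the gas-error branches from the hunks are folded into the two-way update for brevity:

```rust
// Standalone model of the estimate-gas binary search. The "EVM" here is a
// closure: a call succeeds iff it is given at least `needed` gas.
enum ExecutionResult { Success, Revert }

fn update_estimated_gas_range(
    result: ExecutionResult,
    tx_gas_limit: u64,
    highest: &mut u64,
    lowest: &mut u64,
) {
    match result {
        // Succeeded: the optimum is at or below this limit.
        ExecutionResult::Success => *highest = tx_gas_limit,
        // Reverted (treated like out-of-gas): the optimum is above it.
        ExecutionResult::Revert => *lowest = tx_gas_limit,
    }
}

fn estimate_gas(needed: u64, block_gas_limit: u64) -> u64 {
    let (mut lowest, mut highest) = (21_000u64, block_gas_limit);
    // Simplified convergence condition; the real loop also applies an error
    // ratio cutoff instead of searching to a window of one.
    while highest - lowest > 1 {
        let mid = (highest + lowest) / 2;
        let result =
            if mid >= needed { ExecutionResult::Success } else { ExecutionResult::Revert };
        update_estimated_gas_range(result, mid, &mut highest, &mut lowest);
    }
    highest
}

fn main() {
    let estimate = estimate_gas(84_321, 30_000_000);
    assert!(estimate >= 84_321);
    println!("estimated gas limit: {estimate}");
}
```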
+ fn is_gas_too_low(&self) -> bool { + if let Some(err) = self.as_err() { + return err.is_gas_too_low() + } + + false + } } impl AsEthApiError for EthApiError { diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index b6dcef4708..18d2d63114 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -3,11 +3,11 @@ use alloy_primitives::U256; use alloy_rpc_types::{BlockNumberOrTag, FeeHistory}; use futures::Future; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; +use reth_chainspec::EthChainSpec; +use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; use reth_rpc_eth_types::{ - fee_history::calculate_reward_percentiles_for_block, EthApiError, EthStateCache, - FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, + fee_history::calculate_reward_percentiles_for_block, EthApiError, FeeHistoryCache, + FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, }; use tracing::debug; @@ -82,7 +82,8 @@ pub trait EthFees: LoadFee { block_count = block_count.saturating_sub(1); } - let end_block = LoadFee::provider(self) + let end_block = self + .provider() .block_number_for_id(newest_block.into()) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(newest_block.into()))?; @@ -147,13 +148,12 @@ pub trait EthFees: LoadFee { // Also need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the // next block base_fee_per_gas - .push(last_entry.next_block_base_fee(LoadFee::provider(self).chain_spec()) - as u128); + .push(last_entry.next_block_base_fee(self.provider().chain_spec()) as u128); base_fee_per_blob_gas.push(last_entry.next_block_blob_fee().unwrap_or_default()); } else { // read the requested header range - let headers = LoadFee::provider(self) + let headers = self.provider() .sealed_headers_range(start_block..=end_block) .map_err(Self::Error::from_eth_err)?; if headers.len() != block_count as usize { @@ -167,13 +167,13 @@ pub trait EthFees: LoadFee { base_fee_per_blob_gas.push(header.blob_fee().unwrap_or_default()); blob_gas_used_ratio.push( header.blob_gas_used.unwrap_or_default() as f64 - / reth_primitives::constants::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, + / alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, ); // Percentiles were specified, so we need to collect reward percentile ino if let Some(percentiles) = &reward_percentiles { - let (transactions, receipts) = LoadFee::cache(self) - .get_transactions_and_receipts(header.hash()) + let (block, receipts) = self.cache() + .get_block_and_receipts(header.hash()) .await .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::InvalidBlockRange)?; @@ -182,7 +182,7 @@ pub trait EthFees: LoadFee { percentiles, header.gas_used, header.base_fee_per_gas.unwrap_or_default(), - &transactions, + &block.body.transactions, &receipts, ) .unwrap_or_default(), @@ -197,7 +197,7 @@ pub trait EthFees: LoadFee { // The unwrap is safe since we checked earlier that we got at least 1 header. let last_header = headers.last().expect("is present"); base_fee_per_gas.push( - LoadFee::provider(self) + self.provider() .chain_spec() .base_fee_params_at_timestamp(last_header.timestamp) .next_block_base_fee( @@ -242,22 +242,10 @@ pub trait EthFees: LoadFee { /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` fees RPC methods. 
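With `MAX_DATA_GAS_PER_BLOCK` now sourced from `alloy_eips::eip4844`, the fee-history loop above computes a blob gas ratio per header. A worked example, assuming the Cancun-era value of 6 blobs * 131,072 data gas each:

```rust
// Assumes the Cancun-era constants: 6 blobs of 131_072 data gas each.
const DATA_GAS_PER_BLOB: u64 = 131_072;
const MAX_DATA_GAS_PER_BLOCK: u64 = 6 * DATA_GAS_PER_BLOB; // 786_432

fn blob_gas_used_ratio(blob_gas_used: u64) -> f64 {
    blob_gas_used as f64 / MAX_DATA_GAS_PER_BLOCK as f64
}

fn main() {
    // A block carrying three single-blob transactions:
    let used = 3 * DATA_GAS_PER_BLOB;
    // 393_216 / 786_432 = 0.5, the value pushed into `blob_gas_used_ratio`.
    assert_eq!(blob_gas_used_ratio(used), 0.5);
    println!("blobGasUsedRatio = {}", blob_gas_used_ratio(used));
}
```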
pub trait LoadFee: LoadBlock { - // Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider( - &self, - ) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider; - - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - /// Returns a handle for reading gas price. /// /// Data access in default (L1) trait method implementations. - fn gas_oracle(&self) -> &GasPriceOracle; + fn gas_oracle(&self) -> &GasPriceOracle; /// Returns a handle for reading fee history data from memory. /// @@ -284,19 +272,19 @@ pub trait LoadFee: LoadBlock { /// Returns the EIP-1559 fees if they are set, otherwise fetches a suggested gas price for /// EIP-1559 transactions. /// - /// Returns (`max_fee`, `priority_fee`) + /// Returns (`base_fee`, `priority_fee`) fn eip1559_fees( &self, - max_fee_per_gas: Option, + base_fee: Option, max_priority_fee_per_gas: Option, ) -> impl Future> + Send { async move { - let max_fee_per_gas = match max_fee_per_gas { - Some(max_fee_per_gas) => max_fee_per_gas, + let base_fee = match base_fee { + Some(base_fee) => base_fee, None => { // fetch pending base fee let base_fee = self - .block(BlockNumberOrTag::Pending.into()) + .block_with_senders(BlockNumberOrTag::Pending.into()) .await? .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Pending.into()))? .base_fee_per_gas @@ -311,7 +299,7 @@ pub trait LoadFee: LoadBlock { Some(max_priority_fee_per_gas) => max_priority_fee_per_gas, None => self.suggested_priority_fee().await?, }; - Ok((max_fee_per_gas, max_priority_fee_per_gas)) + Ok((base_fee, max_priority_fee_per_gas)) } } @@ -332,7 +320,7 @@ pub trait LoadFee: LoadBlock { /// /// See also: fn gas_price(&self) -> impl Future> + Send { - let header = self.block(BlockNumberOrTag::Latest.into()); + let header = self.block_with_senders(BlockNumberOrTag::Latest.into()); let suggested_tip = self.suggested_priority_fee(); async move { let (header, suggested_tip) = futures::try_join!(header, suggested_tip)?; @@ -344,9 +332,9 @@ pub trait LoadFee: LoadBlock { /// Returns a suggestion for a base fee for blob transactions. fn blob_base_fee(&self) -> impl Future> + Send { async move { - self.block(BlockNumberOrTag::Latest.into()) + self.block_with_senders(BlockNumberOrTag::Latest.into()) .await? 
- .and_then(|h: reth_primitives::SealedBlock| h.next_block_blob_fee()) + .and_then(|h| h.next_block_blob_fee()) .ok_or(EthApiError::ExcessBlobGasNotSet.into()) .map(U256::from) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 08e6c68f1e..8e508325e9 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -3,31 +3,35 @@ use std::time::{Duration, Instant}; -use crate::{EthApiTypes, FromEthApiError, FromEvmError}; +use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_eips::{ + eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, +}; use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rpc_types::BlockNumberOrTag; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; +use reth_evm::{ + state_change::post_block_withdrawals_balance_increments, system_calls::SystemCaller, + ConfigureEvm, ConfigureEvmEnv, +}; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE, EMPTY_ROOT_HASH}, proofs::calculate_transaction_root, revm_primitives::{ BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, ResultAndState, SpecId, }, - Block, BlockBody, Header, Receipt, Requests, SealedBlockWithSenders, SealedHeader, - TransactionSignedEcRecovered, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, Header, Receipt, SealedBlockWithSenders, SealedHeader, + TransactionSignedEcRecovered, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, ReceiptProvider, StateProviderFactory, }; -use reth_revm::{ - database::StateProviderDatabase, state_change::post_block_withdrawals_balance_increments, -}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; use reth_trie::HashedPostState; @@ -40,32 +44,22 @@ use super::SpawnBlocking; /// Loads a pending block from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. -pub trait LoadPendingBlock: EthApiTypes { - /// Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider( - &self, - ) -> impl BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider - + StateProviderFactory; - - /// Returns a handle for reading data from transaction pool. - /// - /// Data access in default (L1) trait method implementations. - fn pool(&self) -> impl TransactionPool; - +pub trait LoadPendingBlock: + EthApiTypes + + RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, + Pool: TransactionPool, + Evm: ConfigureEvm
, + > +{ /// Returns a handle to the pending block. /// /// Data access in default (L1) trait method implementations. fn pending_block(&self) -> &Mutex>; - /// Returns a handle for reading evm config. - /// - /// Data access in default (L1) trait method implementations. - fn evm_config(&self) -> &impl ConfigureEvm
; - /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block /// /// If no pending block is available, this will derive it from the `latest` block @@ -155,7 +149,7 @@ pub trait LoadPendingBlock: EthApiTypes { pending.origin.header().hash() == pending_block.block.parent_hash && now <= pending_block.expires_at { - return Ok(Some((pending_block.block.clone(), pending_block.receipts.clone()))) + return Ok(Some((pending_block.block.clone(), pending_block.receipts.clone()))); } } @@ -260,8 +254,7 @@ pub trait LoadPendingBlock: EthApiTypes { let chain_spec = self.provider().chain_spec(); - let evm_config = self.evm_config().clone(); - let mut system_caller = SystemCaller::new(&evm_config, chain_spec.clone()); + let mut system_caller = SystemCaller::new(self.evm_config().clone(), chain_spec.clone()); let parent_beacon_block_root = if origin.is_actual_pending() { // apply eip-4788 pre block contract call if we got the block from the CL with the real @@ -323,7 +316,7 @@ pub trait LoadPendingBlock: EthApiTypes { let env = Env::boxed( cfg.cfg_env.clone(), block_env.clone(), - Self::evm_config(self).tx_env(&tx), + Self::evm_config(self).tx_env(tx.as_signed(), tx.signer()), ); let mut evm = revm::Evm::builder().with_env(env).with_db(&mut db).build(); @@ -406,25 +399,18 @@ pub trait LoadPendingBlock: EthApiTypes { execution_outcome.block_logs_bloom(block_number).expect("Block is present"); // calculate the state root - let state_provider = &db.database; - let state_root = - state_provider.state_root(hashed_state).map_err(Self::Error::from_eth_err)?; + let state_root = db.database.state_root(hashed_state).map_err(Self::Error::from_eth_err)?; // create the block header let transactions_root = calculate_transaction_root(&executed_txs); // check if cancun is activated to set eip4844 header fields correctly let blob_gas_used = - if cfg.handler_cfg.spec_id >= SpecId::CANCUN { Some(sum_blob_gas_used) } else { None }; - - // note(onbjerg): the rpc spec has not been changed to include requests, so for now we just - // set these to empty - let (requests, requests_root) = - if chain_spec.is_prague_active_at_timestamp(block_env.timestamp.to::()) { - (Some(Requests::default()), Some(EMPTY_ROOT_HASH)) - } else { - (None, None) - }; + (cfg.handler_cfg.spec_id >= SpecId::CANCUN).then_some(sum_blob_gas_used); + + let requests_hash = chain_spec + .is_prague_active_at_timestamp(block_env.timestamp.to::()) + .then_some(EMPTY_REQUESTS_HASH); let header = Header { parent_hash, @@ -447,7 +433,7 @@ pub trait LoadPendingBlock: EthApiTypes { excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into), extra_data: Default::default(), parent_beacon_block_root, - requests_root, + requests_hash, }; // Convert Vec> to Vec @@ -459,13 +445,7 @@ pub trait LoadPendingBlock: EthApiTypes { // seal the block let block = Block { header, - body: BlockBody { - transactions: executed_txs, - ommers: vec![], - withdrawals, - sidecars, - requests, - }, + body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals, sidecars }, }; Ok((SealedBlockWithSenders { block: block.seal_slow(), senders }, receipts)) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index eae99bbe45..48394f1cd6 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -3,19 +3,13 @@ use futures::Future; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_rpc_eth_types::EthStateCache; -use 
crate::{EthApiTypes, RpcReceipt}; +use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. -pub trait LoadReceipt: EthApiTypes + Send + Sync { - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - +pub trait LoadReceipt: EthApiTypes + RpcNodeCoreExt + Send + Sync { /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. fn build_transaction_receipt( &self, diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs index ab11e62d54..36e9277400 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/signer.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs @@ -1,10 +1,10 @@ //! An abstraction over ethereum signers. use alloy_dyn_abi::TypedData; -use alloy_primitives::Address; +use alloy_primitives::{Address, Signature}; use alloy_rpc_types_eth::TransactionRequest; use dyn_clone::DynClone; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_rpc_eth_types::SignError; use std::result; diff --git a/crates/rpc/rpc-eth-api/src/helpers/spec.rs b/crates/rpc/rpc-eth-api/src/helpers/spec.rs index 5976cf29c0..a6213017af 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/spec.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/spec.rs @@ -8,21 +8,20 @@ use reth_errors::{RethError, RethResult}; use reth_network_api::NetworkInfo; use reth_provider::{BlockNumReader, ChainSpecProvider, StageCheckpointReader}; -use super::EthSigner; +use crate::{helpers::EthSigner, RpcNodeCore}; /// `Eth` API trait. /// /// Defines core functionality of the `eth` API implementation. #[auto_impl::auto_impl(&, Arc)] -pub trait EthApiSpec: Send + Sync { - /// Returns a handle for reading data from disk. - fn provider( - &self, - ) -> impl ChainSpecProvider + BlockNumReader + StageCheckpointReader; - - /// Returns a handle for reading network data summary. - fn network(&self) -> impl NetworkInfo; - +pub trait EthApiSpec: + RpcNodeCore< + Provider: ChainSpecProvider + + BlockNumReader + + StageCheckpointReader, + Network: NetworkInfo, +> +{ /// Returns the block node is started on. fn starting_block(&self) -> U256; diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index d601e43d90..702572064c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -1,23 +1,25 @@ //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace //! RPC methods. 
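A pattern repeats across these files: the per-trait `fn provider(&self) -> impl ...` hooks are deleted and replaced by supertrait bounds on `RpcNodeCore`'s associated types, so default method bodies call one inherited `self.provider()` with no `LoadFee::provider(self)`-style disambiguation. A minimal standalone rendering of the idea (associated-type-bounds syntax, stable since Rust 1.79; all trait names here are stand-ins):

```rust
// Minimal stand-ins for the component traits.
trait ChainSpecProvider { fn chain_id(&self) -> u64; }
trait BlockNumReader { fn best_number(&self) -> u64; }

// The `RpcNodeCore`-style base trait: one accessor per component.
trait NodeCore {
    type Provider;
    fn provider(&self) -> &Self::Provider;
}

// Before: every helper trait re-declared `fn provider(&self) -> impl ...`.
// After: the requirement moves into the supertrait bound, and one inherited
// `self.provider()` serves every default method body.
trait EthApiSpec: NodeCore<Provider: ChainSpecProvider + BlockNumReader> {
    fn status(&self) -> (u64, u64) {
        let p = self.provider();
        (p.chain_id(), p.best_number())
    }
}

struct Provider;
impl ChainSpecProvider for Provider { fn chain_id(&self) -> u64 { 1 } }
impl BlockNumReader for Provider { fn best_number(&self) -> u64 { 20_000_000 } }

struct Node { provider: Provider }
impl NodeCore for Node {
    type Provider = Provider;
    fn provider(&self) -> &Provider { &self.provider }
}
impl EthApiSpec for Node {}

fn main() {
    let node = Node { provider: Provider };
    println!("{:?}", node.status()); // (1, 20000000)
}
```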
+use alloy_consensus::constants::KECCAK_EMPTY; +use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types::{serde_helpers::JsonStorageKey, Account, EIP1186AccountProofResponse}; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_errors::RethError; use reth_evm::ConfigureEvmEnv; -use reth_primitives::{BlockId, Header, KECCAK_EMPTY}; +use reth_primitives::Header; use reth_provider::{ BlockIdReader, BlockNumReader, ChainSpecProvider, StateProvider, StateProviderBox, StateProviderFactory, }; -use reth_rpc_eth_types::{EthApiError, EthStateCache, PendingBlockEnv, RpcInvalidTransactionError}; +use reth_rpc_eth_types::{EthApiError, PendingBlockEnv, RpcInvalidTransactionError}; use reth_rpc_types_compat::proof::from_primitive_account_proof; use reth_transaction_pool::TransactionPool; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId}; -use crate::{EthApiTypes, FromEthApiError}; +use crate::{EthApiTypes, FromEthApiError, RpcNodeCore, RpcNodeCoreExt}; use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking}; @@ -28,7 +30,7 @@ pub trait EthState: LoadState + SpawnBlocking { /// Returns the number of transactions sent from an address at the given block identifier. /// - /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will + /// If this is [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this will /// look up the highest transaction in pool and return the next nonce (highest + 1). fn transaction_count( &self, @@ -104,7 +106,8 @@ pub trait EthState: LoadState + SpawnBlocking { let block_id = block_id.unwrap_or_default(); // Check whether the distance to the block exceeds the maximum configured window. - let block_number = LoadState::provider(self) + let block_number = self + .provider() .block_number_for_id(block_id) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -137,9 +140,9 @@ pub trait EthState: LoadState + SpawnBlocking { let Some(account) = account else { return Ok(None) }; // Check whether the distance to the block exceeds the maximum configured proof window. - let chain_info = - LoadState::provider(&this).chain_info().map_err(Self::Error::from_eth_err)?; - let block_number = LoadState::provider(&this) + let chain_info = this.provider().chain_info().map_err(Self::Error::from_eth_err)?; + let block_number = this + .provider() .block_number_for_id(block_id) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -166,24 +169,14 @@ pub trait EthState: LoadState + SpawnBlocking { /// Loads state from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` state RPC methods. -pub trait LoadState: EthApiTypes { - /// Returns a handle for reading state from database. - /// - /// Data access in default trait method implementations. - fn provider( - &self, - ) -> impl StateProviderFactory + ChainSpecProvider; - - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - - /// Returns a handle for reading data from transaction pool. - /// - /// Data access in default trait method implementations. 
- fn pool(&self) -> impl TransactionPool; - +pub trait LoadState: + EthApiTypes + + RpcNodeCoreExt< + Provider: StateProviderFactory + + ChainSpecProvider, + Pool: TransactionPool, + > +{ /// Returns the state at the given block number fn state_at_hash(&self, block_hash: B256) -> Result { self.provider().history_by_block_hash(block_hash).map_err(Self::Error::from_eth_err) @@ -191,7 +184,7 @@ pub trait LoadState: EthApiTypes { /// Returns the state at the given [`BlockId`] enum. /// - /// Note: if not [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this + /// Note: if not [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this /// will only return canonical state. See also fn state_at_block_id(&self, at: BlockId) -> Result { self.provider().state_by_block_id(at).map_err(Self::Error::from_eth_err) @@ -236,7 +229,7 @@ pub trait LoadState: EthApiTypes { Ok((cfg, block_env, origin.state_block_id())) } else { // Use cached values if there is no pending block - let block_hash = LoadPendingBlock::provider(self) + let block_hash = RpcNodeCore::provider(self) .block_hash_for_id(at) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(at))?; @@ -271,9 +264,45 @@ pub trait LoadState: EthApiTypes { } } + /// Returns the next available nonce without gaps for the given address + /// Next available nonce is either the on chain nonce of the account or the highest consecutive + /// nonce in the pool + 1 + fn next_available_nonce( + &self, + address: Address, + ) -> impl Future> + Send + where + Self: SpawnBlocking, + { + self.spawn_blocking_io(move |this| { + // first fetch the on chain nonce of the account + let on_chain_account_nonce = this + .latest_state()? + .account_nonce(address) + .map_err(Self::Error::from_eth_err)? + .unwrap_or_default(); + + let mut next_nonce = on_chain_account_nonce; + // Retrieve the highest consecutive transaction for the sender from the transaction pool + if let Some(highest_tx) = this + .pool() + .get_highest_consecutive_transaction_by_sender(address, on_chain_account_nonce) + { + // Return the nonce of the highest consecutive transaction + 1 + next_nonce = highest_tx.nonce().checked_add(1).ok_or_else(|| { + Self::Error::from(EthApiError::InvalidTransaction( + RpcInvalidTransactionError::NonceMaxValue, + )) + })?; + } + + Ok(next_nonce) + }) + } + /// Returns the number of transactions sent from an address at the given block identifier. /// - /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will + /// If this is [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this will /// look up the highest transaction in pool and return the next nonce (highest + 1). fn transaction_count( &self, @@ -284,8 +313,8 @@ pub trait LoadState: EthApiTypes { Self: SpawnBlocking, { self.spawn_blocking_io(move |this| { - // first fetch the on chain nonce - let nonce = this + // first fetch the on chain nonce of the account + let on_chain_account_nonce = this .state_at_block_id_or_latest(block_id)? .account_nonce(address) .map_err(Self::Error::from_eth_err)? 
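The new `next_available_nonce` deliberately differs from `transaction_count` (reworked in the next hunk): the former follows only consecutive pool nonces, so a freshly filled transaction never sits behind a gap, while the latter counts past gaps and guards against pool drift with a `max` against the on-chain nonce. A worked model of both, with the pool reduced to a plain nonce list:

```rust
// On-chain nonce plus a (possibly gappy) set of pool nonces for one sender.
fn transaction_count(on_chain: u64, pool_nonces: &[u64]) -> u64 {
    match pool_nonces.iter().max() {
        // Count = highest pool nonce + 1, but never below the on-chain nonce
        // (guards against a pool lagging behind already-mined transactions).
        Some(&highest) => on_chain.max(highest + 1),
        None => on_chain,
    }
}

fn next_available_nonce(on_chain: u64, pool_nonces: &[u64]) -> u64 {
    // Walk only *consecutive* nonces starting at the on-chain value, so a
    // newly submitted transaction never leaves a gap it would be stuck behind.
    let mut next = on_chain;
    while pool_nonces.contains(&next) {
        next += 1;
    }
    next
}

fn main() {
    // On-chain nonce 5; pool holds 5, 6 and a stray 9 (gap at 7 and 8).
    let (on_chain, pool) = (5u64, [5u64, 6, 9]);
    assert_eq!(transaction_count(on_chain, &pool), 10); // counts the stray tx
    assert_eq!(next_available_nonce(on_chain, &pool), 7); // stops at the gap
    println!("count = 10, next usable nonce = 7");
}
```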
@@ -297,20 +326,24 @@ pub trait LoadState: EthApiTypes { this.pool().get_highest_transaction_by_sender(address) { { - // and the corresponding txcount is nonce + 1 - let next_nonce = - nonce.max(highest_pool_tx.nonce()).checked_add(1).ok_or_else(|| { + // and the corresponding txcount is nonce + 1 of the highest tx in the pool + // (on chain nonce is increased after tx) + let next_tx_nonce = + highest_pool_tx.nonce().checked_add(1).ok_or_else(|| { Self::Error::from(EthApiError::InvalidTransaction( RpcInvalidTransactionError::NonceMaxValue, )) })?; - let tx_count = nonce.max(next_nonce); + // guard against drifts in the pool + let next_tx_nonce = on_chain_account_nonce.max(next_tx_nonce); + + let tx_count = on_chain_account_nonce.max(next_tx_nonce); return Ok(U256::from(tx_count)); } } } - Ok(U256::from(nonce)) + Ok(U256::from(on_chain_account_nonce)) }) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 0f1e256656..668e5807bd 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -1,6 +1,8 @@ //! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods. -use crate::{FromEthApiError, FromEvmError}; +use std::sync::Arc; + +use crate::{FromEthApiError, FromEvmError, RpcNodeCore}; use alloy_primitives::B256; use alloy_rpc_types::{BlockId, TransactionInfo}; use futures::Future; @@ -8,7 +10,7 @@ use futures::Future; use reth_bsc_primitives::system_contracts::is_system_transaction; use reth_chainspec::ChainSpecProvider; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::Header; +use reth_primitives::{Header, SealedBlockWithSenders}; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, @@ -21,12 +23,7 @@ use revm_primitives::{EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndSta use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; /// Executes CPU heavy tasks. -pub trait Trace: LoadState { - /// Returns a handle for reading evm config. - /// - /// Data access in default (L1) trait method implementations. - fn evm_config(&self) -> &impl ConfigureEvm
; - +pub trait Trace: LoadState> { /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state /// changes. fn inspect( @@ -117,7 +114,7 @@ pub trait Trace: LoadState { self.spawn_with_state_at_block(at, move |state| { let mut db = CacheDB::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); - let (res, _) = this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?; + let (res, _) = this.inspect(&mut db, env, &mut inspector)?; f(inspector, res, db) }) } @@ -193,9 +190,9 @@ pub trait Trace: LoadState { // block the transaction is included in let parent_block = block.parent_hash; let parent_beacon_block_root = block.parent_beacon_block_root; - let block_txs = block.into_transactions_ecrecovered(); - let parent_timestamp = LoadState::cache(self) - .get_block(parent_block) + let parent_timestamp = self + .cache() + .get_sealed_block_with_senders(parent_block) .await .map_err(Self::Error::from_eth_err)? .map(|block| block.timestamp) @@ -204,13 +201,10 @@ pub trait Trace: LoadState { let this = self.clone(); self.spawn_with_state_at_block(parent_block.into(), move |state| { let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let block_txs = block.transactions_with_sender(); // apply relevant system calls - let mut system_caller = SystemCaller::new( - Trace::evm_config(&this), - LoadState::provider(&this).chain_spec(), - ); - system_caller + SystemCaller::new(this.evm_config().clone(), this.provider().chain_spec()) .pre_block_beacon_root_contract_call( &mut db, &cfg, @@ -233,7 +227,7 @@ pub trait Trace: LoadState { parent_timestamp, )?; - let tx_env = Call::evm_config(&this).tx_env(&tx); + let tx_env = RpcNodeCore::evm_config(&this).tx_env(tx.as_signed(), tx.signer()); #[cfg(feature = "bsc")] let tx_env = { let mut tx_env = tx_env; @@ -262,6 +256,7 @@ pub trait Trace: LoadState { fn trace_block_until( &self, block_id: BlockId, + block: Option>, highest_index: Option, config: TracingInspectorConfig, f: F, @@ -281,6 +276,7 @@ pub trait Trace: LoadState { { self.trace_block_until_with_inspector( block_id, + block, highest_index, move || TracingInspector::new(config), f, @@ -300,6 +296,7 @@ pub trait Trace: LoadState { fn trace_block_until_with_inspector( &self, block_id: BlockId, + block: Option>, highest_index: Option, mut inspector_setup: Setup, f: F, @@ -320,8 +317,15 @@ pub trait Trace: LoadState { R: Send + 'static, { async move { + let block = async { + if block.is_some() { + return Ok(block) + } + self.block_with_senders(block_id).await + }; + let ((cfg, block_env, _), block) = - futures::try_join!(self.evm_env_at(block_id), self.block_with_senders(block_id))?; + futures::try_join!(self.evm_env_at(block_id), block)?; let Some(block) = block else { return Ok(None) }; @@ -346,11 +350,7 @@ pub trait Trace: LoadState { CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); // apply relevant system calls - let mut system_caller = SystemCaller::new( - Trace::evm_config(&this), - LoadState::provider(&this).chain_spec(), - ); - system_caller + SystemCaller::new(this.evm_config().clone(), this.provider().chain_spec()) .pre_block_beacon_root_contract_call( &mut db, &cfg, @@ -371,10 +371,10 @@ pub trait Trace: LoadState { let mut results = Vec::with_capacity(max_transactions); let mut transactions = block - .into_transactions_ecrecovered() + .transactions_with_sender() .take(max_transactions) .enumerate() - .map(|(idx, tx)| { + .map(|(idx, (signer, tx))| { let tx_info = 
TransactionInfo { hash: Some(tx.hash()), index: Some(idx as u64), @@ -382,7 +382,7 @@ pub trait Trace: LoadState { block_number: Some(block_number), base_fee: Some(base_fee), }; - let tx_env = Trace::evm_config(&this).tx_env(&tx); + let tx_env = this.evm_config().tx_env(tx, *signer); (tx_info, tx_env) }) .peekable(); @@ -424,6 +424,7 @@ pub trait Trace: LoadState { fn trace_block_with( &self, block_id: BlockId, + block: Option>, config: TracingInspectorConfig, f: F, ) -> impl Future>, Self::Error>> + Send @@ -442,7 +443,7 @@ pub trait Trace: LoadState { + 'static, R: Send + 'static, { - self.trace_block_until(block_id, None, config, f) + self.trace_block_until(block_id, block, None, config, f) } /// Executes all transactions of a block and returns a list of callback results invoked for each @@ -462,6 +463,7 @@ pub trait Trace: LoadState { fn trace_block_inspector( &self, block_id: BlockId, + block: Option>, insp_setup: Setup, f: F, ) -> impl Future>, Self::Error>> + Send @@ -482,6 +484,6 @@ pub trait Trace: LoadState { Insp: for<'a, 'b> Inspector> + Send + 'static, R: Send + 'static, { - self.trace_block_until_with_inspector(block_id, None, insp_setup, f) + self.trace_block_until_with_inspector(block_id, block, None, insp_setup, f) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 3805d9b939..15809d95bf 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -1,25 +1,28 @@ //! Database access for `eth_` transaction RPC methods. Loads transaction and receipt data w.r.t. //! network. +use alloy_consensus::Transaction; use alloy_dyn_abi::TypedData; -use alloy_eips::eip2718::Encodable2718; +use alloy_eips::{eip2718::Encodable2718, BlockId}; use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types::{BlockNumberOrTag, BlockSidecar, TransactionInfo}; use alloy_rpc_types_eth::transaction::TransactionRequest; use futures::Future; -use reth_primitives::{ - BlockId, Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned, -}; +use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned}; use reth_provider::{BlockNumReader, BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_types::{ utils::{binary_search, recover_raw_transaction}, - EthApiError, EthStateCache, SignError, TransactionSource, + EthApiError, SignError, TransactionSource, }; use reth_rpc_types_compat::transaction::{from_recovered, from_recovered_with_block_context}; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; +use std::sync::Arc; -use crate::{FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcReceipt, RpcTransaction}; +use crate::{ + FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, RpcReceipt, + RpcTransaction, +}; use super::{ Call, EthApiSpec, EthSigner, LoadBlock, LoadPendingBlock, LoadReceipt, LoadState, SpawnBlocking, @@ -48,12 +51,7 @@ use super::{ /// See also /// /// This implementation follows the behaviour of Geth and disables the basefee check for tracing. -pub trait EthTransactions: LoadTransaction { - /// Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider(&self) -> impl BlockReaderIdExt; - +pub trait EthTransactions: LoadTransaction { /// Returns a handle for signing data. 
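`trace_block_until` and its `trace_block_with` / `trace_block_inspector` wrappers now take an optional pre-fetched `Arc<SealedBlockWithSenders>`: the lookup only runs when the caller did not supply one, joined concurrently with the EVM env. A standalone sketch of that shape, assuming `tokio` and the `futures` crate; the lookup functions are placeholders:

```rust
use std::sync::Arc;

struct SealedBlockWithSenders { number: u64 }

// Stand-ins for the two async lookups joined in the hunk.
async fn evm_env_at(_id: u64) -> Result<&'static str, String> { Ok("env") }
async fn block_with_senders(id: u64) -> Result<Option<Arc<SealedBlockWithSenders>>, String> {
    Ok(Some(Arc::new(SealedBlockWithSenders { number: id })))
}

// Mirrors the new signature: callers that already hold the block pass
// `Some(block)` and skip the second lookup entirely.
async fn trace_block_with(
    block_id: u64,
    block: Option<Arc<SealedBlockWithSenders>>,
) -> Result<Option<u64>, String> {
    let block_fut = async {
        if block.is_some() {
            return Ok(block);
        }
        block_with_senders(block_id).await
    };
    let (_env, block) = futures::try_join!(evm_env_at(block_id), block_fut)?;
    Ok(block.map(|b| b.number))
}

#[tokio::main]
async fn main() {
    let cached = Arc::new(SealedBlockWithSenders { number: 42 });
    // Reuses the caller's block: no duplicate fetch.
    assert_eq!(trace_block_with(42, Some(cached)).await.unwrap(), Some(42));
    // Falls back to the lookup when nothing was pre-fetched.
    assert_eq!(trace_block_with(7, None).await.unwrap(), Some(7));
}
```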
/// /// Singer access in default (L1) trait method implementations. @@ -79,7 +77,11 @@ pub trait EthTransactions: LoadTransaction { block: B256, ) -> impl Future>, Self::Error>> + Send { async move { - self.cache().get_block_transactions(block).await.map_err(Self::Error::from_eth_err) + self.cache() + .get_sealed_block_with_senders(block) + .await + .map(|b| b.map(|b| b.body.transactions.clone())) + .map_err(Self::Error::from_eth_err) } } @@ -103,7 +105,8 @@ pub trait EthTransactions: LoadTransaction { } self.spawn_blocking_io(move |ref this| { - Ok(LoadTransaction::provider(this) + Ok(this + .provider() .transaction_by_hash(hash) .map_err(Self::Error::from_eth_err)? .map(|tx| tx.encoded_2718().into())) @@ -158,7 +161,8 @@ pub trait EthTransactions: LoadTransaction { { let this = self.clone(); self.spawn_blocking_io(move |_| { - let (tx, meta) = match LoadTransaction::provider(&this) + let (tx, meta) = match this + .provider() .transaction_by_hash_with_meta(hash) .map_err(Self::Error::from_eth_err)? { @@ -166,13 +170,11 @@ pub trait EthTransactions: LoadTransaction { None => return Ok(None), }; - let receipt = match EthTransactions::provider(&this) - .receipt_by_hash(hash) - .map_err(Self::Error::from_eth_err)? - { - Some(recpt) => recpt, - None => return Ok(None), - }; + let receipt = + match this.provider().receipt_by_hash(hash).map_err(Self::Error::from_eth_err)? { + Some(recpt) => recpt, + None => return Ok(None), + }; Ok(Some((tx, meta, receipt))) }) @@ -194,7 +196,7 @@ pub trait EthTransactions: LoadTransaction { let block_hash = block.hash(); let block_number = block.number; let base_fee_per_gas = block.base_fee_per_gas; - if let Some(tx) = block.into_transactions_ecrecovered().nth(index) { + if let Some((signer, tx)) = block.transactions_with_sender().nth(index) { let tx_info = TransactionInfo { hash: Some(tx.hash()), block_hash: Some(block_hash), @@ -203,8 +205,10 @@ pub trait EthTransactions: LoadTransaction { index: Some(index as u64), }; - return Ok(Some(from_recovered_with_block_context::( - tx, tx_info, + return Ok(Some(from_recovered_with_block_context( + tx.clone().with_signer(*signer), + tx_info, + self.tx_resp_builder(), ))) } } @@ -221,16 +225,16 @@ pub trait EthTransactions: LoadTransaction { include_pending: bool, ) -> impl Future>, Self::Error>> + Send where - Self: LoadBlock + LoadState + FullEthApiTypes, + Self: LoadBlock + LoadState, { async move { // Check the pool first if include_pending { if let Some(tx) = - LoadState::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) + RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) { let transaction = tx.transaction.clone().into_consensus(); - return Ok(Some(from_recovered::(transaction.into()))); + return Ok(Some(from_recovered(transaction.into(), self.tx_resp_builder()))); } } @@ -247,7 +251,7 @@ pub trait EthTransactions: LoadTransaction { return Ok(None); } - let Ok(high) = LoadBlock::provider(self).best_block_number() else { + let Ok(high) = self.provider().best_block_number() else { return Err(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()).into()); }; @@ -270,10 +274,10 @@ pub trait EthTransactions: LoadTransaction { let base_fee_per_gas = block.base_fee_per_gas; block - .into_transactions_ecrecovered() + .transactions_with_sender() .enumerate() - .find(|(_, tx)| tx.signer() == sender && tx.nonce() == nonce) - .map(|(index, tx)| { + .find(|(_, (signer, tx))| **signer == sender && tx.nonce() == nonce) + .map(|(index, (signer, tx))| { let tx_info = TransactionInfo { 
hash: Some(tx.hash()), block_hash: Some(block_hash), @@ -281,8 +285,10 @@ pub trait EthTransactions: LoadTransaction { base_fee: base_fee_per_gas.map(u128::from), index: Some(index as u64), }; - from_recovered_with_block_context::( - tx, tx_info, + from_recovered_with_block_context( + tx.clone().with_signer(*signer), + tx_info, + self.tx_resp_builder(), ) }) }) @@ -357,9 +363,8 @@ pub trait EthTransactions: LoadTransaction { // set nonce if not already set before if request.nonce.is_none() { - let nonce = self.transaction_count(from, Some(BlockId::pending())).await?; - // note: `.to()` can't panic because the nonce is constructed from a `u64` - request.nonce = Some(nonce.to()); + let nonce = self.next_available_nonce(from).await?; + request.nonce = Some(nonce); } let chain_id = self.chain_id(); @@ -372,10 +377,15 @@ pub trait EthTransactions: LoadTransaction { let transaction = self.sign_request(&from, request).await?.with_signer(from); - let pool_transaction = <::Pool as TransactionPool>::Transaction::try_from_consensus(transaction.into()).map_err(|_| EthApiError::TransactionConversionError)?; + let pool_transaction = + <::Pool as TransactionPool>::Transaction::try_from_consensus( + transaction.into(), + ) + .map_err(|_| EthApiError::TransactionConversionError)?; // submit the transaction to the pool with a `Local` origin - let hash = LoadTransaction::pool(self) + let hash = self + .pool() .add_transaction(TransactionOrigin::Local, pool_transaction) .await .map_err(Self::Error::from_eth_err)?; @@ -453,7 +463,8 @@ pub trait EthTransactions: LoadTransaction { Self: LoadReceipt + 'static, { async move { - let meta = match LoadTransaction::provider(self) + let meta = match self + .provider() .transaction_by_hash_with_meta(hash) .map_err(Self::Error::from_eth_err)? { @@ -462,11 +473,10 @@ pub trait EthTransactions: LoadTransaction { }; // If no block sidecars found, return None - let sidecars = - match LoadTransaction::cache(self).get_sidecars(meta.block_hash).await.unwrap() { - Some(sidecars) => sidecars, - None => return Ok(None), - }; + let sidecars = match self.cache().get_sidecars(meta.block_hash).await.unwrap() { + Some(sidecars) => sidecars, + None => return Ok(None), + }; Ok(sidecars.iter().find(|item| item.tx_hash == hash).map(|sidecar| BlockSidecar { blob_sidecar: sidecar.blob_transaction_sidecar.clone(), @@ -483,26 +493,11 @@ pub trait EthTransactions: LoadTransaction { /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` transactions RPC /// methods. -pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes { - /// Transaction pool with pending transactions. [`TransactionPool::Transaction`] is the - /// supported transaction type. - type Pool: TransactionPool; - - /// Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider(&self) -> impl TransactionsProvider; - - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - - /// Returns a handle for reading data from pool. - /// - /// Data access in default (L1) trait method implementations. - fn pool(&self) -> &Self::Pool; - +pub trait LoadTransaction: + SpawnBlocking + + FullEthApiTypes + + RpcNodeCoreExt +{ /// Returns the transaction by hash. /// /// Checks the pool and state. 
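Editor's note: the `send_transaction` hunk above swaps the pending-block `transaction_count` lookup for a `next_available_nonce` helper. Assuming that helper also accounts for the sender's transactions already queued in the pool (an assumption; the diff does not show its body), the difference looks roughly like this sketch with illustrative names:

```rust
use std::collections::HashSet;

// Hypothetical helper: start at the on-chain transaction count, then walk
// past any gapless run of nonces the sender already has in the local pool.
fn next_available_nonce(on_chain_count: u64, pooled_nonces: &HashSet<u64>) -> u64 {
    let mut nonce = on_chain_count;
    while pooled_nonces.contains(&nonce) {
        nonce += 1;
    }
    nonce
}

fn main() {
    // 3 txs mined, nonces 3 and 4 already waiting in the pool:
    let pooled: HashSet<u64> = [3, 4].into_iter().collect();
    assert_eq!(next_available_nonce(3, &pooled), 5);
    // Nothing pooled: same answer as the plain pending transaction count.
    assert_eq!(next_available_nonce(3, &HashSet::new()), 3);
}
```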
@@ -578,8 +573,9 @@ pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes { fn transaction_and_block( &self, hash: B256, - ) -> impl Future, Self::Error>> - + Send { + ) -> impl Future< + Output = Result)>, Self::Error>, + > + Send { async move { let (transaction, at) = match self.transaction_by_hash_at(hash).await? { None => return Ok(None), @@ -593,10 +589,10 @@ pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes { }; let block = self .cache() - .get_block_with_senders(block_hash) + .get_sealed_block_with_senders(block_hash) .await .map_err(Self::Error::from_eth_err)?; - Ok(block.map(|block| (transaction, (*block).clone().seal(block_hash)))) + Ok(block.map(|block| (transaction, block))) } } } diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index 849c8e2e4c..fa9737f84f 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -16,6 +16,7 @@ pub mod bundle; pub mod core; pub mod filter; pub mod helpers; +pub mod node; pub mod pubsub; pub mod types; @@ -25,6 +26,7 @@ pub use bundle::{EthBundleApiServer, EthCallBundleApiServer}; pub use core::{EthApiServer, FullEthApiServer}; pub use filter::EthFilterApiServer; pub use helpers::error::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; +pub use node::{RpcNodeCore, RpcNodeCoreExt}; pub use pubsub::EthPubSubApiServer; pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs new file mode 100644 index 0000000000..4ae79c0834 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -0,0 +1,70 @@ +//! Helper trait for interfacing with [`FullNodeComponents`]. + +use reth_node_api::FullNodeComponents; +use reth_rpc_eth_types::EthStateCache; + +/// Helper trait to relax trait bounds on [`FullNodeComponents`]. +/// +/// Helpful when defining types that would otherwise have a generic `N: FullNodeComponents`. Using +/// `N: RpcNodeCore` instead allows access to all the associated types on [`FullNodeComponents`] +/// that are used in RPC, but with more flexibility since they have no trait bounds (aside from auto +/// traits). +pub trait RpcNodeCore: Clone + Send + Sync { +    /// The provider type used to interact with the node. +    type Provider: Send + Sync + Clone + Unpin; +    /// The transaction pool of the node. +    type Pool: Send + Sync + Clone + Unpin; +    /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. +    type Evm: Send + Sync + Clone + Unpin; +    /// Network API. +    type Network: Send + Sync + Clone; + +    /// Returns the transaction pool of the node. +    fn pool(&self) -> &Self::Pool; + +    /// Returns the node's evm config. +    fn evm_config(&self) -> &Self::Evm; + +    /// Returns the handle to the network. +    fn network(&self) -> &Self::Network; + +    /// Returns the provider of the node.
+    fn provider(&self) -> &Self::Provider; +} + +impl RpcNodeCore for T +where +    T: FullNodeComponents, +{ +    type Provider = T::Provider; +    type Pool = T::Pool; +    type Evm = ::Evm; +    type Network = ::Network; + +    #[inline] +    fn pool(&self) -> &Self::Pool { +        FullNodeComponents::pool(self) +    } + +    #[inline] +    fn evm_config(&self) -> &Self::Evm { +        FullNodeComponents::evm_config(self) +    } + +    #[inline] +    fn network(&self) -> &Self::Network { +        FullNodeComponents::network(self) +    } + +    #[inline] +    fn provider(&self) -> &Self::Provider { +        FullNodeComponents::provider(self) +    } +} + +/// Additional components, aside from the core node components, needed to run `eth_` namespace API +/// server. +pub trait RpcNodeCoreExt: RpcNodeCore { +    /// Returns handle to RPC cache service. +    fn cache(&self) -> &EthStateCache; +} diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 9ddc23ea32..1d176dd1e8 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -2,16 +2,15 @@ use std::{error::Error, fmt}; -use alloy_network::{AnyNetwork, Network}; +use alloy_network::Network; use alloy_rpc_types::Block; -use reth_rpc_eth_types::EthApiError; use reth_rpc_types_compat::TransactionCompat; use crate::{AsEthApiError, FromEthApiError, FromEvmError}; /// Network specific `eth` API types. pub trait EthApiTypes: Send + Sync + Clone { - /// Extension of [`EthApiError`], with network specific errors. + /// Extension of [`FromEthApiError`], with network specific errors. type Error: Into> + FromEthApiError + AsEthApiError @@ -23,12 +22,9 @@ pub trait EthApiTypes: Send + Sync + Clone { type NetworkTypes: Network; /// Conversion methods for transaction RPC type. type TransactionCompat: Send + Sync + Clone + fmt::Debug; -} -impl EthApiTypes for () { - type Error = EthApiError; - type NetworkTypes = AnyNetwork; - type TransactionCompat = (); + /// Returns reference to transaction response builder. + fn tx_resp_builder(&self) -> &Self::TransactionCompat; } /// Adapter for network specific transaction type. diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index b2599d0744..7b4cc9a3f3 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -39,7 +39,6 @@ revm.workspace = true revm-inspectors.workspace = true revm-primitives = { workspace = true, features = ["dev"] } alloy-rpc-types.workspace = true -alloy-serde.workspace = true alloy-eips.workspace = true # rpc @@ -61,6 +60,7 @@ derive_more.workspace = true schnellru.workspace = true rand.workspace = true tracing.workspace = true +itertools.workspace = true [dev-dependencies] serde_json.workspace = true diff --git a/crates/rpc/rpc-eth-types/src/builder/config.rs b/crates/rpc/rpc-eth-types/src/builder/config.rs index a016d02158..532c107720 100644 --- a/crates/rpc/rpc-eth-types/src/builder/config.rs +++ b/crates/rpc/rpc-eth-types/src/builder/config.rs @@ -15,7 +15,7 @@ use serde::{Deserialize, Serialize}; pub const DEFAULT_STALE_FILTER_TTL: Duration = Duration::from_secs(5 * 60); /// Additional config values for the eth namespace.
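Editor's note: `RpcNodeCore` above follows the classic bound-relaxing pattern: a blanket impl over the heavyweight trait lets downstream code bound on the lighter trait only. A self-contained sketch with simplified stand-ins for `FullNodeComponents` and `RpcNodeCore`:

```rust
// The heavyweight trait with many obligations (stands in for FullNodeComponents).
trait FullComponents: Clone + Send + Sync {
    type Pool: Clone + Send + Sync;
    fn pool(&self) -> &Self::Pool;
}

// The relaxed trait that RPC types actually bound on (stands in for RpcNodeCore).
trait Core: Clone + Send + Sync {
    type Pool: Clone + Send + Sync;
    fn pool(&self) -> &Self::Pool;
}

// Blanket impl: anything implementing the wide trait gets the narrow one for free.
impl<T: FullComponents> Core for T {
    type Pool = T::Pool;
    fn pool(&self) -> &Self::Pool {
        FullComponents::pool(self)
    }
}

// A generic handler only needs the narrow bound.
fn pool_len<N: Core<Pool = Vec<u8>>>(node: &N) -> usize {
    node.pool().len()
}

#[derive(Clone)]
struct Node {
    pool: Vec<u8>,
}

impl FullComponents for Node {
    type Pool = Vec<u8>;
    fn pool(&self) -> &Self::Pool {
        &self.pool
    }
}

fn main() {
    let node = Node { pool: vec![1, 2, 3] };
    assert_eq!(pool_len(&node), 3);
}
```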
-#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] pub struct EthConfig { /// Settings for the caching layer pub cache: EthStateCacheConfig, diff --git a/crates/rpc/rpc-eth-types/src/builder/ctx.rs b/crates/rpc/rpc-eth-types/src/builder/ctx.rs index 593e6fa5a6..eedb3f4586 100644 --- a/crates/rpc/rpc-eth-types/src/builder/ctx.rs +++ b/crates/rpc/rpc-eth-types/src/builder/ctx.rs @@ -5,7 +5,6 @@ use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; use reth_storage_api::BlockReaderIdExt; use reth_tasks::TaskSpawner; -use std::marker::PhantomData; use crate::{ fee_history::fee_history_cache_new_blocks_task, EthConfig, EthStateCache, FeeHistoryCache, @@ -14,7 +13,7 @@ use crate::{ /// Context for building the `eth` namespace API. #[derive(Debug, Clone)] -pub struct EthApiBuilderCtx { +pub struct EthApiBuilderCtx { /// Database handle. pub provider: Provider, /// Mempool handle. @@ -33,12 +32,10 @@ pub struct EthApiBuilderCtx, - /// RPC type builders. - pub _rpc_ty_builders: PhantomData, } -impl - EthApiBuilderCtx +impl + EthApiBuilderCtx where Provider: BlockReaderIdExt + Clone, { @@ -48,53 +45,14 @@ where Provider: ChainSpecProvider + 'static, Tasks: TaskSpawner, Events: CanonStateSubscriptions, - { - FeeHistoryCacheBuilder::build(self) - } - - /// Returns a new [`GasPriceOracle`] for the context. - pub fn new_gas_price_oracle(&self) -> GasPriceOracle { - GasPriceOracleBuilder::build(self) - } -} - -/// Builds `eth_` core api component [`GasPriceOracle`], for given context. -#[derive(Debug)] -pub struct GasPriceOracleBuilder; - -impl GasPriceOracleBuilder { - /// Builds a [`GasPriceOracle`], for given context. - pub fn build( - ctx: &EthApiBuilderCtx, - ) -> GasPriceOracle - where - Provider: BlockReaderIdExt + Clone, - { - GasPriceOracle::new(ctx.provider.clone(), ctx.config.gas_oracle, ctx.cache.clone()) - } -} - -/// Builds `eth_` core api component [`FeeHistoryCache`], for given context. -#[derive(Debug)] -pub struct FeeHistoryCacheBuilder; - -impl FeeHistoryCacheBuilder { - /// Builds a [`FeeHistoryCache`], for given context. - pub fn build( - ctx: &EthApiBuilderCtx, - ) -> FeeHistoryCache - where - Provider: ChainSpecProvider + BlockReaderIdExt + Clone + 'static, - Tasks: TaskSpawner, - Events: CanonStateSubscriptions, { let fee_history_cache = - FeeHistoryCache::new(ctx.cache.clone(), ctx.config.fee_history_cache); + FeeHistoryCache::new(self.cache.clone(), self.config.fee_history_cache); - let new_canonical_blocks = ctx.events.canonical_state_stream(); + let new_canonical_blocks = self.events.canonical_state_stream(); let fhc = fee_history_cache.clone(); - let provider = ctx.provider.clone(); - ctx.executor.spawn_critical( + let provider = self.provider.clone(); + self.executor.spawn_critical( "cache canonical blocks for fee history task", Box::pin(async move { fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider).await; @@ -103,4 +61,9 @@ impl FeeHistoryCacheBuilder { fee_history_cache } + + /// Returns a new [`GasPriceOracle`] for the context. 
+ pub fn new_gas_price_oracle(&self) -> GasPriceOracle { + GasPriceOracle::new(self.provider.clone(), self.config.gas_oracle, self.cache.clone()) + } } diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 7422dcfb8a..627fd2b2df 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -114,6 +114,13 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { self.0.block_hash(block_number) } + fn convert_block_hash( + &self, + hash_or_number: alloy_rpc_types::BlockHashOrNumber, + ) -> reth_errors::ProviderResult> { + self.0.convert_block_hash(hash_or_number) + } + fn canonical_hashes_range( &self, start: alloy_primitives::BlockNumber, @@ -121,21 +128,22 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { ) -> reth_errors::ProviderResult> { self.0.canonical_hashes_range(start, end) } +} - fn convert_block_hash( +impl StateProvider for StateProviderTraitObjWrapper<'_> { + fn storage( &self, - hash_or_number: alloy_rpc_types::BlockHashOrNumber, - ) -> reth_errors::ProviderResult> { - self.0.convert_block_hash(hash_or_number) + account: revm_primitives::Address, + storage_key: alloy_primitives::StorageKey, + ) -> reth_errors::ProviderResult> { + self.0.storage(account, storage_key) } -} -impl StateProvider for StateProviderTraitObjWrapper<'_> { - fn account_balance( + fn bytecode_by_hash( &self, - addr: revm_primitives::Address, - ) -> reth_errors::ProviderResult> { - self.0.account_balance(addr) + code_hash: B256, + ) -> reth_errors::ProviderResult> { + self.0.bytecode_by_hash(code_hash) } fn account_code( @@ -145,26 +153,18 @@ impl StateProvider for StateProviderTraitObjWrapper<'_> { self.0.account_code(addr) } - fn account_nonce( + fn account_balance( &self, addr: revm_primitives::Address, - ) -> reth_errors::ProviderResult> { - self.0.account_nonce(addr) - } - - fn bytecode_by_hash( - &self, - code_hash: B256, - ) -> reth_errors::ProviderResult> { - self.0.bytecode_by_hash(code_hash) + ) -> reth_errors::ProviderResult> { + self.0.account_balance(addr) } - fn storage( + fn account_nonce( &self, - account: revm_primitives::Address, - storage_key: alloy_primitives::StorageKey, - ) -> reth_errors::ProviderResult> { - self.0.storage(account, storage_key) + addr: revm_primitives::Address, + ) -> reth_errors::ProviderResult> { + self.0.account_nonce(addr) } } diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index 2fc9430626..6ff26797ac 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -1,15 +1,13 @@ //! 
Async caching support for eth RPC +use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::{future::Either, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; use reth_evm::{provider::EvmEnvProvider, ConfigureEvm}; use reth_execution_types::Chain; -use reth_primitives::{ - BlobSidecars, Block, BlockHashOrNumber, BlockWithSenders, Header, Receipt, SealedBlock, - SealedBlockWithSenders, TransactionSigned, TransactionSignedEcRecovered, -}; +use reth_primitives::{BlobSidecars, Header, Receipt, SealedBlockWithSenders, TransactionSigned}; use reth_storage_api::{BlockReader, StateProviderFactory, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; @@ -33,13 +31,13 @@ pub mod db; pub mod metrics; pub mod multi_consumer; -/// The type that can send the response to a requested [`Block`] +/// The type that can send the response to a requested [`SealedBlockWithSenders`] type BlockTransactionsResponseSender = oneshot::Sender>>>; -/// The type that can send the response to a requested [`BlockWithSenders`] +/// The type that can send the response to a requested [`SealedBlockWithSenders`] type BlockWithSendersResponseSender = - oneshot::Sender>>>; + oneshot::Sender>>>; /// The type that can send the response to the requested receipts of a block. type ReceiptsResponseSender = oneshot::Sender>>>>; @@ -52,7 +50,7 @@ type EnvResponseSender = oneshot::Sender = MultiConsumerLruCache< B256, - Arc, + Arc, L, Either, >; @@ -148,92 +146,18 @@ impl EthStateCache { this } - /// Requests the [`Block`] for the block hash - /// - /// Returns `None` if the block does not exist. - pub async fn get_block(&self, block_hash: B256) -> ProviderResult> { - let (response_tx, rx) = oneshot::channel(); - let _ = self.to_service.send(CacheAction::GetBlockWithSenders { block_hash, response_tx }); - let block_with_senders_res = - rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)?; - - if let Ok(Some(block_with_senders)) = block_with_senders_res { - Ok(Some(block_with_senders.block.clone())) - } else { - Ok(None) - } - } - - /// Requests the [`Block`] for the block hash, sealed with the given block hash. - /// - /// Returns `None` if the block does not exist. - pub async fn get_sealed_block(&self, block_hash: B256) -> ProviderResult> { - Ok(self.get_block(block_hash).await?.map(|block| block.seal(block_hash))) - } - - /// Requests the transactions of the [`Block`] - /// - /// Returns `None` if the block does not exist. - pub async fn get_block_transactions( - &self, - block_hash: B256, - ) -> ProviderResult>> { - let (response_tx, rx) = oneshot::channel(); - let _ = self.to_service.send(CacheAction::GetBlockTransactions { block_hash, response_tx }); - rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? - } - - /// Requests the ecrecovered transactions of the [`Block`] - /// - /// Returns `None` if the block does not exist. - pub async fn get_block_transactions_ecrecovered( - &self, - block_hash: B256, - ) -> ProviderResult>> { - Ok(self - .get_block_with_senders(block_hash) - .await? - .map(|block| (*block).clone().into_transactions_ecrecovered().collect())) - } - - /// Fetches both transactions and receipts for the given block hash. 
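Editor's note: with the convenience getters deleted above, callers compose the one remaining `get_sealed_block_with_senders` lookup with `get_receipts` themselves (or map the shared block to its transaction list), joined concurrently the way the removed `get_transactions_and_receipts` helper did. A hedged sketch of that composition, with stub async lookups standing in for the real cache calls:

```rust
use std::sync::Arc;

type ProviderResult<T> = Result<T, String>;

// Stand-in for the single surviving block lookup; "block" here is just a tx list.
async fn get_sealed_block(hash: u64) -> ProviderResult<Option<Arc<Vec<u64>>>> {
    Ok(Some(Arc::new(vec![hash])))
}

// Stand-in for the receipts lookup.
async fn get_receipts(hash: u64) -> ProviderResult<Option<Arc<Vec<u64>>>> {
    Ok(Some(Arc::new(vec![hash * 10])))
}

async fn block_and_receipts(hash: u64) -> ProviderResult<Option<(Arc<Vec<u64>>, Arc<Vec<u64>>)>> {
    // Run both lookups concurrently; short-circuit on the first error.
    let (block, receipts) = futures::try_join!(get_sealed_block(hash), get_receipts(hash))?;
    Ok(block.zip(receipts))
}

fn main() {
    let res = futures::executor::block_on(block_and_receipts(1)).unwrap();
    assert!(res.is_some());
}
```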
- pub async fn get_transactions_and_receipts( - &self, - block_hash: B256, - ) -> ProviderResult, Arc>)>> { - let transactions = self.get_block_transactions(block_hash); - let receipts = self.get_receipts(block_hash); - - let (transactions, receipts) = futures::try_join!(transactions, receipts)?; - - Ok(transactions.zip(receipts)) - } - - /// Requests the [`BlockWithSenders`] for the block hash + /// Requests the [`SealedBlockWithSenders`] for the block hash /// /// Returns `None` if the block does not exist. - pub async fn get_block_with_senders( + pub async fn get_sealed_block_with_senders( &self, block_hash: B256, - ) -> ProviderResult>> { + ) -> ProviderResult>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetBlockWithSenders { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? } - /// Requests the [`SealedBlockWithSenders`] for the block hash - /// - /// Returns `None` if the block does not exist. - pub async fn get_sealed_block_with_senders( - &self, - block_hash: B256, - ) -> ProviderResult> { - Ok(self - .get_block_with_senders(block_hash) - .await? - .map(|block| (*block).clone().seal(block_hash))) - } - /// Requests the [Receipt] for the block hash /// /// Returns `None` if the block was not found. @@ -250,8 +174,8 @@ impl EthStateCache { pub async fn get_block_and_receipts( &self, block_hash: B256, - ) -> ProviderResult>)>> { - let block = self.get_sealed_block(block_hash); + ) -> ProviderResult, Arc>)>> { + let block = self.get_sealed_block_with_senders(block_hash); let receipts = self.get_receipts(block_hash); let (block, receipts) = futures::try_join!(block, receipts)?; @@ -306,7 +230,7 @@ pub(crate) struct EthStateCacheService< LimitEnvs = ByLength, LimitSidecars = ByLength, > where - LimitBlocks: Limiter>, + LimitBlocks: Limiter>, LimitReceipts: Limiter>>, LimitEnvs: Limiter, LimitSidecars: Limiter, @@ -342,7 +266,7 @@ where fn on_new_block( &mut self, block_hash: B256, - res: ProviderResult>>, + res: ProviderResult>>, ) { if let Some(queued) = self.full_block_cache.remove(&block_hash) { // send the response to queued senders @@ -384,7 +308,11 @@ where } } - fn on_reorg_block(&mut self, block_hash: B256, res: ProviderResult>) { + fn on_reorg_block( + &mut self, + block_hash: B256, + res: ProviderResult>, + ) { let res = res.map(|b| b.map(Arc::new)); if let Some(queued) = self.full_block_cache.remove(&block_hash) { // send the response to queued senders @@ -458,7 +386,7 @@ where // Only look in the database to prevent situations where we // looking up the tree is blocking let block_sender = provider - .block_with_senders( + .sealed_block_with_senders( BlockHashOrNumber::Hash(block_hash), TransactionVariant::WithHash, ) @@ -470,36 +398,6 @@ where })); } } - CacheAction::GetBlockTransactions { block_hash, response_tx } => { - // check if block is cached - if let Some(block) = this.full_block_cache.get(&block_hash) { - let _ = response_tx.send(Ok(Some(block.body.transactions.clone()))); - continue - } - - // block is not in the cache, request it if this is the first consumer - if this.full_block_cache.queue(block_hash, Either::Right(response_tx)) { - let provider = this.provider.clone(); - let action_tx = this.action_tx.clone(); - let rate_limiter = this.rate_limiter.clone(); - this.action_task_spawner.spawn_blocking(Box::pin(async move { - // Acquire permit - let _permit = rate_limiter.acquire().await; - // Only look in the database to prevent situations where we - // looking up the tree is 
blocking - let res = provider - .block_with_senders( - BlockHashOrNumber::Hash(block_hash), - TransactionVariant::WithHash, - ) - .map(|b| b.map(Arc::new)); - let _ = action_tx.send(CacheAction::BlockWithSendersResult { - block_hash, - res, - }); - })); - } - } CacheAction::GetReceipts { block_hash, response_tx } => { // check if block is cached if let Some(receipts) = this.receipts_cache.get(&block_hash).cloned() { @@ -626,7 +524,7 @@ where } CacheAction::CacheNewCanonicalChain { chain_change } => { for block in chain_change.blocks { - this.on_new_block(block.hash(), Ok(Some(Arc::new(block.unseal())))); + this.on_new_block(block.hash(), Ok(Some(Arc::new(block)))); } for block_receipts in chain_change.receipts { @@ -640,7 +538,7 @@ where } CacheAction::RemoveReorgedChain { chain_change } => { for block in chain_change.blocks { - this.on_reorg_block(block.hash(), Ok(Some(block.unseal()))); + this.on_reorg_block(block.hash(), Ok(Some(block))); } for block_receipts in chain_change.receipts { @@ -662,17 +560,44 @@ where /// All message variants sent through the channel enum CacheAction { - GetBlockWithSenders { block_hash: B256, response_tx: BlockWithSendersResponseSender }, - GetBlockTransactions { block_hash: B256, response_tx: BlockTransactionsResponseSender }, - GetEnv { block_hash: B256, response_tx: EnvResponseSender }, - GetReceipts { block_hash: B256, response_tx: ReceiptsResponseSender }, - BlockWithSendersResult { block_hash: B256, res: ProviderResult>> }, - ReceiptsResult { block_hash: B256, res: ProviderResult>>> }, - EnvResult { block_hash: B256, res: Box> }, - CacheNewCanonicalChain { chain_change: ChainChange }, - RemoveReorgedChain { chain_change: ChainChange }, - GetSidecars { block_hash: B256, response_tx: SidecarsResponseSender }, - SidecarsResult { block_hash: B256, res: ProviderResult> }, + GetBlockWithSenders { + block_hash: B256, + response_tx: BlockWithSendersResponseSender, + }, + GetEnv { + block_hash: B256, + response_tx: EnvResponseSender, + }, + GetReceipts { + block_hash: B256, + response_tx: ReceiptsResponseSender, + }, + BlockWithSendersResult { + block_hash: B256, + res: ProviderResult>>, + }, + ReceiptsResult { + block_hash: B256, + res: ProviderResult>>>, + }, + EnvResult { + block_hash: B256, + res: Box>, + }, + CacheNewCanonicalChain { + chain_change: ChainChange, + }, + RemoveReorgedChain { + chain_change: ChainChange, + }, + GetSidecars { + block_hash: B256, + response_tx: SidecarsResponseSender, + }, + SidecarsResult { + block_hash: B256, + res: ProviderResult>, + }, } struct BlockReceipts { diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error.rs index 7beddc4aa0..a7f43b95a6 100644 --- a/crates/rpc/rpc-eth-types/src/error.rs +++ b/crates/rpc/rpc-eth-types/src/error.rs @@ -2,12 +2,13 @@ use std::time::Duration; +use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, U256}; use alloy_rpc_types::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; use alloy_sol_types::decode_revert_reason; use reth_bsc_consensus::BscTraceHelperError; use reth_errors::RethError; -use reth_primitives::{revm_primitives::InvalidHeader, BlockId}; +use reth_primitives::revm_primitives::InvalidHeader; use reth_rpc_server_types::result::{ block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, }; @@ -158,6 +159,11 @@ impl EthApiError { pub const fn is_gas_too_high(&self) -> bool { matches!(self, Self::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh)) } + + /// Returns `true` if 
error is [`RpcInvalidTransactionError::GasTooLow`] + pub const fn is_gas_too_low(&self) -> bool { + matches!(self, Self::InvalidTransaction(RpcInvalidTransactionError::GasTooLow)) + } } impl From for jsonrpsee_types::error::ErrorObject<'static> { diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 08ac56845f..7692d47de9 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -73,16 +73,16 @@ impl FeeHistoryCache { } /// Insert block data into the cache. - async fn insert_blocks(&self, blocks: I) + async fn insert_blocks<'a, I>(&self, blocks: I) where - I: IntoIterator>)>, + I: IntoIterator>)>, { let mut entries = self.inner.entries.write().await; let percentiles = self.predefined_percentiles(); // Insert all new blocks and calculate approximated rewards for (block, receipts) in blocks { - let mut fee_history_entry = FeeHistoryEntry::new(&block); + let mut fee_history_entry = FeeHistoryEntry::new(block); fee_history_entry.rewards = calculate_reward_percentiles_for_block( &percentiles, fee_history_entry.gas_used, @@ -237,7 +237,9 @@ pub async fn fee_history_cache_new_blocks_task( tokio::select! { res = &mut fetch_missing_block => { if let Ok(res) = res { - fee_history_cache.insert_blocks(res.into_iter()).await; + fee_history_cache.insert_blocks(res.as_ref() + .map(|(b, r)| (&b.block, r.clone())) + .into_iter()).await; } } event = events.next() => { @@ -245,11 +247,12 @@ pub async fn fee_history_cache_new_blocks_task( // the stream ended, we are done break; }; - let (blocks, receipts): (Vec<_>, Vec<_>) = event - .committed() + + let committed = event .committed(); + let (blocks, receipts): (Vec<_>, Vec<_>) = committed .blocks_and_receipts() .map(|(block, receipts)| { - (block.block.clone(), Arc::new(receipts.iter().flatten().cloned().collect::>())) + (&block.block, Arc::new(receipts.iter().flatten().cloned().collect::>())) }) .unzip(); fee_history_cache.insert_blocks(blocks.into_iter().zip(receipts)).await; @@ -302,7 +305,7 @@ pub fn calculate_reward_percentiles_for_block( // the percentiles are monotonically increasing. let mut tx_index = 0; let mut cumulative_gas_used = transactions.first().map(|tx| tx.gas_used).unwrap_or_default(); - let mut rewards_in_block = Vec::new(); + let mut rewards_in_block = Vec::with_capacity(percentiles.len()); for percentile in percentiles { // Empty blocks should return in a zero row if transactions.is_empty() { @@ -363,7 +366,7 @@ impl FeeHistoryEntry { gas_used_ratio: block.gas_used as f64 / block.gas_limit as f64, base_fee_per_blob_gas: block.blob_fee(), blob_gas_used_ratio: block.blob_gas_used() as f64 / - reth_primitives::constants::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, + alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, excess_blob_gas: block.excess_blob_gas, blob_gas_used: block.blob_gas_used, gas_used: block.gas_used, diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 01591bc4de..9da373376b 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -1,16 +1,17 @@ //! An implementation of the eth gas price oracle, used for providing gas price estimates based on //! previous blocks. 
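Editor's note: for reference, these are the two ratios a `FeeHistoryEntry` carries, using the `alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK` value the fee-history hunk above now imports (786,432 blob gas, i.e. six blobs of 131,072 each; the constant's value is assumed from the Cancun parameters and inlined here):

```rust
// Assumed Cancun blob gas cap: 6 blobs * 131_072 blob gas each.
const MAX_DATA_GAS_PER_BLOCK: u64 = 786_432;

fn gas_used_ratio(gas_used: u64, gas_limit: u64) -> f64 {
    gas_used as f64 / gas_limit as f64
}

fn blob_gas_used_ratio(blob_gas_used: u64) -> f64 {
    blob_gas_used as f64 / MAX_DATA_GAS_PER_BLOCK as f64
}

fn main() {
    // Half-full execution gas, two of six possible blobs:
    assert_eq!(gas_used_ratio(15_000_000, 30_000_000), 0.5);
    let blob_ratio = blob_gas_used_ratio(2 * 131_072);
    assert!((blob_ratio - 1.0 / 3.0).abs() < 1e-12);
}
```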
-use std::fmt::{self, Debug, Formatter}; - +use alloy_consensus::constants::GWEI_TO_WEI; +use alloy_eips::BlockNumberOrTag; use alloy_primitives::{B256, U256}; use alloy_rpc_types::BlockId; use derive_more::{Deref, DerefMut, From, Into}; -use reth_primitives::{constants::GWEI_TO_WEI, BlockNumberOrTag}; +use itertools::Itertools; use reth_rpc_server_types::constants; use reth_storage_api::BlockReaderIdExt; use schnellru::{ByLength, LruMap}; use serde::{Deserialize, Serialize}; +use std::fmt::{self, Debug, Formatter}; use tokio::sync::Mutex; use tracing::warn; @@ -212,7 +213,7 @@ where limit: usize, ) -> EthResult)>> { // check the cache (this will hit the disk if the block is not cached) - let mut block = match self.cache.get_block(block_hash).await? { + let block = match self.cache.get_sealed_block_with_senders(block_hash).await? { Some(block) => block, None => return Ok(None), }; @@ -221,11 +222,15 @@ where let parent_hash = block.parent_hash; // sort the functions by ascending effective tip first - block.body.transactions.sort_by_cached_key(|tx| tx.effective_tip_per_gas(base_fee_per_gas)); + let sorted_transactions = block + .body + .transactions + .iter() + .sorted_by_cached_key(|tx| tx.effective_tip_per_gas(base_fee_per_gas)); let mut prices = Vec::with_capacity(limit); - for tx in block.body.transactions() { + for tx in sorted_transactions { let mut effective_gas_tip = None; // ignore transactions with a tip under the configured threshold if let Some(ignore_under) = self.ignore_price { diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index fa36dae4c8..03c23dc345 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -37,5 +37,5 @@ pub use gas_oracle::{ }; pub use id_provider::EthSubscriptionIdProvider; pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; -pub use receipt::ReceiptBuilder; +pub use receipt::EthReceiptBuilder; pub use transaction::TransactionSource; diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index c64bbe055b..aa132675c9 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -2,12 +2,14 @@ //! //! Log parsing for building filter. +use alloy_eips::BlockNumHash; use alloy_primitives::TxHash; use alloy_rpc_types::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; -use reth_primitives::{BlockNumHash, Receipt, SealedBlock}; +use reth_primitives::{Receipt, SealedBlockWithSenders}; use reth_storage_api::BlockReader; +use std::sync::Arc; /// Returns all matching of a block's receipts when the transaction hashes are known. pub fn matching_block_logs_with_tx_hashes<'a, I>( @@ -50,8 +52,8 @@ where pub enum ProviderOrBlock<'a, P: BlockReader> { /// Provider Provider(&'a P), - /// [`SealedBlock`] - Block(SealedBlock), + /// [`SealedBlockWithSenders`] + Block(Arc), } /// Appends all matching logs of a block's receipts. 
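Editor's note: the gas-oracle hunk above can no longer sort the transaction list in place, since the block now comes out of the cache behind a shared `Arc`; `Itertools::sorted_by_cached_key` sorts borrowed items instead. A minimal demonstration of that technique (requires the `itertools` crate; `Tx` is an illustrative stand-in):

```rust
use itertools::Itertools;

#[derive(Debug)]
struct Tx {
    tip: u64,
}

fn main() {
    // Imagine this Vec lives inside a shared, cached block: it must not be
    // sorted in place.
    let txs = vec![Tx { tip: 30 }, Tx { tip: 10 }, Tx { tip: 20 }];

    // `sorted_by_cached_key` computes each key once and yields references in
    // sorted order, leaving `txs` untouched.
    let tips: Vec<u64> = txs.iter().sorted_by_cached_key(|tx| tx.tip).map(|tx| tx.tip).collect();

    assert_eq!(tips, vec![10, 20, 30]);
    // The original order is preserved.
    assert_eq!(txs[0].tip, 30);
}
```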
diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 949e205dcf..d8f413650a 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -4,9 +4,10 @@ use std::time::Instant; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::B256; use derive_more::Constructor; -use reth_primitives::{BlockId, BlockNumberOrTag, Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; /// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block. diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index e95c92f24a..0734b547ec 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,25 +1,101 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. +use alloy_consensus::{ReceiptEnvelope, Transaction}; use alloy_primitives::{Address, TxKind}; -use alloy_rpc_types::{ - AnyReceiptEnvelope, AnyTransactionReceipt, Log, ReceiptWithBloom, TransactionReceipt, -}; -use alloy_serde::{OtherFields, WithOtherFields}; -use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use alloy_rpc_types::{Log, ReceiptWithBloom, TransactionReceipt}; +use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; use revm_primitives::calc_blob_gasprice; use super::{EthApiError, EthResult}; +/// Builds a [`TransactionReceipt`], obtaining the inner receipt envelope from the given closure. +pub fn build_receipt( +    transaction: &TransactionSigned, +    meta: TransactionMeta, +    receipt: &Receipt, +    all_receipts: &[Receipt], +    build_envelope: impl FnOnce(ReceiptWithBloom) -> T, +) -> EthResult> { +    // Note: we assume this transaction is valid, because it's mined (or part of pending block) +    // and we don't need to check for pre EIP-2 +    let from = +        transaction.recover_signer_unchecked().ok_or(EthApiError::InvalidTransactionSignature)?; + +    // get the previous transaction cumulative gas used +    let gas_used = if meta.index == 0 { +        receipt.cumulative_gas_used +    } else { +        let prev_tx_idx = (meta.index - 1) as usize; +        all_receipts +            .get(prev_tx_idx) +            .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) +            .unwrap_or_default() +    }; + +    let blob_gas_used = transaction.transaction.blob_gas_used(); +    // Blob gas price should only be present if the transaction is a blob transaction +    let blob_gas_price = blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); +    let logs_bloom = receipt.bloom_slow(); + +    // get number of logs in the block +    let mut num_logs = 0; +    for prev_receipt in all_receipts.iter().take(meta.index as usize) { +        num_logs += prev_receipt.logs.len(); +    } + +    let logs: Vec = receipt +        .logs +        .iter() +        .enumerate() +        .map(|(tx_log_idx, log)| Log { +            inner: log.clone(), +            block_hash: Some(meta.block_hash), +            block_number: Some(meta.block_number), +            block_timestamp: Some(meta.timestamp), +            transaction_hash: Some(meta.tx_hash), +            transaction_index: Some(meta.index), +            log_index: Some((num_logs + tx_log_idx) as u64), +            removed: false, +        }) +        .collect(); + +    let rpc_receipt = alloy_rpc_types::Receipt { +        status: receipt.success.into(), +        cumulative_gas_used: receipt.cumulative_gas_used as u128, +        logs, +    }; + +    let (contract_address, to) = match transaction.transaction.kind() {
TxKind::Create => (Some(from.create(transaction.transaction.nonce())), None), + TxKind::Call(addr) => (None, Some(Address(*addr))), + }; + + Ok(TransactionReceipt { + inner: build_envelope(ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }), + transaction_hash: meta.tx_hash, + transaction_index: Some(meta.index), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + from, + to, + gas_used: gas_used as u128, + contract_address, + effective_gas_price: transaction.effective_gas_price(meta.base_fee), + // EIP-4844 fields + blob_gas_price, + blob_gas_used: blob_gas_used.map(u128::from), + authorization_list: transaction.authorization_list().map(|l| l.to_vec()), + }) +} + /// Receipt response builder. #[derive(Debug)] -pub struct ReceiptBuilder { +pub struct EthReceiptBuilder { /// The base response body, contains L1 fields. - pub base: TransactionReceipt>, - /// Additional L2 fields. - pub other: OtherFields, + pub base: TransactionReceipt, } -impl ReceiptBuilder { +impl EthReceiptBuilder { /// Returns a new builder with the base response body (L1 fields) set. /// /// Note: This requires _all_ block receipts because we need to calculate the gas used by the @@ -30,97 +106,23 @@ impl ReceiptBuilder { receipt: &Receipt, all_receipts: &[Receipt], ) -> EthResult { - // Note: we assume this transaction is valid, because it's mined (or part of pending block) - // and we don't need to check for pre EIP-2 - let from = transaction - .recover_signer_unchecked() - .ok_or(EthApiError::InvalidTransactionSignature)?; - - // get the previous transaction cumulative gas used - let gas_used = if meta.index == 0 { - receipt.cumulative_gas_used - } else { - let prev_tx_idx = (meta.index - 1) as usize; - all_receipts - .get(prev_tx_idx) - .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) - .unwrap_or_default() - }; - - let blob_gas_used = transaction.transaction.blob_gas_used(); - // Blob gas price should only be present if the transaction is a blob transaction - let blob_gas_price = - blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); - let logs_bloom = receipt.bloom_slow(); - - // get number of logs in the block - let mut num_logs = 0; - for prev_receipt in all_receipts.iter().take(meta.index as usize) { - num_logs += prev_receipt.logs.len(); - } - - let logs: Vec = receipt - .logs - .iter() - .enumerate() - .map(|(tx_log_idx, log)| Log { - inner: log.clone(), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - block_timestamp: Some(meta.timestamp), - transaction_hash: Some(meta.tx_hash), - transaction_index: Some(meta.index), - log_index: Some((num_logs + tx_log_idx) as u64), - removed: false, - }) - .collect(); - - let rpc_receipt = alloy_rpc_types::Receipt { - status: receipt.success.into(), - cumulative_gas_used: receipt.cumulative_gas_used as u128, - logs, - }; - - let (contract_address, to) = match transaction.transaction.kind() { - TxKind::Create => (Some(from.create(transaction.transaction.nonce())), None), - TxKind::Call(addr) => (None, Some(Address(*addr))), - }; - - #[allow(clippy::needless_update)] - let base = TransactionReceipt { - inner: AnyReceiptEnvelope { - inner: ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, - r#type: transaction.transaction.tx_type().into(), - }, - transaction_hash: meta.tx_hash, - transaction_index: Some(meta.index), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - from, - to, - gas_used: gas_used as u128, - contract_address, 
- effective_gas_price: transaction.effective_gas_price(meta.base_fee), - // TODO pre-byzantium receipts have a post-transaction state root - state_root: None, - // EIP-4844 fields - blob_gas_price, - blob_gas_used: blob_gas_used.map(u128::from), - authorization_list: transaction.authorization_list().map(|l| l.to_vec()), - }; - - Ok(Self { base, other: Default::default() }) - } + let base = build_receipt(transaction, meta, receipt, all_receipts, |receipt_with_bloom| { + match receipt.tx_type { + TxType::Legacy => ReceiptEnvelope::Legacy(receipt_with_bloom), + TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt_with_bloom), + TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt_with_bloom), + TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom), + TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom), + #[allow(unreachable_patterns)] + _ => unreachable!(), + } + })?; - /// Adds fields to response body. - pub fn add_other_fields(mut self, mut fields: OtherFields) -> Self { - self.other.append(&mut fields); - self + Ok(Self { base }) } /// Builds a receipt response from the base response body, and any set additional fields. - pub fn build(self) -> AnyTransactionReceipt { - let Self { base, other } = self; - WithOtherFields { inner: base, other } + pub fn build(self) -> TransactionReceipt { + self.base } } diff --git a/crates/rpc/rpc-eth-types/src/revm_utils.rs b/crates/rpc/rpc-eth-types/src/revm_utils.rs index 25c54fd467..7dc20c5242 100644 --- a/crates/rpc/rpc-eth-types/src/revm_utils.rs +++ b/crates/rpc/rpc-eth-types/src/revm_utils.rs @@ -315,7 +315,7 @@ where #[cfg(test)] mod tests { use super::*; - use reth_primitives::constants::GWEI_TO_WEI; + use alloy_consensus::constants::GWEI_TO_WEI; #[test] fn test_ensure_0_fallback() { diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 561aa360d8..20952413c1 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -1,7 +1,7 @@ //! Utilities for serving `eth_simulateV1` -use alloy_consensus::{TxEip4844Variant, TxType, TypedTransaction}; -use alloy_primitives::Parity; +use alloy_consensus::{Transaction as _, TxEip4844Variant, TxType, TypedTransaction}; +use alloy_primitives::{Parity, Signature}; use alloy_rpc_types::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, Block, BlockTransactionsKind, @@ -9,10 +9,8 @@ use alloy_rpc_types::{ use alloy_rpc_types_eth::transaction::TransactionRequest; use jsonrpsee_types::ErrorObject; use reth_primitives::{ - logs_bloom, proofs::{calculate_receipt_root, calculate_transaction_root}, - BlockBody, BlockWithSenders, Receipt, Signature, Transaction, TransactionSigned, - TransactionSignedNoHash, + BlockBody, BlockWithSenders, Receipt, Transaction, TransactionSigned, TransactionSignedNoHash, }; use reth_revm::database::StateProviderDatabase; use reth_rpc_server_types::result::rpc_err; @@ -172,6 +170,7 @@ where } /// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. 
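Editor's note: `EthReceiptBuilder::new` above delegates to the new `build_receipt`, which is generic over the envelope via an `FnOnce` closure so other networks can reuse the body-building logic and only supply their own wrapper. A self-contained sketch of that closure-parameterized builder shape (stand-in types, not the real alloy receipt types):

```rust
struct ReceiptBody {
    cumulative_gas_used: u64,
}

enum EthEnvelope {
    Legacy(ReceiptBody),
    Eip1559(ReceiptBody),
}

// Generic over the envelope type: the shared code assembles the common body,
// and the caller's closure wraps it in a network-specific envelope.
fn build_receipt<T>(gas: u64, build_envelope: impl FnOnce(ReceiptBody) -> T) -> T {
    let body = ReceiptBody { cumulative_gas_used: gas };
    build_envelope(body)
}

fn main() {
    // Tuple-variant constructors can be passed directly as the closure.
    let legacy = build_receipt(21_000, EthEnvelope::Legacy);
    let dynamic = build_receipt(21_000, EthEnvelope::Eip1559);
    assert!(matches!(legacy, EthEnvelope::Legacy(b) if b.cumulative_gas_used == 21_000));
    assert!(matches!(dynamic, EthEnvelope::Eip1559(_)));
}
```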
+#[expect(clippy::too_many_arguments)] pub fn build_block( results: Vec<(Address, ExecutionResult)>, transactions: Vec, @@ -180,10 +179,11 @@ pub fn build_block( total_difficulty: U256, full_transactions: bool, db: &CacheDB>>, + tx_resp_builder: &T, ) -> Result>, EthApiError> { let mut calls: Vec = Vec::with_capacity(results.len()); let mut senders = Vec::with_capacity(results.len()); - let mut receipts = Vec::new(); + let mut receipts = Vec::with_capacity(results.len()); let mut log_index = 0; for (transaction_index, ((sender, result), tx)) in @@ -273,7 +273,7 @@ pub fn build_block( } } - let state_root = db.db.0.state_root(hashed_state)?; + let state_root = db.db.state_root(hashed_state)?; let header = reth_primitives::Header { beneficiary: block_env.coinbase, @@ -288,7 +288,9 @@ pub fn build_block( receipts_root: calculate_receipt_root(&receipts), transactions_root: calculate_transaction_root(&transactions), state_root, - logs_bloom: logs_bloom(receipts.iter().flat_map(|r| r.receipt.logs.iter())), + logs_bloom: alloy_primitives::logs_bloom( + receipts.iter().flat_map(|r| r.receipt.logs.iter()), + ), mix_hash: block_env.prevrandao.unwrap_or_default(), ..Default::default() }; @@ -304,6 +306,6 @@ pub fn build_block( let txs_kind = if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; - let block = from_block::(block, total_difficulty, txs_kind, None)?; + let block = from_block(block, total_difficulty, txs_kind, None, tx_resp_builder)?; Ok(SimulatedBlock { inner: block, calls }) } diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index c3ca1b503a..7d2237a1b7 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -41,9 +41,9 @@ impl TransactionSource { } /// Conversion into network specific transaction type. - pub fn into_transaction(self) -> T::Transaction { + pub fn into_transaction(self, resp_builder: &T) -> T::Transaction { match self { - Self::Pool(tx) => from_recovered::(tx), + Self::Pool(tx) => from_recovered(tx, resp_builder), Self::Block { transaction, index, block_hash, block_number, base_fee } => { let tx_info = TransactionInfo { hash: Some(transaction.hash()), @@ -53,7 +53,7 @@ impl TransactionSource { base_fee: base_fee.map(u128::from), }; - from_recovered_with_block_context::(transaction, tx_info) + from_recovered_with_block_context(transaction, tx_info, resp_builder) } } } diff --git a/crates/rpc/rpc-server-types/Cargo.toml b/crates/rpc/rpc-server-types/Cargo.toml index 08ecd39477..275d8ea561 100644 --- a/crates/rpc/rpc-server-types/Cargo.toml +++ b/crates/rpc/rpc-server-types/Cargo.toml @@ -14,11 +14,11 @@ workspace = true [dependencies] reth-errors.workspace = true reth-network-api.workspace = true -reth-primitives.workspace = true # ethereum alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-eips.workspace = true # rpc jsonrpsee-core.workspace = true @@ -27,4 +27,3 @@ jsonrpsee-types.workspace = true # misc strum = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } - diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index 0bc4418193..48019745a3 100644 --- a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -51,9 +51,9 @@ pub const DEFAULT_MAX_SIMULATE_BLOCKS: u64 = 256; /// The default eth historical proof window. 
pub const DEFAULT_ETH_PROOF_WINDOW: u64 = 0; -/// Maximum eth historical proof window. Equivalent to roughly one and a half months of data on a 12 -/// second block time, and a week on a 2 second block time. -pub const MAX_ETH_PROOF_WINDOW: u64 = 7 * 24 * 60 * 60 / 2; +/// Maximum eth historical proof window. Equivalent to roughly 6 months of data on a 12 +/// second block time, and a month on a 2 second block time. +pub const MAX_ETH_PROOF_WINDOW: u64 = 28 * 24 * 60 * 60 / 2; /// GPO specific constants pub mod gas_oracle { @@ -80,9 +80,8 @@ pub mod gas_oracle { /// The default gas limit for `eth_call` and adjacent calls. /// - /// This is different from the default to regular 30M block gas limit - /// [`ETHEREUM_BLOCK_GAS_LIMIT`](reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT) to allow - /// for more complex calls. + /// This is different from the regular default 30M block gas limit `ETHEREUM_BLOCK_GAS_LIMIT` + /// to allow for more complex calls. pub const RPC_DEFAULT_GAS_CAP: u64 = 50_000_000; /// Allowed error ratio for gas estimation diff --git a/crates/rpc/rpc-server-types/src/module.rs b/crates/rpc/rpc-server-types/src/module.rs index 72a5e7c858..9f96ff0cef 100644 --- a/crates/rpc/rpc-server-types/src/module.rs +++ b/crates/rpc/rpc-server-types/src/module.rs @@ -199,9 +199,12 @@ impl FromStr for RpcModuleSelection { } let mut modules = s.split(',').map(str::trim).peekable(); let first = modules.peek().copied().ok_or(ParseError::VariantNotFound)?; - match first { - "all" | "All" => Ok(Self::All), - "none" | "None" => Ok(Self::Selection(Default::default())), + // We convert to lowercase to make the comparison case-insensitive + // + // This is a way to allow typing "all" and "ALL" and "All" and "aLl" etc. + match first.to_lowercase().as_str() { + "all" => Ok(Self::All), + "none" => Ok(Self::Selection(Default::default())), + _ => Self::try_from_selection(modules), } } @@ -255,6 +258,8 @@ pub enum RethRpcModule { Reth, /// `ots_` module Ots, + /// `flashbots_` module + Flashbots, } // === impl RethRpcModule === @@ -303,6 +308,7 @@ impl FromStr for RethRpcModule { "rpc" => Self::Rpc, "reth" => Self::Reth, "ots" => Self::Ots, + "flashbots" => Self::Flashbots, _ => return Err(ParseError::VariantNotFound), }) } @@ -329,3 +335,229 @@ impl Serialize for RethRpcModule { s.serialize_str(self.as_ref()) } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_all_modules() { + let all_modules = RpcModuleSelection::all_modules(); + assert_eq!(all_modules.len(), RethRpcModule::variant_count()); + } + + #[test] + fn test_standard_modules() { + let standard_modules = RpcModuleSelection::standard_modules(); + let expected_modules: HashSet = + HashSet::from([RethRpcModule::Eth, RethRpcModule::Net, RethRpcModule::Web3]); + assert_eq!(standard_modules, expected_modules); + } + + #[test] + fn test_default_ipc_modules() { + let default_ipc_modules = RpcModuleSelection::default_ipc_modules(); + assert_eq!(default_ipc_modules, RpcModuleSelection::all_modules()); + } + + #[test] + fn test_try_from_selection_success() { + let selection = vec!["eth", "admin"]; + let config = RpcModuleSelection::try_from_selection(selection).unwrap(); + assert_eq!(config, RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin])); + } + + #[test] + fn test_rpc_module_selection_len() { + let all_modules = RpcModuleSelection::All; + let standard = RpcModuleSelection::Standard; + let selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + + assert_eq!(all_modules.len(),
RethRpcModule::variant_count()); + assert_eq!(standard.len(), 3); + assert_eq!(selection.len(), 2); + } + + #[test] + fn test_rpc_module_selection_is_empty() { + let empty_selection = RpcModuleSelection::from(HashSet::new()); + assert!(empty_selection.is_empty()); + + let non_empty_selection = RpcModuleSelection::from([RethRpcModule::Eth]); + assert!(!non_empty_selection.is_empty()); + } + + #[test] + fn test_rpc_module_selection_iter_selection() { + let all_modules = RpcModuleSelection::All; + let standard = RpcModuleSelection::Standard; + let selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + + assert_eq!(all_modules.iter_selection().count(), RethRpcModule::variant_count()); + assert_eq!(standard.iter_selection().count(), 3); + assert_eq!(selection.iter_selection().count(), 2); + } + + #[test] + fn test_rpc_module_selection_to_selection() { + let all_modules = RpcModuleSelection::All; + let standard = RpcModuleSelection::Standard; + let selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + + assert_eq!(all_modules.to_selection(), RpcModuleSelection::all_modules()); + assert_eq!(standard.to_selection(), RpcModuleSelection::standard_modules()); + assert_eq!( + selection.to_selection(), + HashSet::from([RethRpcModule::Eth, RethRpcModule::Admin]) + ); + } + + #[test] + fn test_rpc_module_selection_are_identical() { + // Test scenario: both selections are `All` + // + // Since both selections include all possible RPC modules, they should be considered + // identical. + let all_modules = RpcModuleSelection::All; + assert!(RpcModuleSelection::are_identical(Some(&all_modules), Some(&all_modules))); + + // Test scenario: both `http` and `ws` are `None` + // + // When both arguments are `None`, the function should return `true` because no modules are + // selected. + assert!(RpcModuleSelection::are_identical(None, None)); + + // Test scenario: both selections contain identical sets of specific modules + // + // In this case, both selections contain the same modules (`Eth` and `Admin`), + // so they should be considered identical. + let selection1 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + let selection2 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + assert!(RpcModuleSelection::are_identical(Some(&selection1), Some(&selection2))); + + // Test scenario: one selection is `All`, the other is `Standard` + // + // `All` includes all possible modules, while `Standard` includes a specific set of modules. + // Since `Standard` does not cover all modules, these two selections should not be + // considered identical. + let standard = RpcModuleSelection::Standard; + assert!(!RpcModuleSelection::are_identical(Some(&all_modules), Some(&standard))); + + // Test scenario: one is `None`, the other is an empty selection + // + // When one selection is `None` and the other is an empty selection (no modules), + // they should be considered identical because neither selects any modules. + let empty_selection = RpcModuleSelection::Selection(HashSet::new()); + assert!(RpcModuleSelection::are_identical(None, Some(&empty_selection))); + assert!(RpcModuleSelection::are_identical(Some(&empty_selection), None)); + + // Test scenario: one is `None`, the other is a non-empty selection + // + // If one selection is `None` and the other contains modules, they should not be considered + // identical because `None` represents no selection, while the other explicitly + // selects modules. 
+ let non_empty_selection = RpcModuleSelection::from([RethRpcModule::Eth]); + assert!(!RpcModuleSelection::are_identical(None, Some(&non_empty_selection))); + assert!(!RpcModuleSelection::are_identical(Some(&non_empty_selection), None)); + + // Test scenario: `All` vs. non-full selection + // + // If one selection is `All` (which includes all modules) and the other contains only a + // subset of modules, they should not be considered identical. + let partial_selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net]); + assert!(!RpcModuleSelection::are_identical(Some(&all_modules), Some(&partial_selection))); + + // Test scenario: full selection vs `All` + // + // If the other selection explicitly selects all available modules, it should be identical + // to `All`. + let full_selection = + RpcModuleSelection::from(RethRpcModule::modules().into_iter().collect::>()); + assert!(RpcModuleSelection::are_identical(Some(&all_modules), Some(&full_selection))); + + // Test scenario: different non-empty selections + // + // If the two selections contain different sets of modules, they should not be considered + // identical. + let selection3 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net]); + let selection4 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Web3]); + assert!(!RpcModuleSelection::are_identical(Some(&selection3), Some(&selection4))); + + // Test scenario: `Standard` vs an equivalent selection + // The `Standard` selection includes a predefined set of modules. If we explicitly create + // a selection with the same set of modules, they should be considered identical. + let matching_standard = + RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net, RethRpcModule::Web3]); + assert!(RpcModuleSelection::are_identical(Some(&standard), Some(&matching_standard))); + + // Test scenario: `Standard` vs non-matching selection + // + // If the selection does not match the modules included in `Standard`, they should not be + // considered identical. 
+ let non_matching_standard = + RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net]); + assert!(!RpcModuleSelection::are_identical(Some(&standard), Some(&non_matching_standard))); + } + + #[test] + fn test_rpc_module_selection_from_str() { + // Test empty string returns default selection + let result = RpcModuleSelection::from_str(""); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default())); + + // Test "all" (case insensitive) returns All variant + let result = RpcModuleSelection::from_str("all"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::All); + + let result = RpcModuleSelection::from_str("All"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::All); + + let result = RpcModuleSelection::from_str("ALL"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::All); + + // Test "none" (case insensitive) returns empty selection + let result = RpcModuleSelection::from_str("none"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default())); + + let result = RpcModuleSelection::from_str("None"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default())); + + let result = RpcModuleSelection::from_str("NONE"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default())); + + // Test valid selections: "eth,admin" + let result = RpcModuleSelection::from_str("eth,admin"); + assert!(result.is_ok()); + let expected_selection = + RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + assert_eq!(result.unwrap(), expected_selection); + + // Test valid selection with extra spaces: " eth , admin " + let result = RpcModuleSelection::from_str(" eth , admin "); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), expected_selection); + + // Test invalid selection should return error + let result = RpcModuleSelection::from_str("invalid,unknown"); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), ParseError::VariantNotFound); + + // Test single valid selection: "eth" + let result = RpcModuleSelection::from_str("eth"); + assert!(result.is_ok()); + let expected_selection = RpcModuleSelection::from([RethRpcModule::Eth]); + assert_eq!(result.unwrap(), expected_selection); + + // Test single invalid selection: "unknown" + let result = RpcModuleSelection::from_str("unknown"); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), ParseError::VariantNotFound); + } +} diff --git a/crates/rpc/rpc-server-types/src/result.rs b/crates/rpc/rpc-server-types/src/result.rs index 78e6436643..5d1b702e9f 100644 --- a/crates/rpc/rpc-server-types/src/result.rs +++ b/crates/rpc/rpc-server-types/src/result.rs @@ -2,9 +2,10 @@ use std::fmt; +use alloy_eips::BlockId; use alloy_rpc_types_engine::PayloadError; use jsonrpsee_core::RpcResult; -use reth_primitives::BlockId; +use reth_errors::ConsensusError; /// Helper trait to easily convert various `Result` types into [`RpcResult`] pub trait ToRpcResult: Sized { @@ -102,6 +103,7 @@ macro_rules! 
impl_to_rpc_result { } impl_to_rpc_result!(PayloadError); +impl_to_rpc_result!(ConsensusError); impl_to_rpc_result!(reth_errors::RethError); impl_to_rpc_result!(reth_errors::ProviderError); impl_to_rpc_result!(reth_network_api::NetworkError); diff --git a/crates/rpc/rpc-testing-util/Cargo.toml b/crates/rpc/rpc-testing-util/Cargo.toml index 4977c3a2c4..e5c57502e2 100644 --- a/crates/rpc/rpc-testing-util/Cargo.toml +++ b/crates/rpc/rpc-testing-util/Cargo.toml @@ -21,6 +21,7 @@ alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types.workspace = true alloy-rpc-types-trace.workspace = true +alloy-eips.workspace = true # async futures.workspace = true @@ -36,4 +37,4 @@ similar-asserts.workspace = true tokio = { workspace = true, features = ["rt-multi-thread", "macros", "rt"] } reth-rpc-eth-api.workspace = true jsonrpsee-http-client.workspace = true -alloy-rpc-types-trace.workspace = true \ No newline at end of file +alloy-rpc-types-trace.workspace = true diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index f50064e80c..d4c7dce860 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -6,6 +6,7 @@ use std::{ task::{Context, Poll}, }; +use alloy_eips::BlockId; use alloy_primitives::{TxHash, B256}; use alloy_rpc_types::{Block, Transaction}; use alloy_rpc_types_eth::transaction::TransactionRequest; @@ -15,7 +16,7 @@ use alloy_rpc_types_trace::{ }; use futures::{Stream, StreamExt}; use jsonrpsee::core::client::Error as RpcError; -use reth_primitives::{BlockId, Receipt}; +use reth_primitives::Receipt; use reth_rpc_api::{clients::DebugApiClient, EthApiClient}; const NOOP_TRACER: &str = include_str!("../assets/noop-tracer.js"); @@ -292,7 +293,7 @@ pub struct DebugTraceTransactionsStream<'a> { stream: Pin + 'a>>, } -impl<'a> DebugTraceTransactionsStream<'a> { +impl DebugTraceTransactionsStream<'_> { /// Returns the next error result of the stream. pub async fn next_err(&mut self) -> Option<(RpcError, TxHash)> { loop { @@ -324,7 +325,7 @@ pub struct DebugTraceBlockStream<'a> { stream: Pin + 'a>>, } -impl<'a> DebugTraceBlockStream<'a> { +impl DebugTraceBlockStream<'_> { /// Returns the next error result of the stream. pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { loop { diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index c6dc16cf10..097d582df4 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -1,5 +1,6 @@ //! Helpers for testing trace calls. +use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, TxHash, B256}; use alloy_rpc_types::Index; use alloy_rpc_types_eth::transaction::TransactionRequest; @@ -10,7 +11,6 @@ use alloy_rpc_types_trace::{ }; use futures::{Stream, StreamExt}; use jsonrpsee::core::client::Error as RpcError; -use reth_primitives::BlockId; use reth_rpc_api::clients::TraceApiClient; use std::{ pin::Pin, @@ -381,7 +381,7 @@ pub struct TraceBlockStream<'a> { stream: Pin + 'a>>, } -impl<'a> TraceBlockStream<'a> { +impl TraceBlockStream<'_> { /// Returns the next error result of the stream. 
pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> {
         loop {
@@ -514,9 +514,9 @@ where
 #[cfg(test)]
 mod tests {
     use super::*;
+    use alloy_eips::BlockNumberOrTag;
     use alloy_rpc_types_trace::filter::TraceFilterMode;
     use jsonrpsee::http_client::HttpClientBuilder;
-    use reth_primitives::BlockNumberOrTag;
 
     const fn assert_is_stream<St: Stream>(_: &St) {}
diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml
index 81b4def204..b9a9e5f036 100644
--- a/crates/rpc/rpc-types-compat/Cargo.toml
+++ b/crates/rpc/rpc-types-compat/Cargo.toml
@@ -22,8 +22,11 @@ alloy-primitives.workspace = true
 alloy-rlp.workspace = true
 alloy-rpc-types.workspace = true
 alloy-rpc-types-eth = { workspace = true, default-features = false, features = ["serde"] }
-alloy-serde.workspace = true
 alloy-rpc-types-engine.workspace = true
+alloy-consensus.workspace = true
+
+# io
+serde.workspace = true
 
 [dev-dependencies]
-serde_json.workspace = true
\ No newline at end of file
+serde_json.workspace = true
diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs
index fc8ea9e1c4..a954e05e4f 100644
--- a/crates/rpc/rpc-types-compat/src/block.rs
+++ b/crates/rpc/rpc-types-compat/src/block.rs
@@ -20,12 +20,15 @@ pub fn from_block<T: TransactionCompat>(
     total_difficulty: U256,
     kind: BlockTransactionsKind,
     block_hash: Option<B256>,
+    tx_resp_builder: &T,
 ) -> Result<Block<T::Transaction>, BlockError> {
     match kind {
         BlockTransactionsKind::Hashes => {
             Ok(from_block_with_tx_hashes::<T::Transaction>(block, total_difficulty, block_hash))
         }
-        BlockTransactionsKind::Full => from_block_full::<T>(block, total_difficulty, block_hash),
+        BlockTransactionsKind::Full => {
+            from_block_full::<T>(block, total_difficulty, block_hash, tx_resp_builder)
+        }
     }
 }
 
@@ -60,6 +63,7 @@ pub fn from_block_full<T: TransactionCompat>(
     mut block: BlockWithSenders,
     total_difficulty: U256,
     block_hash: Option<B256>,
+    tx_resp_builder: &T,
 ) -> Result<Block<T::Transaction>, BlockError> {
     let block_hash = block_hash.unwrap_or_else(|| block.block.header.hash_slow());
     let block_number = block.block.number;
@@ -83,7 +87,7 @@ pub fn from_block_full<T: TransactionCompat>(
                 index: Some(idx as u64),
             };
 
-            from_recovered_with_block_context::<T>(signed_tx_ec_recovered, tx_info)
+            from_recovered_with_block_context::<T>(signed_tx_ec_recovered, tx_info, tx_resp_builder)
         })
         .collect::<Vec<_>>();
 
@@ -124,7 +128,7 @@ pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader)
         blob_gas_used,
         excess_blob_gas,
         parent_beacon_block_root,
-        requests_root,
+        requests_hash,
     } = header;
 
     Header {
@@ -150,7 +154,7 @@ pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader)
         excess_blob_gas,
         parent_beacon_block_root,
         total_difficulty: None,
-        requests_root,
+        requests_hash,
     }
 }
 
@@ -179,14 +183,14 @@ fn from_block_with_transactions(
 /// an Uncle from its header.
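+///
+/// # Example
+///
+/// A minimal sketch (editor's illustration, not part of the original patch), assuming an
+/// ommer `header: PrimitiveHeader` is in scope; the assertions follow from the function body
+/// below:
+///
+/// ```ignore
+/// let uncle = uncle_block_from_header(header);
+/// assert!(uncle.uncles.is_empty());
+/// assert!(uncle.withdrawals.is_none());
+/// ```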
pub fn uncle_block_from_header(header: PrimitiveHeader) -> Block { let hash = header.hash_slow(); - let rpc_header = from_primitive_with_hash(SealedHeader::new(header.clone(), hash)); let uncle_block = PrimitiveBlock { header, ..Default::default() }; let size = Some(U256::from(uncle_block.length())); + let rpc_header = from_primitive_with_hash(SealedHeader::new(uncle_block.header, hash)); Block { uncles: vec![], header: rpc_header, transactions: BlockTransactions::Uncle, - withdrawals: Some(vec![]), + withdrawals: None, size, } } diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 8820b16e6b..38a517ddb9 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -1,17 +1,20 @@ //! Standalone Conversion Functions for Handling Different Versions of Execution Payloads in //! Ethereum's Engine -use alloy_eips::eip2718::{Decodable2718, Encodable2718}; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::{ + eip2718::{Decodable2718, Encodable2718}, + eip7685::Requests, +}; use alloy_primitives::{B256, U256}; use alloy_rpc_types_engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, - ExecutionPayload, ExecutionPayloadBodyV2, ExecutionPayloadV1, ExecutionPayloadV2, - ExecutionPayloadV3, ExecutionPayloadV4, PayloadError, + ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV2, + ExecutionPayloadV3, PayloadError, }; use reth_primitives::{ - constants::{EMPTY_OMMER_ROOT_HASH, MAXIMUM_EXTRA_DATA_SIZE}, proofs::{self}, - Block, BlockBody, Header, Request, SealedBlock, TransactionSigned, Withdrawals, + Block, BlockBody, Header, SealedBlock, TransactionSigned, Withdrawals, }; /// Converts [`ExecutionPayloadV1`] to [`Block`] @@ -63,7 +66,7 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result Result Result { - let ExecutionPayloadV4 { - payload_inner, - deposit_requests, - withdrawal_requests, - consolidation_requests, - } = payload; - let mut block = try_payload_v3_to_block(payload_inner)?; - - // attach requests with asc type identifiers - let requests = deposit_requests - .into_iter() - .map(Request::DepositRequest) - .chain(withdrawal_requests.into_iter().map(Request::WithdrawalRequest)) - .chain(consolidation_requests.into_iter().map(Request::ConsolidationRequest)) - .collect::>(); - - let requests_root = proofs::calculate_requests_root(&requests); - block.header.requests_root = Some(requests_root); - block.body.requests = Some(requests.into()); - - Ok(block) -} - /// Converts [`SealedBlock`] to [`ExecutionPayload`] pub fn block_to_payload(value: SealedBlock) -> ExecutionPayload { - if value.header.requests_root.is_some() { + if value.header.requests_hash.is_some() { // block with requests root: V3 - ExecutionPayload::V4(block_to_payload_v4(value)) + ExecutionPayload::V3(block_to_payload_v3(value)) } else if value.header.parent_beacon_block_root.is_some() { // block with parent beacon block root: V3 ExecutionPayload::V3(block_to_payload_v3(value)) @@ -213,37 +191,6 @@ pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { } } -/// Converts [`SealedBlock`] to [`ExecutionPayloadV4`] -pub fn block_to_payload_v4(mut value: SealedBlock) -> ExecutionPayloadV4 { - let (deposit_requests, withdrawal_requests, consolidation_requests) = - value.body.requests.take().unwrap_or_default().into_iter().fold( - (Vec::new(), Vec::new(), 
Vec::new()),
-            |(mut deposits, mut withdrawals, mut consolidation_requests), request| {
-                match request {
-                    Request::DepositRequest(r) => {
-                        deposits.push(r);
-                    }
-                    Request::WithdrawalRequest(r) => {
-                        withdrawals.push(r);
-                    }
-                    Request::ConsolidationRequest(r) => {
-                        consolidation_requests.push(r);
-                    }
-                    _ => {}
-                };
-
-                (deposits, withdrawals, consolidation_requests)
-            },
-        );
-
-    ExecutionPayloadV4 {
-        deposit_requests,
-        withdrawal_requests,
-        consolidation_requests,
-        payload_inner: block_to_payload_v3(value),
-    }
-}
-
 /// Converts [`SealedBlock`] to [`ExecutionPayloadFieldV2`]
 pub fn convert_block_to_payload_field_v2(value: SealedBlock) -> ExecutionPayloadFieldV2 {
     // if there are withdrawals, return V2
@@ -298,45 +245,49 @@ pub fn convert_block_to_payload_input_v2(value: SealedBlock) -> ExecutionPayload
     }
 }
 
-/// Tries to create a new block (without a block hash) from the given payload and optional parent
-/// beacon block root.
+/// Tries to create a new unsealed block from the given payload and payload sidecar.
+///
 /// Performs additional validation of `extra_data` and `base_fee_per_gas` fields.
 ///
-/// NOTE: The log bloom is assumed to be validated during serialization.
+/// # Note
+///
+/// The log bloom is assumed to be validated during serialization.
 ///
 /// See
 pub fn try_into_block(
     value: ExecutionPayload,
-    parent_beacon_block_root: Option<B256>,
+    sidecar: &ExecutionPayloadSidecar,
 ) -> Result<Block, PayloadError> {
     let mut base_payload = match value {
         ExecutionPayload::V1(payload) => try_payload_v1_to_block(payload)?,
         ExecutionPayload::V2(payload) => try_payload_v2_to_block(payload)?,
         ExecutionPayload::V3(payload) => try_payload_v3_to_block(payload)?,
-        ExecutionPayload::V4(payload) => try_payload_v4_to_block(payload)?,
     };
 
-    base_payload.header.parent_beacon_block_root = parent_beacon_block_root;
+    base_payload.header.parent_beacon_block_root = sidecar.parent_beacon_block_root();
+    base_payload.header.requests_hash = sidecar.requests().map(Requests::requests_hash);
 
     Ok(base_payload)
 }
 
-/// Tries to create a new block from the given payload and optional parent beacon block root.
-///
-/// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and
-/// comparing the value with `payload.block_hash`.
+/// Tries to create a new sealed block from the given payload and payload sidecar.
 ///
 /// Uses [`try_into_block`] to convert from the [`ExecutionPayload`] to [`Block`] and seals the
 /// block with its hash.
 ///
 /// Uses [`validate_block_hash`] to validate the payload block hash and ultimately return the
 /// [`SealedBlock`].
+///
+/// # Note
+///
+/// Empty ommers, nonce, difficulty, and execution request values are validated upon computing the
+/// block hash and comparing the value with `payload.block_hash`.
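+///
+/// # Example
+///
+/// A minimal sketch (editor's illustration, not part of the original patch), assuming a
+/// `payload: ExecutionPayload` with matching `cancun_fields: CancunPayloadFields` and the
+/// expected `block_hash` are in scope; it mirrors the test usage further below:
+///
+/// ```ignore
+/// let sealed = try_into_sealed_block(payload, &ExecutionPayloadSidecar::v3(cancun_fields))?;
+/// assert_eq!(sealed.hash(), block_hash);
+/// ```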
pub fn try_into_sealed_block( payload: ExecutionPayload, - parent_beacon_block_root: Option, + sidecar: &ExecutionPayloadSidecar, ) -> Result { let block_hash = payload.block_hash(); - let base_payload = try_into_block(payload, parent_beacon_block_root)?; + let base_payload = try_into_block(payload, sidecar)?; // validate block hash and return validate_block_hash(block_hash, base_payload) @@ -376,52 +327,6 @@ pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { } } -/// Converts [`Block`] to [`ExecutionPayloadBodyV2`] -pub fn convert_to_payload_body_v2(value: Block) -> ExecutionPayloadBodyV2 { - let transactions = value.body.transactions.into_iter().map(|tx| { - let mut out = Vec::new(); - tx.encode_2718(&mut out); - out.into() - }); - - let mut payload = ExecutionPayloadBodyV2 { - transactions: transactions.collect(), - withdrawals: value.body.withdrawals.map(Withdrawals::into_inner), - deposit_requests: None, - withdrawal_requests: None, - consolidation_requests: None, - }; - - if let Some(requests) = value.body.requests { - let (deposit_requests, withdrawal_requests, consolidation_requests) = - requests.into_iter().fold( - (Vec::new(), Vec::new(), Vec::new()), - |(mut deposits, mut withdrawals, mut consolidation_requests), request| { - match request { - Request::DepositRequest(r) => { - deposits.push(r); - } - Request::WithdrawalRequest(r) => { - withdrawals.push(r); - } - Request::ConsolidationRequest(r) => { - consolidation_requests.push(r); - } - _ => {} - }; - - (deposits, withdrawals, consolidation_requests) - }, - ); - - payload.deposit_requests = Some(deposit_requests); - payload.withdrawal_requests = Some(withdrawal_requests); - payload.consolidation_requests = Some(consolidation_requests); - } - - payload -} - /// Transforms a [`SealedBlock`] into a [`ExecutionPayloadV1`] pub fn execution_payload_from_sealed_block(value: SealedBlock) -> ExecutionPayloadV1 { let transactions = value.raw_transactions(); @@ -446,13 +351,12 @@ pub fn execution_payload_from_sealed_block(value: SealedBlock) -> ExecutionPaylo #[cfg(test)] mod tests { use super::{ - block_to_payload_v3, try_into_block, try_payload_v3_to_block, try_payload_v4_to_block, - validate_block_hash, + block_to_payload_v3, try_into_block, try_payload_v3_to_block, validate_block_hash, }; use alloy_primitives::{b256, hex, Bytes, U256}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, - ExecutionPayloadV3, ExecutionPayloadV4, + CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, + ExecutionPayloadV2, ExecutionPayloadV3, }; #[test] @@ -670,60 +574,9 @@ mod tests { let cancun_fields = CancunPayloadFields { parent_beacon_block_root, versioned_hashes }; // convert into block - let block = try_into_block(payload, Some(cancun_fields.parent_beacon_block_root)).unwrap(); + let block = try_into_block(payload, &ExecutionPayloadSidecar::v3(cancun_fields)).unwrap(); // Ensure the actual hash is calculated if we set the fields to what they should be validate_block_hash(block_hash_with_blob_fee_fields, block).unwrap(); } - - #[test] - fn parse_payload_v4() { - let s = r#"{ - "baseFeePerGas": "0x2ada43", - "blobGasUsed": "0x0", - "blockHash": "0x86eeb2a4b656499f313b601e1dcaedfeacccab27131b6d4ea99bc69a57607f7d", - "blockNumber": "0x2c", - "depositRequests": [ - { - "amount": "0xe8d4a51000", - "index": "0x0", - "pubkey": "0xaab5f2b3aad5c2075faf0c1d8937c7de51a53b765a21b4173eb2975878cea05d9ed3428b77f16a981716aa32af74c464", - 
"signature": "0xa889cd238be2dae44f2a3c24c04d686c548f6f82eb44d4604e1bc455b6960efb72b117e878068a8f2cfb91ad84b7ebce05b9254207aa51a1e8a3383d75b5a5bd2439f707636ea5b17b2b594b989c93b000b33e5dff6e4bed9d53a6d2d6889b0c", - "withdrawalCredentials": "0x00ab9364f8bf7561862ea0fc3b69c424c94ace406c4dc36ddfbf8a9d72051c80" - }, - { - "amount": "0xe8d4a51000", - "index": "0x1", - "pubkey": "0xb0b1b3b51cf688ead965a954c5cc206ba4e76f3f8efac60656ae708a9aad63487a2ca1fb30ccaf2ebe1028a2b2886b1b", - "signature": "0xb9759766e9bb191b1c457ae1da6cdf71a23fb9d8bc9f845eaa49ee4af280b3b9720ac4d81e64b1b50a65db7b8b4e76f1176a12e19d293d75574600e99fbdfecc1ab48edaeeffb3226cd47691d24473821dad0c6ff3973f03e4aa89f418933a56", - "withdrawalCredentials": "0x002d2b75f4a27f78e585a4735a40ab2437eceb12ec39938a94dc785a54d62513" - } - ], - "excessBlobGas": "0x0", - "extraData": "0x726574682f76302e322e302d626574612e372f6c696e7578", - "feeRecipient": "0x8943545177806ed17b9f23f0a21ee5948ecaa776", - "gasLimit": "0x1855e85", - "gasUsed": "0x25f98", - "logsBloom": "0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000", - "parentHash": "0xd753194ef19b5c566b7eca6e9ebcca03895b548e1e93a20a23d922ba0bc210d4", - "prevRandao": "0x8c52256fd491776dc32f531ad4c0dc1444684741bca15f54c9cd40c60142df90", - "receiptsRoot": "0x510e7fb94279897e5dcd6c1795f6137d8fe02e49e871bfea7999fd21a89f66aa", - "stateRoot": "0x59ae0706a2b47162666fc7af3e30ff7aa34154954b68cc6aed58c3af3d58c9c2", - "timestamp": "0x6643c5a9", - "transactions": [ - "0x02f9021e8330182480843b9aca0085174876e80083030d40944242424242424242424242424242424242424242893635c9adc5dea00000b901a422895118000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000012049f42823819771c6bbbd9cb6649850083fd3b6e5d0beb1069342c32d65a3b0990000000000000000000000000000000000000000000000000000000000000030aab5f2b3aad5c2075faf0c1d8937c7de51a53b765a21b4173eb2975878cea05d9ed3428b77f16a981716aa32af74c46400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000ab9364f8bf7561862ea0fc3b69c424c94ace406c4dc36ddfbf8a9d72051c800000000000000000000000000000000000000000000000000000000000000060a889cd238be2dae44f2a3c24c04d686c548f6f82eb44d4604e1bc455b6960efb72b117e878068a8f2cfb91ad84b7ebce05b9254207aa51a1e8a3383d75b5a5bd2439f707636ea5b17b2b594b989c93b000b33e5dff6e4bed9d53a6d2d6889b0cc080a0db786f0d89923949e533680524f003cebd66f32fbd30429a6b6bfbd3258dcf60a05241c54e05574765f7ddc1a742ae06b044edfe02bffb202bf172be97397eeca9", - 
"0x02f9021e8330182401843b9aca0085174876e80083030d40944242424242424242424242424242424242424242893635c9adc5dea00000b901a422895118000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000120d694d6a0b0103651aafd87db6c88297175d7317c6e6da53ccf706c3c991c91fd0000000000000000000000000000000000000000000000000000000000000030b0b1b3b51cf688ead965a954c5cc206ba4e76f3f8efac60656ae708a9aad63487a2ca1fb30ccaf2ebe1028a2b2886b1b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020002d2b75f4a27f78e585a4735a40ab2437eceb12ec39938a94dc785a54d625130000000000000000000000000000000000000000000000000000000000000060b9759766e9bb191b1c457ae1da6cdf71a23fb9d8bc9f845eaa49ee4af280b3b9720ac4d81e64b1b50a65db7b8b4e76f1176a12e19d293d75574600e99fbdfecc1ab48edaeeffb3226cd47691d24473821dad0c6ff3973f03e4aa89f418933a56c080a099dc5b94a51e9b91a6425b1fed9792863006496ab71a4178524819d7db0c5e88a0119748e62700234079d91ae80f4676f9e0f71b260e9b46ef9b4aff331d3c2318" - ], - "withdrawalRequests": [], - "withdrawals": [], - "consolidationRequests": [] - }"#; - - let payload = serde_json::from_str::(s).unwrap(); - let mut block = try_payload_v4_to_block(payload).unwrap(); - block.header.parent_beacon_block_root = - Some(b256!("d9851db05fa63593f75e2b12c4bba9f47740613ca57da3b523a381b8c27f3297")); - let hash = block.seal_slow().hash(); - assert_eq!(hash, b256!("86eeb2a4b656499f313b601e1dcaedfeacccab27131b6d4ea99bc69a57607f7d")) - } } diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 9bb2a8b5d9..16742144f2 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -2,14 +2,16 @@ mod signature; pub use signature::*; + use std::fmt; +use alloy_consensus::Transaction as _; use alloy_rpc_types::{ request::{TransactionInput, TransactionRequest}, - Transaction, TransactionInfo, + TransactionInfo, }; -use alloy_serde::WithOtherFields; use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered, TxType}; +use serde::{Deserialize, Serialize}; /// Create a new rpc transaction result for a mined transaction, using the given block hash, /// number, and tx index fields to populate the corresponding fields in the rpc result. @@ -19,21 +21,33 @@ use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered, TxType}; pub fn from_recovered_with_block_context( tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, + resp_builder: &T, ) -> T::Transaction { - T::fill(tx, tx_info) + resp_builder.fill(tx, tx_info) } /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. -pub fn from_recovered(tx: TransactionSignedEcRecovered) -> T::Transaction { - T::fill(tx, TransactionInfo::default()) +pub fn from_recovered( + tx: TransactionSignedEcRecovered, + resp_builder: &T, +) -> T::Transaction { + resp_builder.fill(tx, TransactionInfo::default()) } /// Builds RPC transaction w.r.t. network. pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { /// RPC transaction response type. - type Transaction: Send + Clone + Default + fmt::Debug; - + type Transaction: Serialize + + for<'de> Deserialize<'de> + + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug; + + /// /// Formats gas price and max fee per gas for RPC transaction response w.r.t. 
network specific
     /// transaction type.
     fn gas_price(signed_tx: &TransactionSigned, base_fee: Option<u64>) -> GasPrice {
@@ -47,9 +61,7 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug {
             // baseFee`
             let gas_price = base_fee
                 .and_then(|base_fee| {
-                    signed_tx
-                        .effective_tip_per_gas(Some(base_fee))
-                        .map(|tip| tip + base_fee as u128)
+                    signed_tx.effective_tip_per_gas(base_fee).map(|tip| tip + base_fee as u128)
                 })
                 .unwrap_or_else(|| signed_tx.max_fee_per_gas());
 
@@ -64,7 +76,7 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug {
 
     /// Create a new rpc transaction result for a _pending_ signed transaction, setting block
     /// environment related fields to `None`.
-    fn fill(tx: TransactionSignedEcRecovered, tx_inf: TransactionInfo) -> Self::Transaction;
+    fn fill(&self, tx: TransactionSignedEcRecovered, tx_inf: TransactionInfo) -> Self::Transaction;
 
     /// Truncates the input of a transaction to only the first 4 bytes.
     // todo: remove in favour of using constructor on `TransactionResponse` or similar
@@ -76,22 +88,6 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug {
     fn tx_type(tx: &Self::Transaction) -> u8;
 }
 
-impl TransactionCompat for () {
-    // this noop impl depends on integration in `reth_rpc_eth_api::EthApiTypes` noop impl, and
-    // `alloy_network::AnyNetwork`
-    type Transaction = WithOtherFields<Transaction>;
-
-    fn fill(_tx: TransactionSignedEcRecovered, _tx_info: TransactionInfo) -> Self::Transaction {
-        WithOtherFields::default()
-    }
-
-    fn otterscan_api_truncate_input(_tx: &mut Self::Transaction) {}
-
-    fn tx_type(_tx: &Self::Transaction) -> u8 {
-        0
-    }
-}
-
 /// Gas price and max fee per gas for a transaction. Helper type to format transaction RPC response.
 #[derive(Debug, Default)]
 pub struct GasPrice {
diff --git a/crates/rpc/rpc-types-compat/src/transaction/signature.rs b/crates/rpc/rpc-types-compat/src/transaction/signature.rs
index 536f6ac5e5..77ae365b2d 100644
--- a/crates/rpc/rpc-types-compat/src/transaction/signature.rs
+++ b/crates/rpc/rpc-types-compat/src/transaction/signature.rs
@@ -1,9 +1,9 @@
-use alloy_primitives::U256;
+use alloy_primitives::{Signature as PrimitiveSignature, U256};
 use alloy_rpc_types::{Parity, Signature};
-use reth_primitives::{transaction::legacy_parity, Signature as PrimitiveSignature, TxType};
+use reth_primitives::{transaction::legacy_parity, TxType};
 
 /// Creates a new rpc signature from a legacy [primitive
-/// signature](reth_primitives::Signature), using the give chain id to compute the signature's
+/// signature](alloy_primitives::Signature), using the given chain id to compute the signature's
 /// recovery id.
 ///
 /// If the chain id is `Some`, the recovery id is computed according to [EIP-155](https://eips.ethereum.org/EIPS/eip-155).
@@ -20,7 +20,7 @@ pub fn from_legacy_primitive_signature(
 }
 
 /// Creates a new rpc signature from a non-legacy [primitive
-/// signature](reth_primitives::Signature). This sets the `v` value to `0` or `1` depending on
+/// signature](alloy_primitives::Signature). This sets the `v` value to `0` or `1` depending on
 /// the signature's `odd_y_parity`.
 pub fn from_typed_primitive_signature(signature: PrimitiveSignature) -> Signature {
     Signature {
@@ -32,7 +32,7 @@ pub fn from_typed_primitive_signature(signature: PrimitiveSignature) -> Signature
 }
 
 /// Creates a new rpc signature from a legacy [primitive
-/// signature](reth_primitives::Signature).
+/// signature](alloy_primitives::Signature).
/// /// The tx type is used to determine whether or not to use the `chain_id` to compute the /// signature's recovery id. diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index ffbaacbb8f..a1d0843636 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -18,6 +18,7 @@ reth-primitives = { workspace = true, features = ["secp256k1"] } reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true reth-errors.workspace = true +reth-ethereum-consensus.workspace = true reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network-api.workspace = true @@ -31,9 +32,10 @@ reth-network-peers = { workspace = true, features = ["secp256k1"] } reth-evm.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true -reth-node-api.workspace = true reth-network-types.workspace = true reth-trie.workspace = true +reth-consensus.workspace = true +reth-payload-validator.workspace = true # bsc-reth reth-bsc-primitives.workspace = true @@ -43,13 +45,14 @@ reth-bsc-consensus.workspace = true alloy-consensus.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true -alloy-eips.workspace = true +alloy-eips = { workspace = true, features = ["kzg"] } alloy-dyn-abi.workspace = true alloy-genesis.workspace = true alloy-network.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-rpc-types.workspace = true +alloy-rpc-types-beacon.workspace = true alloy-rpc-types-eth = { workspace = true, features = ["jsonrpsee-types"] } alloy-rpc-types-debug.workspace = true alloy-rpc-types-trace.workspace = true @@ -104,9 +107,4 @@ jsonrpsee = { workspace = true, features = ["client"] } js-tracer = ["revm-inspectors/js-tracer", "reth-rpc-eth-types/js-tracer"] bsc = [ "reth-rpc-eth-api/bsc", -] -optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-revm/optimism", ] \ No newline at end of file diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index db80da3ec1..02b9c79534 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -1,4 +1,4 @@ -use alloy_eips::eip2718::Encodable2718; +use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; use alloy_rpc_types::{ @@ -18,25 +18,26 @@ use reth_bsc_primitives::system_contracts::is_system_transaction; use reth_chainspec::EthereumHardforks; use reth_evm::{ execute::{BlockExecutorProvider, Executor}, + system_calls::SystemCaller, ConfigureEvmEnv, }; -use reth_primitives::{Block, BlockId, BlockNumberOrTag, TransactionSignedEcRecovered}; +use reth_primitives::{Block, TransactionSignedEcRecovered}; use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, StateProofProvider, - StateProviderFactory, TransactionVariant, + BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, + TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ - helpers::{Call, EthApiSpec, EthTransactions, TraceExt}, - EthApiTypes, FromEthApiError, + helpers::{EthApiSpec, EthTransactions, TraceExt}, + EthApiTypes, FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, StateCacheDb}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_tasks::pool::BlockingTaskGuard; use reth_trie::{HashedPostState, HashedStorage}; use revm::{ - 
db::CacheDB, + db::{CacheDB, State}, primitives::{db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg}, }; use revm_inspectors::tracing::{ @@ -84,7 +85,6 @@ where + HeaderProvider + ChainSpecProvider + StateProviderFactory - + EvmEnvProvider + 'static, Eth: EthApiTypes + TraceExt + 'static, BlockExecutor: BlockExecutorProvider, @@ -95,6 +95,7 @@ where } /// Trace the entire block asynchronously + #[allow(clippy::too_many_arguments)] async fn trace_block( &self, at: BlockId, @@ -103,6 +104,7 @@ where block_env: BlockEnv, opts: GethDebugTracingOptions, parent_timestamp: u64, + parent_beacon_block_root: Option, ) -> Result, Eth::Error> { if transactions.is_empty() { // nothing to trace @@ -116,6 +118,24 @@ where let block_hash = at.as_block_hash(); let mut results = Vec::with_capacity(transactions.len()); let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // apply relevant system calls + SystemCaller::new( + this.eth_api().evm_config().clone(), + this.eth_api().provider().chain_spec(), + ) + .pre_block_beacon_root_contract_call( + &mut db, + &cfg, + &block_env, + parent_beacon_block_root, + ) + .map_err(|_| { + EthApiError::EvmCustom( + "failed to apply 4788 beacon root system call".to_string(), + ) + })?; + let mut transactions = transactions.into_iter().enumerate().peekable(); let is_bsc = this.bsc_trace_helper.is_some(); @@ -130,6 +150,7 @@ where } } + let mut inspector = None; while let Some((index, tx)) = transactions.next() { let tx_hash = tx.hash; @@ -158,7 +179,8 @@ where before_system_tx = false; } - let tx_env = Call::evm_config(this.eth_api()).tx_env(&tx); + let tx_env = + RpcNodeCore::evm_config(this.eth_api()).tx_env(tx.as_signed(), tx.signer()); #[cfg(feature = "bsc")] let tx_env = { let mut tx_env = tx_env; @@ -173,7 +195,7 @@ where handler_cfg: cfg.handler_cfg, }; let (result, state_changes) = this.trace_transaction( - opts.clone(), + &opts, env, &mut db, Some(TransactionContext { @@ -181,8 +203,11 @@ where tx_hash: Some(tx_hash), tx_index: Some(index), }), + &mut inspector, )?; + inspector = inspector.map(|insp| insp.fused()); + results.push(TraceResult::Success { result, tx_hash: Some(tx_hash) }); if transactions.peek().is_some() { // need to apply the state changes of this transaction before executing the @@ -215,11 +240,14 @@ where let parent = block.parent_hash; let parent_timestamp = self .eth_api() - .block(parent.into()) + .block_with_senders(parent.into()) .await? .map(|block| block.timestamp) .ok_or(EthApiError::UnknownParentBlock)?; + // we need the beacon block root for a system call + let parent_beacon_block_root = block.parent_beacon_block_root; + // Depending on EIP-2 we need to recover the transactions differently let transactions = if self.inner.provider.chain_spec().is_homestead_active_at_block(block.number) { @@ -246,7 +274,16 @@ where .collect::, Eth::Error>>()? }; - self.trace_block(parent.into(), transactions, cfg, block_env, opts, parent_timestamp).await + self.trace_block( + parent.into(), + transactions, + cfg, + block_env, + opts, + parent_timestamp, + parent_beacon_block_root, + ) + .await } /// Replays a block and returns the trace of each transaction. @@ -273,18 +310,19 @@ where let state_at = block.parent_hash; let parent_timestamp = self .eth_api() - .block(state_at.into()) + .block_with_senders(state_at.into()) .await? 
.map(|block| block.timestamp) .ok_or(EthApiError::UnknownParentBlock)?; self.trace_block( state_at.into(), - block.into_transactions_ecrecovered().collect(), + (*block).clone().into_transactions_ecrecovered().collect(), cfg, block_env, opts, parent_timestamp, + block.parent_beacon_block_root, ) .await } @@ -307,21 +345,41 @@ where // block the transaction is included in let state_at: BlockId = block.parent_hash.into(); let block_hash = block.hash(); - let block_txs = block.into_transactions_ecrecovered(); let parent_timestamp = self .eth_api() - .block(state_at) + .block_with_senders(block.parent_hash.into()) .await? .map(|block| block.timestamp) .ok_or(EthApiError::UnknownParentBlock)?; + let parent_beacon_block_root = block.parent_beacon_block_root; let this = self.clone(); self.eth_api() .spawn_with_state_at_block(state_at, move |state| { + let block_txs = block.transactions_with_sender(); + // configure env for the target transaction let tx = transaction.into_recovered(); let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // apply relevant system calls + SystemCaller::new( + this.eth_api().evm_config().clone(), + this.eth_api().provider().chain_spec(), + ) + .pre_block_beacon_root_contract_call( + &mut db, + &cfg, + &block_env, + parent_beacon_block_root, + ) + .map_err(|_| { + EthApiError::EvmCustom( + "failed to apply 4788 beacon root system call".to_string(), + ) + })?; + // replay all transactions prior to the targeted transaction let index = this.eth_api().replay_transactions_until( &mut db, @@ -332,7 +390,8 @@ where parent_timestamp, )?; - let tx_env = Call::evm_config(this.eth_api()).tx_env(&tx); + let tx_env = + RpcNodeCore::evm_config(this.eth_api()).tx_env(tx.as_signed(), tx.signer()); #[cfg(feature = "bsc")] let tx_env = { let mut tx_env = tx_env; @@ -348,7 +407,7 @@ where }; this.trace_transaction( - opts, + &opts, env, &mut db, Some(TransactionContext { @@ -356,6 +415,7 @@ where tx_index: Some(index), tx_hash: Some(tx.hash), }), + &mut None, ) .map(|(trace, _)| trace) }) @@ -604,15 +664,15 @@ where if replay_block_txs { // only need to replay the transactions in the block if not all transactions are // to be replayed - let transactions = block.into_transactions_ecrecovered().take(num_txs); + let transactions = block.transactions_with_sender().take(num_txs); // Execute all transactions until index - for tx in transactions { + for (signer, tx) in transactions { let env = EnvWithHandlerCfg { env: Env::boxed( cfg.cfg_env.clone(), block_env.clone(), - Call::evm_config(this.eth_api()).tx_env(&tx), + RpcNodeCore::evm_config(this.eth_api()).tx_env(tx, *signer), ), handler_cfg: cfg.handler_cfg, }; @@ -628,6 +688,7 @@ where let Bundle { transactions, block_override } = bundle; let block_overrides = block_override.map(Box::new); + let mut inspector = None; let mut transactions = transactions.into_iter().peekable(); while let Some(tx) = transactions.next() { @@ -643,8 +704,15 @@ where overrides, )?; - let (trace, state) = - this.trace_transaction(tracing_options.clone(), env, &mut db, None)?; + let (trace, state) = this.trace_transaction( + &tracing_options, + env, + &mut db, + None, + &mut inspector, + )?; + + inspector = inspector.map(|insp| insp.fused()); // If there is more transactions, commit the database // If there is no transactions, but more bundles, commit to the database too @@ -690,13 +758,24 @@ where let _ = block_executor .execute_with_state_closure( - (&block.clone().unseal(), block.difficulty, None).into(), - |statedb| { + (&(*block).clone().unseal(), 
block.difficulty, None).into(),
+                |statedb: &State<_>| {
                     codes = statedb
                         .cache
                         .contracts
                         .iter()
                         .map(|(hash, code)| (*hash, code.original_bytes()))
+                        .chain(
+                            // The cache state does not have all the contracts, especially when
+                            // a contract is created within the block: such contracts only exist
+                            // in the bundle state, therefore we need to include them as well.
+                            statedb
+                                .bundle_state
+                                .contracts
+                                .iter()
+                                .map(|(hash, code)| (*hash, code.original_bytes())),
+                        )
                         .collect();
 
                     for (address, account) in &statedb.cache.accounts {
@@ -729,17 +808,20 @@
             let state = state_provider.witness(Default::default(), hashed_state).map_err(Into::into)?;
-            Ok(ExecutionWitness {
-                state: HashMap::from_iter(state.into_iter()),
-                codes,
-                keys: Some(keys),
-            })
+            Ok(ExecutionWitness { state: state.into_iter().collect(), codes, keys })
         })
         .await
     }
 
     /// Executes the configured transaction with the environment on the given database.
     ///
+    /// It optionally takes a fused inspector ([`TracingInspector::fused`]) to avoid re-creating
+    /// the inspector for each transaction. This is only useful for block tracing, which uses the
+    /// same tracer for all transactions in the block.
+    ///
+    /// Caution: If the inspector is provided then `opts.tracer_config` is ignored.
+    ///
     /// Returns the trace frame and the state that got updated after executing the transaction.
     ///
     /// Note: this does not apply any state overrides if they're configured in the `opts`.
@@ -747,10 +829,11 @@
     /// Caution: this is blocking and should be performed on a blocking task.
     fn trace_transaction(
         &self,
-        opts: GethDebugTracingOptions,
+        opts: &GethDebugTracingOptions,
         env: EnvWithHandlerCfg,
         db: &mut StateCacheDb<'_>,
         transaction_context: Option<TransactionContext>,
+        fused_inspector: &mut Option<TracingInspector>,
     ) -> Result<(GethTrace, revm_primitives::EvmState), Eth::Error> {
         let GethDebugTracingOptions { config, tracer, tracer_config, ..
} = opts; @@ -764,35 +847,42 @@ where } GethDebugBuiltInTracerType::CallTracer => { let call_config = tracer_config + .clone() .into_call_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_geth_call_config(&call_config), - ); + let mut inspector = fused_inspector.get_or_insert_with(|| { + TracingInspector::new(TracingInspectorConfig::from_geth_call_config( + &call_config, + )) + }); let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; + inspector.set_transaction_gas_limit(env.tx.gas_limit); + let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() + .geth_builder() .geth_call_traces(call_config, res.result.gas_used()); return Ok((frame.into(), res.state)) } GethDebugBuiltInTracerType::PreStateTracer => { let prestate_config = tracer_config + .clone() .into_pre_state_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_geth_prestate_config(&prestate_config), - ); + let mut inspector = fused_inspector.get_or_insert_with(|| { + TracingInspector::new( + TracingInspectorConfig::from_geth_prestate_config(&prestate_config), + ) + }); let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; + inspector.set_transaction_gas_limit(env.tx.gas_limit); let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() + .geth_builder() .geth_prestate_traces(&res, &prestate_config, db) .map_err(Eth::Error::from_eth_err)?; @@ -803,6 +893,7 @@ where } GethDebugBuiltInTracerType::MuxTracer => { let mux_config = tracer_config + .clone() .into_mux_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; @@ -817,6 +908,7 @@ where } GethDebugBuiltInTracerType::FlatCallTracer => { let flat_call_config = tracer_config + .clone() .into_flat_call_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; @@ -847,10 +939,10 @@ where } #[cfg(feature = "js-tracer")] GethDebugTracerType::JsTracer(code) => { - let config = tracer_config.into_json(); + let config = tracer_config.clone().into_json(); let mut inspector = revm_inspectors::tracing::js::JsInspector::with_transaction_context( - code, + code.clone(), config, transaction_context.unwrap_or_default(), ) @@ -866,17 +958,15 @@ where } // default structlog tracer - let inspector_config = TracingInspectorConfig::from_geth_config(&config); - - let mut inspector = TracingInspector::new(inspector_config); - + let mut inspector = fused_inspector.get_or_insert_with(|| { + let inspector_config = TracingInspectorConfig::from_geth_config(config); + TracingInspector::new(inspector_config) + }); let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; let gas_used = res.result.gas_used(); let return_value = res.result.into_output().unwrap_or_default(); - let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() - .geth_traces(gas_used, return_value, config); + inspector.set_transaction_gas_limit(env.tx.gas_limit); + let frame = inspector.geth_builder().geth_traces(gas_used, return_value, *config); Ok((frame.into(), res.state)) } @@ -889,7 +979,6 @@ where + HeaderProvider + ChainSpecProvider + StateProviderFactory - + EvmEnvProvider + 'static, Eth: EthApiSpec + EthTransactions + TraceExt + 'static, BlockExecutor: BlockExecutorProvider, @@ -960,7 +1049,7 @@ where .to_rpc_result()? 
.unwrap_or_default()
             .into_iter()
-            .map(|receipt| receipt.with_bloom().envelope_encoded())
+            .map(|receipt| receipt.with_bloom().encoded_2718().into())
             .collect())
     }
 
@@ -1026,15 +1115,6 @@ where
             .map_err(Into::into)
     }
 
-    /// Handler for `debug_executionWitness`
-    async fn debug_execution_witness(
-        &self,
-        block: BlockNumberOrTag,
-    ) -> RpcResult<ExecutionWitness> {
-        let _permit = self.acquire_trace_permit().await;
-        Self::debug_execution_witness(self, block).await.map_err(Into::into)
-    }
-
     /// Handler for `debug_traceCall`
     async fn debug_trace_call(
         &self,
@@ -1058,6 +1138,15 @@ where
         Self::debug_trace_call_many(self, bundles, state_context, opts).await.map_err(Into::into)
     }
 
+    /// Handler for `debug_executionWitness`
+    async fn debug_execution_witness(
+        &self,
+        block: BlockNumberOrTag,
+    ) -> RpcResult<ExecutionWitness> {
+        let _permit = self.acquire_trace_permit().await;
+        Self::debug_execution_witness(self, block).await.map_err(Into::into)
+    }
+
     async fn debug_backtrace_at(&self, _location: &str) -> RpcResult<()> {
         Ok(())
     }
diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs
index 928e2050a5..ac4de7c74e 100644
--- a/crates/rpc/rpc/src/engine.rs
+++ b/crates/rpc/rpc/src/engine.rs
@@ -1,3 +1,4 @@
+use alloy_eips::{BlockId, BlockNumberOrTag};
 use alloy_primitives::{Address, Bytes, B256, U256, U64};
 use alloy_rpc_types::{
     state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus,
@@ -5,7 +6,6 @@
 use alloy_rpc_types_eth::transaction::TransactionRequest;
 use alloy_serde::JsonStorageKey;
 use jsonrpsee::core::RpcResult as Result;
-use reth_primitives::{BlockId, BlockNumberOrTag};
 use reth_rpc_api::{EngineEthApiServer, EthApiServer, EthFilterApiServer};
 /// Re-export for convenience
 pub use reth_rpc_engine_api::EngineApi;
diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs
index e97497786e..ec1a43c754 100644
--- a/crates/rpc/rpc/src/eth/bundle.rs
+++ b/crates/rpc/rpc/src/eth/bundle.rs
@@ -12,7 +12,7 @@ use reth_primitives::{
     PooledTransactionsElement,
 };
 use reth_revm::database::StateProviderDatabase;
-use reth_rpc_eth_api::{FromEthApiError, FromEvmError};
+use reth_rpc_eth_api::{FromEthApiError, FromEvmError, RpcNodeCore};
 use reth_tasks::pool::BlockingTaskGuard;
 use revm::{
     db::CacheDB,
@@ -130,12 +130,12 @@ where
         } else if cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) {
             let parent_block = block_env.number.saturating_to::<u64>();
             // here we need to fetch the _next_ block's basefee based on the parent block
-            let parent = LoadPendingBlock::provider(self.eth_api())
+            let parent = RpcNodeCore::provider(self.eth_api())
                 .header_by_number(parent_block)
                 .map_err(Eth::Error::from_eth_err)?
.ok_or(EthApiError::HeaderNotFound(parent_block.into()))?;
             if let Some(base_fee) = parent.next_block_base_fee(
-                LoadPendingBlock::provider(self.eth_api())
+                RpcNodeCore::provider(self.eth_api())
                     .chain_spec()
                     .base_fee_params_at_block(parent_block),
             ) {
@@ -166,7 +166,7 @@
         let mut total_gas_fees = U256::ZERO;
         let mut hasher = Keccak256::new();
 
-        let mut evm = Call::evm_config(&eth_api).evm_with_env(db, env);
+        let mut evm = RpcNodeCore::evm_config(&eth_api).evm_with_env(db, env);
 
         let mut results = Vec::with_capacity(transactions.len());
         let mut transactions = transactions.into_iter().peekable();
@@ -187,7 +187,7 @@
                     .effective_tip_per_gas(basefee)
                     .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow)
                     .map_err(Eth::Error::from_eth_err)?;
-                Call::evm_config(&eth_api).fill_tx_env(evm.tx_mut(), &tx, signer);
+                RpcNodeCore::evm_config(&eth_api).fill_tx_env(evm.tx_mut(), &tx, signer);
 
                 let ResultAndState { result, state } =
                     evm.transact().map_err(Eth::Error::from_evm_err)?;
diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs
index 16fdc502e1..1b8c0e4b98 100644
--- a/crates/rpc/rpc/src/eth/core.rs
+++ b/crates/rpc/rpc/src/eth/core.rs
@@ -4,16 +4,16 @@
 use std::sync::Arc;
 
 use crate::eth::EthTxBuilder;
-use alloy_network::AnyNetwork;
+use alloy_eips::BlockNumberOrTag;
+use alloy_network::Ethereum;
 use alloy_primitives::U256;
 use derive_more::Deref;
 use reth_bsc_consensus::BscTraceHelper;
-use reth_node_api::{BuilderProvider, FullNodeComponents};
-use reth_primitives::BlockNumberOrTag;
 use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider};
 use reth_rpc_eth_api::{
     helpers::{EthSigner, SpawnBlocking},
-    EthApiTypes,
+    node::RpcNodeCoreExt,
+    EthApiTypes, RpcNodeCore,
 };
 use reth_rpc_eth_types::{
     EthApiBuilderCtx, EthApiError, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle,
@@ -21,7 +21,7 @@
 };
 use reth_tasks::{
     pool::{BlockingTaskGuard, BlockingTaskPool},
-    TaskExecutor, TaskSpawner, TokioTaskExecutor,
+    TaskSpawner, TokioTaskExecutor,
 };
 use tokio::sync::Mutex;
 
@@ -40,11 +40,17 @@ pub struct EthApi<Provider, Pool, Network, EvmConfig> {
     #[deref]
     pub(super) inner: Arc<EthApiInner<Provider, Pool, Network, EvmConfig>>,
     pub(super) bsc_trace_helper: Option<BscTraceHelper>,
+    /// Transaction RPC response builder.
+    pub tx_resp_builder: EthTxBuilder,
 }
 
 impl<Provider, Pool, Network, EvmConfig> Clone for EthApi<Provider, Pool, Network, EvmConfig> {
     fn clone(&self) -> Self {
-        Self { inner: self.inner.clone(), bsc_trace_helper: self.bsc_trace_helper.clone() }
+        Self {
+            inner: self.inner.clone(),
+            bsc_trace_helper: self.bsc_trace_helper.clone(),
+            tx_resp_builder: EthTxBuilder,
+        }
     }
 }
 
@@ -85,7 +91,7 @@ where
             proof_permits,
         );
 
-        Self { inner: Arc::new(inner), bsc_trace_helper }
+        Self { inner: Arc::new(inner), bsc_trace_helper, tx_resp_builder: EthTxBuilder }
     }
 }
 
@@ -98,7 +104,7 @@ where
 {
     /// Creates a new, shareable instance.
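+    /// # Example
+    ///
+    /// A minimal sketch (editor's illustration, not part of the original patch): given a fully
+    /// populated builder context `ctx`, the resulting handle is cheap to clone because all
+    /// shared state lives behind an `Arc`:
+    ///
+    /// ```ignore
+    /// let eth_api = EthApi::with_spawner(&ctx);
+    /// let shared = eth_api.clone();
+    /// ```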
pub fn with_spawner( - ctx: &EthApiBuilderCtx, + ctx: &EthApiBuilderCtx, ) -> Self where Tasks: TaskSpawner + Clone + 'static, @@ -123,7 +129,11 @@ where ctx.config.proof_permits, ); - Self { inner: Arc::new(inner), bsc_trace_helper: ctx.bsc_trace_helper.clone() } + Self { + inner: Arc::new(inner), + bsc_trace_helper: ctx.bsc_trace_helper.clone(), + tx_resp_builder: EthTxBuilder, + } } } @@ -132,9 +142,52 @@ where Self: Send + Sync, { type Error = EthApiError; - // todo: replace with alloy_network::Ethereum - type NetworkTypes = AnyNetwork; + type NetworkTypes = Ethereum; type TransactionCompat = EthTxBuilder; + + fn tx_resp_builder(&self) -> &Self::TransactionCompat { + &self.tx_resp_builder + } +} + +impl RpcNodeCore for EthApi +where + Provider: Send + Sync + Clone + Unpin, + Pool: Send + Sync + Clone + Unpin, + Network: Send + Sync + Clone, + EvmConfig: Send + Sync + Clone + Unpin, +{ + type Provider = Provider; + type Pool = Pool; + type Evm = EvmConfig; + type Network = Network; + + fn pool(&self) -> &Self::Pool { + self.inner.pool() + } + + fn evm_config(&self) -> &Self::Evm { + self.inner.evm_config() + } + + fn network(&self) -> &Self::Network { + self.inner.network() + } + + fn provider(&self) -> &Self::Provider { + self.inner.provider() + } +} + +impl RpcNodeCoreExt + for EthApi +where + Self: RpcNodeCore, +{ + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } } impl std::fmt::Debug @@ -166,25 +219,6 @@ where } } -impl BuilderProvider for EthApi -where - N: FullNodeComponents, -{ - type Ctx<'a> = &'a EthApiBuilderCtx< - N::Provider, - N::Pool, - N::Evm, - N::Network, - TaskExecutor, - N::Provider, - Self, - >; - - fn builder() -> Box Fn(Self::Ctx<'a>) -> Self + Send> { - Box::new(Self::with_spawner) - } -} - /// Container type `EthApi` #[allow(missing_debug_implementations)] pub struct EthApiInner { @@ -376,13 +410,14 @@ impl EthApiInner { /// All nested fields bundled together inner: Arc>>, /// Assembles response data w.r.t. network. 
- _tx_resp_builder: PhantomData, + tx_resp_builder: Eth::TransactionCompat, } impl Clone for EthFilter @@ -52,7 +52,7 @@ where Eth: EthApiTypes, { fn clone(&self) -> Self { - Self { inner: self.inner.clone(), _tx_resp_builder: PhantomData } + Self { inner: self.inner.clone(), tx_resp_builder: self.tx_resp_builder.clone() } } } @@ -76,6 +76,7 @@ where eth_cache: EthStateCache, config: EthFilterConfig, task_spawner: Box, + tx_resp_builder: Eth::TransactionCompat, ) -> Self { let EthFilterConfig { max_blocks_per_filter, max_logs_per_response, stale_filter_ttl } = config; @@ -93,7 +94,7 @@ where max_logs_per_response: max_logs_per_response.unwrap_or(usize::MAX), }; - let eth_filter = Self { inner: Arc::new(inner), _tx_resp_builder: PhantomData }; + let eth_filter = Self { inner: Arc::new(inner), tx_resp_builder }; let this = eth_filter.clone(); eth_filter.inner.task_spawner.spawn_critical( @@ -144,8 +145,7 @@ where impl EthFilter where Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, - Pool: TransactionPool + 'static, - ::Transaction: 'static, + Pool: TransactionPool + 'static, Eth: FullEthApiTypes, { /// Returns all the filter changes for the given id, if any @@ -278,7 +278,7 @@ where PendingTransactionFilterKind::Full => { let stream = self.inner.pool.new_pending_pool_transactions_listener(); let full_txs_receiver = - FullTransactionsReceiver::<_, Eth::TransactionCompat>::new(stream); + FullTransactionsReceiver::new(stream, self.tx_resp_builder.clone()); FilterKind::PendingTransaction(PendingTransactionKind::FullTransaction(Arc::new( full_txs_receiver, ))) @@ -534,7 +534,8 @@ where &self, block_num_hash: &BlockNumHash, best_number: u64, - ) -> Result>, Option)>, EthFilterError> { + ) -> Result>, Option>)>, EthFilterError> + { // The last 4 blocks are most likely cached, so we can just fetch them let cached_range = best_number.saturating_sub(4)..=best_number; let receipts_block = if cached_range.contains(&block_num_hash.number) { @@ -602,7 +603,7 @@ impl PendingTransactionsReceiver { #[derive(Debug, Clone)] struct FullTransactionsReceiver { txs_stream: Arc>>, - _tx_resp_builder: PhantomData, + tx_resp_builder: TxCompat, } impl FullTransactionsReceiver @@ -611,8 +612,8 @@ where TxCompat: TransactionCompat, { /// Creates a new `FullTransactionsReceiver` encapsulating the provided transaction stream. - fn new(stream: NewSubpoolTransactionStream) -> Self { - Self { txs_stream: Arc::new(Mutex::new(stream)), _tx_resp_builder: PhantomData } + fn new(stream: NewSubpoolTransactionStream, tx_resp_builder: TxCompat) -> Self { + Self { txs_stream: Arc::new(Mutex::new(stream)), tx_resp_builder } } /// Returns all new pending transactions received since the last poll. @@ -624,7 +625,10 @@ where let mut prepared_stream = self.txs_stream.lock().await; while let Ok(tx) = prepared_stream.try_recv() { - pending_txs.push(from_recovered::(tx.transaction.to_recovered_transaction())) + pending_txs.push(from_recovered( + tx.transaction.to_recovered_transaction(), + &self.tx_resp_builder, + )) } FilterChanges::Transactions(pending_txs) } diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index b2ff30b88f..1e2d1802e0 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -1,13 +1,13 @@ //! Contains RPC handler implementations specific to blocks. 
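+//!
+//! A receipt response is assembled per transaction roughly as follows (editor's sketch, not
+//! part of the original patch; `tx`, `meta`, `receipt`, and `receipts` are assumed to be in
+//! scope, as in `block_receipts` below):
+//!
+//! ```ignore
+//! let resp = EthReceiptBuilder::new(&tx, meta, receipt, &receipts)?.build();
+//! ```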
-use alloy_rpc_types::{AnyTransactionReceipt, BlockId}; +use alloy_rpc_types::{BlockId, TransactionReceipt}; use reth_primitives::TransactionMeta; use reth_provider::{BlockReaderIdExt, HeaderProvider}; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, RpcReceipt, }; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; use crate::EthApi; @@ -15,15 +15,10 @@ impl EthBlocks for EthApi, + NetworkTypes: alloy_network::Network, + Provider: HeaderProvider, >, - Provider: HeaderProvider, { - #[inline] - fn provider(&self) -> impl HeaderProvider { - self.inner.provider() - } - async fn block_receipts( &self, block_id: BlockId, @@ -55,8 +50,7 @@ where excess_blob_gas, timestamp, }; - - ReceiptBuilder::new(&tx, meta, receipt, &receipts) + EthReceiptBuilder::new(&tx, meta, receipt, &receipts) .map(|builder| builder.build()) }) .collect::, Self::Error>>() @@ -72,13 +66,4 @@ where Self: LoadPendingBlock + SpawnBlocking, Provider: BlockReaderIdExt, { - #[inline] - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } } diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index 161a94642c..967854b762 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,17 +1,19 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm. use crate::EthApi; -use alloy_primitives::B256; +use alloy_primitives::{Address, B256}; use reth_bsc_primitives::system_contracts::is_system_transaction; -use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{Header, TransactionSignedEcRecovered}; +use reth_evm::ConfigureEvm; +use reth_primitives::{Header, TransactionSigned}; use reth_rpc_eth_api::{ helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, FromEvmError, }; use reth_rpc_eth_types::EthApiError; -use revm::db::CacheDB; -use revm_primitives::{db::DatabaseRef, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg}; +use revm_primitives::{ + db::{Database, DatabaseCommit}, + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, +}; impl EthCall for EthApi where Self: Call + LoadPendingBlock @@ -20,7 +22,7 @@ impl EthCall for EthApi Call for EthApi where - Self: LoadState + SpawnBlocking, + Self: LoadState> + SpawnBlocking, EvmConfig: ConfigureEvm
, { #[inline] @@ -33,24 +35,20 @@ where self.inner.max_simulate_blocks() } - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } - /// Replays all the transactions until the target transaction is found. - fn replay_transactions_until( + fn replay_transactions_until<'a, DB, I>( &self, - db: &mut CacheDB, + db: &mut DB, cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, - transactions: impl IntoIterator, + transactions: I, target_tx_hash: B256, parent_timestamp: u64, ) -> Result where - DB: DatabaseRef, + DB: Database + DatabaseCommit, EthApiError: From, + I: IntoIterator, { #[allow(clippy::redundant_clone)] let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env.clone(), Default::default()); @@ -70,12 +68,10 @@ where } } - for tx in transactions { + for (sender, tx) in transactions { // check if the transaction is a system transaction // this should be done before return - if is_bsc && - before_system_tx && - is_system_transaction(&tx, tx.signer(), block_env.coinbase) + if is_bsc && before_system_tx && is_system_transaction(tx, *sender, block_env.coinbase) { if let Some(trace_helper) = self.bsc_trace_helper.as_ref() { // move block reward from the system address to the coinbase @@ -98,8 +94,7 @@ where break } - let sender = tx.signer(); - self.evm_config().fill_tx_env(evm.tx_mut(), &tx.into_signed(), sender); + self.evm_config().fill_tx_env(evm.tx_mut(), tx, *sender); #[cfg(feature = "bsc")] if !before_system_tx { diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs index a792f72895..e1a17ef647 100644 --- a/crates/rpc/rpc/src/eth/helpers/fees.rs +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -1,10 +1,9 @@ //! Contains RPC handler implementations for fee history. -use reth_chainspec::EthereumHardforks; -use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; - +use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; -use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; +use reth_rpc_eth_types::{FeeHistoryCache, GasPriceOracle}; use crate::EthApi; @@ -15,23 +14,14 @@ impl EthFees for EthApi LoadFee for EthApi where - Self: LoadBlock, - Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider, + Self: LoadBlock, + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, { #[inline] - fn provider( - &self, - ) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn gas_oracle(&self) -> &GasPriceOracle { + fn gas_oracle(&self) -> &GasPriceOracle { self.inner.gas_oracle() } diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 69d55f58bf..6b28947df3 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -1,10 +1,13 @@ //! Support for building a pending block with transactions from local view of mempool. 
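+//!
+//! The locally built pending block is cached behind a `tokio` mutex. A sketch of how a caller
+//! can check the cache (editor's illustration, not part of the original patch; `eth_api` is
+//! assumed to implement `LoadPendingBlock`):
+//!
+//! ```ignore
+//! let cached = eth_api.pending_block().lock().await;
+//! let is_built = cached.is_some();
+//! ```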
-use reth_chainspec::EthereumHardforks; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_primitives::Header; use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; -use reth_rpc_eth_api::helpers::{LoadPendingBlock, SpawnBlocking}; +use reth_rpc_eth_api::{ + helpers::{LoadPendingBlock, SpawnBlocking}, + RpcNodeCore, +}; use reth_rpc_eth_types::PendingBlock; use reth_transaction_pool::TransactionPool; @@ -13,36 +16,18 @@ use crate::EthApi; impl<Provider, Pool, Network, EvmConfig> LoadPendingBlock for EthApi<Provider, Pool, Network, EvmConfig> where - Self: SpawnBlocking, - Provider: BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider<ChainSpec: EthereumHardforks> - + StateProviderFactory, - Pool: TransactionPool, - EvmConfig: ConfigureEvm<Header = Header>, + Self: SpawnBlocking + + RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider<ChainSpec: EthChainSpec + EthereumHardforks> + + StateProviderFactory, + Pool: TransactionPool, + Evm: ConfigureEvm<Header = Header>, + >, { - #[inline] - fn provider( - &self, - ) -> impl BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider<ChainSpec: EthereumHardforks> - + StateProviderFactory { - self.inner.provider() - } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } - #[inline] fn pending_block(&self) -> &tokio::sync::Mutex<Option<PendingBlock>> { self.inner.pending_block() } - - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>
{ - self.inner.evm_config() - } } diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 2ac3609449..594cffd09f 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,20 +1,15 @@ //! Builds an RPC receipt response w.r.t. data layout of network. use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; +use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; use crate::EthApi; impl LoadReceipt for EthApi where - Self: Send + Sync, + Self: RpcNodeCoreExt, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - async fn build_transaction_receipt( &self, tx: TransactionSigned, @@ -30,6 +25,6 @@ where .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(hash.into()))?; - Ok(ReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) + Ok(EthReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) } } diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index a5818aa494..c6c6031273 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -6,11 +6,11 @@ use crate::EthApi; use alloy_dyn_abi::TypedData; use alloy_eips::eip2718::Decodable2718; use alloy_network::{eip2718::Encodable2718, EthereumWallet, TransactionBuilder}; -use alloy_primitives::{eip191_hash_message, Address, B256}; +use alloy_primitives::{eip191_hash_message, Address, Signature, B256}; use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_rpc_eth_api::helpers::{signer::Result, AddDevSigners, EthSigner}; use reth_rpc_eth_types::SignError; @@ -40,7 +40,7 @@ impl DevSigner { /// Generates provided number of random dev signers /// which satisfy [`EthSigner`] trait pub fn random_signers(num: u32) -> Vec> { - let mut signers = Vec::new(); + let mut signers = Vec::with_capacity(num as usize); for _ in 0..num { let sk = PrivateKeySigner::random_with(&mut rand::thread_rng()); diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index 92445bf5ed..a44692e18a 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -2,32 +2,19 @@ use alloy_primitives::U256; use reth_chainspec::EthereumHardforks; use reth_network_api::NetworkInfo; use reth_provider::{BlockNumReader, ChainSpecProvider, StageCheckpointReader}; -use reth_rpc_eth_api::helpers::EthApiSpec; -use reth_transaction_pool::TransactionPool; +use reth_rpc_eth_api::{helpers::EthApiSpec, RpcNodeCore}; use crate::EthApi; impl EthApiSpec for EthApi where - Pool: TransactionPool + 'static, - Provider: ChainSpecProvider - + BlockNumReader - + StageCheckpointReader - + 'static, - Network: NetworkInfo + 'static, - EvmConfig: Send + Sync, + Self: RpcNodeCore< + Provider: ChainSpecProvider + + BlockNumReader + + StageCheckpointReader, + Network: NetworkInfo, + >, { - fn provider( - &self, - ) -> impl ChainSpecProvider + BlockNumReader + StageCheckpointReader - { - self.inner.provider() - } - - fn network(&self) -> impl NetworkInfo { - self.inner.network() - } - 
fn starting_block(&self) -> U256 { self.inner.starting_block() } diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 4a4ee948b3..02f9751030 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -4,8 +4,10 @@ use reth_chainspec::EthereumHardforks; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_transaction_pool::TransactionPool; -use reth_rpc_eth_api::helpers::{EthState, LoadState, SpawnBlocking}; -use reth_rpc_eth_types::EthStateCache; +use reth_rpc_eth_api::{ + helpers::{EthState, LoadState, SpawnBlocking}, + RpcNodeCore, +}; use crate::EthApi; @@ -18,38 +20,22 @@ where } } -impl LoadState for EthApi -where - Self: Send + Sync, - Provider: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, +impl LoadState for EthApi where + Self: RpcNodeCore< + Provider: StateProviderFactory + ChainSpecProvider, + Pool: TransactionPool, + > { - #[inline] - fn provider( - &self, - ) -> impl StateProviderFactory + ChainSpecProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } } #[cfg(test)] mod tests { use super::*; + use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{Address, StorageKey, StorageValue, U256}; use reth_chainspec::MAINNET; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; use reth_rpc_eth_api::helpers::EthState; use reth_rpc_eth_types::{ diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index c40b7acf50..b270ed1b2a 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -6,13 +6,7 @@ use reth_rpc_eth_api::helpers::{LoadState, Trace}; use crate::EthApi; -impl Trace for EthApi -where - Self: LoadState, - EvmConfig: ConfigureEvm
<Header = Header>, +impl<Provider, Pool, Network, EvmConfig> Trace for EthApi<Provider, Pool, Network, EvmConfig> where + Self: LoadState<Evm: ConfigureEvm<Header = Header>> { - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>
{ - self.inner.evm_config() - } } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs index fd7ccd1b7c..f2c5cfbeba 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -3,9 +3,8 @@ use reth_provider::{BlockReaderIdExt, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FullEthApiTypes, + FullEthApiTypes, RpcNodeCore, }; -use reth_rpc_eth_types::EthStateCache; use reth_transaction_pool::TransactionPool; use crate::EthApi; @@ -13,15 +12,8 @@ impl<Provider, Pool, Network, EvmConfig> EthTransactions for EthApi<Provider, Pool, Network, EvmConfig> where - Self: LoadTransaction, - Pool: TransactionPool + 'static, - Provider: BlockReaderIdExt, + Self: LoadTransaction<Provider: BlockReaderIdExt>, { - #[inline] - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - #[inline] fn signers(&self) -> &parking_lot::RwLock<Vec<Box<dyn EthSigner>>> { self.inner.signers() @@ -31,35 +23,19 @@ where impl<Provider, Pool, Network, EvmConfig> LoadTransaction for EthApi<Provider, Pool, Network, EvmConfig> where - Self: SpawnBlocking + FullEthApiTypes, - Provider: TransactionsProvider, - Pool: TransactionPool, + Self: SpawnBlocking + + FullEthApiTypes + + RpcNodeCore<Provider: TransactionsProvider, Pool: TransactionPool>, { - type Pool = Pool; - - #[inline] - fn provider(&self) -> impl TransactionsProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn pool(&self) -> &Self::Pool { - self.inner.pool() - } } #[cfg(test)] mod tests { + use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{hex_literal::hex, Bytes}; use reth_chainspec::ChainSpecProvider; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use reth_provider::test_utils::NoopProvider; use reth_rpc_eth_api::helpers::EthTransactions; use reth_rpc_eth_types::{ diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs index d2b9c268e2..0998c057e2 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -1,9 +1,9 @@ //! L1 `eth` API types.
-use alloy_network::{AnyNetwork, Network}; +use alloy_consensus::Transaction as _; +use alloy_network::{Ethereum, Network}; use alloy_primitives::{Address, TxKind}; use alloy_rpc_types::{Transaction, TransactionInfo}; -use alloy_serde::WithOtherFields; use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_types_compat::{ transaction::{from_primitive_signature, GasPrice}, @@ -18,9 +18,13 @@ impl TransactionCompat for EthTxBuilder where Self: Send + Sync, { - type Transaction = ::TransactionResponse; + type Transaction = ::TransactionResponse; - fn fill(tx: TransactionSignedEcRecovered, tx_info: TransactionInfo) -> Self::Transaction { + fn fill( + &self, + tx: TransactionSignedEcRecovered, + tx_info: TransactionInfo, + ) -> Self::Transaction { let signer = tx.signer(); let signed_tx = tx.into_signed(); @@ -36,8 +40,9 @@ where let GasPrice { gas_price, max_fee_per_gas } = Self::gas_price(&signed_tx, base_fee.map(|fee| fee as u64)); + let input = signed_tx.input().to_vec().into(); let chain_id = signed_tx.chain_id(); - let blob_versioned_hashes = signed_tx.blob_versioned_hashes(); + let blob_versioned_hashes = signed_tx.blob_versioned_hashes().map(|hs| hs.to_vec()); let access_list = signed_tx.access_list().cloned(); let authorization_list = signed_tx.authorization_list().map(|l| l.to_vec()); @@ -47,41 +52,38 @@ where signed_tx.chain_id(), ); - WithOtherFields { - inner: Transaction { - hash: signed_tx.hash(), - nonce: signed_tx.nonce(), - from: signer, - to, - value: signed_tx.value(), - gas_price, - max_fee_per_gas, - max_priority_fee_per_gas: signed_tx.max_priority_fee_per_gas(), - signature: Some(signature), - gas: signed_tx.gas_limit(), - input: signed_tx.input().clone(), - chain_id, - access_list, - transaction_type: Some(signed_tx.tx_type() as u8), - // These fields are set to None because they are not stored as part of the - // transaction - block_hash, - block_number, - transaction_index, - // EIP-4844 fields - max_fee_per_blob_gas: signed_tx.max_fee_per_blob_gas(), - blob_versioned_hashes, - authorization_list, - }, - ..Default::default() + Transaction { + hash: signed_tx.hash(), + nonce: signed_tx.nonce(), + from: signer, + to, + value: signed_tx.value(), + gas_price, + max_fee_per_gas, + max_priority_fee_per_gas: signed_tx.max_priority_fee_per_gas(), + signature: Some(signature), + gas: signed_tx.gas_limit(), + input, + chain_id, + access_list, + transaction_type: Some(signed_tx.tx_type() as u8), + // These fields are set to None because they are not stored as part of the + // transaction + block_hash, + block_number, + transaction_index, + // EIP-4844 fields + max_fee_per_blob_gas: signed_tx.max_fee_per_blob_gas(), + blob_versioned_hashes, + authorization_list, } } fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { - tx.inner.input = tx.inner.input.slice(..4); + tx.input = tx.input.slice(..4); } fn tx_type(tx: &Self::Transaction) -> u8 { - tx.inner.transaction_type.unwrap_or(0) + tx.transaction_type.unwrap_or(0) } } diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 99919110da..4d1833add3 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -15,4 +15,4 @@ pub use pubsub::EthPubSub; pub use helpers::{signer::DevSigner, types::EthTxBuilder}; -pub use reth_rpc_eth_api::EthApiServer; +pub use reth_rpc_eth_api::{EthApiServer, EthApiTypes, FullEthApiServer, RpcNodeCore}; diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 7bd1fd03d3..663ec0b99d 100644 --- 
a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -1,6 +1,6 @@ //! `eth_` `PubSub` RPC handler implementation -use std::{marker::PhantomData, sync::Arc}; +use std::sync::Arc; use alloy_primitives::TxHash; use alloy_rpc_types::{ @@ -8,16 +8,15 @@ use alloy_rpc_types::{ Params, PubSubSyncStatus, SubscriptionKind, SubscriptionResult as EthSubscriptionResult, SyncStatusMetadata, }, - FilteredParams, Header, Log, Transaction, + FilteredParams, Header, Log, }; -use alloy_serde::WithOtherFields; use futures::StreamExt; use jsonrpsee::{ server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, SubscriptionSink, }; use reth_network_api::NetworkInfo; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; -use reth_rpc_eth_api::{pubsub::EthPubSubApiServer, FullEthApiTypes, RpcTransaction}; +use reth_rpc_eth_api::{pubsub::EthPubSubApiServer, TransactionCompat}; use reth_rpc_eth_types::logs_utils; use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err}; use reth_rpc_types_compat::transaction::from_recovered; @@ -38,7 +37,7 @@ pub struct EthPubSub { inner: Arc>, /// The type that's used to spawn subscription tasks. subscription_task_spawner: Box, - _tx_resp_builder: PhantomData, + tx_resp_builder: Eth, } // === impl EthPubSub === @@ -47,13 +46,20 @@ impl EthPubSub Self { + pub fn new( + provider: Provider, + pool: Pool, + chain_events: Events, + network: Network, + tx_resp_builder: Eth, + ) -> Self { Self::with_spawner( provider, pool, chain_events, network, Box::::default(), + tx_resp_builder, ) } @@ -64,21 +70,22 @@ impl EthPubSub, + tx_resp_builder: Eth, ) -> Self { let inner = EthPubSubInner { provider, pool, chain_events, network }; - Self { inner: Arc::new(inner), subscription_task_spawner, _tx_resp_builder: PhantomData } + Self { inner: Arc::new(inner), subscription_task_spawner, tx_resp_builder } } } #[async_trait::async_trait] -impl EthPubSubApiServer> +impl EthPubSubApiServer for EthPubSub where Provider: BlockReader + EvmEnvProvider + Clone + 'static, Pool: TransactionPool + 'static, Events: CanonStateSubscriptions + Clone + 'static, Network: NetworkInfo + Clone + 'static, - Eth: FullEthApiTypes + 'static, + Eth: TransactionCompat + 'static, { /// Handler for `eth_subscribe` async fn subscribe( @@ -89,8 +96,9 @@ where ) -> jsonrpsee::core::SubscriptionResult { let sink = pending.accept().await?; let pubsub = self.inner.clone(); + let resp_builder = self.tx_resp_builder.clone(); self.subscription_task_spawner.spawn(Box::pin(async move { - let _ = handle_accepted::<_, _, _, _, Eth>(pubsub, sink, kind, params).await; + let _ = handle_accepted(pubsub, sink, kind, params, resp_builder).await; })); Ok(()) @@ -103,21 +111,20 @@ async fn handle_accepted( accepted_sink: SubscriptionSink, kind: SubscriptionKind, params: Option, + tx_resp_builder: Eth, ) -> Result<(), ErrorObject<'static>> where Provider: BlockReader + EvmEnvProvider + Clone + 'static, Pool: TransactionPool + 'static, Events: CanonStateSubscriptions + Clone + 'static, Network: NetworkInfo + Clone + 'static, - Eth: FullEthApiTypes, + Eth: TransactionCompat, { match kind { SubscriptionKind::NewHeads => { - let stream = pubsub.new_headers_stream().map(|header| { - EthSubscriptionResult::>::Header(Box::new( - header.into(), - )) - }); + let stream = pubsub + .new_headers_stream() + .map(|header| EthSubscriptionResult::<()>::Header(Box::new(header.into()))); pipe_from_stream(accepted_sink, stream).await } SubscriptionKind::Logs => { @@ -129,9 +136,9 @@ where 
} _ => FilteredParams::default(), }; - let stream = pubsub.log_stream(filter).map(|log| { - EthSubscriptionResult::>::Log(Box::new(log)) - }); + let stream = pubsub + .log_stream(filter) + .map(|log| EthSubscriptionResult::<()>::Log(Box::new(log))); pipe_from_stream(accepted_sink, stream).await } SubscriptionKind::NewPendingTransactions => { @@ -140,10 +147,9 @@ where Params::Bool(true) => { // full transaction objects requested let stream = pubsub.full_pending_transaction_stream().map(|tx| { - EthSubscriptionResult::FullTransaction(Box::new(from_recovered::< - Eth::TransactionCompat, - >( + EthSubscriptionResult::FullTransaction(Box::new(from_recovered( tx.transaction.to_recovered_transaction(), + &tx_resp_builder, ))) }); return pipe_from_stream(accepted_sink, stream).await @@ -161,7 +167,7 @@ where let stream = pubsub .pending_transaction_hashes_stream() - .map(EthSubscriptionResult::>::TransactionHash); + .map(EthSubscriptionResult::<()>::TransactionHash); pipe_from_stream(accepted_sink, stream).await } SubscriptionKind::Syncing => { diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index eec14981bf..76fb96f916 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -42,7 +42,9 @@ mod reth; mod rpc; mod trace; mod txpool; +mod validation; mod web3; + pub use admin::AdminApi; pub use debug::DebugApi; pub use engine::{EngineApi, EngineEthApi}; @@ -53,4 +55,5 @@ pub use reth::RethApi; pub use rpc::RPCApi; pub use trace::TraceApi; pub use txpool::TxPoolApi; +pub use validation::{ValidationApi, ValidationApiConfig}; pub use web3::Web3Api; diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 31db343a10..54ddaaaad5 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,4 +1,5 @@ use alloy_consensus::Transaction; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_network::{ReceiptResponse, TransactionResponse}; use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; use alloy_rpc_types::{BlockTransactions, Header, TransactionReceipt}; @@ -11,7 +12,6 @@ use alloy_rpc_types_trace::{ }; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, types::ErrorObjectOwned}; -use reth_primitives::{BlockId, BlockNumberOrTag}; use reth_rpc_api::{EthApiServer, OtterscanServer}; use reth_rpc_eth_api::{ helpers::{EthTransactions, TraceExt}, @@ -227,7 +227,8 @@ where *transactions = transactions.drain(page_start..page_end).collect::>(); // The input field returns only the 4 bytes method selector instead of the entire - // calldata byte blob. 
+ // calldata byte blob + // See also: for tx in transactions.iter_mut() { if tx.input().len() > 4 { Eth::TransactionCompat::otterscan_api_truncate_input(tx); @@ -261,7 +262,6 @@ where from: receipt.from(), to: receipt.to(), contract_address: receipt.contract_address(), - state_root: receipt.state_root(), authorization_list: receipt .authorization_list() .map(<[SignedAuthorization]>::to_vec), @@ -334,6 +334,7 @@ where .eth .trace_block_with( num.into(), + None, TracingInspectorConfig::default_parity(), |tx_info, inspector, _, _, _| { Ok(inspector.into_parity_builder().into_localized_transaction_traces(tx_info)) diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs index 6d5897df13..c33f97f530 100644 --- a/crates/rpc/rpc/src/reth.rs +++ b/crates/rpc/rpc/src/reth.rs @@ -1,10 +1,10 @@ use std::{collections::HashMap, future::Future, sync::Arc}; +use alloy_eips::BlockId; use alloy_primitives::{Address, U256}; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_errors::RethResult; -use reth_primitives::BlockId; use reth_provider::{BlockReaderIdExt, ChangeSetReader, StateProviderFactory}; use reth_rpc_api::RethApiServer; use reth_rpc_eth_types::{EthApiError, EthResult}; diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 687762b74b..38c73b0f51 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,5 +1,4 @@ -use std::sync::Arc; - +use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256, U256}; use alloy_rpc_types::{ state::{EvmOverrides, StateOverride}, @@ -19,14 +18,11 @@ use reth_consensus_common::calc::{ base_block_reward, base_block_reward_pre_merge, block_reward, ommer_reward, }; use reth_evm::ConfigureEvmEnv; -use reth_primitives::{BlockId, Header}; +use reth_primitives::Header; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; -use reth_rpc_eth_api::{ - helpers::{Call, TraceExt}, - FromEthApiError, -}; +use reth_rpc_eth_api::{helpers::TraceExt, FromEthApiError, RpcNodeCore}; use reth_rpc_eth_types::{error::EthApiError, utils::recover_raw_transaction}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ @@ -37,6 +33,7 @@ use revm_inspectors::{ opcode::OpcodeGasInspector, tracing::{parity::populate_state_diff, TracingInspector, TracingInspectorConfig}, }; +use std::sync::Arc; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `trace` API implementation. @@ -118,14 +115,14 @@ where trace_types: HashSet, block_id: Option, ) -> Result { - let tx = recover_raw_transaction(tx)?; + let tx = recover_raw_transaction(tx)?.into_ecrecovered_transaction(); let (cfg, block, at) = self.eth_api().evm_env_at(block_id.unwrap_or_default()).await?; let env = EnvWithHandlerCfg::new_with_cfg_env( cfg, block, - Call::evm_config(self.eth_api()).tx_env(&tx.into_ecrecovered_transaction()), + RpcNodeCore::evm_config(self.eth_api()).tx_env(tx.as_signed(), tx.signer()), ); let config = TracingInspectorConfig::from_parity_config(&trace_types); @@ -251,7 +248,8 @@ where &self, filter: TraceFilter, ) -> Result, Eth::Error> { - let matcher = filter.matcher(); + // We'll reuse the matcher across multiple blocks that are traced in parallel + let matcher = Arc::new(filter.matcher()); let TraceFilter { from_block, to_block, after, count, .. 
} = filter; let start = from_block.unwrap_or(0); let end = if let Some(to_block) = to_block { @@ -277,14 +275,21 @@ where } // fetch all blocks in that range - let blocks = self.provider().block_range(start..=end).map_err(Eth::Error::from_eth_err)?; + let blocks = self + .provider() + .sealed_block_with_senders_range(start..=end) + .map_err(Eth::Error::from_eth_err)? + .into_iter() + .map(Arc::new) + .collect::>(); // trace all blocks let mut block_traces = Vec::with_capacity(blocks.len()); for block in &blocks { let matcher = matcher.clone(); let traces = self.eth_api().trace_block_until( - block.number.into(), + block.hash().into(), + Some(block.clone()), None, TracingInspectorConfig::default_parity(), move |tx_info, inspector, _, _, _| { @@ -307,13 +312,15 @@ where // add reward traces for all blocks for block in &blocks { if let Some(base_block_reward) = self.calculate_base_block_reward(&block.header)? { - let mut traces = self.extract_reward_traces( - &block.header, - &block.body.ommers, - base_block_reward, + all_traces.extend( + self.extract_reward_traces( + &block.header, + &block.body.ommers, + base_block_reward, + ) + .into_iter() + .filter(|trace| matcher.matches(&trace.trace)), ); - traces.retain(|trace| matcher.matches(&trace.trace)); - all_traces.extend(traces); } else { // no block reward, means we're past the Paris hardfork and don't expect any rewards // because the blocks in ascending order @@ -321,13 +328,18 @@ where } } - // apply after and count to traces if specified, this allows for a pagination style. - // only consider traces after - if let Some(after) = after.map(|a| a as usize).filter(|a| *a < all_traces.len()) { - all_traces = all_traces.split_off(after); + // Skips the first `after` number of matching traces. + // If `after` is greater than or equal to the number of matched traces, it returns an empty + // array. + if let Some(after) = after.map(|a| a as usize) { + if after < all_traces.len() { + all_traces.drain(..after); + } else { + return Ok(vec![]) + } } - // at most, return count of traces + // Return at most `count` of traces if let Some(count) = count { let count = count as usize; if count < all_traces.len() { @@ -363,6 +375,7 @@ where ) -> Result>, Eth::Error> { let traces = self.eth_api().trace_block_with( block_id, + None, TracingInspectorConfig::default_parity(), |tx_info, inspector, _, _, _| { let traces = @@ -371,7 +384,7 @@ where }, ); - let block = self.eth_api().block(block_id); + let block = self.eth_api().block_with_senders(block_id); let (maybe_traces, maybe_block) = futures::try_join!(traces, block)?; let mut maybe_traces = @@ -399,6 +412,7 @@ where self.eth_api() .trace_block_with( block_id, + None, TracingInspectorConfig::from_parity_config(&trace_types), move |tx_info, inspector, res, state, db| { let mut full_trace = @@ -454,6 +468,7 @@ where .eth_api() .trace_block_inspector( block_id, + None, OpcodeGasInspector::default, move |tx_info, inspector, _res, _, _| { let trace = TransactionOpcodeGas { @@ -467,7 +482,9 @@ where let Some(transactions) = res else { return Ok(None) }; - let Some(block) = self.eth_api().block(block_id).await? else { return Ok(None) }; + let Some(block) = self.eth_api().block_with_senders(block_id).await? 
else { + return Ok(None) + }; Ok(Some(BlockOpcodeGas { block_hash: block.hash(), diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 5e26935ca1..d03e10ca75 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -1,5 +1,6 @@ -use std::{collections::BTreeMap, marker::PhantomData}; +use std::collections::BTreeMap; +use alloy_consensus::Transaction; use alloy_primitives::Address; use alloy_rpc_types_txpool::{ TxpoolContent, TxpoolContentFrom, TxpoolInspect, TxpoolInspectSummary, TxpoolStatus, @@ -8,7 +9,6 @@ use async_trait::async_trait; use jsonrpsee::core::RpcResult as Result; use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_api::TxPoolApiServer; -use reth_rpc_eth_api::{FullEthApiTypes, RpcTransaction}; use reth_rpc_types_compat::{transaction::from_recovered, TransactionCompat}; use reth_transaction_pool::{AllPoolTransactions, PoolTransaction, TransactionPool}; use tracing::trace; @@ -20,33 +20,34 @@ use tracing::trace; pub struct TxPoolApi { /// An interface to interact with the pool pool: Pool, - _tx_resp_builder: PhantomData, + tx_resp_builder: Eth, } impl TxPoolApi { /// Creates a new instance of `TxpoolApi`. - pub const fn new(pool: Pool) -> Self { - Self { pool, _tx_resp_builder: PhantomData } + pub const fn new(pool: Pool, tx_resp_builder: Eth) -> Self { + Self { pool, tx_resp_builder } } } impl TxPoolApi where Pool: TransactionPool + 'static, - Eth: FullEthApiTypes, + Eth: TransactionCompat, { - fn content(&self) -> TxpoolContent> { + fn content(&self) -> TxpoolContent { #[inline] fn insert( tx: &Tx, content: &mut BTreeMap>, + resp_builder: &RpcTxB, ) where Tx: PoolTransaction>, RpcTxB: TransactionCompat, { content.entry(tx.sender()).or_default().insert( tx.nonce().to_string(), - from_recovered::(tx.clone().into_consensus().into()), + from_recovered(tx.clone().into_consensus().into(), resp_builder), ); } @@ -54,10 +55,10 @@ where let mut content = TxpoolContent { pending: BTreeMap::new(), queued: BTreeMap::new() }; for pending in pending { - insert::<_, Eth::TransactionCompat>(&pending.transaction, &mut content.pending); + insert::<_, Eth>(&pending.transaction, &mut content.pending, &self.tx_resp_builder); } for queued in queued { - insert::<_, Eth::TransactionCompat>(&queued.transaction, &mut content.queued); + insert::<_, Eth>(&queued.transaction, &mut content.queued, &self.tx_resp_builder); } content @@ -65,10 +66,10 @@ where } #[async_trait] -impl TxPoolApiServer> for TxPoolApi +impl TxPoolApiServer for TxPoolApi where Pool: TransactionPool + 'static, - Eth: FullEthApiTypes + 'static, + Eth: TransactionCompat + 'static, { /// Returns the number of transactions currently pending for inclusion in the next block(s), as /// well as the ones that are being scheduled for future execution only. 
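[Editor's note] The `TxPoolApi` and `EthPubSub` changes in this patch share one pattern: the `PhantomData<Eth>` type marker is replaced by a stored `TransactionCompat` value (`tx_resp_builder`), so RPC responses are built by an instance that can be cloned into spawned tasks instead of being resolved purely at the type level. A minimal sketch of the new wiring under that assumption; the `EthTxBuilder::default()` construction and the surrounding handles are illustrative, not taken from this patch:

    // Hypothetical setup; `pool`, `provider`, `events`, and `network` are
    // whatever handles the node builder already has in scope.
    let tx_resp_builder = EthTxBuilder::default(); // assumes a Default impl
    let txpool_api = TxPoolApi::new(pool.clone(), tx_resp_builder.clone());
    let pubsub = EthPubSub::new(provider, pool, events, network, tx_resp_builder);
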
@@ -130,7 +131,7 @@ where async fn txpool_content_from( &self, from: Address, - ) -> Result>> { + ) -> Result> { trace!(target: "rpc::eth", ?from, "Serving txpool_contentFrom"); Ok(self.content().remove_from(&from)) } @@ -140,7 +141,7 @@ where /// /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_content) for more details /// Handler for `txpool_content` - async fn txpool_content(&self) -> Result>> { + async fn txpool_content(&self) -> Result> { trace!(target: "rpc::eth", "Serving txpool_content"); Ok(self.content()) } diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs new file mode 100644 index 0000000000..bae0a36952 --- /dev/null +++ b/crates/rpc/rpc/src/validation.rs @@ -0,0 +1,455 @@ +use alloy_consensus::{BlobTransactionValidationError, EnvKzgSettings, Transaction}; +use alloy_eips::eip4844::kzg_to_versioned_hash; +use alloy_rpc_types::engine::{ + BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, +}; +use alloy_rpc_types_beacon::relay::{ + BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, +}; +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_consensus::{Consensus, PostExecutionInput}; +use reth_errors::{BlockExecutionError, ConsensusError, ProviderError, RethError}; +use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; +use reth_evm::execute::{BlockExecutorProvider, Executor}; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{Block, GotExpected, Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_provider::{ + AccountReader, BlockExecutionInput, BlockExecutionOutput, BlockReaderIdExt, HeaderProvider, + StateProviderFactory, WithdrawalsProvider, +}; +use reth_revm::database::StateProviderDatabase; +use reth_rpc_api::{ + BlockSubmissionValidationApiServer, BuilderBlockValidationRequestV3, + BuilderBlockValidationRequestV4, +}; +use reth_rpc_eth_types::EthApiError; +use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use reth_trie::HashedPostState; +use revm_primitives::{Address, B256, U256}; +use serde::{Deserialize, Serialize}; +use std::{collections::HashSet, sync::Arc}; + +/// Configuration for validation API. +#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] +pub struct ValidationApiConfig { + /// Disallowed addresses. + pub disallow: HashSet
, +} + +#[derive(Debug, thiserror::Error)] +pub enum ValidationApiError { + #[error("block gas limit mismatch: {_0}")] + GasLimitMismatch(GotExpected), + #[error("block gas used mismatch: {_0}")] + GasUsedMismatch(GotExpected), + #[error("block parent hash mismatch: {_0}")] + ParentHashMismatch(GotExpected), + #[error("block hash mismatch: {_0}")] + BlockHashMismatch(GotExpected), + #[error("missing latest block in database")] + MissingLatestBlock, + #[error("could not verify proposer payment")] + ProposerPayment, + #[error("invalid blobs bundle")] + InvalidBlobsBundle, + #[error("block accesses blacklisted address: {_0}")] + Blacklist(Address), + #[error(transparent)] + Blob(#[from] BlobTransactionValidationError), + #[error(transparent)] + Consensus(#[from] ConsensusError), + #[error(transparent)] + Provider(#[from] ProviderError), + #[error(transparent)] + Execution(#[from] BlockExecutionError), +} + +#[derive(Debug, Clone)] +pub struct ValidationApiInner { + /// The provider that can interact with the chain. + provider: Provider, + /// Consensus implementation. + consensus: Arc, + /// Execution payload validator. + payload_validator: ExecutionPayloadValidator, + /// Block executor factory. + executor_provider: E, + /// Set of disallowed addresses + disallow: HashSet
, +} + +/// The type that implements the `validation` rpc namespace trait +#[derive(Clone, Debug, derive_more::Deref)] +pub struct ValidationApi { + #[deref] + inner: Arc>, +} + +impl ValidationApi +where + Provider: ChainSpecProvider, +{ + /// Create a new instance of the [`ValidationApi`] + pub fn new( + provider: Provider, + consensus: Arc, + executor_provider: E, + config: ValidationApiConfig, + ) -> Self { + let ValidationApiConfig { disallow } = config; + + let payload_validator = ExecutionPayloadValidator::new(provider.chain_spec()); + let inner = Arc::new(ValidationApiInner { + provider, + consensus, + payload_validator, + executor_provider, + disallow, + }); + + Self { inner } + } +} + +impl ValidationApi +where + Provider: BlockReaderIdExt + + ChainSpecProvider + + StateProviderFactory + + HeaderProvider + + AccountReader + + WithdrawalsProvider + + Clone + + 'static, + E: BlockExecutorProvider, +{ + /// Validates the given block and a [`BidTrace`] against it. + pub fn validate_message_against_block( + &self, + block: SealedBlockWithSenders, + message: BidTrace, + registered_gas_limit: u64, + ) -> Result<(), ValidationApiError> { + self.validate_message_against_header(&block.header, &message)?; + + self.consensus.validate_header_with_total_difficulty(&block.header, U256::MAX)?; + self.consensus.validate_header(&block.header)?; + self.consensus.validate_block_pre_execution(&block)?; + + if !self.disallow.is_empty() { + if self.disallow.contains(&block.beneficiary) { + return Err(ValidationApiError::Blacklist(block.beneficiary)) + } + if self.disallow.contains(&message.proposer_fee_recipient) { + return Err(ValidationApiError::Blacklist(message.proposer_fee_recipient)) + } + for (sender, tx) in block.senders.iter().zip(block.transactions()) { + if self.disallow.contains(sender) { + return Err(ValidationApiError::Blacklist(*sender)) + } + if let Some(to) = tx.to() { + if self.disallow.contains(&to) { + return Err(ValidationApiError::Blacklist(to)) + } + } + } + } + + let latest_header = + self.provider.latest_header()?.ok_or_else(|| ValidationApiError::MissingLatestBlock)?; + + if latest_header.hash() != block.header.parent_hash { + return Err(ConsensusError::ParentHashMismatch( + GotExpected { got: block.header.parent_hash, expected: latest_header.hash() } + .into(), + ) + .into()) + } + self.consensus.validate_header_against_parent(&block.header, &latest_header)?; + self.validate_gas_limit(registered_gas_limit, &latest_header, &block.header)?; + + let state_provider = self.provider.state_by_block_hash(latest_header.hash())?; + let executor = + self.executor_provider.executor(StateProviderDatabase::new(&state_provider), None); + + let block = block.unseal(); + let mut accessed_blacklisted = None; + let output = executor.execute_with_state_closure( + BlockExecutionInput::new(&block, U256::MAX, None), + |state| { + if !self.disallow.is_empty() { + for account in state.cache.accounts.keys() { + if self.disallow.contains(account) { + accessed_blacklisted = Some(*account); + } + } + } + }, + )?; + + if let Some(account) = accessed_blacklisted { + return Err(ValidationApiError::Blacklist(account)) + } + + self.consensus.validate_block_post_execution( + &block, + PostExecutionInput::new(&output.receipts, &output.requests), + )?; + + self.ensure_payment(&block, &output, &message)?; + + let state_root = + state_provider.state_root(HashedPostState::from_bundle_state(&output.state.state))?; + + if state_root != block.state_root { + return Err(ConsensusError::BodyStateRootDiff( + GotExpected { 
got: state_root, expected: block.state_root }.into(), + ) + .into()) + } + + Ok(()) + } + + /// Ensures that fields of [`BidTrace`] match the fields of the [`SealedHeader`]. + fn validate_message_against_header( + &self, + header: &SealedHeader, + message: &BidTrace, + ) -> Result<(), ValidationApiError> { + if header.hash() != message.block_hash { + Err(ValidationApiError::BlockHashMismatch(GotExpected { + got: message.block_hash, + expected: header.hash(), + })) + } else if header.parent_hash != message.parent_hash { + Err(ValidationApiError::ParentHashMismatch(GotExpected { + got: message.parent_hash, + expected: header.parent_hash, + })) + } else if header.gas_limit != message.gas_limit { + Err(ValidationApiError::GasLimitMismatch(GotExpected { + got: message.gas_limit, + expected: header.gas_limit, + })) + } else if header.gas_used != message.gas_used { + return Err(ValidationApiError::GasUsedMismatch(GotExpected { + got: message.gas_used, + expected: header.gas_used, + })) + } else { + Ok(()) + } + } + + /// Ensures that the chosen gas limit is the closest possible value for the validator's + /// registered gas limit. + /// + /// Ref: + fn validate_gas_limit( + &self, + registered_gas_limit: u64, + parent_header: &SealedHeader, + header: &SealedHeader, + ) -> Result<(), ValidationApiError> { + let max_gas_limit = + parent_header.gas_limit + parent_header.gas_limit / GAS_LIMIT_BOUND_DIVISOR - 1; + let min_gas_limit = + parent_header.gas_limit - parent_header.gas_limit / GAS_LIMIT_BOUND_DIVISOR + 1; + + let best_gas_limit = + std::cmp::max(min_gas_limit, std::cmp::min(max_gas_limit, registered_gas_limit)); + + if best_gas_limit != header.gas_limit { + return Err(ValidationApiError::GasLimitMismatch(GotExpected { + got: header.gas_limit, + expected: best_gas_limit, + })) + } + + Ok(()) + } + + /// Ensures that the proposer has received [`BidTrace::value`] for this block. + /// + /// Firstly attempts to verify the payment by checking the state changes, otherwise falls back + /// to checking the latest block transaction. 
+ fn ensure_payment( + &self, + block: &Block, + output: &BlockExecutionOutput, + message: &BidTrace, + ) -> Result<(), ValidationApiError> { + let (mut balance_before, balance_after) = if let Some(acc) = + output.state.state.get(&message.proposer_fee_recipient) + { + let balance_before = acc.original_info.as_ref().map(|i| i.balance).unwrap_or_default(); + let balance_after = acc.info.as_ref().map(|i| i.balance).unwrap_or_default(); + + (balance_before, balance_after) + } else { + // account might have balance but considering it zero is fine as long as we know + // that balance have not changed + (U256::ZERO, U256::ZERO) + }; + + if let Some(withdrawals) = &block.body.withdrawals { + for withdrawal in withdrawals { + if withdrawal.address == message.proposer_fee_recipient { + balance_before += withdrawal.amount_wei(); + } + } + } + + if balance_after >= balance_before + message.value { + return Ok(()) + } + + let (receipt, tx) = output + .receipts + .last() + .zip(block.body.transactions.last()) + .ok_or(ValidationApiError::ProposerPayment)?; + + if !receipt.success { + return Err(ValidationApiError::ProposerPayment) + } + + if tx.to() != Some(message.proposer_fee_recipient) { + return Err(ValidationApiError::ProposerPayment) + } + + if tx.value() != message.value { + return Err(ValidationApiError::ProposerPayment) + } + + if !tx.input().is_empty() { + return Err(ValidationApiError::ProposerPayment) + } + + if let Some(block_base_fee) = block.base_fee_per_gas { + if tx.effective_tip_per_gas(block_base_fee).unwrap_or_default() != 0 { + return Err(ValidationApiError::ProposerPayment) + } + } + + Ok(()) + } + + /// Validates the given [`BlobsBundleV1`] and returns versioned hashes for blobs. + pub fn validate_blobs_bundle( + &self, + mut blobs_bundle: BlobsBundleV1, + ) -> Result, ValidationApiError> { + if blobs_bundle.commitments.len() != blobs_bundle.proofs.len() || + blobs_bundle.commitments.len() != blobs_bundle.blobs.len() + { + return Err(ValidationApiError::InvalidBlobsBundle) + } + + let versioned_hashes = blobs_bundle + .commitments + .iter() + .map(|c| kzg_to_versioned_hash(c.as_slice())) + .collect::>(); + + let sidecar = blobs_bundle.pop_sidecar(blobs_bundle.blobs.len()); + + sidecar.validate(&versioned_hashes, EnvKzgSettings::default().get())?; + + Ok(versioned_hashes) + } +} + +#[async_trait] +impl BlockSubmissionValidationApiServer for ValidationApi +where + Provider: BlockReaderIdExt + + ChainSpecProvider + + StateProviderFactory + + HeaderProvider + + AccountReader + + WithdrawalsProvider + + Clone + + 'static, + E: BlockExecutorProvider, +{ + async fn validate_builder_submission_v1( + &self, + _request: BuilderBlockValidationRequest, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + async fn validate_builder_submission_v2( + &self, + _request: BuilderBlockValidationRequestV2, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v3( + &self, + request: BuilderBlockValidationRequestV3, + ) -> RpcResult<()> { + let block = self + .payload_validator + .ensure_well_formed_payload( + ExecutionPayload::V3(request.request.execution_payload), + ExecutionPayloadSidecar::v3(CancunPayloadFields { + parent_beacon_block_root: request.parent_beacon_block_root, + versioned_hashes: self + .validate_blobs_bundle(request.request.blobs_bundle) + .map_err(|e| RethError::Other(e.into())) + .to_rpc_result()?, + }), + ) + .to_rpc_result()? 
+ .try_seal_with_senders() + .map_err(|_| EthApiError::InvalidTransactionSignature)?; + + self.validate_message_against_block( + block, + request.request.message, + request.registered_gas_limit, + ) + .map_err(|e| RethError::Other(e.into())) + .to_rpc_result() + } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v4( + &self, + request: BuilderBlockValidationRequestV4, + ) -> RpcResult<()> { + let block = self + .payload_validator + .ensure_well_formed_payload( + ExecutionPayload::V3(request.request.execution_payload), + ExecutionPayloadSidecar::v4( + CancunPayloadFields { + parent_beacon_block_root: request.parent_beacon_block_root, + versioned_hashes: self + .validate_blobs_bundle(request.request.blobs_bundle) + .map_err(|e| RethError::Other(e.into())) + .to_rpc_result()?, + }, + request.request.execution_requests.into(), + ), + ) + .to_rpc_result()? + .try_seal_with_senders() + .map_err(|_| EthApiError::InvalidTransactionSignature)?; + + self.validate_message_against_block( + block, + request.request.message, + request.registered_gas_limit, + ) + .map_err(|e| RethError::Other(e.into())) + .to_rpc_result() + } +} diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index 352d3e0247..88a8e3b96d 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -46,4 +46,10 @@ tokio-stream.workspace = true reth-testing-utils.workspace = true [features] -test-utils = [] +test-utils = [ + "reth-consensus/test-utils", + "reth-network-p2p/test-utils", + "reth-primitives-traits/test-utils", + "reth-provider/test-utils", + "reth-stages-types/test-utils" +] diff --git a/crates/stages/api/src/error.rs b/crates/stages/api/src/error.rs index 68e1d00fda..8562b10b6a 100644 --- a/crates/stages/api/src/error.rs +++ b/crates/stages/api/src/error.rs @@ -188,4 +188,7 @@ pub enum PipelineError { /// Internal error #[error(transparent)] Internal(#[from] RethError), + /// The pipeline encountered an unwind when `fail_on_unwind` was set to `true`. + #[error("unexpected unwind")] + UnexpectedUnwind, } diff --git a/crates/stages/api/src/pipeline/builder.rs b/crates/stages/api/src/pipeline/builder.rs index 68ca887fe7..45bdc2d894 100644 --- a/crates/stages/api/src/pipeline/builder.rs +++ b/crates/stages/api/src/pipeline/builder.rs @@ -14,6 +14,7 @@ pub struct PipelineBuilder { /// A receiver for the current chain tip to sync to. tip_tx: Option>, metrics_tx: Option, + fail_on_unwind: bool, } impl PipelineBuilder { @@ -34,7 +35,9 @@ impl PipelineBuilder { /// [`builder`][StageSet::builder] on the set which will convert it to a /// [`StageSetBuilder`][crate::StageSetBuilder]. pub fn add_stages>(mut self, set: Set) -> Self { - for stage in set.builder().build() { + let states = set.builder().build(); + self.stages.reserve_exact(states.len()); + for stage in states { self.stages.push(stage); } self @@ -60,6 +63,12 @@ impl PipelineBuilder { self } + /// Set whether pipeline should fail on unwind. + pub const fn with_fail_on_unwind(mut self, yes: bool) -> Self { + self.fail_on_unwind = yes; + self + } + /// Builds the final [`Pipeline`] using the given database. 
pub fn build( self, @@ -70,7 +79,7 @@ impl PipelineBuilder { N: ProviderNodeTypes, ProviderFactory: DatabaseProviderFactory, { - let Self { stages, max_block, tip_tx, metrics_tx } = self; + let Self { stages, max_block, tip_tx, metrics_tx, fail_on_unwind } = self; Pipeline { provider_factory, stages, @@ -80,13 +89,20 @@ impl PipelineBuilder { event_sender: Default::default(), progress: Default::default(), metrics_tx, + fail_on_unwind, } } } impl Default for PipelineBuilder { fn default() -> Self { - Self { stages: Vec::new(), max_block: None, tip_tx: None, metrics_tx: None } + Self { + stages: Vec::new(), + max_block: None, + tip_tx: None, + metrics_tx: None, + fail_on_unwind: false, + } } } @@ -95,6 +111,7 @@ impl std::fmt::Debug for PipelineBuilder { f.debug_struct("PipelineBuilder") .field("stages", &self.stages.iter().map(|stage| stage.id()).collect::>()) .field("max_block", &self.max_block) + .field("fail_on_unwind", &self.fail_on_unwind) .finish() } } diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 1f6d9341ad..399a3ffb4b 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -78,6 +78,9 @@ pub struct Pipeline { /// A receiver for the current chain tip to sync to. tip_tx: Option>, metrics_tx: Option, + /// Whether an unwind should fail the syncing process. Should only be set when downloading + /// blocks from trusted sources and expecting them to be valid. + fail_on_unwind: bool, } impl Pipeline { @@ -164,6 +167,10 @@ impl Pipeline { loop { let next_action = self.run_loop().await?; + if next_action.is_unwind() && self.fail_on_unwind { + return Err(PipelineError::UnexpectedUnwind) + } + // Terminate the loop early if it's reached the maximum user // configured block. 
if next_action.should_continue() && @@ -276,6 +283,10 @@ impl Pipeline { // Unwind stages in reverse order of execution let unwind_pipeline = self.stages.iter_mut().rev(); + // Legacy Engine: This prevents a race condition in which the `StaticFileProducer` could + // attempt to proceed with a finalized block which has been unwinded + let _locked_sf_producer = self.static_file_producer.lock(); + let mut provider_rw = self.provider_factory.database_provider_rw()?; for stage in unwind_pipeline { @@ -582,6 +593,7 @@ impl std::fmt::Debug for Pipeline { .field("stages", &self.stages.iter().map(|stage| stage.id()).collect::>()) .field("max_block", &self.max_block) .field("event_sender", &self.event_sender) + .field("fail_on_unwind", &self.fail_on_unwind) .finish() } } diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 5b58967a67..1a86239a83 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -24,7 +24,9 @@ reth-evm.workspace = true reth-exex.workspace = true reth-network-p2p.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } -reth-primitives-traits = { workspace = true, features = ["serde-bincode-compat"] } +reth-primitives-traits = { workspace = true, features = [ + "serde-bincode-compat", +] } reth-provider.workspace = true reth-execution-types.workspace = true reth-prune.workspace = true @@ -83,9 +85,6 @@ tempfile.workspace = true # Stage benchmarks criterion = { workspace = true, features = ["async_tokio"] } -# io -serde_json.workspace = true - [target.'cfg(not(target_os = "windows"))'.dev-dependencies] pprof = { workspace = true, features = [ "flamegraph", @@ -95,13 +94,25 @@ pprof = { workspace = true, features = [ [features] test-utils = [ - "dep:reth-chainspec", - "reth-network-p2p/test-utils", - "reth-db/test-utils", - "reth-provider/test-utils", - "reth-stages-api/test-utils", - "dep:reth-testing-utils", - "dep:tempfile", + "dep:reth-chainspec", + "reth-network-p2p/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-stages-api/test-utils", + "dep:reth-testing-utils", + "dep:tempfile", + "reth-chainspec?/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-downloaders/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-revm/test-utils", + "reth-codecs/test-utils", + "reth-db-api/test-utils", + "reth-trie-db/test-utils", + "reth-trie/test-utils", + "reth-prune-types/test-utils" ] bsc = [ "reth-bsc-evm/bsc", diff --git a/crates/stages/stages/benches/criterion.rs b/crates/stages/stages/benches/criterion.rs index 7519d81a36..0f876dd701 100644 --- a/crates/stages/stages/benches/criterion.rs +++ b/crates/stages/stages/benches/criterion.rs @@ -2,12 +2,11 @@ use criterion::{criterion_main, measurement::WallTime, BenchmarkGroup, Criterion}; #[cfg(not(target_os = "windows"))] use pprof::criterion::{Output, PProfProfiler}; -use reth_chainspec::ChainSpec; use reth_config::config::{EtlConfig, TransactionLookupConfig}; use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; use alloy_primitives::BlockNumber; -use reth_provider::{DatabaseProvider, DatabaseProviderFactory}; +use reth_provider::{test_utils::MockNodeTypesWithDB, DatabaseProvider, DatabaseProviderFactory}; use reth_stages::{ stages::{MerkleStage, SenderRecoveryStage, TransactionLookupStage}, test_utils::TestStageDB, @@ -148,7 +147,8 @@ fn measure_stage( block_interval: RangeInclusive, label: String, ) where - S: Clone + Stage as Database>::TXMut, 
ChainSpec>>, + S: Clone + + Stage as Database>::TXMut, MockNodeTypesWithDB>>, F: Fn(S, &TestStageDB, StageRange), { let stage_range = ( diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index 4812fb13c3..e6ae33f9c2 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -1,14 +1,15 @@ #![allow(unreachable_pub)] use alloy_primitives::{Address, Sealable, B256, U256}; use itertools::concat; -use reth_chainspec::ChainSpec; use reth_db::{tables, test_utils::TempDatabase, Database, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, SealedBlock, SealedHeader}; -use reth_provider::{DatabaseProvider, DatabaseProviderFactory, TrieWriter}; +use reth_provider::{ + test_utils::MockNodeTypesWithDB, DatabaseProvider, DatabaseProviderFactory, TrieWriter, +}; use reth_stages::{ stages::{AccountHashingStage, StorageHashingStage}, test_utils::{StorageKind, TestStageDB}, @@ -31,7 +32,8 @@ use reth_trie_db::DatabaseStateRoot; pub(crate) type StageRange = (ExecInput, UnwindInput); pub(crate) fn stage_unwind< - S: Clone + Stage as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage as Database>::TXMut, MockNodeTypesWithDB>>, >( stage: S, db: &TestStageDB, @@ -63,7 +65,8 @@ pub(crate) fn stage_unwind< pub(crate) fn unwind_hashes(stage: S, db: &TestStageDB, range: StageRange) where - S: Clone + Stage as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage as Database>::TXMut, MockNodeTypesWithDB>>, { let (input, unwind) = range; diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index f53d25876f..b7f213d947 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -121,7 +121,6 @@ where let mut tx_block_cursor = tx.cursor_write::()?; let mut ommers_cursor = tx.cursor_write::()?; let mut withdrawals_cursor = tx.cursor_write::()?; - let mut requests_cursor = tx.cursor_write::()?; // Get id for the next tx_num of zero if there are no transactions. let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); @@ -275,13 +274,6 @@ where .append(block_number, StoredBlockWithdrawals { withdrawals })?; } } - - // Write requests if any - if let Some(requests) = block.body.requests { - if !requests.0.is_empty() { - requests_cursor.append(block_number, requests)?; - } - } } BlockResponse::Empty(header) => { // Write empty sidecars @@ -324,7 +316,6 @@ where let mut body_cursor = tx.cursor_write::()?; let mut ommers_cursor = tx.cursor_write::()?; let mut withdrawals_cursor = tx.cursor_write::()?; - let mut requests_cursor = tx.cursor_write::()?; // Cursors to unwind transitions let mut tx_block_cursor = tx.cursor_write::()?; @@ -344,11 +335,6 @@ where withdrawals_cursor.delete_current()?; } - // Delete the requests entry if any - if requests_cursor.seek_exact(number)?.is_some() { - requests_cursor.delete_current()?; - } - // Delete all transaction to block values. 
if !block_meta.is_empty() && tx_block_cursor.seek_exact(block_meta.last_tx_num())?.is_some() @@ -1018,7 +1004,8 @@ mod tests { return Poll::Ready(None) } - let mut response = Vec::default(); + let mut response = + Vec::with_capacity(std::cmp::min(this.headers.len(), this.batch_size as usize)); while let Some(header) = this.headers.pop_front() { if header.is_empty() { response.push(BlockResponse::Empty(header)) diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index c82ca75212..04866c189a 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -223,7 +223,7 @@ where provider.tx_ref(), provider.static_file_provider(), )); - let mut executor = self.executor_provider.batch_executor(db); + let mut executor = self.executor_provider.batch_executor(db, None); executor.set_tip(max_block); executor.set_prune_modes(prune_modes); @@ -678,7 +678,8 @@ mod tests { use assert_matches::assert_matches; use reth_chainspec::ChainSpecBuilder; use reth_db_api::{models::AccountBeforeTx, transaction::DbTxMut}; - use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_evm::execute::BasicBlockExecutorProvider; + use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_execution_errors::BlockValidationError; use reth_primitives::{Account, Bytecode, SealedBlock, StorageEntry}; use reth_provider::{ @@ -689,10 +690,11 @@ mod tests { use reth_stages_api::StageUnitCheckpoint; use std::collections::BTreeMap; - fn stage() -> ExecutionStage { - let executor_provider = EthExecutorProvider::ethereum(Arc::new( + fn stage() -> ExecutionStage> { + let strategy_factory = EthExecutionStrategyFactory::ethereum(Arc::new( ChainSpecBuilder::mainnet().berlin_activated().build(), )); + let executor_provider = BasicBlockExecutorProvider::new(strategy_factory); ExecutionStage::new( executor_provider, ExecutionStageThresholds { diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 14afb37d81..1ca0e1aa13 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -58,11 +58,8 @@ impl AccountHashingStage { /// /// Proceeds to go to the `BlockTransitionIndex` end, go back `transitions` and change the /// account state in the `AccountChangeSets` table. - pub fn seed< - Tx: DbTx + DbTxMut + 'static, - Spec: Send + Sync + 'static + reth_chainspec::EthereumHardforks, - >( - provider: &reth_provider::DatabaseProvider, + pub fn seed( + provider: &reth_provider::DatabaseProvider, opts: SeedOpts, ) -> Result, StageError> { use alloy_primitives::U256; @@ -234,7 +231,7 @@ where input.unwind_block_range_with_threshold(self.commit_threshold); // Aggregate all transition changesets and make a list of accounts that have been changed. 
- provider.unwind_account_hashing(range)?; + provider.unwind_account_hashing_range(range)?; let mut stage_checkpoint = input.checkpoint.account_hashing_stage_checkpoint().unwrap_or_default(); diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index ef070d30c6..dcabbe83ee 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -169,7 +169,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_storage_hashing(BlockNumberAddress::range(range))?; + provider.unwind_storage_hashing_range(BlockNumberAddress::range(range))?; let mut stage_checkpoint = input.checkpoint.storage_hashing_stage_checkpoint().unwrap_or_default(); diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 199e015c2d..49e687a96a 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -2,7 +2,7 @@ use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; use reth_config::config::EtlConfig; use reth_consensus::Consensus; -use reth_db::{tables, RawKey, RawTable, RawValue}; +use reth_db::{tables, transaction::DbTx, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, transaction::DbTxMut, @@ -155,11 +155,13 @@ where // If we only have the genesis block hash, then we are at first sync, and we can remove it, // add it to the collector and use tx.append on all hashes. - if let Some((hash, block_number)) = cursor_header_numbers.last()? { - if block_number.value()? == 0 { - self.hash_collector.insert(hash.key()?, 0)?; - cursor_header_numbers.delete_current()?; - first_sync = true; + if provider.tx_ref().entries::>()? == 1 { + if let Some((hash, block_number)) = cursor_header_numbers.last()? { + if block_number.value()? == 0 { + self.hash_collector.insert(hash.key()?, 0)?; + cursor_header_numbers.delete_current()?; + first_sync = true; + } } } diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs index 8b10283fb4..38c238e5d9 100644 --- a/crates/stages/stages/src/stages/index_account_history.rs +++ b/crates/stages/stages/src/stages/index_account_history.rs @@ -134,7 +134,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_account_history_indices(range)?; + provider.unwind_account_history_indices_range(range)?; // from HistoryIndex higher than that number. 
Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) }) diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index ac645b8dd7..ba61e63123 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -140,7 +140,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_storage_history_indices(BlockNumberAddress::range(range))?; + provider.unwind_storage_history_indices_range(BlockNumberAddress::range(range))?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) }) } diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 17ffcf2e90..4b9f929510 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -263,7 +263,7 @@ mod tests { ); db.insert_blocks(blocks.iter(), StorageKind::Static)?; - let mut receipts = Vec::new(); + let mut receipts = Vec::with_capacity(blocks.len()); let mut tx_num = 0u64; for block in &blocks { let mut block_receipts = Vec::with_capacity(block.body.transactions.len()); diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index a85b0bc60c..a4eda6394c 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -29,6 +29,9 @@ const BATCH_SIZE: usize = 100_000; /// Maximum number of senders to recover per rayon worker job. const WORKER_CHUNK_SIZE: usize = 100; +/// Type alias for a sender that transmits the result of sender recovery. +type RecoveryResultSender = mpsc::Sender<Result<(u64, Address), Box<SenderRecoveryStageError>>>; /// The sender recovery stage iterates over existing transactions, /// recovers the transaction signer and stores them /// in [`TransactionSenders`][reth_db::tables::TransactionSenders] table. @@ -100,8 +103,10 @@ where .map(|start| start..std::cmp::min(start + BATCH_SIZE as u64, tx_range.end)) .collect::<Vec<Range<u64>>>(); + let tx_batch_sender = setup_range_recovery(provider); + for range in batch { - recover_range(range, provider, &mut senders_cursor)?; + recover_range(range, provider, tx_batch_sender.clone(), &mut senders_cursor)?; } Ok(ExecOutput { @@ -136,15 +141,16 @@ where fn recover_range<Provider, CURSOR>( tx_range: Range<u64>, provider: &Provider, + tx_batch_sender: mpsc::Sender<Vec<(Range<u64>, RecoveryResultSender)>>, senders_cursor: &mut CURSOR, ) -> Result<(), StageError> where Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, CURSOR: DbCursorRW<tables::TransactionSenders>, { - debug!(target: "sync::stages::sender_recovery", ?tx_range, "Recovering senders batch"); + debug!(target: "sync::stages::sender_recovery", ?tx_range, "Sending batch for processing"); - // Preallocate channels + // Preallocate channels for each chunk in the batch let (chunks, receivers): (Vec<_>, Vec<_>) = tx_range .clone() .step_by(WORKER_CHUNK_SIZE) .map(|start| { }) .unzip(); - let static_file_provider = provider.static_file_provider(); - - // We do not use `tokio::task::spawn_blocking` because, during a shutdown, - // there will be a timeout grace period in which Tokio does not allow spawning - // additional blocking tasks. This would cause this function to return - // `SenderRecoveryStageError::RecoveredSendersMismatch` at the end. - // - // However, using `std::thread::spawn` allows us to utilize the timeout grace - // period to complete some work without throwing errors during the shutdown.
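The refactor below moves from a per-batch spawned thread to a single long-lived worker. A minimal sketch of the resulting pattern, under stated assumptions (all names are illustrative; the real code threads the static file provider and `SenderRecoveryStageError` through the same shape):

```rust
// One long-lived `std::thread` receives work batches over an mpsc channel
// and fans each chunk out to the global rayon pool; results flow back
// through per-chunk channels. Channel setup costs are paid once instead of
// once per batch.
use std::sync::mpsc;

type ResultSender = mpsc::Sender<u64>;

fn setup_worker() -> mpsc::Sender<Vec<(std::ops::Range<u64>, ResultSender)>> {
    let (tx, rx) = mpsc::channel::<Vec<(std::ops::Range<u64>, ResultSender)>>();
    std::thread::spawn(move || {
        while let Ok(chunks) = rx.recv() {
            for (range, out) in chunks {
                rayon::spawn(move || {
                    for n in range {
                        // Stand-in for the real per-item work (`recover_sender`).
                        if out.send(n * 2).is_err() {
                            break;
                        }
                    }
                });
            }
        }
    });
    tx
}
```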
- std::thread::spawn(move || { - for (chunk_range, recovered_senders_tx) in chunks { - // Read the raw value, and let the rayon worker to decompress & decode. - let chunk = match static_file_provider.fetch_range_with_predicate( - StaticFileSegment::Transactions, - chunk_range.clone(), - |cursor, number| { - Ok(cursor - .get_one::<TransactionMask<RawValue<TransactionSignedNoHash>>>( - number.into(), - )? - .map(|tx| (number, tx))) - }, - |_| true, - ) { - Ok(chunk) => chunk, - Err(err) => { - // We exit early since we could not process this chunk. - let _ = recovered_senders_tx - .send(Err(Box::new(SenderRecoveryStageError::StageError(err.into())))); - break - } - }; - - // Spawn the task onto the global rayon pool - // This task will send the results through the channel after it has read the transaction - // and calculated the sender. - rayon::spawn(move || { - let mut rlp_buf = Vec::with_capacity(128); - for (number, tx) in chunk { - let res = tx - .value() - .map_err(|err| Box::new(SenderRecoveryStageError::StageError(err.into()))) - .and_then(|tx| recover_sender((number, tx), &mut rlp_buf)); - - let is_err = res.is_err(); - - let _ = recovered_senders_tx.send(res); - - // Finish early - if is_err { - break - } - } - }); - } - }); + if let Err(err) = tx_batch_sender.send(chunks) { + return Err(StageError::Fatal(err.into())); + } debug!(target: "sync::stages::sender_recovery", ?tx_range, "Appending recovered senders to the database"); @@ -235,6 +188,7 @@ where provider.sealed_header(block_number)?.ok_or_else(|| { ProviderError::HeaderNotFound(block_number.into()) })?; + Err(StageError::Block { block: Box::new(sealed_header), error: BlockErrorKind::Validation( @@ -269,10 +223,82 @@ where .into(), )); } - Ok(()) } +/// Spawns a thread to handle the recovery of transaction senders for +/// specified chunks of a given batch. It processes incoming ranges, fetching and recovering +/// transactions in parallel using the global rayon pool. +fn setup_range_recovery<Provider>( + provider: &Provider, +) -> mpsc::Sender<Vec<(Range<u64>, RecoveryResultSender)>> +where + Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, +{ + let (tx_sender, tx_receiver) = mpsc::channel::<Vec<(Range<u64>, RecoveryResultSender)>>(); + let static_file_provider = provider.static_file_provider(); + + // We do not use `tokio::task::spawn_blocking` because, during a shutdown, + // there will be a timeout grace period in which Tokio does not allow spawning + // additional blocking tasks. This would cause this function to return + // `SenderRecoveryStageError::RecoveredSendersMismatch` at the end. + // + // However, using `std::thread::spawn` allows us to utilize the timeout grace + // period to complete some work without throwing errors during the shutdown. + std::thread::spawn(move || { + while let Ok(chunks) = tx_receiver.recv() { + for (chunk_range, recovered_senders_tx) in chunks { + // Read the raw value, and let the rayon worker decompress & decode. + let chunk = match static_file_provider.fetch_range_with_predicate( + StaticFileSegment::Transactions, + chunk_range.clone(), + |cursor, number| { + Ok(cursor + .get_one::<TransactionMask<RawValue<TransactionSignedNoHash>>>( + number.into(), + )? + .map(|tx| (number, tx))) + }, + |_| true, + ) { + Ok(chunk) => chunk, + Err(err) => { + // We exit early since we could not process this chunk. + let _ = recovered_senders_tx + .send(Err(Box::new(SenderRecoveryStageError::StageError(err.into())))); + break + } + }; + + // Spawn the task onto the global rayon pool + // This task will send the results through the channel after it has read the + // transaction and calculated the sender.
+ rayon::spawn(move || { + let mut rlp_buf = Vec::with_capacity(128); + for (number, tx) in chunk { + let res = tx + .value() + .map_err(|err| { + Box::new(SenderRecoveryStageError::StageError(err.into())) + }) + .and_then(|tx| recover_sender((number, tx), &mut rlp_buf)); + + let is_err = res.is_err(); + + let _ = recovered_senders_tx.send(res); + + // Finish early + if is_err { + break + } + } + }); + } + } + }); + tx_sender +} + #[inline] fn recover_sender( (tx_id, tx): (TxNumber, TransactionSignedNoHash), diff --git a/crates/stages/stages/src/test_utils/runner.rs b/crates/stages/stages/src/test_utils/runner.rs index 26f245c130..c3d25b9953 100644 --- a/crates/stages/stages/src/test_utils/runner.rs +++ b/crates/stages/stages/src/test_utils/runner.rs @@ -1,7 +1,6 @@ use super::TestStageDB; -use reth_chainspec::ChainSpec; use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; -use reth_provider::{DatabaseProvider, ProviderError}; +use reth_provider::{test_utils::MockNodeTypesWithDB, DatabaseProvider, ProviderError}; use reth_stages_api::{ ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput, }; @@ -20,7 +19,7 @@ pub(crate) enum TestRunnerError { /// A generic test runner for stages. pub(crate) trait StageTestRunner { - type S: Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, ChainSpec>> + type S: Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, MockNodeTypesWithDB>> + 'static; /// Return a reference to the database. diff --git a/crates/stages/types/Cargo.toml b/crates/stages/types/Cargo.toml index 54b14b335c..a466b21b6f 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -19,6 +19,7 @@ alloy-primitives.workspace = true modular-bitfield.workspace = true bytes.workspace = true serde.workspace = true +arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } @@ -26,3 +27,15 @@ proptest.workspace = true proptest-arbitrary-interop.workspace = true test-fuzz.workspace = true rand.workspace = true + +[features] +test-utils = [ + "dep:arbitrary", + "reth-codecs/test-utils", + "reth-trie-common/test-utils" +] +arbitrary = [ + "alloy-primitives/arbitrary", + "reth-codecs/arbitrary", + "reth-trie-common/arbitrary" +] diff --git a/crates/stages/types/src/checkpoints.rs b/crates/stages/types/src/checkpoints.rs index e88e933d6c..87225f1eec 100644 --- a/crates/stages/types/src/checkpoints.rs +++ b/crates/stages/types/src/checkpoints.rs @@ -8,7 +8,7 @@ use std::ops::RangeInclusive; use super::StageId; /// Saves the progress of Merkle stage. -#[derive(Default, Debug, Clone, PartialEq)] +#[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct MerkleCheckpoint { /// The target block number. pub target_block: BlockNumber, @@ -76,7 +76,7 @@ impl Compact for MerkleCheckpoint { /// Saves the progress of AccountHashing stage. #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct AccountHashingCheckpoint { /// The next account to start hashing from. @@ -89,7 +89,7 @@ pub struct AccountHashingCheckpoint { /// Saves the progress of StorageHashing stage.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct StorageHashingCheckpoint { /// The next account to start hashing from. @@ -104,7 +104,7 @@ pub struct StorageHashingCheckpoint { /// Saves the progress of Execution stage. #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct ExecutionCheckpoint { /// Block range which this checkpoint is valid for. @@ -115,7 +115,7 @@ pub struct ExecutionCheckpoint { /// Saves the progress of Headers stage. #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct HeadersCheckpoint { /// Block range which this checkpoint is valid for. @@ -126,7 +126,7 @@ pub struct HeadersCheckpoint { /// Saves the progress of Index History stages. #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct IndexHistoryCheckpoint { /// Block range which this checkpoint is valid for. @@ -137,7 +137,7 @@ pub struct IndexHistoryCheckpoint { /// Saves the progress of abstract stage iterating over or downloading entities. #[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct EntitiesCheckpoint { /// Number of entities already processed. @@ -166,7 +166,7 @@ impl EntitiesCheckpoint { /// Saves the block range. Usually, it's used to check the validity of some stage checkpoint across /// multiple executions. #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct CheckpointBlockRange { /// The first block of the range, inclusive. @@ -189,7 +189,7 @@ impl From<&RangeInclusive<BlockNumber>> for CheckpointBlockRange { /// Saves the progress of a stage. #[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct StageCheckpoint { /// The maximum block processed by the stage. @@ -256,7 +256,7 @@ impl StageCheckpoint { // is not a Copy type. /// Stage-specific checkpoint metrics. #[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub enum StageUnitCheckpoint { /// Saves the progress of AccountHashing stage.
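Every checkpoint type above receives the same gate. A hypothetical type showing the pattern in isolation (`ExampleCheckpoint` is illustrative; the Cargo side is the optional `arbitrary` dependency plus the `test-utils = ["dep:arbitrary", ...]` feature added earlier in this diff):

```rust
// `test` builds and `test-utils` consumers get an `Arbitrary` impl for
// property testing, while normal builds compile without the `arbitrary`
// dependency at all.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))]
pub struct ExampleCheckpoint {
    /// Number of items processed so far.
    pub processed: u64,
    /// Total number of items to process.
    pub total: u64,
}
```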
diff --git a/crates/static-file/static-file/Cargo.toml b/crates/static-file/static-file/Cargo.toml index 8fa89e12e0..d22b116cdc 100644 --- a/crates/static-file/static-file/Cargo.toml +++ b/crates/static-file/static-file/Cargo.toml @@ -13,17 +13,14 @@ workspace = true [dependencies] # reth -reth-chainspec.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-provider.workspace = true reth-storage-errors.workspace = true -reth-nippy-jar.workspace = true reth-tokio-util.workspace = true reth-prune-types.workspace = true reth-static-file-types.workspace = true reth-stages-types.workspace = true -reth-node-types.workspace = true alloy-primitives.workspace = true diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index fd8a037165..bf99092a05 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -5,8 +5,8 @@ use alloy_primitives::BlockNumber; use parking_lot::Mutex; use rayon::prelude::*; use reth_provider::{ - providers::StaticFileWriter, BlockReader, DBProvider, DatabaseProviderFactory, - StageCheckpointReader, StaticFileProviderFactory, + providers::StaticFileWriter, BlockReader, ChainStateBlockReader, DBProvider, + DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; use reth_stages_types::StageId; @@ -111,6 +111,16 @@ impl<Provider> StaticFileProducerInner<Provider> { } } +impl<Provider> StaticFileProducerInner<Provider> +where + Provider: StaticFileProviderFactory + DatabaseProviderFactory<Provider: ChainStateBlockReader>, +{ + /// Returns the last finalized block number on disk. + pub fn last_finalized_block(&self) -> ProviderResult<Option<BlockNumber>> { + self.provider.database_provider_ro()?.last_finalized_block_number() + } +} + impl<Provider> StaticFileProducerInner<Provider> where Provider: StaticFileProviderFactory diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 23c357db72..dc64be74cb 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -76,3 +76,93 @@ pub const fn find_fixed_range( let start = (block / blocks_per_static_file) * blocks_per_static_file; SegmentRangeInclusive::new(start, start + blocks_per_static_file - 1) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_highest_static_files_highest() { + let files = HighestStaticFiles { + headers: Some(100), + receipts: Some(200), + transactions: None, + sidecars: None, + }; + + // Test for headers segment + assert_eq!(files.highest(StaticFileSegment::Headers), Some(100)); + + // Test for receipts segment + assert_eq!(files.highest(StaticFileSegment::Receipts), Some(200)); + + // Test for transactions segment + assert_eq!(files.highest(StaticFileSegment::Transactions), None); + } + + #[test] + fn test_highest_static_files_as_mut() { + let mut files = HighestStaticFiles::default(); + + // Modify headers value + *files.as_mut(StaticFileSegment::Headers) = Some(150); + assert_eq!(files.headers, Some(150)); + + // Modify receipts value + *files.as_mut(StaticFileSegment::Receipts) = Some(250); + assert_eq!(files.receipts, Some(250)); + + // Modify transactions value + *files.as_mut(StaticFileSegment::Transactions) = Some(350); + assert_eq!(files.transactions, Some(350)); + } + + #[test] + fn test_highest_static_files_min() { + let files = HighestStaticFiles { + headers: Some(300), + receipts: Some(100), + transactions: None, + sidecars: None, + }; + + // Minimum value among the available segments +
assert_eq!(files.min(), Some(100)); + + let empty_files = HighestStaticFiles::default(); + // No values, should return None + assert_eq!(empty_files.min(), None); + } + + #[test] + fn test_highest_static_files_max() { + let files = HighestStaticFiles { + headers: Some(300), + receipts: Some(100), + transactions: Some(500), + sidecars: None, + }; + + // Maximum value among the available segments + assert_eq!(files.max(), Some(500)); + + let empty_files = HighestStaticFiles::default(); + // No values, should return None + assert_eq!(empty_files.max(), None); + } + + #[test] + fn test_find_fixed_range() { + // Test with default block size + let block: BlockNumber = 600_000; + let range = find_fixed_range(block, DEFAULT_BLOCKS_PER_STATIC_FILE); + assert_eq!(range.start(), 500_000); + assert_eq!(range.end(), 999_999); + + // Test with a custom block size + let block: BlockNumber = 1_200_000; + let range = find_fixed_range(block, 1_000_000); + assert_eq!(range.start(), 1_000_000); + assert_eq!(range.end(), 1_999_999); + } +} diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 640ec8c956..20a0673dff 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -27,6 +27,9 @@ op-alloy-consensus = { workspace = true, optional = true } # misc bytes.workspace = true modular-bitfield = { workspace = true, optional = true } +visibility = { version = "0.1.1", optional = true} +serde.workspace = true +arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] alloy-eips = { workspace = true, default-features = false, features = [ @@ -39,19 +42,24 @@ alloy-primitives = { workspace = true, features = [ "rand", ] } alloy-consensus = { workspace = true, features = ["arbitrary"] } -alloy-rlp.workspace = true -rand.workspace = true test-fuzz.workspace = true serde_json.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true -serde.workspace = true [features] default = ["std", "alloy"] -std = ["alloy-primitives/std", "bytes/std"] +std = [ + "alloy-primitives/std", + "bytes/std", + "alloy-consensus?/std", + "alloy-eips?/std", + "alloy-genesis?/std", + "alloy-trie?/std", + "serde/std" +] alloy = [ "dep:alloy-consensus", "dep:alloy-eips", @@ -60,4 +68,25 @@ alloy = [ "dep:alloy-trie", ] optimism = ["alloy", "dep:op-alloy-consensus"] -test-utils = [] +test-utils = [ + "std", + "alloy", + "arbitrary", + "dep:visibility", + "dep:arbitrary" +] +serde = [ + "alloy-consensus?/serde", + "alloy-eips?/serde", + "alloy-primitives/serde", + "alloy-trie?/serde", + "bytes/serde", + "op-alloy-consensus?/serde" +] +arbitrary = [ + "alloy-consensus?/arbitrary", + "alloy-eips?/arbitrary", + "alloy-primitives/arbitrary", + "alloy-trie?/arbitrary", + "op-alloy-consensus?/arbitrary" +] diff --git a/crates/storage/codecs/derive/src/arbitrary.rs b/crates/storage/codecs/derive/src/arbitrary.rs index 8aa44062e2..753bb1e33a 100644 --- a/crates/storage/codecs/derive/src/arbitrary.rs +++ b/crates/storage/codecs/derive/src/arbitrary.rs @@ -18,10 +18,26 @@ pub fn maybe_generate_tests( let mut traits = vec![]; let mut roundtrips = vec![]; let mut additional_tests = vec![]; + let mut is_crate = false; - for arg in args { + let mut iter = args.into_iter().peekable(); + + // we check if there's a crate argument which is used from inside the codecs crate directly + if let Some(arg) = iter.peek() { + if arg.to_string() == "crate" { + is_crate = true; + iter.next(); + } + 
} + + for arg in iter { if arg.to_string() == "compact" { - traits.push(quote! { use super::Compact; }); + let path = if is_crate { + quote! { use crate::Compact; } + } else { + quote! { use reth_codecs::Compact; } + }; + traits.push(path); roundtrips.push(quote! { { let mut buf = vec![]; diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index 1fb6d40fa2..cf9bcc0c62 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -2,10 +2,12 @@ use super::*; use convert_case::{Case, Casing}; +use syn::{Attribute, LitStr}; /// Generates code to implement the `Compact` trait for a data type. pub fn generate_from_to( ident: &Ident, + attrs: &[Attribute], has_lifetime: bool, fields: &FieldList, is_zstd: bool, @@ -20,6 +22,8 @@ pub fn generate_from_to( let fuzz = format_ident!("fuzz_test_{snake_case_ident}"); let test = format_ident!("fuzz_{snake_case_ident}"); + let reth_codecs = parse_reth_codecs_path(attrs).unwrap(); + let lifetime = if has_lifetime { quote! { 'a } } else { @@ -28,11 +32,11 @@ pub fn generate_from_to( let impl_compact = if has_lifetime { quote! { - impl<#lifetime> Compact for #ident<#lifetime> + impl<#lifetime> #reth_codecs::Compact for #ident<#lifetime> } } else { quote! { - impl Compact for #ident + impl #reth_codecs::Compact for #ident } }; @@ -53,6 +57,7 @@ pub fn generate_from_to( #[allow(dead_code)] #[test_fuzz::test_fuzz] fn #fuzz(obj: #ident) { + use #reth_codecs::Compact; let mut buf = vec![]; let len = obj.clone().to_compact(&mut buf); let (same_obj, buf) = #ident::from_compact(buf.as_ref(), len); @@ -191,7 +196,7 @@ fn generate_to_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> Vec<TokenStream> { } // Just because a type supports compression, doesn't mean all its values are to be compressed. - // We skip the smaller ones, and thus require a flag `__zstd` to specify if this value is + // We skip the smaller ones, and thus require a flag `__zstd` to specify if this value is // compressed or not. if is_zstd { lines.push(quote! { @@ -232,3 +237,25 @@ fn generate_to_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> Vec<TokenStream> { lines } + +/// Function to extract the crate path from the `reth_codecs(crate = "...")` attribute. +fn parse_reth_codecs_path(attrs: &[Attribute]) -> syn::Result<syn::Path> { + let mut reth_codecs_path: syn::Path = syn::parse_quote!(reth_codecs); + for attr in attrs { + if attr.path().is_ident("reth_codecs") { + attr.parse_nested_meta(|meta| { + if meta.path.is_ident("crate") { + let value = meta.value()?; + let lit: LitStr = value.parse()?; + reth_codecs_path = syn::parse_str(&lit.value())?; + Ok(()) + } else { + Err(meta.error("unsupported attribute")) + } + })?; + } + } + + Ok(reth_codecs_path) +} diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index e5a79b3fe5..b9d5cf18d6 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -43,13 +43,13 @@ pub enum FieldTypes { pub fn derive(input: TokenStream, is_zstd: bool) -> TokenStream { let mut output = quote! {}; - let DeriveInput { ident, data, generics, .. } = parse_macro_input!(input); + let DeriveInput { ident, data, generics, attrs, ..
} = parse_macro_input!(input); let has_lifetime = has_lifetime(&generics); let fields = get_fields(&data); output.extend(generate_flag_struct(&ident, has_lifetime, &fields, is_zstd)); - output.extend(generate_from_to(&ident, has_lifetime, &fields, is_zstd)); + output.extend(generate_from_to(&ident, &attrs, has_lifetime, &fields, is_zstd)); output.into() } @@ -233,10 +233,10 @@ mod tests { // Generate code that will impl the `Compact` trait. let mut output = quote! {}; - let DeriveInput { ident, data, .. } = parse2(f_struct).unwrap(); + let DeriveInput { ident, data, attrs, .. } = parse2(f_struct).unwrap(); let fields = get_fields(&data); output.extend(generate_flag_struct(&ident, false, &fields, false)); - output.extend(generate_from_to(&ident, false, &fields, false)); + output.extend(generate_from_to(&ident, &attrs, false, &fields, false)); // Expected output in a TokenStream format. Commas matter! let should_output = quote! { @@ -285,6 +285,7 @@ mod tests { #[allow(dead_code)] #[test_fuzz::test_fuzz] fn fuzz_test_test_struct(obj: TestStruct) { + use reth_codecs::Compact; let mut buf = vec![]; let len = obj.clone().to_compact(&mut buf); let (same_obj, buf) = TestStruct::from_compact(buf.as_ref(), len); @@ -295,7 +296,7 @@ mod tests { pub fn fuzz_test_struct() { fuzz_test_test_struct(TestStruct::default()) } - impl Compact for TestStruct { + impl reth_codecs::Compact for TestStruct { fn to_compact<B>(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]> { let mut flags = TestStructFlags::default(); let mut total_length = 0; diff --git a/crates/storage/codecs/derive/src/lib.rs b/crates/storage/codecs/derive/src/lib.rs index 4ffdbfd6ef..0b4015830f 100644 --- a/crates/storage/codecs/derive/src/lib.rs +++ b/crates/storage/codecs/derive/src/lib.rs @@ -49,14 +49,14 @@ mod compact; /// own encoding and do not rely on the bitflag struct. /// - `Bytes` fields and any types containing a `Bytes` field should be placed last to ensure /// efficient decoding. -#[proc_macro_derive(Compact, attributes(maybe_zero))] +#[proc_macro_derive(Compact, attributes(maybe_zero, reth_codecs))] pub fn derive(input: TokenStream) -> TokenStream { let is_zstd = false; compact::derive(input, is_zstd) } /// Adds `zstd` compression to derived [`Compact`]. -#[proc_macro_derive(CompactZstd, attributes(maybe_zero))] +#[proc_macro_derive(CompactZstd, attributes(maybe_zero, reth_codecs))] pub fn derive_zstd(input: TokenStream) -> TokenStream { let is_zstd = true; compact::derive(input, is_zstd) diff --git a/crates/storage/codecs/src/alloy/access_list.rs b/crates/storage/codecs/src/alloy/access_list.rs index 306b64d7e4..304b6bd388 100644 --- a/crates/storage/codecs/src/alloy/access_list.rs +++ b/crates/storage/codecs/src/alloy/access_list.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AccessList`] + use crate::Compact; use alloc::vec::Vec; use alloy_eips::eip2930::{AccessList, AccessListItem}; diff --git a/crates/storage/codecs/src/alloy/authorization_list.rs b/crates/storage/codecs/src/alloy/authorization_list.rs index 3efe135906..e17c0fb32a 100644 --- a/crates/storage/codecs/src/alloy/authorization_list.rs +++ b/crates/storage/codecs/src/alloy/authorization_list.rs @@ -1,19 +1,25 @@ -use core::ops::Deref; +//! 
Compact implementation for [`AlloyAuthorization`] use crate::Compact; use alloy_eips::eip7702::{Authorization as AlloyAuthorization, SignedAuthorization}; use alloy_primitives::{Address, U256}; use bytes::Buf; +use core::ops::Deref; use reth_codecs_derive::add_arbitrary_tests; /// Authorization acts as bridge which simplifies Compact implementation for AlloyAuthorization. /// /// Notice: Make sure this struct is 1:1 with `alloy_eips::eip7702::Authorization` #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct Authorization { - chain_id: U256, + chain_id: u64, address: Address, nonce: u64, } @@ -44,11 +50,9 @@ impl Compact for SignedAuthorization { where B: bytes::BufMut + AsMut<[u8]>, { - let signature = self.signature(); - let (v, r, s) = (signature.v(), signature.r(), signature.s()); - buf.put_u8(v.y_parity_byte()); - buf.put_slice(r.as_le_slice()); - buf.put_slice(s.as_le_slice()); + buf.put_u8(self.y_parity()); + buf.put_slice(self.r().as_le_slice()); + buf.put_slice(self.s().as_le_slice()); // to_compact doesn't write the len to buffer. // By placing it as last, we don't need to store it either. @@ -56,17 +60,15 @@ impl Compact for SignedAuthorization { } fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) { - let y = alloy_primitives::Parity::Parity(buf.get_u8() == 1); + let y_parity = buf.get_u8(); let r = U256::from_le_slice(&buf[0..32]); buf.advance(32); let s = U256::from_le_slice(&buf[0..32]); buf.advance(32); - let signature = alloy_primitives::Signature::from_rs_and_parity(r, s, y) - .expect("invalid authorization signature"); let (auth, buf) = AlloyAuthorization::from_compact(buf, len); - (auth.into_signed(signature), buf) + (Self::new_unchecked(auth, y_parity, r, s), buf) } } @@ -78,7 +80,7 @@ mod tests { #[test] fn test_roundtrip_compact_authorization_list_item() { let authorization = AlloyAuthorization { - chain_id: U256::from(1), + chain_id: 1u64, address: address!("dac17f958d2ee523a2206206994597c13d831ec7"), nonce: 1, } diff --git a/crates/storage/codecs/src/alloy/genesis_account.rs b/crates/storage/codecs/src/alloy/genesis_account.rs index 938ad1375b..a35d4947db 100644 --- a/crates/storage/codecs/src/alloy/genesis_account.rs +++ b/crates/storage/codecs/src/alloy/genesis_account.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyGenesisAccount`] + use crate::Compact; use alloc::vec::Vec; use alloy_genesis::GenesisAccount as AlloyGenesisAccount; @@ -9,6 +11,7 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with `alloy_genesis::GenesisAccount` #[derive(Debug, Clone, PartialEq, Eq, Compact)] +#[reth_codecs(crate = "crate")] pub(crate) struct GenesisAccountRef<'a> { /// The nonce of the account at genesis. nonce: Option<u64>, @@ -22,9 +25,16 @@ pub(crate) struct GenesisAccountRef<'a> { private_key: Option<&'a B256>, } +/// Acts as a bridge which simplifies the Compact implementation for +/// `AlloyGenesisAccount`.
#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct GenesisAccount { /// The nonce of the account at genesis. nonce: Option<u64>, @@ -39,15 +49,23 @@ pub(crate) struct GenesisAccount { } #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct StorageEntries { entries: Vec<StorageEntry>, } #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct StorageEntry { key: B256, value: B256, diff --git a/crates/storage/codecs/src/alloy/header.rs b/crates/storage/codecs/src/alloy/header.rs index 3a17ed1fdc..04b7d6ab71 100644 --- a/crates/storage/codecs/src/alloy/header.rs +++ b/crates/storage/codecs/src/alloy/header.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyHeader`] + use crate::Compact; use alloy_consensus::Header as AlloyHeader; use alloy_primitives::{Address, BlockNumber, Bloom, Bytes, B256, U256}; @@ -10,8 +12,13 @@ use alloy_primitives::{Address, BlockNumber, Bloom, Bytes, B256, U256}; /// will automatically apply to this type. /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::Header`] -#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(serde::Serialize, serde::Deserialize, arbitrary::Arbitrary) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] pub(crate) struct Header { parent_hash: B256, ommers_hash: B256, @@ -42,10 +49,15 @@ pub(crate) struct Header { /// used as a field of [`Header`] for backwards compatibility. /// /// More information: & [`reth_codecs_derive::Compact`]. -#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(serde::Serialize, serde::Deserialize, arbitrary::Arbitrary) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] pub(crate) struct HeaderExt { - requests_root: Option<B256>, + requests_hash: Option<B256>, } impl HeaderExt { /// /// Required since [`Header`] uses `Option` as a field.
const fn into_option(self) -> Option<Self> { - if self.requests_root.is_some() { + if self.requests_hash.is_some() { Some(self) } else { None @@ -66,7 +78,7 @@ impl Compact for AlloyHeader { where B: bytes::BufMut + AsMut<[u8]>, { - let extra_fields = HeaderExt { requests_root: self.requests_root }; + let extra_fields = HeaderExt { requests_hash: self.requests_hash }; let header = Header { parent_hash: self.parent_hash, @@ -116,7 +128,7 @@ impl Compact for AlloyHeader { blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, parent_beacon_block_root: header.parent_beacon_block_root, - requests_root: header.extra_fields.and_then(|h| h.requests_root), + requests_hash: header.extra_fields.and_then(|h| h.requests_hash), extra_data: header.extra_data, }; (alloy_header, buf) @@ -126,12 +138,13 @@ impl Compact for AlloyHeader { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::{address, b256, bloom, bytes, hex}; /// Holesky block #1947953 const HOLESKY_BLOCK: Header = Header { parent_hash: b256!("8605e0c46689f66b3deed82598e43d5002b71a929023b665228728f0c6e62a95"), - ommers_hash: b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: address!("c6e2459991bfe27cca6d86722f35da23a1e4cb97"), state_root: b256!("edad188ca5647d62f4cca417c11a1afbadebce30d23260767f6f587e9b3b9993"), transactions_root: b256!("4daf25dc08a841aa22aa0d3cb3e1f159d4dcaf6a6063d4d36bfac11d3fdb63ee"), @@ -175,7 +188,7 @@ mod tests { #[test] fn test_extra_fields() { let mut header = HOLESKY_BLOCK; - header.extra_fields = Some(HeaderExt { requests_root: Some(B256::random()) }); + header.extra_fields = Some(HeaderExt { requests_hash: Some(B256::random()) }); let mut encoded_header = vec![]; let len = header.to_compact(&mut encoded_header); diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index 942258d064..697bac901e 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -1,20 +1,34 @@ -mod access_list; -mod authorization_list; -mod genesis_account; -mod header; -mod log; -mod request; -mod signature; -mod transaction; -mod trie; -mod txkind; -mod withdrawal; +//! Implements Compact for alloy types. + +/// Makes the module `pub` when the `test-utils` feature is enabled. +macro_rules! 
cond_mod { + ($($mod_name:ident),*) => { + $( + #[cfg(feature = "test-utils")] + pub mod $mod_name; + #[cfg(not(feature = "test-utils"))] + pub(crate) mod $mod_name; + )* + }; +} + +cond_mod!( + access_list, + authorization_list, + genesis_account, + header, + log, + signature, + transaction, + trie, + txkind, + withdrawal ); #[cfg(test)] mod tests { use crate::{ alloy::{ - authorization_list::Authorization, genesis_account::{GenesisAccount, GenesisAccountRef, StorageEntries, StorageEntry}, header::{Header, HeaderExt}, transaction::{ @@ -38,7 +52,6 @@ mod tests { validate_bitflag_backwards_compat!(StorageEntries, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageEntry, UnusedBits::Zero); - validate_bitflag_backwards_compat!(Authorization, UnusedBits::NotZero); validate_bitflag_backwards_compat!(GenesisAccountRef<'_>, UnusedBits::NotZero); validate_bitflag_backwards_compat!(GenesisAccount, UnusedBits::NotZero); validate_bitflag_backwards_compat!(TxEip1559, UnusedBits::NotZero); diff --git a/crates/storage/codecs/src/alloy/request.rs b/crates/storage/codecs/src/alloy/request.rs deleted file mode 100644 index 2447160beb..0000000000 --- a/crates/storage/codecs/src/alloy/request.rs +++ /dev/null @@ -1,40 +0,0 @@ -//! Native Compact codec impl for EIP-7685 requests. - -use crate::Compact; -use alloy_consensus::Request; -use alloy_eips::eip7685::{Decodable7685, Encodable7685}; -use alloy_primitives::Bytes; -use bytes::BufMut; - -impl Compact for Request { - fn to_compact<B>(&self, buf: &mut B) -> usize - where - B: BufMut + AsMut<[u8]>, - { - let encoded: Bytes = self.encoded_7685().into(); - encoded.to_compact(buf) - } - - fn from_compact(buf: &[u8], _: usize) -> (Self, &[u8]) { - let (raw, buf) = Bytes::from_compact(buf, buf.len()); - - (Self::decode_7685(&mut raw.as_ref()).expect("invalid eip-7685 request in db"), buf) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use proptest::proptest; - use proptest_arbitrary_interop::arb; - - proptest! { - #[test] - fn roundtrip(request in arb::<Request>()) { - let mut buf = Vec::<u8>::new(); - request.to_compact(&mut buf); - let (decoded, _) = Request::from_compact(&buf, buf.len()); - assert_eq!(request, decoded); - } - } -} diff --git a/crates/storage/codecs/src/alloy/signature.rs b/crates/storage/codecs/src/alloy/signature.rs index 70290ea96c..0cc4774d0f 100644 --- a/crates/storage/codecs/src/alloy/signature.rs +++ b/crates/storage/codecs/src/alloy/signature.rs @@ -1,6 +1,7 @@ -use alloy_primitives::{Parity, Signature, U256}; +//! Compact implementation for [`Signature`] use crate::Compact; +use alloy_primitives::{Parity, Signature, U256}; impl Compact for Signature { fn to_compact<B>(&self, buf: &mut B) -> usize diff --git a/crates/storage/codecs/src/alloy/transaction/eip1559.rs b/crates/storage/codecs/src/alloy/transaction/eip1559.rs index 8e7594951f..6d910a6900 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip1559.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip1559.rs @@ -1,3 +1,5 @@ +//! 
Compact implementation for [`AlloyTxEip1559`] + use crate::Compact; use alloy_consensus::TxEip1559 as AlloyTxEip1559; use alloy_eips::eip2930::AccessList; @@ -11,8 +13,13 @@ use alloy_primitives::{Bytes, ChainId, TxKind, U256}; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip1559`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Compact, Default)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[cfg_attr(test, crate::add_arbitrary_tests(compact))] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(any(test, feature = "test-utils"), crate::add_arbitrary_tests(crate, compact))] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] pub(crate) struct TxEip1559 { chain_id: ChainId, nonce: u64, diff --git a/crates/storage/codecs/src/alloy/transaction/eip2930.rs b/crates/storage/codecs/src/alloy/transaction/eip2930.rs index e0c78a3e4c..aeb08f361b 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip2930.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip2930.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyTxEip2930`] + use crate::Compact; use alloy_consensus::TxEip2930 as AlloyTxEip2930; use alloy_eips::eip2930::AccessList; @@ -13,8 +15,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip2930`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxEip2930 { chain_id: ChainId, nonce: u64, diff --git a/crates/storage/codecs/src/alloy/transaction/eip4844.rs b/crates/storage/codecs/src/alloy/transaction/eip4844.rs index 27c6b92409..fac9ab9a1b 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip4844.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip4844.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyTxEip4844`] + use crate::{Compact, CompactPlaceholder}; use alloc::vec::Vec; use alloy_consensus::TxEip4844 as AlloyTxEip4844; @@ -14,8 +16,10 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip4844`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr(any(test, feature = "test-utils"), derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxEip4844 { chain_id: ChainId, nonce: u64, @@ -25,6 +29,13 @@ pub(crate) struct TxEip4844 { /// TODO(debt): this should be removed if we break the DB. 
/// Makes sure that the Compact bitflag struct has one bit after the above field: /// + #[cfg_attr( + feature = "test-utils", + serde( + serialize_with = "serialize_placeholder", + deserialize_with = "deserialize_placeholder" + ) + )] placeholder: Option<CompactPlaceholder>, to: Address, value: U256, @@ -75,6 +86,54 @@ impl Compact for AlloyTxEip4844 { } } +#[cfg(any(test, feature = "test-utils"))] +impl<'a> arbitrary::Arbitrary<'a> for TxEip4844 { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> { + Ok(Self { + chain_id: ChainId::arbitrary(u)?, + nonce: u64::arbitrary(u)?, + gas_limit: u64::arbitrary(u)?, + max_fee_per_gas: u128::arbitrary(u)?, + max_priority_fee_per_gas: u128::arbitrary(u)?, + // Should always be Some for TxEip4844 + placeholder: Some(()), + to: Address::arbitrary(u)?, + value: U256::arbitrary(u)?, + access_list: AccessList::arbitrary(u)?, + blob_versioned_hashes: Vec::<B256>::arbitrary(u)?, + max_fee_per_blob_gas: u128::arbitrary(u)?, + input: Bytes::arbitrary(u)?, + }) + } +} + +#[cfg(feature = "test-utils")] +fn serialize_placeholder<S>(value: &Option<()>, serializer: S) -> Result<S::Ok, S::Error> +where + S: serde::Serializer, +{ + // Required because `serde_json` would otherwise serialize it as null, which decodes back + // to `None`. + match value { + Some(()) => serializer.serialize_str("placeholder"), // Custom serialization + None => serializer.serialize_none(), + } +} + +#[cfg(feature = "test-utils")] +fn deserialize_placeholder<'de, D>(deserializer: D) -> Result<Option<()>, D::Error> +where + D: serde::Deserializer<'de>, +{ + use serde::de::Deserialize; + let s: Option<alloc::string::String> = Option::deserialize(deserializer)?; + match s.as_deref() { + Some("placeholder") => Ok(Some(())), + None => Ok(None), + _ => Err(serde::de::Error::custom("unexpected value")), + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/storage/codecs/src/alloy/transaction/eip7702.rs b/crates/storage/codecs/src/alloy/transaction/eip7702.rs index e714be1c3f..eab10af0b6 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip7702.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip7702.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyTxEip7702`] + use crate::Compact; use alloc::vec::Vec; use alloy_consensus::TxEip7702 as AlloyTxEip7702; @@ -14,8 +16,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip7702`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxEip7702 { chain_id: ChainId, nonce: u64, diff --git a/crates/storage/codecs/src/alloy/transaction/legacy.rs b/crates/storage/codecs/src/alloy/transaction/legacy.rs index 27e799a790..60250ba64a 100644 --- a/crates/storage/codecs/src/alloy/transaction/legacy.rs +++ b/crates/storage/codecs/src/alloy/transaction/legacy.rs @@ -1,11 +1,18 @@ +//! Compact implementation for [`AlloyTxLegacy`] + use crate::Compact; use alloy_consensus::TxLegacy as AlloyTxLegacy; use alloy_primitives::{Bytes, ChainId, TxKind, U256}; /// Legacy transaction.
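Why the custom (de)serializer above is needed: with derived serde, `Some(())` and `None` both render as JSON `null`, so a round-trip silently drops the placeholder bit. A hypothetical self-contained check of that behavior (not code from this patch):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Plain {
    placeholder: Option<()>,
}

fn main() {
    // `Some(())` collapses to null on the wire...
    let json = serde_json::to_string(&Plain { placeholder: Some(()) }).unwrap();
    assert_eq!(json, r#"{"placeholder":null}"#);

    // ...so decoding turns it back into `None`, which the "placeholder"
    // string representation in the patch prevents.
    let back: Plain = serde_json::from_str(&json).unwrap();
    assert!(back.placeholder.is_none());
}
```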
#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[cfg_attr(test, crate::add_arbitrary_tests(compact))] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize), + crate::add_arbitrary_tests(crate, compact) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] pub(crate) struct TxLegacy { /// Added as EIP-155: Simple replay attack protection chain_id: Option<ChainId>, diff --git a/crates/storage/codecs/src/alloy/transaction/mod.rs b/crates/storage/codecs/src/alloy/transaction/mod.rs index 86edfee5bc..dc27eacfac 100644 --- a/crates/storage/codecs/src/alloy/transaction/mod.rs +++ b/crates/storage/codecs/src/alloy/transaction/mod.rs @@ -1,10 +1,18 @@ -pub(crate) mod eip1559; -pub(crate) mod eip2930; -pub(crate) mod eip4844; -pub(crate) mod eip7702; -pub(crate) mod legacy; -#[cfg(feature = "optimism")] -pub(crate) mod optimism; +//! Compact implementation for transaction types + +cond_mod!( + eip1559, + eip2930, + eip4844, + eip7702, + legacy +); + + +#[cfg(all(feature = "test-utils", feature = "optimism"))] +pub mod optimism; +#[cfg(all(not(feature = "test-utils"), feature = "optimism"))] +mod optimism; #[cfg(test)] mod tests { @@ -15,11 +23,13 @@ mod tests { // this check is to ensure we do not inadvertently add too many fields to a struct which would // expand the flags field and break backwards compatibility - #[cfg(feature = "optimism")] - use crate::alloy::transaction::optimism::TxDeposit; - use crate::alloy::transaction::{ - eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702, - legacy::TxLegacy, + use alloy_primitives::hex; + use crate::{ + alloy::{header::Header, transaction::{ + eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702, + legacy::TxLegacy, + }}, + test_utils::test_decode, }; #[test] @@ -34,6 +44,56 @@ mod tests { #[cfg(feature = "optimism")] #[test] fn test_ensure_backwards_compatibility_optimism() { - assert_eq!(TxDeposit::bitflag_encoded_bytes(), 2); + assert_eq!(crate::alloy::transaction::optimism::TxDeposit::bitflag_encoded_bytes(), 2); + } + + #[test] + fn test_decode_header() { + test_decode::<Header>
(&hex!( "01000000fbbb564baeafd064b979c2ac032df5cd987098066a8c6969514dfb8ecfbf043e667fa19efcc00d1dd197c309a3cc42dec820cd627af8f7f38f3274f842406891b22624431d0ea858422db8415b1181f8d19befbd21287debaf98a94e84b3ec20be846f35abfbf743ee3eda4fdda6a6f9124d295da97e26eaa1cedd09936f0a3c560b6bc10316dba5e82abd21afcf519a985feb09a6ce7fba2e8163b10f06c99828b8049c29b993d88d1d112dca60a03ebd8ebc6d69a7e1f301ca6d67c21fe0949d67bca251edf36c96a2cf7c84d98fc60a53988ac95820f434eb35280d98c8ba4d7484e7ee8fefd63591ad4c937ccaaea23871d05c77bac754c5759b34cf9b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" )); + } + + #[test] + fn test_decode_eip1559() { + test_decode::<TxEip1559>(&hex!( + "88086110b81b05bc5bb59ec3e4cd44e895a9dcb2656d5003e2f64ecb2e15443898cc1cc19af19ca96fc2b4eafc4abc26e4bbd70a3ddb10b7530b65eea128f4095c97164f712c04239902c1b08acf3949d4687123cdd72d5c73df113d2dc6ed7e519f410ace5553ca805975240a208b57013532de78c5cb407423ea11921ab11b13e93ef35d4d01c9a23166c4d627987545fe4675528d0ab111b0a1dc83fba0a4e1cd5c826a94db3f" + )); + } + + #[test] + fn test_decode_eip2930() { + test_decode::<TxEip2930>(&hex!( + "7810833fce14e3e2921e94fd3727eb71e91551d2c1e029697a654bfab510f3963aa57074015e152065d1c807f8830079fb0aeadc251d248eaec7147e78580ed638c4e667827775e24270edd5aad475776533ece65373afa71722bfeba3c900" + )); + } + + #[test] + fn test_decode_eip4844() { + test_decode::<TxEip4844>(&hex!( + "88086110025c359180ea680b5007c856f9e1ad4d1be7a5019feb42133f4fc4bdf74da1b457ab787462385a28a1bf8edb401adabf3ff21ac18f695e30180348ea67246fc4dc25e88add12b7c317651a0ce08946d98dbbe5b38883aa758a0f247e23b0fe3ac1bcc43d7212c984d6ccc770d70135890c9a07d715cacb9032c90d539d0b3d209a8d600178bcfb416fd489e5d5dd56d9cfc6addae810ae70bdaee65672b871dc2b3f35ec00dbaa0d872f78cb58b3199984c608c8ba" + )); + } + + #[test] + fn test_decode_eip7702() { + test_decode::<TxEip7702>(&hex!( + "8808210881415c034feba383d7a6efd3f2601309b33a6d682ad47168cac0f7a5c5136a33370e5e7ca7f570d5530d7a0d18bf5eac33583fdc27b6580f61e8cbd34d6de596f925c1f353188feb2c1e9e20de82a80b57f0be425d8c5896280d4f5f66cdcfba256d0c9ac8abd833859a62ec019501b4585fa176f048de4f88b93bdefecfcaf4d8f0dd04767bc683a4569c893632e44ba9d53f90d758125c9b24c0192a649166520cd5eecbc110b53eda400cf184b8ef9932c81d0deb2ea27dfa863392a87bfd53af3ec67379f20992501e76e387cbe3933861beead1b49649383cf8b2a2d5c6d04b7edc376981ed9b12cf7199fe7fabf5198659e001bed40922969b82a6cd000000000000" + )); + } + + #[test] + fn test_decode_legacy() { + test_decode::<TxLegacy>(&hex!( + "112210080a8ba06a8d108540bb3140e9f71a0812c46226f9ea77ae880d98d19fe27e5911801175c3b32620b2e887af0296af343526e439b775ee3b1c06750058e9e5fc4cd5965c3010f86184" + )); + } + + #[cfg(feature = "optimism")] + #[test] + fn test_decode_deposit() { + test_decode::<crate::alloy::transaction::optimism::TxDeposit>(&hex!( + "8108ac8f15983d59b6ae4911a00ff7bfcd2e53d2950926f8c82c12afad02861c46fcb293e776204052725e1c08ff2e9ff602ca916357601fa972a14094891fe3598b718758f22c46f163c18bcaa6296ce87e5267ef3fd932112842fbbf79011548cdf067d93ce6098dfc0aaf5a94531e439f30d6dfd0c6" + )); } } diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs index f4fdcf5ee4..bb970b5817 --- 
a/crates/storage/codecs/src/alloy/transaction/optimism.rs +++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyTxDeposit`] + use crate::Compact; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use op_alloy_consensus::TxDeposit as AlloyTxDeposit; @@ -12,8 +14,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`op_alloy_consensus::TxDeposit`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[reth_codecs(crate = "crate")] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxDeposit { source_hash: B256, from: Address, diff --git a/crates/storage/codecs/src/alloy/trie.rs b/crates/storage/codecs/src/alloy/trie.rs index c89ef0bf6e..cc5273dd02 100644 --- a/crates/storage/codecs/src/alloy/trie.rs +++ b/crates/storage/codecs/src/alloy/trie.rs @@ -1,15 +1,18 @@ -//! Native Compact codec impl for EIP-7685 requests. +//! Native Compact codec impl for alloy-trie types. use crate::Compact; use alloc::vec::Vec; use alloy_primitives::B256; -use alloy_trie::{hash_builder::HashBuilderValue, BranchNodeCompact, TrieMask}; +use alloy_trie::{ + hash_builder::{HashBuilderValue, HashBuilderValueRef}, + BranchNodeCompact, TrieMask, +}; use bytes::{Buf, BufMut}; -/// Identifier for [`HashBuilderValue::Hash`] +/// Identifier for [`HashBuilderValueRef::Hash`] const HASH_BUILDER_TYPE_HASH: u8 = 0; -/// Identifier for [`HashBuilderValue::Bytes`] +/// Identifier for [`HashBuilderValueRef::Bytes`] const HASH_BUILDER_TYPE_BYTES: u8 = 1; impl Compact for HashBuilderValue { @@ -17,34 +20,34 @@ impl Compact for HashBuilderValue { where B: BufMut + AsMut<[u8]>, { - match self { - Self::Hash(hash) => { + match self.as_ref() { + HashBuilderValueRef::Hash(hash) => { buf.put_u8(HASH_BUILDER_TYPE_HASH); 1 + hash.to_compact(buf) } - Self::Bytes(bytes) => { + HashBuilderValueRef::Bytes(bytes) => { buf.put_u8(HASH_BUILDER_TYPE_BYTES); 1 + bytes.to_compact(buf) } } } - // # Panics - // - // A panic will be triggered if a HashBuilderValue variant greater than 1 is passed from the - // database. fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { - match buf.get_u8() { + let mut this = Self::default(); + let buf = match buf.get_u8() { HASH_BUILDER_TYPE_HASH => { let (hash, buf) = B256::from_compact(buf, 32); - (Self::Hash(hash), buf) + this.set_from_ref(HashBuilderValueRef::Hash(&hash)); + buf } HASH_BUILDER_TYPE_BYTES => { let (bytes, buf) = Vec::from_compact(buf, 0); - (Self::Bytes(bytes), buf) + this.set_bytes_owned(bytes); + buf } _ => unreachable!("Junk data in database: unknown HashBuilderValue variant"), - } + }; + (this, buf) } } diff --git a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs index 16324c280c..8aa5671798 100644 --- a/crates/storage/codecs/src/alloy/withdrawal.rs +++ b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -1,3 +1,5 @@ +//! 
Compact implementation for [`AlloyWithdrawal`] + use crate::Compact; use alloy_eips::eip4895::Withdrawal as AlloyWithdrawal; use alloy_primitives::Address; @@ -7,8 +9,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with `alloy_eips::eip4895::Withdrawal` #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[reth_codecs(crate = "crate")] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct Withdrawal { /// Monotonically increasing identifier issued by consensus layer. index: u64, diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 8608c5eb8c..284c6454f8 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -18,6 +18,7 @@ #![cfg_attr(not(feature = "std"), no_std)] pub use reth_codecs_derive::*; +use serde as _; use alloy_primitives::{Address, Bloom, Bytes, FixedBytes, U256}; use bytes::{Buf, BufMut}; @@ -25,6 +26,10 @@ use bytes::{Buf, BufMut}; extern crate alloc; use alloc::vec::Vec; +#[cfg(feature = "test-utils")] +pub mod alloy; + +#[cfg(not(feature = "test-utils"))] #[cfg(any(test, feature = "alloy"))] mod alloy; @@ -48,6 +53,12 @@ pub mod test_utils; /// Regarding the `specialized_to/from_compact` methods: Mainly used as a workaround for not being /// able to specialize an impl over certain types like `Vec<T>`/`Option<T>` where `T` is a fixed /// size array like `Vec<B256>`. +/// +/// ## Caution +/// +/// Due to the bitfields, every type change on the rust type (e.g. `U256` to `u64`) is a breaking +/// change and will lead to a new, incompatible [`Compact`] implementation. Implementers must take +/// special care when changing or rearranging fields. pub trait Compact: Sized { /// Takes a buffer which can be written to. *Ideally*, it returns the length written to.
fn to_compact(&self, buf: &mut B) -> usize @@ -78,6 +89,21 @@ pub trait Compact: Sized { } } +impl Compact for alloc::string::String { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + self.as_bytes().to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (vec, buf) = Vec::::from_compact(buf, len); + let string = Self::from_utf8(vec).unwrap(); // Safe conversion + (string, buf) + } +} + impl Compact for &T { fn to_compact(&self, buf: &mut B) -> usize where @@ -484,7 +510,7 @@ mod tests { #[test] fn compact_address() { - let mut buf = vec![]; + let mut buf = Vec::with_capacity(21); assert_eq!(Address::ZERO.to_compact(&mut buf), 20); assert_eq!(buf, vec![0; 20]); @@ -636,7 +662,8 @@ mod tests { } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Compact, arbitrary::Arbitrary)] - #[add_arbitrary_tests(compact)] + #[add_arbitrary_tests(crate, compact)] + #[reth_codecs(crate = "crate")] struct TestStruct { f_u64: u64, f_u256: U256, @@ -688,7 +715,8 @@ mod tests { #[derive( Debug, PartialEq, Clone, Default, Serialize, Deserialize, Compact, arbitrary::Arbitrary, )] - #[add_arbitrary_tests(compact)] + #[add_arbitrary_tests(crate, compact)] + #[reth_codecs(crate = "crate")] enum TestEnum { #[default] Var0, diff --git a/crates/storage/codecs/src/test_utils.rs b/crates/storage/codecs/src/test_utils.rs index bb377c6916..b845645cb1 100644 --- a/crates/storage/codecs/src/test_utils.rs +++ b/crates/storage/codecs/src/test_utils.rs @@ -79,3 +79,12 @@ impl UnusedBits { matches!(self, Self::NotZero) } } + +/// Tests decoding and re-encoding to ensure correctness. +pub fn test_decode(buf: &[u8]) { + let (decoded, _) = T::from_compact(buf, buf.len()); + let mut encoded = Vec::with_capacity(buf.len()); + + decoded.to_compact(&mut encoded); + assert_eq!(buf, &encoded[..]); +} diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 0797fe8597..ed88871098 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -57,11 +57,30 @@ proptest.workspace = true proptest-arbitrary-interop.workspace = true [features] -test-utils = ["arbitrary"] +test-utils = [ + "arbitrary", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-codecs/test-utils", + "reth-db-models/test-utils", + "reth-trie-common/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils" +] arbitrary = [ - "reth-primitives/arbitrary", - "reth-db-models/arbitrary", - "dep:arbitrary", - "dep:proptest", + "reth-primitives/arbitrary", + "reth-db-models/arbitrary", + "dep:arbitrary", + "dep:proptest", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-primitives/arbitrary", + "parity-scale-codec/arbitrary", + "reth-codecs/arbitrary", + "reth-prune-types/arbitrary", + "reth-stages-types/arbitrary" +] +optimism = [ + "reth-primitives/optimism", + "reth-codecs/optimism" ] -optimism = ["reth-primitives/optimism"] diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index 585aa4947a..9297f738ab 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -152,12 +152,7 @@ where impl> Iterator for Walker<'_, T, CURSOR> { type Item = Result, DatabaseError>; fn next(&mut self) -> Option { - let start = self.start.take(); - if start.is_some() { - return start - } - - self.cursor.next().transpose() + self.start.take().or_else(|| self.cursor.next().transpose()) } } diff --git 
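The new `String` impl above delegates to the `Vec<u8>` codec and rebuilds the string with a checked conversion, replacing the `from_utf8_unchecked` calls that `ClientVersion` used further down. A hypothetical standalone equivalent, with plain functions instead of the trait:

```rust
fn encode_string(s: &str, buf: &mut Vec<u8>) -> usize {
    // A `String` is persisted simply as its UTF-8 bytes.
    buf.extend_from_slice(s.as_bytes());
    s.len()
}

fn decode_string(buf: &[u8], len: usize) -> (String, &[u8]) {
    let (bytes, rest) = buf.split_at(len);
    // The bytes were produced from a valid `&str`, but checked decoding is
    // still preferable to `from_utf8_unchecked` on data read back from disk.
    (String::from_utf8(bytes.to_vec()).expect("valid UTF-8"), rest)
}

fn main() {
    let mut buf = Vec::new();
    let written = encode_string("reth/v1.0.0", &mut buf);
    let (decoded, rest) = decode_string(&buf, written);
    assert_eq!(decoded, "reth/v1.0.0");
    assert!(rest.is_empty());
}
```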
a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 0b38eb9e50..6858725a43 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -8,8 +8,8 @@ use alloy_genesis::GenesisAccount; use alloy_primitives::{bytes::BufMut, Address, Bytes, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; use reth_primitives::{ - parlia::Snapshot, Account, BlobSidecar, BlobSidecars, Bytecode, Header, Receipt, Requests, - StorageEntry, TransactionSignedNoHash, TxType, + parlia::Snapshot, Account, BlobSidecar, BlobSidecars, Bytecode, Header, Receipt, StorageEntry, + TransactionSignedNoHash, TxType, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; @@ -250,7 +250,6 @@ impl_compression_for_compact!( StageCheckpoint, PruneCheckpoint, ClientVersion, - Requests, BlobSidecar, BlobSidecars, // Non-DB @@ -393,7 +392,6 @@ mod tests { validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(Withdrawals, UnusedBits::Zero); - validate_bitflag_backwards_compat!(Requests, UnusedBits::Zero); } #[test] diff --git a/crates/storage/db-api/src/models/storage_sharded_key.rs b/crates/storage/db-api/src/models/storage_sharded_key.rs index 5fd79ba655..a7a1ffb71b 100644 --- a/crates/storage/db-api/src/models/storage_sharded_key.rs +++ b/crates/storage/db-api/src/models/storage_sharded_key.rs @@ -12,6 +12,10 @@ use super::ShardedKey; /// Number of indices in one shard. pub const NUM_OF_INDICES_IN_SHARD: usize = 2_000; +/// The size of [`StorageShardedKey`] encode bytes. +/// The fields are: 20-byte address, 32-byte key, and 8-byte block number +const STORAGE_SHARD_KEY_BYTES_SIZE: usize = 20 + 32 + 8; + /// Sometimes data can be too big to be saved for a single key. This helps out by dividing the data /// into different shards. 
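The `StorageShardedKey` changes just below make the key layout explicit: a 20-byte address, a 32-byte storage key, and an 8-byte big-endian block number, 60 bytes in total, with `decode` rejecting any other length before it slices. A sketch of the same fixed layout, using raw byte arrays in place of `Address`/`B256`:

```rust
// 20-byte address ++ 32-byte storage key ++ 8-byte big-endian block number.
const KEY_SIZE: usize = 20 + 32 + 8;

fn encode(address: [u8; 20], key: [u8; 32], block: u64) -> Vec<u8> {
    let mut buf = Vec::with_capacity(KEY_SIZE);
    buf.extend_from_slice(&address);
    buf.extend_from_slice(&key);
    buf.extend_from_slice(&block.to_be_bytes());
    buf
}

fn decode(value: &[u8]) -> Result<([u8; 20], [u8; 32], u64), &'static str> {
    // Reject malformed input up front instead of panicking while slicing.
    if value.len() != KEY_SIZE {
        return Err("decode error: unexpected key length");
    }
    let address: [u8; 20] = value[..20].try_into().unwrap();
    let key: [u8; 32] = value[20..52].try_into().unwrap();
    let block = u64::from_be_bytes(value[52..].try_into().unwrap());
    Ok((address, key, block))
}

fn main() {
    let encoded = encode([0x11; 20], [0x22; 32], 7);
    assert_eq!(encoded.len(), KEY_SIZE);
    assert!(decode(&encoded[..KEY_SIZE - 1]).is_err());
    assert_eq!(decode(&encoded).unwrap().2, 7);
}
```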
Example: /// @@ -53,7 +57,8 @@ impl Encode for StorageShardedKey { type Encoded = Vec<u8>; fn encode(self) -> Self::Encoded { - let mut buf: Vec<u8> = Encode::encode(self.address).into(); + let mut buf: Vec<u8> = Vec::with_capacity(STORAGE_SHARD_KEY_BYTES_SIZE); + buf.extend_from_slice(&Encode::encode(self.address)); buf.extend_from_slice(&Encode::encode(self.sharded_key.key)); buf.extend_from_slice(&self.sharded_key.highest_block_number.to_be_bytes()); buf @@ -62,6 +67,9 @@ impl Decode for StorageShardedKey { fn decode(value: &[u8]) -> Result<Self, DatabaseError> { + if value.len() != STORAGE_SHARD_KEY_BYTES_SIZE { + return Err(DatabaseError::Decode) + } let tx_num_index = value.len() - 8; let highest_tx_number = u64::from_be_bytes( diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 7fc4879698..9e4954357f 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -42,6 +42,7 @@ tracing.workspace = true [dev-dependencies] reth-primitives-traits.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } +alloy-consensus.workspace = true [lints] workspace = true diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 16f9533f78..7d201f13d1 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -585,6 +585,7 @@ struct GenesisAccountWithAddress { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::constants::{MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; use alloy_genesis::Genesis; use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA}; use reth_db::DatabaseEnv; @@ -595,7 +596,7 @@ mod tests { transaction::DbTx, Database, }; - use reth_primitives::{HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; + use reth_primitives::HOLESKY_GENESIS_HASH; use reth_primitives_traits::IntegerList; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index 9bcd54f386..44b291959b 100644 --- a/crates/storage/db-models/Cargo.toml +++ b/crates/storage/db-models/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-codecs.workspace = true -reth-primitives = { workspace = true, features = ["reth-codec"] } +reth-primitives-traits.workspace = true # ethereum alloy-primitives.workspace = true @@ -32,18 +32,24 @@ proptest = { workspace = true, optional = true } [dev-dependencies] # reth -reth-primitives = { workspace = true, features = ["arbitrary"] } +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-codecs.workspace = true - arbitrary = { workspace = true, features = ["derive"] } -proptest-arbitrary-interop.workspace = true + proptest.workspace = true +proptest-arbitrary-interop.workspace = true test-fuzz.workspace = true [features] -test-utils = ["arbitrary"] +test-utils = [ + "reth-primitives-traits/test-utils", + "arbitrary", + "reth-codecs/test-utils" ] arbitrary = [ - "reth-primitives/arbitrary", - "dep:arbitrary", - "dep:proptest", + "reth-primitives-traits/arbitrary", + "dep:arbitrary", + "dep:proptest", + "alloy-primitives/arbitrary", + "reth-codecs/arbitrary" ] diff --git a/crates/storage/db-models/src/accounts.rs b/crates/storage/db-models/src/accounts.rs index e1f4773960..29a5cf3059 100644 --- a/crates/storage/db-models/src/accounts.rs +++ b/crates/storage/db-models/src/accounts.rs @@ -2,13 +2,13 @@ use 
reth_codecs::{add_arbitrary_tests, Compact}; use serde::Serialize; use alloy_primitives::{bytes::Buf, Address}; -use reth_primitives::Account; +use reth_primitives_traits::Account; /// Account as it is saved in the database. /// /// [`Address`] is the subkey. #[derive(Debug, Default, Clone, Eq, PartialEq, Serialize)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub struct AccountBeforeTx { /// Address for the account. Acts as `DupSort::SubKey`. @@ -39,13 +39,11 @@ impl Compact for AccountBeforeTx { let address = Address::from_slice(&buf[..20]); buf.advance(20); - let info = if len - 20 > 0 { + let info = (len - 20 > 0).then(|| { let (acc, advanced_buf) = Account::from_compact(buf, len - 20); buf = advanced_buf; - Some(acc) - } else { - None - }; + acc + }); (Self { address, info }, buf) } diff --git a/crates/storage/db-models/src/blocks.rs b/crates/storage/db-models/src/blocks.rs index 3e740a2e1a..b4399dc1e2 100644 --- a/crates/storage/db-models/src/blocks.rs +++ b/crates/storage/db-models/src/blocks.rs @@ -2,7 +2,7 @@ use std::ops::Range; use alloy_primitives::TxNumber; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::Withdrawals; +use reth_primitives_traits::Withdrawals; use serde::{Deserialize, Serialize}; /// Total number of transactions. diff --git a/crates/storage/db-models/src/client_version.rs b/crates/storage/db-models/src/client_version.rs index de074ac88c..a28e7385f6 100644 --- a/crates/storage/db-models/src/client_version.rs +++ b/crates/storage/db-models/src/client_version.rs @@ -28,20 +28,16 @@ impl Compact for ClientVersion { where B: bytes::BufMut + AsMut<[u8]>, { - self.version.as_bytes().to_compact(buf); - self.git_sha.as_bytes().to_compact(buf); - self.build_timestamp.as_bytes().to_compact(buf) + self.version.to_compact(buf); + self.git_sha.to_compact(buf); + self.build_timestamp.to_compact(buf) } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (version, buf) = Vec::<u8>::from_compact(buf, len); - let (git_sha, buf) = Vec::<u8>::from_compact(buf, len); - let (build_timestamp, buf) = Vec::<u8>::from_compact(buf, len); - let client_version = Self { - version: unsafe { String::from_utf8_unchecked(version) }, - git_sha: unsafe { String::from_utf8_unchecked(git_sha) }, - build_timestamp: unsafe { String::from_utf8_unchecked(build_timestamp) }, - }; + let (version, buf) = String::from_compact(buf, len); + let (git_sha, buf) = String::from_compact(buf, len); + let (build_timestamp, buf) = String::from_compact(buf, len); + let client_version = Self { version, git_sha, build_timestamp }; (client_version, buf) } } diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index a075f77246..324411613f 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -58,7 +58,6 @@ strum = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] # reth libs with arbitrary reth-primitives = { workspace = true, features = ["arbitrary"] } -rand.workspace = true serde_json.workspace = true tempfile.workspace = true test-fuzz.workspace = true @@ -90,10 +89,32 @@ mdbx = [ "dep:strum", "dep:rustc-hash", ] -test-utils = ["dep:tempfile", "arbitrary", "parking_lot"] +test-utils = [ + "dep:tempfile", + "arbitrary", + "parking_lot", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-db-api/test-utils", + "reth-nippy-jar/test-utils", 
+ "reth-trie-common/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils" +] bench = [] -arbitrary = ["reth-primitives/arbitrary", "reth-db-api/arbitrary"] -optimism = [] +arbitrary = [ + "reth-primitives/arbitrary", + "reth-db-api/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-primitives/arbitrary", + "reth-prune-types/arbitrary", + "reth-stages-types/arbitrary" +] +optimism = [ + "reth-primitives/optimism", + "reth-db-api/optimism" +] disable-lock = [] [[bench]] diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index aebc04ccfb..8e3dad4659 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -23,7 +23,7 @@ use reth_libmdbx::{ use reth_storage_errors::db::LogLevel; use reth_tracing::tracing::error; use std::{ - ops::Deref, + ops::{Deref, Range}, path::Path, sync::Arc, time::{SystemTime, UNIX_EPOCH}, @@ -33,8 +33,14 @@ use tx::Tx; pub mod cursor; pub mod tx; -const GIGABYTE: usize = 1024 * 1024 * 1024; -const TERABYTE: usize = GIGABYTE * 1024; +/// 1 KB in bytes +pub const KILOBYTE: usize = 1024; +/// 1 MB in bytes +pub const MEGABYTE: usize = KILOBYTE * 1024; +/// 1 GB in bytes +pub const GIGABYTE: usize = MEGABYTE * 1024; +/// 1 TB in bytes +pub const TERABYTE: usize = GIGABYTE * 1024; /// MDBX allows up to 32767 readers (`MDBX_READERS_LIMIT`), but we limit it to slightly below that const DEFAULT_MAX_READERS: u64 = 32_000; @@ -64,6 +70,8 @@ impl DatabaseEnvKind { pub struct DatabaseArguments { /// Client version that accesses the database. client_version: ClientVersion, + /// Database geometry settings. + geometry: Geometry>, /// Database log level. If [None], the default value is used. log_level: Option, /// Maximum duration of a read transaction. If [None], the default value is used. @@ -93,15 +101,37 @@ pub struct DatabaseArguments { impl DatabaseArguments { /// Create new database arguments with given client version. - pub const fn new(client_version: ClientVersion) -> Self { + pub fn new(client_version: ClientVersion) -> Self { Self { client_version, + geometry: Geometry { + size: Some(0..(8 * TERABYTE)), + growth_step: Some(4 * GIGABYTE as isize), + shrink_threshold: Some(0), + page_size: Some(PageSize::Set(default_page_size())), + }, log_level: None, max_read_transaction_duration: None, exclusive: None, } } + /// Sets the upper size limit of the db environment, the maximum database size in bytes. + pub const fn with_geometry_max_size(mut self, max_size: Option) -> Self { + if let Some(max_size) = max_size { + self.geometry.size = Some(0..max_size); + } + self + } + + /// Configures the database growth step in bytes. + pub const fn with_growth_step(mut self, growth_step: Option) -> Self { + if let Some(growth_step) = growth_step { + self.geometry.growth_step = Some(growth_step as isize); + } + self + } + /// Set the log level. pub const fn with_log_level(mut self, log_level: Option) -> Self { self.log_level = log_level; @@ -256,10 +286,9 @@ impl DatabaseEnv { args: DatabaseArguments, ) -> Result { let _lock_file = if kind.is_rw() { - Some( - StorageLock::try_acquire(path) - .map_err(|err| DatabaseError::Other(err.to_string()))?, - ) + StorageLock::try_acquire(path) + .map_err(|err| DatabaseError::Other(err.to_string()))? + .into() } else { None }; @@ -279,15 +308,7 @@ impl DatabaseEnv { // environment creation. 
debug_assert!(Tables::ALL.len() <= 256, "number of tables exceed max dbs"); inner_env.set_max_dbs(256); - inner_env.set_geometry(Geometry { - // Maximum database size of 8 terabytes - size: Some(0..(8 * TERABYTE)), - // We grow the database in increments of 4 gigabytes - growth_step: Some(4 * GIGABYTE as isize), - // The database never shrinks - shrink_threshold: Some(0), - page_size: Some(PageSize::Set(default_page_size())), - }); + inner_env.set_geometry(args.geometry); fn is_current_process(id: u32) -> bool { #[cfg(unix)] diff --git a/crates/storage/db/src/lockfile.rs b/crates/storage/db/src/lockfile.rs index 6dc063a167..a87ab7393f 100644 --- a/crates/storage/db/src/lockfile.rs +++ b/crates/storage/db/src/lockfile.rs @@ -30,31 +30,35 @@ impl StorageLock { /// Note: In-process exclusivity is not on scope. If called from the same process (or another /// with the same PID), it will succeed. pub fn try_acquire(path: &Path) -> Result { - let file_path = path.join(LOCKFILE_NAME); - #[cfg(feature = "disable-lock")] { + let file_path = path.join(LOCKFILE_NAME); // Too expensive for ef-tests to write/read lock to/from disk. Ok(Self(Arc::new(StorageLockInner { file_path }))) } #[cfg(not(feature = "disable-lock"))] - { - if let Some(process_lock) = ProcessUID::parse(&file_path)? { - if process_lock.pid != (process::id() as usize) && process_lock.is_active() { - error!( - target: "reth::db::lockfile", - path = ?file_path, - pid = process_lock.pid, - start_time = process_lock.start_time, - "Storage lock already taken." - ); - return Err(StorageLockError::Taken(process_lock.pid)) - } - } + Self::try_acquire_file_lock(path) + } - Ok(Self(Arc::new(StorageLockInner::new(file_path)?))) + /// Acquire a file write lock. + #[cfg(any(test, not(feature = "disable-lock")))] + fn try_acquire_file_lock(path: &Path) -> Result { + let file_path = path.join(LOCKFILE_NAME); + if let Some(process_lock) = ProcessUID::parse(&file_path)? { + if process_lock.pid != (process::id() as usize) && process_lock.is_active() { + error!( + target: "reth::db::lockfile", + path = ?file_path, + pid = process_lock.pid, + start_time = process_lock.start_time, + "Storage lock already taken." + ); + return Err(StorageLockError::Taken(process_lock.pid)) + } } + + Ok(Self(Arc::new(StorageLockInner::new(file_path)?))) } } @@ -164,10 +168,10 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); - let lock = StorageLock::try_acquire(temp_dir.path()).unwrap(); + let lock = StorageLock::try_acquire_file_lock(temp_dir.path()).unwrap(); // Same process can re-acquire the lock - assert_eq!(Ok(lock.clone()), StorageLock::try_acquire(temp_dir.path())); + assert_eq!(Ok(lock.clone()), StorageLock::try_acquire_file_lock(temp_dir.path())); // A lock of a non existent PID can be acquired. let lock_file = temp_dir.path().join(LOCKFILE_NAME); @@ -177,18 +181,21 @@ mod tests { fake_pid += 1; } ProcessUID { pid: fake_pid, start_time: u64::MAX }.write(&lock_file).unwrap(); - assert_eq!(Ok(lock.clone()), StorageLock::try_acquire(temp_dir.path())); + assert_eq!(Ok(lock.clone()), StorageLock::try_acquire_file_lock(temp_dir.path())); let mut pid_1 = ProcessUID::new(1).unwrap(); // If a parsed `ProcessUID` exists, the lock can NOT be acquired. 
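Factoring the body into `try_acquire_file_lock` isolates the acquisition rule that the lockfile tests above exercise: no lockfile, the current PID, or a dead holder all allow takeover, while a live foreign PID is refused. A minimal sketch of just that rule; `ProcessId`/`is_active` are illustrative stand-ins, and the real code additionally compares process start times to disambiguate reused PIDs.

```rust
// Stand-in for the parsed lockfile contents.
struct ProcessId {
    pid: u32,
}

impl ProcessId {
    fn is_active(&self) -> bool {
        // Platform-specific liveness check in the real code; stubbed out here.
        false
    }
}

/// Returns `Ok` when the lock may be (re)taken.
fn may_acquire(existing: Option<&ProcessId>) -> Result<(), String> {
    if let Some(holder) = existing {
        if holder.pid != std::process::id() && holder.is_active() {
            return Err(format!("storage lock already taken by pid {}", holder.pid));
        }
    }
    Ok(())
}

fn main() {
    // No lockfile on disk: acquisition succeeds.
    assert!(may_acquire(None).is_ok());
    // A holder whose process is gone can be overridden.
    assert!(may_acquire(Some(&ProcessId { pid: 1 })).is_ok());
}
```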
pid_1.write(&lock_file).unwrap(); - assert_eq!(Err(StorageLockError::Taken(1)), StorageLock::try_acquire(temp_dir.path())); + assert_eq!( + Err(StorageLockError::Taken(1)), + StorageLock::try_acquire_file_lock(temp_dir.path()) + ); // A lock of a different but existing PID can be acquired ONLY IF the start_time differs. pid_1.start_time += 1; pid_1.write(&lock_file).unwrap(); - assert_eq!(Ok(lock), StorageLock::try_acquire(temp_dir.path())); + assert_eq!(Ok(lock), StorageLock::try_acquire_file_lock(temp_dir.path())); } #[test] @@ -198,7 +205,7 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let lock_file = temp_dir.path().join(LOCKFILE_NAME); - let lock = StorageLock::try_acquire(temp_dir.path()).unwrap(); + let lock = StorageLock::try_acquire_file_lock(temp_dir.path()).unwrap(); assert!(lock_file.exists()); drop(lock); diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 43550b6376..36e5b146de 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -31,8 +31,7 @@ use reth_db_api::{ table::{Decode, DupSort, Encode, Table}, }; use reth_primitives::{ - parlia::Snapshot, Account, Bytecode, Header, Receipt, Requests, StorageEntry, - TransactionSignedNoHash, + parlia::Snapshot, Account, Bytecode, Header, Receipt, StorageEntry, TransactionSignedNoHash, }; use reth_primitives_traits::{BlobSidecars, IntegerList}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; @@ -408,9 +407,6 @@ tables! { /// Stores the history of client versions that have accessed the database with write privileges by unix timestamp in seconds. table VersionHistory; - /// Stores EIP-7685 EL -> CL requests, indexed by block number. - table BlockRequests; - /// Stores generic chain state info, like the last finalized block. table ChainState; diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index 52c93ae4ef..ecefa5f6ac 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -25,4 +25,8 @@ derive_more.workspace = true [features] default = ["std"] -std = [] +std = [ + "reth-primitives/std", + "alloy-eips/std", + "alloy-primitives/std" +] diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index 3deff0c249..26cfef54d8 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -59,19 +59,18 @@ where } /// Returns an iterator over the raw key value slices. - #[allow(clippy::needless_lifetimes)] - pub fn iter_slices<'a>(&'a self) -> IntoIter<'a, K, Cow<'a, [u8]>, Cow<'a, [u8]>> { + pub fn iter_slices<'a>(self) -> IntoIter, Cow<'a, [u8]>> { self.into_iter() } /// Returns an iterator over database items. #[allow(clippy::should_implement_trait)] - pub fn into_iter(&self) -> IntoIter<'_, K, Key, Value> + pub fn into_iter(self) -> IntoIter where Key: TableObject, Value: TableObject, { - IntoIter::new(self.clone(), MDBX_NEXT, MDBX_NEXT) + IntoIter::new(self, MDBX_NEXT, MDBX_NEXT) } /// Retrieves a key/data pair from the cursor. Depending on the cursor op, @@ -508,7 +507,7 @@ unsafe impl Sync for Cursor where K: TransactionKind {} /// An iterator over the key/value pairs in an MDBX database. #[derive(Debug)] -pub enum IntoIter<'cur, K, Key, Value> +pub enum IntoIter where K: TransactionKind, Key: TableObject, @@ -535,11 +534,11 @@ where /// The next and subsequent operations to perform. 
next_op: ffi::MDBX_cursor_op, - _marker: PhantomData<(&'cur (), Key, Value)>, + _marker: PhantomData<(Key, Value)>, }, } -impl IntoIter<'_, K, Key, Value> +impl IntoIter where K: TransactionKind, Key: TableObject, @@ -547,11 +546,11 @@ where { /// Creates a new iterator backed by the given cursor. fn new(cursor: Cursor, op: ffi::MDBX_cursor_op, next_op: ffi::MDBX_cursor_op) -> Self { - IntoIter::Ok { cursor, op, next_op, _marker: Default::default() } + Self::Ok { cursor, op, next_op, _marker: Default::default() } } } -impl Iterator for IntoIter<'_, K, Key, Value> +impl Iterator for IntoIter where K: TransactionKind, Key: TableObject, @@ -747,13 +746,13 @@ where } } -impl<'cur, K, Key, Value> Iterator for IterDup<'cur, K, Key, Value> +impl Iterator for IterDup<'_, K, Key, Value> where K: TransactionKind, Key: TableObject, Value: TableObject, { - type Item = IntoIter<'cur, K, Key, Value>; + type Item = IntoIter; fn next(&mut self) -> Option { match self { diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index edf9321ace..6a0b210401 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -4,7 +4,7 @@ use crate::{ flags::EnvironmentFlags, transaction::{RO, RW}, txn_manager::{TxnManager, TxnManagerMessage, TxnPtr}, - Transaction, TransactionKind, + Mode, SyncMode, Transaction, TransactionKind, }; use byteorder::{ByteOrder, NativeEndian}; use mem::size_of; @@ -72,14 +72,14 @@ impl Environment { /// Returns true if the environment was opened in [`crate::Mode::ReadWrite`] mode. #[inline] - pub fn is_read_write(&self) -> bool { - self.inner.env_kind.is_write_map() + pub fn is_read_write(&self) -> Result { + Ok(!self.is_read_only()?) } /// Returns true if the environment was opened in [`crate::Mode::ReadOnly`] mode. #[inline] - pub fn is_read_only(&self) -> bool { - !self.inner.env_kind.is_write_map() + pub fn is_read_only(&self) -> Result { + Ok(matches!(self.info()?.mode(), Mode::ReadOnly)) } /// Returns the transaction manager. @@ -425,6 +425,23 @@ impl Info { fsync: self.0.mi_pgop_stat.fsync, } } + + /// Return the mode of the database + #[inline] + pub const fn mode(&self) -> Mode { + let mode = self.0.mi_mode; + if (mode & ffi::MDBX_RDONLY) != 0 { + Mode::ReadOnly + } else if (mode & ffi::MDBX_UTTERLY_NOSYNC) != 0 { + Mode::ReadWrite { sync_mode: SyncMode::UtterlyNoSync } + } else if (mode & ffi::MDBX_NOMETASYNC) != 0 { + Mode::ReadWrite { sync_mode: SyncMode::NoMetaSync } + } else if (mode & ffi::MDBX_SAFE_NOSYNC) != 0 { + Mode::ReadWrite { sync_mode: SyncMode::SafeNoSync } + } else { + Mode::ReadWrite { sync_mode: SyncMode::Durable } + } + } } impl fmt::Debug for Environment { @@ -472,8 +489,10 @@ pub struct PageOps { pub mincore: u64, } +/// Represents the geometry settings for the database environment #[derive(Clone, Debug, PartialEq, Eq)] pub struct Geometry { + /// The size range in bytes. pub size: Option, pub growth_step: Option, pub shrink_threshold: Option, @@ -781,15 +800,14 @@ impl EnvironmentBuilder { } /// Sets the interprocess/shared threshold to force flush the data buffers to disk, if - /// [`SyncMode::SafeNoSync`](crate::flags::SyncMode::SafeNoSync) is used. + /// [`SyncMode::SafeNoSync`] is used. 
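The new `Info::mode()` above recovers the environment mode from a flag word: read-only wins outright, then the no-sync variants are tested in order, with `Durable` as the fallback. A self-contained sketch of that precedence; the bit values here are made up, since the real constants come from the MDBX FFI bindings:

```rust
// Illustrative flag bits, not the real MDBX values.
const RDONLY: u32 = 1 << 0;
const UTTERLY_NOSYNC: u32 = 1 << 1;
const NOMETASYNC: u32 = 1 << 2;
const SAFE_NOSYNC: u32 = 1 << 3;

#[derive(Debug, PartialEq)]
enum Mode {
    ReadOnly,
    UtterlyNoSync,
    NoMetaSync,
    SafeNoSync,
    Durable,
}

fn mode_from_flags(flags: u32) -> Mode {
    // Read-only wins outright; otherwise check the weaker durability modes
    // first, falling back to fully durable writes.
    if flags & RDONLY != 0 {
        Mode::ReadOnly
    } else if flags & UTTERLY_NOSYNC != 0 {
        Mode::UtterlyNoSync
    } else if flags & NOMETASYNC != 0 {
        Mode::NoMetaSync
    } else if flags & SAFE_NOSYNC != 0 {
        Mode::SafeNoSync
    } else {
        Mode::Durable
    }
}

fn main() {
    assert_eq!(mode_from_flags(RDONLY | SAFE_NOSYNC), Mode::ReadOnly);
    assert_eq!(mode_from_flags(0), Mode::Durable);
}
```

This is also what makes the reworked `is_read_only`/`is_read_write` accurate: they now derive the answer from the environment info rather than from the write-map flag.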
pub fn set_sync_bytes(&mut self, v: usize) -> &mut Self { self.sync_bytes = Some(v as u64); self } /// Sets the interprocess/shared relative period since the last unsteady commit to force flush - /// the data buffers to disk, if [`SyncMode::SafeNoSync`](crate::flags::SyncMode::SafeNoSync) is - /// used. + /// the data buffers to disk, if [`SyncMode::SafeNoSync`] is used. pub fn set_sync_period(&mut self, v: Duration) -> &mut Self { // For this option, mdbx uses units of 1/65536 of a second. let as_mdbx_units = (v.as_secs_f64() * 65536f64) as u64; diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 88236ebe99..84b2dabc90 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -6,7 +6,7 @@ use crate::{ txn_manager::{TxnManagerMessage, TxnPtr}, Cursor, Error, Stat, TableObject, }; -use ffi::{mdbx_txn_renew, MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE}; +use ffi::{MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE}; use indexmap::IndexSet; use parking_lot::{Mutex, MutexGuard}; use std::{ @@ -18,6 +18,9 @@ use std::{ time::Duration, }; +#[cfg(feature = "read-tx-timeouts")] +use ffi::mdbx_txn_renew; + mod private { use super::*; diff --git a/crates/storage/libmdbx-rs/tests/environment.rs b/crates/storage/libmdbx-rs/tests/environment.rs index 99453ef113..007418f76b 100644 --- a/crates/storage/libmdbx-rs/tests/environment.rs +++ b/crates/storage/libmdbx-rs/tests/environment.rs @@ -128,6 +128,18 @@ fn test_info() { // assert_eq!(info.last_pgno(), 1); // assert_eq!(info.last_txnid(), 0); assert_eq!(info.num_readers(), 0); + assert!(matches!(info.mode(), Mode::ReadWrite { sync_mode: SyncMode::Durable })); + assert!(env.is_read_write().unwrap()); + + drop(env); + let env = Environment::builder() + .set_geometry(Geometry { size: Some(map_size..), ..Default::default() }) + .set_flags(EnvironmentFlags { mode: Mode::ReadOnly, ..Default::default() }) + .open(dir.path()) + .unwrap(); + let info = env.info().unwrap(); + assert!(matches!(info.mode(), Mode::ReadOnly)); + assert!(env.is_read_only().unwrap()); } #[test] diff --git a/crates/storage/nippy-jar/src/compression/mod.rs b/crates/storage/nippy-jar/src/compression/mod.rs index 28a92fe909..f9bf8110ee 100644 --- a/crates/storage/nippy-jar/src/compression/mod.rs +++ b/crates/storage/nippy-jar/src/compression/mod.rs @@ -44,7 +44,9 @@ pub trait Compression: Serialize + for<'a> Deserialize<'a> { #[derive(Debug, Serialize, Deserialize)] #[cfg_attr(test, derive(PartialEq))] pub enum Compressors { + /// Zstandard compression algorithm with custom settings. Zstd(Zstd), + /// LZ4 compression algorithm with custom settings. Lz4(Lz4), } diff --git a/crates/storage/nippy-jar/src/compression/zstd.rs b/crates/storage/nippy-jar/src/compression/zstd.rs index 500247d176..896a65bd70 100644 --- a/crates/storage/nippy-jar/src/compression/zstd.rs +++ b/crates/storage/nippy-jar/src/compression/zstd.rs @@ -12,10 +12,13 @@ pub use zstd::{bulk::Decompressor, dict::DecoderDictionary}; type RawDictionary = Vec; +/// Represents the state of a Zstandard compression operation. #[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)] pub enum ZstdState { + /// The compressor is pending a dictionary. #[default] PendingDictionary, + /// The compressor is ready to perform compression. Ready, } @@ -51,6 +54,7 @@ impl Zstd { } } + /// Sets the compression level for the Zstd compression instance. 
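The `with_level` setter just below only records the Zstandard compression level that is applied when (de)compressors are built. For intuition, a plain round-trip through the `zstd` crate's bulk API; this assumes a `zstd` dependency and bypasses reth's `Zstd` wrapper, which layers dictionaries and per-column state on top:

```rust
fn main() -> std::io::Result<()> {
    let data = b"some repetitive payload, some repetitive payload";

    // Level 3 is a balanced default; higher levels compress harder and slower.
    let compressed = zstd::bulk::compress(data, 3)?;
    let decompressed = zstd::bulk::decompress(&compressed, data.len())?;

    assert_eq!(decompressed, data.to_vec());
    Ok(())
}
```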
pub const fn with_level(mut self, level: i32) -> Self { self.level = level; self @@ -209,7 +213,7 @@ impl Compression for Zstd { return Err(NippyJarError::ColumnLenMismatch(self.columns, columns.len())) } - let mut dictionaries = vec![]; + let mut dictionaries = Vec::with_capacity(columns.len()); for column in columns { // ZSTD requires all training data to be continuous in memory, alongside the size of // each entry diff --git a/crates/storage/nippy-jar/src/consistency.rs b/crates/storage/nippy-jar/src/consistency.rs index 1093fb5546..952980ef6e 100644 --- a/crates/storage/nippy-jar/src/consistency.rs +++ b/crates/storage/nippy-jar/src/consistency.rs @@ -28,6 +28,11 @@ pub struct NippyJarChecker { } impl NippyJarChecker { + /// Creates a new instance of [`NippyJarChecker`] with the provided [`NippyJar`]. + /// + /// This method initializes the checker without any associated file handles for + /// the data or offsets files. The [`NippyJar`] passed in contains all necessary + /// configurations for handling data. pub const fn new(jar: NippyJar) -> Self { Self { jar, data_file: None, offsets_file: None } } diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index 2677648272..376411ac26 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -25,9 +25,10 @@ impl std::fmt::Debug for NippyJarCursor<'_, H> { } impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { + /// Creates a new instance of [`NippyJarCursor`] for the given [`NippyJar`]. pub fn new(jar: &'a NippyJar) -> Result { let max_row_size = jar.max_row_size; - Ok(NippyJarCursor { + Ok(Self { jar, reader: Arc::new(jar.open_data_reader()?), // Makes sure that we have enough buffer capacity to decompress any row of data. @@ -36,12 +37,14 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { }) } + /// Creates a new instance of [`NippyJarCursor`] with the specified [`NippyJar`] and data + /// reader. pub fn with_reader( jar: &'a NippyJar, reader: Arc, ) -> Result { let max_row_size = jar.max_row_size; - Ok(NippyJarCursor { + Ok(Self { jar, reader, // Makes sure that we have enough buffer capacity to decompress any row of data. diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index fc096cf848..f69bb44a06 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -4,53 +4,92 @@ use thiserror::Error; /// Errors associated with [`crate::NippyJar`]. #[derive(Error, Debug)] pub enum NippyJarError { + /// An internal error occurred, wrapping any type of error. #[error(transparent)] Internal(#[from] Box), + + /// An error occurred while disconnecting, wrapping a standard I/O error. #[error(transparent)] Disconnect(#[from] std::io::Error), + + /// An error related to the file system occurred, wrapping a file system path error. #[error(transparent)] FileSystem(#[from] reth_fs_util::FsPathError), + + /// A custom error message provided by the user. #[error("{0}")] Custom(String), + + /// An error occurred during serialization/deserialization with Bincode. #[error(transparent)] Bincode(#[from] Box), + + /// An error occurred with the Elias-Fano encoding/decoding process. #[error(transparent)] EliasFano(#[from] anyhow::Error), + + /// Compression was enabled, but the compressor is not ready yet. #[error("compression was enabled, but it's not ready yet")] CompressorNotReady, + + /// Decompression was enabled, but the decompressor is not ready yet. 
#[error("decompression was enabled, but it's not ready yet")] DecompressorNotReady, + + /// The number of columns does not match the expected length. #[error("number of columns does not match: {0} != {1}")] ColumnLenMismatch(usize, usize), + + /// An unexpected missing value was encountered at a specific row and column. #[error("unexpected missing value: row:col {0}:{1}")] UnexpectedMissingValue(u64, u64), + + /// The size of an offset exceeds the maximum allowed size of 8 bytes. #[error("the size of an offset must be at most 8 bytes, got {offset_size}")] OffsetSizeTooBig { /// The read offset size in number of bytes. offset_size: u8, }, + + /// The size of an offset is less than the minimum allowed size of 1 byte. #[error("the size of an offset must be at least 1 byte, got {offset_size}")] OffsetSizeTooSmall { /// The read offset size in number of bytes. offset_size: u8, }, + + /// An attempt was made to read an offset that is out of bounds. #[error("attempted to read an out of bounds offset: {index}")] OffsetOutOfBounds { /// The index of the offset that was being read. index: usize, }, + + /// The output buffer is too small for the compression or decompression operation. #[error("compression or decompression requires a bigger destination output")] OutputTooSmall, + + /// A dictionary is not loaded when it is required for operations. #[error("dictionary is not loaded.")] DictionaryNotLoaded, + + /// It's not possible to generate a compressor after loading a dictionary. #[error("it's not possible to generate a compressor after loading a dictionary.")] CompressorNotAllowed, + + /// The number of offsets is smaller than the requested prune size. #[error("number of offsets ({0}) is smaller than prune request ({1}).")] InvalidPruning(u64, u64), + + /// The jar has been frozen and cannot be modified. #[error("jar has been frozen and cannot be modified.")] FrozenJar, + + /// The file is in an inconsistent state. #[error("File is in an inconsistent state.")] InconsistentState, + + /// A specified file is missing. #[error("Missing file: {0}.")] MissingFile(PathBuf), } diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index bdc950aa38..b1d174feb2 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -10,7 +10,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![allow(missing_docs)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use memmap2::Mmap; @@ -28,6 +27,7 @@ use std::os::windows::prelude::OpenOptionsExt; use tracing::*; +/// Compression algorithms supported by `NippyJar`. pub mod compression; #[cfg(test)] use compression::Compression; @@ -55,10 +55,13 @@ pub use writer::NippyJarWriter; mod consistency; pub use consistency::NippyJarChecker; +/// The version number of the Nippy Jar format. const NIPPY_JAR_VERSION: usize = 1; - +/// The file extension used for index files. const INDEX_FILE_EXTENSION: &str = "idx"; +/// The file extension used for offsets files. const OFFSETS_FILE_EXTENSION: &str = "off"; +/// The file extension used for configuration files. 
pub const CONFIG_FILE_EXTENSION: &str = "conf"; /// A [`RefRow`] is a list of column value slices pointing to either an internal buffer or a diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index 9bf9bf5264..3a1003bee7 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -354,6 +354,10 @@ impl NippyJarWriter { Ok(()) } + /// Commits changes to the data file and offsets without synchronizing all data to disk. + /// + /// This function flushes the buffered data to the data file and commits the offsets, + /// but it does not guarantee that all data is synchronized to persistent storage. #[cfg(feature = "test-utils")] pub fn commit_without_sync_all(&mut self) -> Result<(), NippyJarError> { self.data_file.flush()?; @@ -412,41 +416,49 @@ impl NippyJarWriter { Ok(()) } + /// Returns the maximum row size for the associated [`NippyJar`]. #[cfg(test)] pub const fn max_row_size(&self) -> usize { self.jar.max_row_size } + /// Returns the column index of the current checker instance. #[cfg(test)] pub const fn column(&self) -> usize { self.column } + /// Returns a reference to the offsets vector. #[cfg(test)] pub fn offsets(&self) -> &[u64] { &self.offsets } + /// Returns a mutable reference to the offsets vector. #[cfg(test)] pub fn offsets_mut(&mut self) -> &mut Vec { &mut self.offsets } + /// Returns the path to the offsets file for the associated [`NippyJar`]. #[cfg(test)] pub fn offsets_path(&self) -> std::path::PathBuf { self.jar.offsets_path() } + /// Returns the path to the data file for the associated [`NippyJar`]. #[cfg(test)] pub fn data_path(&self) -> &Path { self.jar.data_path() } + /// Returns a mutable reference to the buffered writer for the data file. #[cfg(any(test, feature = "test-utils"))] pub fn data_file(&mut self) -> &mut BufWriter { &mut self.data_file } + /// Returns a reference to the associated [`NippyJar`] instance. 
#[cfg(any(test, feature = "test-utils"))] pub const fn jar(&self) -> &NippyJar { &self.jar diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 048353452c..4b71d82e2b 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -56,13 +56,14 @@ metrics.workspace = true # misc auto_impl.workspace = true itertools.workspace = true -notify = { workspace = true, default-features = false, features = ["macos_fsevent"] } +notify = { workspace = true, default-features = false, features = [ + "macos_fsevent", +] } parking_lot.workspace = true dashmap = { workspace = true, features = ["inline"] } strum.workspace = true # test-utils -once_cell = { workspace = true, optional = true } reth-ethereum-engine-primitives = { workspace = true, optional = true } alloy-consensus = { workspace = true, optional = true } @@ -72,27 +73,60 @@ rayon.workspace = true [dev-dependencies] reth-db = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["arbitrary", "test-utils"] } +reth-chain-state = { workspace = true, features = ["test-utils"] } reth-trie = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true +reth-ethereum-engine-primitives.workspace = true parking_lot.workspace = true tempfile.workspace = true assert_matches.workspace = true rand.workspace = true -once_cell.workspace = true eyre.workspace = true alloy-consensus.workspace = true [features] -optimism = ["reth-primitives/optimism", "reth-execution-types/optimism", "reth-optimism-primitives"] -serde = ["reth-execution-types/serde"] +optimism = [ + "reth-primitives/optimism", + "reth-execution-types/optimism", + "reth-optimism-primitives", + "reth-codecs/optimism", + "reth-db/optimism", + "reth-db-api/optimism", + "revm/optimism", + "reth-chainspec/optimism" +] +serde = [ + "reth-execution-types/serde", + "reth-trie-db/serde", + "reth-trie/serde", + "alloy-consensus?/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "alloy-rpc-types-engine/serde", + "dashmap/serde", + "notify/serde", + "parking_lot/serde", + "rand/serde", + "revm/serde", + "reth-codecs/serde" +] test-utils = [ - "reth-db/test-utils", - "reth-nippy-jar/test-utils", - "reth-trie/test-utils", - "reth-chain-state/test-utils", - "once_cell", - "reth-ethereum-engine-primitives", - "alloy-consensus", + "reth-db/test-utils", + "reth-nippy-jar/test-utils", + "reth-trie/test-utils", + "reth-chain-state/test-utils", + "reth-ethereum-engine-primitives", + "alloy-consensus", + "reth-chainspec/test-utils", + "reth-evm/test-utils", + "reth-network-p2p/test-utils", + "reth-primitives/test-utils", + "reth-codecs/test-utils", + "reth-db-api/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils" ] diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 09b892562f..3e1ba2a4b8 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -76,3 +76,105 @@ where } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_storage_reverts_iter_empty() { + // Create empty sample data for reverts and wiped entries. + let reverts: Vec<(B256, RevertToSlot)> = vec![]; + let wiped: Vec<(B256, U256)> = vec![]; + + // Create the iterator with the empty data. 
+ let iter = StorageRevertsIter::new(reverts, wiped); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify that the results are empty. + assert_eq!(results, vec![]); + } + + #[test] + fn test_storage_reverts_iter_reverts_only() { + // Create sample data for only reverts. + let reverts = vec![ + (B256::from_slice(&[4; 32]), RevertToSlot::Destroyed), + (B256::from_slice(&[5; 32]), RevertToSlot::Some(U256::from(40))), + ]; + + // Create the iterator with only reverts and no wiped entries. + let iter = StorageRevertsIter::new(reverts, vec![]); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify the output order and values. + assert_eq!( + results, + vec![ + (B256::from_slice(&[4; 32]), U256::ZERO), // Revert slot previous value + (B256::from_slice(&[5; 32]), U256::from(40)), // Only revert present. + ] + ); + } + + #[test] + fn test_storage_reverts_iter_wiped_only() { + // Create sample data for only wiped entries. + let wiped = vec![ + (B256::from_slice(&[6; 32]), U256::from(50)), + (B256::from_slice(&[7; 32]), U256::from(60)), + ]; + + // Create the iterator with only wiped entries and no reverts. + let iter = StorageRevertsIter::new(vec![], wiped); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify the output order and values. + assert_eq!( + results, + vec![ + (B256::from_slice(&[6; 32]), U256::from(50)), // Only wiped present. + (B256::from_slice(&[7; 32]), U256::from(60)), // Only wiped present. + ] + ); + } + + #[test] + fn test_storage_reverts_iter_interleaved() { + // Create sample data for interleaved reverts and wiped entries. + let reverts = vec![ + (B256::from_slice(&[8; 32]), RevertToSlot::Some(U256::from(70))), + (B256::from_slice(&[9; 32]), RevertToSlot::Some(U256::from(80))), + // Some higher key than wiped + (B256::from_slice(&[15; 32]), RevertToSlot::Some(U256::from(90))), + ]; + + let wiped = vec![ + (B256::from_slice(&[8; 32]), U256::from(75)), // Same key as revert + (B256::from_slice(&[10; 32]), U256::from(85)), // Wiped with new key + ]; + + // Create the iterator with the sample data. + let iter = StorageRevertsIter::new(reverts, wiped); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify the output order and values. + assert_eq!( + results, + vec![ + (B256::from_slice(&[8; 32]), U256::from(70)), // Revert takes priority. + (B256::from_slice(&[9; 32]), U256::from(80)), // Only revert present. + (B256::from_slice(&[10; 32]), U256::from(85)), // Wiped entry. 
+ (B256::from_slice(&[15; 32]), U256::from(90)), // Greater revert entry + ] + ); + } +} diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index eb2372cb77..11ecb4039c 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1,14 +1,15 @@ +#![allow(unused)] use crate::{ - providers::StaticFileProvider, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, - BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, - CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, - DatabaseProviderFactory, DatabaseProviderRO, EvmEnvProvider, HeaderProvider, - ParliaSnapshotReader, ProviderError, ProviderFactory, PruneCheckpointReader, ReceiptProvider, - ReceiptProviderIdExt, RequestsProvider, StageCheckpointReader, StateProviderBox, - StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, - TransactionsProvider, WithdrawalsProvider, + providers::{ConsistentProvider, StaticFileProvider}, + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + BlockSource, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, + ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProvider, + DatabaseProviderFactory, EvmEnvProvider, FullProvider, HeaderProvider, ParliaSnapshotReader, + ProviderError, ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, + StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber}; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ @@ -16,33 +17,29 @@ use reth_chain_state::{ MemoryOverlayStateProvider, }; use reth_chainspec::{ChainInfo, EthereumHardforks}; -use reth_db::models::BlockNumberAddress; +use reth_db::{models::BlockNumberAddress, transaction::DbTx, Database}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; +use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ parlia::Snapshot, Account, BlobSidecars, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, + TransactionSignedNoHash, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{SidecarsProvider, StorageChangeSetReader}; +use reth_storage_api::{DBProvider, SidecarsProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; -use revm::{ - db::states::PlainStorageRevert, - primitives::{BlockEnv, CfgEnvWithHandlerCfg}, -}; +use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ - collections::{hash_map, HashMap}, - ops::{Add, Bound, RangeBounds, RangeInclusive, Sub}, + ops::{Add, RangeBounds, RangeInclusive, Sub}, 
sync::Arc, time::Instant, }; use tracing::trace; -use super::ProviderNodeTypes; +use crate::providers::ProviderNodeTypes; /// The main type for interacting with the blockchain. /// @@ -51,11 +48,11 @@ use super::ProviderNodeTypes; /// type that holds an instance of the database and the blockchain tree. #[derive(Debug)] pub struct BlockchainProvider2 { - /// Provider type used to access the database. - database: ProviderFactory, + /// Provider factory used to access the database. + pub(crate) database: ProviderFactory, /// Tracks the chain info wrt forkchoice updates and in memory canonical /// state. - pub(super) canonical_in_memory_state: CanonicalInMemoryState, + pub(crate) canonical_in_memory_state: CanonicalInMemoryState, } impl Clone for BlockchainProvider2 { @@ -68,15 +65,15 @@ impl Clone for BlockchainProvider2 { } impl BlockchainProvider2 { - /// Create a new provider using only the database, fetching the latest header from - /// the database to initialize the provider. - pub fn new(database: ProviderFactory) -> ProviderResult { - let provider = database.provider()?; - let best: ChainInfo = provider.chain_info()?; + /// Create a new [`BlockchainProvider2`] using only the storage, fetching the latest + /// header from the database to initialize the provider. + pub fn new(storage: ProviderFactory) -> ProviderResult { + let provider = storage.provider()?; + let best = provider.chain_info()?; match provider.header_by_number(best.best_number)? { Some(header) => { drop(provider); - Ok(Self::with_latest(database, SealedHeader::new(header, best.best_hash))?) + Ok(Self::with_latest(storage, SealedHeader::new(header, best.best_hash))?) } None => Err(ProviderError::HeaderNotFound(best.best_number.into())), } @@ -87,8 +84,8 @@ impl BlockchainProvider2 { /// /// This returns a `ProviderResult` since it tries the retrieve the last finalized header from /// `database`. - pub fn with_latest(database: ProviderFactory, latest: SealedHeader) -> ProviderResult { - let provider = database.provider()?; + pub fn with_latest(storage: ProviderFactory, latest: SealedHeader) -> ProviderResult { + let provider = storage.provider()?; let finalized_header = provider .last_finalized_block_number()? .map(|num| provider.sealed_header(num)) @@ -105,7 +102,7 @@ impl BlockchainProvider2 { .transpose()? .flatten(); Ok(Self { - database, + database: storage, canonical_in_memory_state: CanonicalInMemoryState::with_head( latest, finalized_header, @@ -119,280 +116,12 @@ impl BlockchainProvider2 { self.canonical_in_memory_state.clone() } - // Helper function to convert range bounds - fn convert_range_bounds( - &self, - range: impl RangeBounds, - end_unbounded: impl FnOnce() -> T, - ) -> (T, T) - where - T: Copy + Add + Sub + From, - { - let start = match range.start_bound() { - Bound::Included(&n) => n, - Bound::Excluded(&n) => n + T::from(1u8), - Bound::Unbounded => T::from(0u8), - }; - - let end = match range.end_bound() { - Bound::Included(&n) => n, - Bound::Excluded(&n) => n - T::from(1u8), - Bound::Unbounded => end_unbounded(), - }; - - (start, end) - } - - /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. - /// - /// If the range is empty, or there are no blocks for the given range, then this returns `None`. - pub fn get_state( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - if range.is_empty() { - return Ok(None) - } - let start_block_number = *range.start(); - let end_block_number = *range.end(); - - // We are not removing block meta as it is used to get block changesets. 
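The `StorageRevertsIter` tests above pin down the merge semantics: both inputs are sorted by key, a revert beats a wiped entry on a shared key, and a destroyed slot reverts to zero. A compact sketch of that rule over plain tuples (illustrative types, not the provider's actual iterator):

```rust
use std::iter::Peekable;
use std::vec::IntoIter;

fn next_revert(iter: &mut Peekable<IntoIter<(u64, Option<u64>)>>) -> (u64, u64) {
    let (key, prior) = iter.next().unwrap();
    // `None` models `RevertToSlot::Destroyed`, which reverts to zero.
    (key, prior.unwrap_or(0))
}

fn merge(reverts: Vec<(u64, Option<u64>)>, wiped: Vec<(u64, u64)>) -> Vec<(u64, u64)> {
    let mut out = Vec::new();
    let mut r = reverts.into_iter().peekable();
    let mut w = wiped.into_iter().peekable();
    loop {
        match (r.peek(), w.peek()) {
            (Some(&(rk, _)), Some(&(wk, _))) if rk < wk => out.push(next_revert(&mut r)),
            (Some(&(rk, _)), Some(&(wk, _))) if rk > wk => out.push(w.next().unwrap()),
            (Some(_), Some(_)) => {
                // Shared key: the revert takes priority, drop the wiped duplicate.
                let item = next_revert(&mut r);
                w.next();
                out.push(item);
            }
            (Some(_), None) => out.push(next_revert(&mut r)),
            (None, Some(_)) => out.push(w.next().unwrap()),
            (None, None) => break,
        }
    }
    out
}

fn main() {
    let merged = merge(vec![(8, Some(70)), (9, None)], vec![(8, 75), (10, 85)]);
    assert_eq!(merged, vec![(8, 70), (9, 0), (10, 85)]);
}
```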
- let mut block_bodies = Vec::new(); - for block_num in range.clone() { - let block_body = self - .block_body_indices(block_num)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(block_num))?; - block_bodies.push((block_num, block_body)) - } - - // get transaction receipts - let Some(from_transaction_num) = block_bodies.first().map(|body| body.1.first_tx_num()) - else { - return Ok(None) - }; - let Some(to_transaction_num) = block_bodies.last().map(|body| body.1.last_tx_num()) else { - return Ok(None) - }; - - let mut account_changeset = Vec::new(); - for block_num in range.clone() { - let changeset = - self.account_block_changeset(block_num)?.into_iter().map(|elem| (block_num, elem)); - account_changeset.extend(changeset); - } - - let mut storage_changeset = Vec::new(); - for block_num in range { - let changeset = self.storage_changeset(block_num)?; - storage_changeset.extend(changeset); - } - - let (state, reverts) = - self.populate_bundle_state(account_changeset, storage_changeset, end_block_number)?; - - let mut receipt_iter = - self.receipts_by_tx_range(from_transaction_num..=to_transaction_num)?.into_iter(); - - let mut receipts = Vec::with_capacity(block_bodies.len()); - // loop break if we are at the end of the blocks. - for (_, block_body) in block_bodies { - let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for tx_num in block_body.tx_num_range() { - let receipt = - receipt_iter.next().ok_or(ProviderError::ReceiptNotFound(tx_num.into()))?; - block_receipts.push(Some(receipt)); - } - receipts.push(block_receipts); - } - - Ok(Some(ExecutionOutcome::new_init( - state, - reverts, - // We skip new contracts since we never delete them from the database - Vec::new(), - receipts.into(), - start_block_number, - Vec::new(), - ))) - } - - /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the - /// [`reth_db::PlainAccountState`] and [`reth_db::PlainStorageState`] tables, based on the given - /// storage and account changesets. - fn populate_bundle_state( - &self, - account_changeset: Vec<(u64, AccountBeforeTx)>, - storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>, - block_range_end: BlockNumber, - ) -> ProviderResult<(BundleStateInit, RevertsInit)> { - let mut state: BundleStateInit = HashMap::new(); - let mut reverts: RevertsInit = HashMap::new(); - let state_provider = self.state_by_block_number_or_tag(block_range_end.into())?; - - // add account changeset changes - for (block_number, account_before) in account_changeset.into_iter().rev() { - let AccountBeforeTx { info: old_info, address } = account_before; - match state.entry(address) { - hash_map::Entry::Vacant(entry) => { - let new_info = state_provider.basic_account(address)?; - entry.insert((old_info, new_info, HashMap::new())); - } - hash_map::Entry::Occupied(mut entry) => { - // overwrite old account state. - entry.get_mut().0 = old_info; - } - } - // insert old info into reverts. - reverts.entry(block_number).or_default().entry(address).or_default().0 = Some(old_info); - } - - // add storage changeset changes - for (block_and_address, old_storage) in storage_changeset.into_iter().rev() { - let BlockNumberAddress((block_number, address)) = block_and_address; - // get account state or insert from plain state. 
- let account_state = match state.entry(address) { - hash_map::Entry::Vacant(entry) => { - let present_info = state_provider.basic_account(address)?; - entry.insert((present_info, present_info, HashMap::new())) - } - hash_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - // match storage. - match account_state.2.entry(old_storage.key) { - hash_map::Entry::Vacant(entry) => { - let new_storage_value = - state_provider.storage(address, old_storage.key)?.unwrap_or_default(); - entry.insert((old_storage.value, new_storage_value)); - } - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().0 = old_storage.value; - } - }; - - reverts - .entry(block_number) - .or_default() - .entry(address) - .or_default() - .1 - .push(old_storage); - } - - Ok((state, reverts)) - } - - /// Fetches a range of data from both in-memory state and persistent storage while a predicate - /// is met. - /// - /// Creates a snapshot of the in-memory chain state and database provider to prevent - /// inconsistencies. Splits the range into in-memory and storage sections, prioritizing - /// recent in-memory blocks in case of overlaps. - /// - /// * `fetch_db_range` function (`F`) provides access to the database provider, allowing the - /// user to retrieve the required items from the database using [`RangeInclusive`]. - /// * `map_block_state_item` function (`G`) provides each block of the range in the in-memory - /// state, allowing for selection or filtering for the desired data. - fn get_in_memory_or_storage_by_block_range_while( - &self, - range: impl RangeBounds, - fetch_db_range: F, - map_block_state_item: G, - mut predicate: P, - ) -> ProviderResult> - where - F: FnOnce( - &DatabaseProviderRO, - RangeInclusive, - &mut P, - ) -> ProviderResult>, - G: Fn(Arc, &mut P) -> Option, - P: FnMut(&T) -> bool, - { - // Each one provides a snapshot at the time of instantiation, but its order matters. - // - // If we acquire first the database provider, it's possible that before the in-memory chain - // snapshot is instantiated, it will flush blocks to disk. This would - // mean that our database provider would not have access to the flushed blocks (since it's - // working under an older view), while the in-memory state may have deleted them - // entirely. Resulting in gaps on the range. - let mut in_memory_chain = - self.canonical_in_memory_state.canonical_chain().collect::>(); - let db_provider = self.database_provider_ro()?; - - let (start, end) = self.convert_range_bounds(range, || { - // the first block is the highest one. - in_memory_chain - .first() - .map(|b| b.number()) - .unwrap_or_else(|| db_provider.last_block_number().unwrap_or_default()) - }); - - if start > end { - return Ok(vec![]) - } - - // Split range into storage_range and in-memory range. If the in-memory range is not - // necessary drop it early. - // - // The last block of `in_memory_chain` is the lowest block number. - let (in_memory, storage_range) = match in_memory_chain.last().as_ref().map(|b| b.number()) { - Some(lowest_memory_block) if lowest_memory_block <= end => { - let highest_memory_block = - in_memory_chain.first().as_ref().map(|b| b.number()).expect("qed"); - - // Database will for a time overlap with in-memory-chain blocks. In - // case of a re-org, it can mean that the database blocks are of a forked chain, and - // so, we should prioritize the in-memory overlapped blocks. 
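The removed `populate_bundle_state` above replays changesets newest-to-oldest: the first time an address is seen, its present value is fetched once from the state provider; every older changeset then overwrites the recorded pre-state, so the oldest value wins. A toy version of that entry-API pattern, with `u32`/`u64` standing in for addresses and account info:

```rust
use std::collections::{hash_map, HashMap};

// Changesets are (block, address, old info); `present` plays the role of the
// state provider's `basic_account` lookup.
fn pre_state(
    changesets: &[(u64, u32, Option<u64>)],
    present: impl Fn(u32) -> Option<u64>,
) -> HashMap<u32, (Option<u64>, Option<u64>)> {
    let mut state = HashMap::new();
    for &(_block, address, old_info) in changesets.iter().rev() {
        match state.entry(address) {
            hash_map::Entry::Vacant(entry) => {
                // First (i.e. newest) sighting: fetch the present value once.
                entry.insert((old_info, present(address)));
            }
            hash_map::Entry::Occupied(mut entry) => {
                // Older changeset: its pre-state supersedes what we recorded.
                entry.get_mut().0 = old_info;
            }
        }
    }
    state
}

fn main() {
    // Account 7 changed in blocks 1 and 2; its pre-state is the block-1 value.
    let changesets = vec![(1, 7, Some(10)), (2, 7, Some(20))];
    let state = pre_state(&changesets, |_| Some(30));
    assert_eq!(state[&7], (Some(10), Some(30)));
}
```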
- let in_memory_range = - lowest_memory_block.max(start)..=end.min(highest_memory_block); - - // If requested range is in the middle of the in-memory range, remove the necessary - // lowest blocks - in_memory_chain.truncate( - in_memory_chain - .len() - .saturating_sub(start.saturating_sub(lowest_memory_block) as usize), - ); - - let storage_range = - (lowest_memory_block > start).then(|| start..=lowest_memory_block - 1); - - (Some((in_memory_chain, in_memory_range)), storage_range) - } - _ => { - // Drop the in-memory chain so we don't hold blocks in memory. - drop(in_memory_chain); - - (None, Some(start..=end)) - } - }; - - let mut items = Vec::with_capacity((end - start + 1) as usize); - - if let Some(storage_range) = storage_range { - let mut db_items = fetch_db_range(&db_provider, storage_range.clone(), &mut predicate)?; - items.append(&mut db_items); - - // The predicate was not met, if the number of items differs from the expected. So, we - // return what we have. - if items.len() as u64 != storage_range.end() - storage_range.start() + 1 { - return Ok(items) - } - } - - if let Some((in_memory_chain, in_memory_range)) = in_memory { - for (num, block) in in_memory_range.zip(in_memory_chain.into_iter().rev()) { - debug_assert!(num == block.number()); - if let Some(item) = map_block_state_item(block, &mut predicate) { - items.push(item); - } else { - break - } - } - } - - Ok(items) + /// Returns a provider with a created `DbTx` inside, which allows fetching data from the + /// database using different types of providers. Example: [`HeaderProvider`] + /// [`BlockHashReader`]. This may fail if the inner read database transaction fails to open. + #[track_caller] + pub fn consistent_provider(&self) -> ProviderResult> { + ConsistentProvider::new(self.database.clone(), self.canonical_in_memory_state()) } /// This uses a given [`BlockState`] to initialize a state provider for that block. @@ -402,225 +131,17 @@ impl BlockchainProvider2 { ) -> ProviderResult { let anchor_hash = state.anchor().hash; let latest_historical = self.database.history_by_block_hash(anchor_hash)?; - Ok(self.canonical_in_memory_state.state_provider_from_state(state, latest_historical)) + Ok(state.state_provider(latest_historical)) } - /// Fetches data from either in-memory state or persistent storage for a range of transactions. + /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. /// - /// * `fetch_from_db`: has a [`DatabaseProviderRO`] and the storage specific range. - /// * `fetch_from_block_state`: has a [`RangeInclusive`] of elements that should be fetched from - /// [`BlockState`]. [`RangeInclusive`] is necessary to handle partial look-ups of a block. - fn get_in_memory_or_storage_by_tx_range( - &self, - range: impl RangeBounds, - fetch_from_db: S, - fetch_from_block_state: M, - ) -> ProviderResult> - where - S: FnOnce( - DatabaseProviderRO, - RangeInclusive, - ) -> ProviderResult>, - M: Fn(RangeInclusive, Arc) -> ProviderResult>, - { - let in_mem_chain = self.canonical_in_memory_state.canonical_chain().collect::>(); - let provider = self.database.provider()?; - - // Get the last block number stored in the storage which does NOT overlap with in-memory - // chain. - let last_database_block_number = in_mem_chain - .last() - .map(|b| Ok(b.anchor().number)) - .unwrap_or_else(|| provider.last_block_number())?; - - // Get the next tx number for the last block stored in the storage, which marks the start of - // the in-memory state. 
- let last_block_body_index = provider - .block_body_indices(last_database_block_number)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; - let mut in_memory_tx_num = last_block_body_index.next_tx_num(); - - let (start, end) = self.convert_range_bounds(range, || { - in_mem_chain - .iter() - .map(|b| b.block_ref().block().body.transactions.len() as u64) - .sum::() + - last_block_body_index.last_tx_num() - }); - - if start > end { - return Ok(vec![]) - } - - let mut tx_range = start..=end; - - // If the range is entirely before the first in-memory transaction number, fetch from - // storage - if *tx_range.end() < in_memory_tx_num { - return fetch_from_db(provider, tx_range); - } - - let mut items = Vec::with_capacity((tx_range.end() - tx_range.start() + 1) as usize); - - // If the range spans storage and memory, get elements from storage first. - if *tx_range.start() < in_memory_tx_num { - // Determine the range that needs to be fetched from storage. - let db_range = *tx_range.start()..=in_memory_tx_num.saturating_sub(1); - - // Set the remaining transaction range for in-memory - tx_range = in_memory_tx_num..=*tx_range.end(); - - items.extend(fetch_from_db(provider, db_range)?); - } - - // Iterate from the lowest block to the highest in-memory chain - for block_state in in_mem_chain.into_iter().rev() { - let block_tx_count = block_state.block_ref().block().body.transactions.len(); - let remaining = (tx_range.end() - tx_range.start() + 1) as usize; - - // If the transaction range start is equal or higher than the next block first - // transaction, advance - if *tx_range.start() >= in_memory_tx_num + block_tx_count as u64 { - in_memory_tx_num += block_tx_count as u64; - continue - } - - // This should only be more than 0 once, in case of a partial range inside a block. - let skip = (tx_range.start() - in_memory_tx_num) as usize; - - items.extend(fetch_from_block_state( - skip..=skip + (remaining.min(block_tx_count - skip) - 1), - block_state, - )?); - - in_memory_tx_num += block_tx_count as u64; - - // Break if the range has been fully processed - if in_memory_tx_num > *tx_range.end() { - break - } - - // Set updated range - tx_range = in_memory_tx_num..=*tx_range.end(); - } - - Ok(items) - } - - /// Fetches data from either in-memory state or persistent storage by transaction - /// [`HashOrNumber`]. - fn get_in_memory_or_storage_by_tx( - &self, - id: HashOrNumber, - fetch_from_db: S, - fetch_from_block_state: M, - ) -> ProviderResult> - where - S: FnOnce(DatabaseProviderRO) -> ProviderResult>, - M: Fn(usize, TxNumber, Arc) -> ProviderResult>, - { - // Order of instantiation matters. More information on: - // `get_in_memory_or_storage_by_block_range_while`. - let in_mem_chain = self.canonical_in_memory_state.canonical_chain().collect::>(); - let provider = self.database.provider()?; - - // Get the last block number stored in the database which does NOT overlap with in-memory - // chain. - let last_database_block_number = in_mem_chain - .last() - .map(|b| Ok(b.anchor().number)) - .unwrap_or_else(|| provider.last_block_number())?; - - // Get the next tx number for the last block stored in the database and consider it the - // first tx number of the in-memory state - let last_block_body_index = provider - .block_body_indices(last_database_block_number)? 
- .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; - let mut in_memory_tx_num = last_block_body_index.next_tx_num(); - - // If the transaction number is less than the first in-memory transaction number, make a - // database lookup - if let HashOrNumber::Number(id) = id { - if id < in_memory_tx_num { - return fetch_from_db(provider) - } - } - - // Iterate from the lowest block to the highest - for block_state in in_mem_chain.into_iter().rev() { - let executed_block = block_state.block_ref(); - let block = executed_block.block(); - - for tx_index in 0..block.body.transactions.len() { - match id { - HashOrNumber::Hash(tx_hash) => { - if tx_hash == block.body.transactions[tx_index].hash() { - return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) - } - } - HashOrNumber::Number(id) => { - if id == in_memory_tx_num { - return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) - } - } - } - - in_memory_tx_num += 1; - } - } - - // Not found in-memory, so check database. - if let HashOrNumber::Hash(_) = id { - return fetch_from_db(provider) - } - - Ok(None) - } - - /// Fetches data from either in-memory state or persistent storage by [`BlockHashOrNumber`]. - fn get_in_memory_or_storage_by_block( + /// If the range is empty, or there are no blocks for the given range, then this returns `None`. + pub fn get_state( &self, - id: BlockHashOrNumber, - fetch_from_db: S, - fetch_from_block_state: M, - ) -> ProviderResult - where - S: FnOnce(DatabaseProviderRO) -> ProviderResult, - M: Fn(Arc) -> ProviderResult, - { - let block_state = match id { - BlockHashOrNumber::Hash(block_hash) => { - self.canonical_in_memory_state.state_by_hash(block_hash) - } - BlockHashOrNumber::Number(block_number) => { - self.canonical_in_memory_state.state_by_number(block_number) - } - }; - - if let Some(block_state) = block_state { - return fetch_from_block_state(block_state) - } - fetch_from_db(self.database_provider_ro()?) - } -} - -impl BlockchainProvider2 { - /// Ensures that the given block number is canonical (synced) - /// - /// This is a helper for guarding the `HistoricalStateProvider` against block numbers that are - /// out of range and would lead to invalid results, mainly during initial sync. - /// - /// Verifying the `block_number` would be expensive since we need to lookup sync table - /// Instead, we ensure that the `block_number` is within the range of the - /// [`Self::best_block_number`] which is updated when a block is synced. 
- #[inline] - fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { - let latest = self.best_block_number()?; - if block_number > latest { - Err(ProviderError::HeaderNotFound(block_number.into())) - } else { - Ok(()) - } + range: RangeInclusive, + ) -> ProviderResult> { + self.consistent_provider()?.get_state(range) } } @@ -646,78 +167,34 @@ impl StaticFileProviderFactory for BlockchainProvider2 impl HeaderProvider for BlockchainProvider2 { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - (*block_hash).into(), - |db_provider| db_provider.header(block_hash), - |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), - ) + self.consistent_provider()?.header(block_hash) } fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - num.into(), - |db_provider| db_provider.header_by_number(num), - |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), - ) + self.consistent_provider()?.header_by_number(num) } fn header_td(&self, hash: &BlockHash) -> ProviderResult> { - if let Some(num) = self.block_number(*hash)? { - self.header_td_by_number(num) - } else { - Ok(None) - } + self.consistent_provider()?.header_td(hash) } fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - let number = if self.canonical_in_memory_state.hash_by_number(number).is_some() { - // If the block exists in memory, we should return a TD for it. - // - // The canonical in memory state should only store post-merge blocks. Post-merge blocks - // have zero difficulty. This means we can use the total difficulty for the last - // finalized block number if present (so that we are not affected by reorgs), if not the - // last number in the database will be used. - if let Some(last_finalized_num_hash) = - self.canonical_in_memory_state.get_finalized_num_hash() - { - last_finalized_num_hash.number - } else { - self.database.last_block_number()? 
- } - } else { - // Otherwise, return what we have on disk for the input block - number - }; - self.database.header_td_by_number(number) + self.consistent_provider()?.header_td_by_number(number) } fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header.header().clone()), - |_| true, - ) + self.consistent_provider()?.headers_range(range) } fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - number.into(), - |db_provider| db_provider.sealed_header(number), - |block_state| Ok(Some(block_state.block_ref().block().header.clone())), - ) + self.consistent_provider()?.sealed_header(number) } fn sealed_headers_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.sealed_headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header.clone()), - |_| true, - ) + self.consistent_provider()?.sealed_headers_range(range) } fn sealed_headers_while( @@ -725,25 +202,13 @@ impl HeaderProvider for BlockchainProvider2 { range: impl RangeBounds, predicate: impl FnMut(&SealedHeader) -> bool, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), - |block_state, predicate| { - let header = &block_state.block_ref().block().header; - predicate(header).then(|| header.clone()) - }, - predicate, - ) + self.consistent_provider()?.sealed_headers_while(range, predicate) } } impl BlockHashReader for BlockchainProvider2 { fn block_hash(&self, number: u64) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - number.into(), - |db_provider| db_provider.block_hash(number), - |block_state| Ok(Some(block_state.hash())), - ) + self.consistent_provider()?.block_hash(number) } fn canonical_hashes_range( @@ -751,15 +216,7 @@ impl BlockHashReader for BlockchainProvider2 { start: BlockNumber, end: BlockNumber, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - start..end, - |db_provider, inclusive_range, _| { - db_provider - .canonical_hashes_range(*inclusive_range.start(), *inclusive_range.end() + 1) - }, - |block_state, _| Some(block_state.hash()), - |_| true, - ) + self.consistent_provider()?.canonical_hashes_range(start, end) } } @@ -777,11 +234,7 @@ impl BlockNumReader for BlockchainProvider2 { } fn block_number(&self, hash: B256) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - hash.into(), - |db_provider| db_provider.block_number(hash), - |block_state| Ok(Some(block_state.number())), - ) + self.consistent_provider()?.block_number(hash) } } @@ -801,28 +254,11 @@ impl BlockIdReader for BlockchainProvider2 { impl BlockReader for BlockchainProvider2 { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { - match source { - BlockSource::Any | BlockSource::Canonical => { - // Note: it's fine to return the unsealed block because the caller already has - // the hash - self.get_in_memory_or_storage_by_block( - hash.into(), - |db_provider| db_provider.find_block_by_hash(hash, source), - |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), - ) - } - BlockSource::Pending => { - Ok(self.canonical_in_memory_state.pending_block().map(|block| 
block.unseal())) - } - } + self.consistent_provider()?.find_block_by_hash(hash, source) } fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.block(id), - |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), - ) + self.consistent_provider()?.block(id) } fn pending_block(&self) -> ProviderResult> { @@ -838,56 +274,14 @@ impl BlockReader for BlockchainProvider2 { } fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.ommers(id), - |block_state| { - if self - .database - .chain_spec() - .final_paris_total_difficulty(block_state.number()) - .is_some() - { - return Ok(Some(Vec::new())) - } - - Ok(Some(block_state.block_ref().block().body.ommers.clone())) - }, - ) + self.consistent_provider()?.ommers(id) } fn block_body_indices( &self, number: BlockNumber, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - number.into(), - |db_provider| db_provider.block_body_indices(number), - |block_state| { - // Find the last block indices on database - let last_storage_block_number = block_state.anchor().number; - let mut stored_indices = self - .database - .block_body_indices(last_storage_block_number)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(last_storage_block_number))?; - - // Prepare our block indices - stored_indices.first_tx_num = stored_indices.next_tx_num(); - stored_indices.tx_count = 0; - - // Iterate from the lowest block in memory until our target block - for state in block_state.chain().into_iter().rev() { - let block_tx_count = state.block_ref().block.body.transactions.len() as u64; - if state.block_ref().block().number == number { - stored_indices.tx_count = block_tx_count; - } else { - stored_indices.first_tx_num += block_tx_count; - } - } - - Ok(Some(stored_indices)) - }, - ) + self.consistent_provider()?.block_body_indices(number) } /// Returns the block with senders with matching number or hash from database. 
@@ -901,11 +295,7 @@ impl BlockReader for BlockchainProvider2 { id: BlockHashOrNumber, transaction_kind: TransactionVariant, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.block_with_senders(id, transaction_kind), - |block_state| Ok(Some(block_state.block_with_senders())), - ) + self.consistent_provider()?.block_with_senders(id, transaction_kind) } fn sealed_block_with_senders( @@ -913,259 +303,116 @@ impl BlockReader for BlockchainProvider2 { id: BlockHashOrNumber, transaction_kind: TransactionVariant, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), - |block_state| Ok(Some(block_state.sealed_block_with_senders())), - ) + self.consistent_provider()?.sealed_block_with_senders(id, transaction_kind) } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.block_range(range), - |block_state, _| Some(block_state.block_ref().block().clone().unseal()), - |_| true, - ) + self.consistent_provider()?.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.block_with_senders_range(range), - |block_state, _| Some(block_state.block_with_senders()), - |_| true, - ) + self.consistent_provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), - |block_state, _| Some(block_state.sealed_block_with_senders()), - |_| true, - ) + self.consistent_provider()?.sealed_block_with_senders_range(range) } } impl TransactionsProvider for BlockchainProvider2 { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - tx_hash.into(), - |db_provider| db_provider.transaction_id(tx_hash), - |_, tx_number, _| Ok(Some(tx_number)), - ) + self.consistent_provider()?.transaction_id(tx_hash) } fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_by_id(id), - |tx_index, _, block_state| { - Ok(block_state.block_ref().block().body.transactions.get(tx_index).cloned()) - }, - ) + self.consistent_provider()?.transaction_by_id(id) } fn transaction_by_id_no_hash( &self, id: TxNumber, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_by_id_no_hash(id), - |tx_index, _, block_state| { - Ok(block_state - .block_ref() - .block() - .body - .transactions - .get(tx_index) - .cloned() - .map(Into::into)) - }, - ) + self.consistent_provider()?.transaction_by_id_no_hash(id) } fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - if let Some(tx) = self.canonical_in_memory_state.transaction_by_hash(hash) { - return Ok(Some(tx)) - } - - self.database.transaction_by_hash(hash) + self.consistent_provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, ) -> ProviderResult> { - if let Some((tx, meta)) = - self.canonical_in_memory_state.transaction_by_hash_with_meta(tx_hash) - { - return Ok(Some((tx, meta))) - } - - self.database.transaction_by_hash_with_meta(tx_hash) + 
self.consistent_provider()?.transaction_by_hash_with_meta(tx_hash) } fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_block(id), - |_, _, block_state| Ok(Some(block_state.block_ref().block().number)), - ) + self.consistent_provider()?.transaction_block(id) } fn transactions_by_block( &self, id: BlockHashOrNumber, ) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block( - id, - |provider| provider.transactions_by_block(id), - |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), - ) + self.consistent_provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, ) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.transactions_by_block_range(range), - |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), - |_| true, - ) + self.consistent_provider()?.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx_range( - range, - |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), - |index_range, block_state| { - Ok(block_state.block_ref().block().body.transactions[index_range] - .iter() - .cloned() - .map(Into::into) - .collect()) - }, - ) + self.consistent_provider()?.transactions_by_tx_range(range) } fn senders_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx_range( - range, - |db_provider, db_range| db_provider.senders_by_tx_range(db_range), - |index_range, block_state| Ok(block_state.block_ref().senders[index_range].to_vec()), - ) + self.consistent_provider()?.senders_by_tx_range(range) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_sender(id), - |tx_index, _, block_state| Ok(block_state.block_ref().senders.get(tx_index).copied()), - ) + self.consistent_provider()?.transaction_sender(id) } } impl ReceiptProvider for BlockchainProvider2 { fn receipt(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.receipt(id), - |tx_index, _, block_state| { - Ok(block_state.executed_block_receipts().get(tx_index).cloned()) - }, - ) + self.consistent_provider()?.receipt(id) } fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { - for block_state in self.canonical_in_memory_state.canonical_chain() { - let executed_block = block_state.block_ref(); - let block = executed_block.block(); - let receipts = block_state.executed_block_receipts(); - - // assuming 1:1 correspondence between transactions and receipts - debug_assert_eq!( - block.body.transactions.len(), - receipts.len(), - "Mismatch between transaction and receipt count" - ); - - if let Some(tx_index) = block.body.transactions.iter().position(|tx| tx.hash() == hash) - { - // safe to use tx_index for receipts due to 1:1 correspondence - return Ok(receipts.get(tx_index).cloned()); - } - } - - self.database.receipt_by_hash(hash) + self.consistent_provider()?.receipt_by_hash(hash) } fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block( - block, - |db_provider| db_provider.receipts_by_block(block), - |block_state| Ok(Some(block_state.executed_block_receipts())), - ) + 
self.consistent_provider()?.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx_range( - range, - |db_provider, db_range| db_provider.receipts_by_tx_range(db_range), - |index_range, block_state| { - Ok(block_state.executed_block_receipts().drain(index_range).collect()) - }, - ) + self.consistent_provider()?.receipts_by_tx_range(range) } } impl ReceiptProviderIdExt for BlockchainProvider2 { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { - match block { - BlockId::Hash(rpc_block_hash) => { - let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; - if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) { - let block_state = self - .canonical_in_memory_state - .state_by_hash(rpc_block_hash.block_hash) - .ok_or(ProviderError::StateForHashNotFound(rpc_block_hash.block_hash))?; - receipts = Some(block_state.executed_block_receipts()); - } - Ok(receipts) - } - BlockId::Number(num_tag) => match num_tag { - BlockNumberOrTag::Pending => Ok(self - .canonical_in_memory_state - .pending_state() - .map(|block_state| block_state.executed_block_receipts())), - _ => { - if let Some(num) = self.convert_block_number(num_tag)? { - self.receipts_by_block(num.into()) - } else { - Ok(None) - } - } - }, - } + self.consistent_provider()?.receipts_by_block_id(block) } } @@ -1175,33 +422,11 @@ impl WithdrawalsProvider for BlockchainProvider2 { id: BlockHashOrNumber, timestamp: u64, ) -> ProviderResult> { - if !self.database.chain_spec().is_shanghai_active_at_timestamp(timestamp) { - return Ok(None) - } - - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.withdrawals_by_block(id, timestamp), - |block_state| Ok(block_state.block_ref().block().body.withdrawals.clone()), - ) + self.consistent_provider()?.withdrawals_by_block(id, timestamp) } fn latest_withdrawal(&self) -> ProviderResult> { - let best_block_num = self.best_block_number()?; - - self.get_in_memory_or_storage_by_block( - best_block_num.into(), - |db_provider| db_provider.latest_withdrawal(), - |block_state| { - Ok(block_state - .block_ref() - .block() - .body - .withdrawals - .clone() - .and_then(|mut w| w.pop())) - }, - ) + self.consistent_provider()?.latest_withdrawal() } } @@ -1215,35 +440,17 @@ impl SidecarsProvider for BlockchainProvider2 { } } -impl RequestsProvider for BlockchainProvider2 { - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - if !self.database.chain_spec().is_prague_active_at_timestamp(timestamp) { - return Ok(None) - } - - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.requests_by_block(id, timestamp), - |block_state| Ok(block_state.block_ref().block().body.requests.clone()), - ) - } -} - impl StageCheckpointReader for BlockchainProvider2 { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { - self.database.provider()?.get_stage_checkpoint(id) + self.consistent_provider()?.get_stage_checkpoint(id) } fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { - self.database.provider()?.get_stage_checkpoint_progress(id) + self.consistent_provider()?.get_stage_checkpoint_progress(id) } fn get_all_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_all_checkpoints() + self.consistent_provider()?.get_all_checkpoints() } } @@ -1258,9 +465,7 @@ impl EvmEnvProvider for BlockchainProvider2 { where EvmConfig: ConfigureEvmEnv
<Header = Header>,
     {
-        let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?;
-        let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?;
-        self.fill_env_with_header(cfg, block_env, &header, evm_config)
+        self.consistent_provider()?.fill_env_at(cfg, block_env, at, evm_config)
     }
 
     fn fill_env_with_header<EvmConfig>(
@@ -1273,11 +478,7 @@ impl<N: ProviderNodeTypes> EvmEnvProvider for BlockchainProvider2<N> {
     where
         EvmConfig: ConfigureEvmEnv<Header = Header>
,
     {
-        let total_difficulty = self
-            .header_td_by_number(header.number)?
-            .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?;
-        evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty);
-        Ok(())
+        self.consistent_provider()?.fill_env_with_header(cfg, block_env, header, evm_config)
     }
 
     fn fill_cfg_env_at<EvmConfig>(
@@ -1289,9 +490,7 @@ impl<N: ProviderNodeTypes> EvmEnvProvider for BlockchainProvider2<N> {
     where
         EvmConfig: ConfigureEvmEnv<Header = Header>
,
     {
-        let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?;
-        let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?;
-        self.fill_cfg_env_with_header(cfg, &header, evm_config)
+        self.consistent_provider()?.fill_cfg_env_at(cfg, at, evm_config)
     }
 
     fn fill_cfg_env_with_header<EvmConfig>(
@@ -1303,11 +502,7 @@ impl<N: ProviderNodeTypes> EvmEnvProvider for BlockchainProvider2<N> {
     where
         EvmConfig: ConfigureEvmEnv<Header = Header>
, { - let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - evm_config.fill_cfg_env(cfg, header, total_difficulty); - Ok(()) + self.consistent_provider()?.fill_cfg_env_with_header(cfg, header, evm_config) } } @@ -1316,11 +511,11 @@ impl PruneCheckpointReader for BlockchainProvider2 { &self, segment: PruneSegment, ) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoint(segment) + self.consistent_provider()?.get_prune_checkpoint(segment) } fn get_prune_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoints() + self.consistent_provider()?.get_prune_checkpoints() } } @@ -1351,8 +546,9 @@ impl StateProviderFactory for BlockchainProvider2 { block_number: BlockNumber, ) -> ProviderResult { trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); - self.ensure_canonical_block(block_number)?; - let hash = self + let provider = self.consistent_provider()?; + provider.ensure_canonical_block(block_number)?; + let hash = provider .block_hash(block_number)? .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; self.history_by_block_hash(hash) @@ -1361,14 +557,11 @@ impl StateProviderFactory for BlockchainProvider2 { fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); - self.get_in_memory_or_storage_by_block( + self.consistent_provider()?.get_in_memory_or_storage_by_block( block_hash.into(), - |_| { - // TODO(joshie): port history_by_block_hash to DatabaseProvider and use db_provider - self.database.history_by_block_hash(block_hash) - }, + |_| self.database.history_by_block_hash(block_hash), |block_state| { - let state_provider = self.block_state_provider(&block_state)?; + let state_provider = self.block_state_provider(block_state)?; Ok(Box::new(state_provider)) }, ) @@ -1477,105 +670,35 @@ where } } -impl BlockReaderIdExt for BlockchainProvider2 +impl BlockReaderIdExt for BlockchainProvider2 where Self: BlockReader + ReceiptProviderIdExt, { fn block_by_id(&self, id: BlockId) -> ProviderResult> { - match id { - BlockId::Number(num) => self.block_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: should we only apply this for the RPCs that are listed in EIP-1898? - // so not at the provider level? 
- // if we decide to do this at a higher level, then we can make this an automatic - // trait impl - if Some(true) == hash.require_canonical { - // check the database, canonical blocks are only stored in the database - self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) - } else { - self.block_by_hash(hash.block_hash) - } - } - } + self.consistent_provider()?.block_by_id(id) } fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { - Ok(match id { - BlockNumberOrTag::Latest => { - Some(self.canonical_in_memory_state.get_canonical_head().unseal()) - } - BlockNumberOrTag::Finalized => { - self.canonical_in_memory_state.get_finalized_header().map(|h| h.unseal()) - } - BlockNumberOrTag::Safe => { - self.canonical_in_memory_state.get_safe_header().map(|h| h.unseal()) - } - BlockNumberOrTag::Earliest => self.header_by_number(0)?, - BlockNumberOrTag::Pending => self.canonical_in_memory_state.pending_header(), - - BlockNumberOrTag::Number(num) => self.header_by_number(num)?, - }) + self.consistent_provider()?.header_by_number_or_tag(id) } fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, ) -> ProviderResult> { - match id { - BlockNumberOrTag::Latest => { - Ok(Some(self.canonical_in_memory_state.get_canonical_head())) - } - BlockNumberOrTag::Finalized => { - Ok(self.canonical_in_memory_state.get_finalized_header()) - } - BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), - BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), - BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), - } + self.consistent_provider()?.sealed_header_by_number_or_tag(id) } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { - Ok(match id { - BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }), - }) + self.consistent_provider()?.sealed_header_by_id(id) } fn header_by_id(&self, id: BlockId) -> ProviderResult> { - Ok(match id { - BlockId::Number(num) => self.header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?, - }) + self.consistent_provider()?.header_by_id(id) } fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { - match id { - BlockId::Number(num) => self.ommers_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: EIP-1898 question, see above - // here it is not handled - self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) - } - } + self.consistent_provider()?.ommers_by_id(id) } } @@ -1602,49 +725,7 @@ impl StorageChangeSetReader for BlockchainProvider2 { &self, block_number: BlockNumber, ) -> ProviderResult> { - if let Some(state) = self.canonical_in_memory_state.state_by_number(block_number) { - let changesets = state - .block() - .execution_output - .bundle - .reverts - .clone() - .into_plain_state_reverts() - .storage - .into_iter() - .flatten() - .flat_map(|revert: PlainStorageRevert| { - revert.storage_revert.into_iter().map(move |(key, value)| { - ( - 
BlockNumberAddress((block_number, revert.address)), - StorageEntry { key: key.into(), value: value.to_previous_value() }, - ) - }) - }) - .collect(); - Ok(changesets) - } else { - // Perform checks on whether or not changesets exist for the block. - let provider = self.database.provider()?; - - // No prune checkpoint means history should exist and we should `unwrap_or(true)` - let storage_history_exists = provider - .get_prune_checkpoint(PruneSegment::StorageHistory)? - .and_then(|checkpoint| { - // return true if the block number is ahead of the prune checkpoint. - // - // The checkpoint stores the highest pruned block number, so we should make - // sure the block_number is strictly greater. - checkpoint.block_number.map(|checkpoint| block_number > checkpoint) - }) - .unwrap_or(true); - - if !storage_history_exists { - return Err(ProviderError::StateAtBlockPruned(block_number)) - } - - provider.storage_changeset(block_number) - } + self.consistent_provider()?.storage_changeset(block_number) } } @@ -1653,50 +734,14 @@ impl ChangeSetReader for BlockchainProvider2 { &self, block_number: BlockNumber, ) -> ProviderResult> { - if let Some(state) = self.canonical_in_memory_state.state_by_number(block_number) { - let changesets = state - .block_ref() - .execution_output - .bundle - .reverts - .clone() - .into_plain_state_reverts() - .accounts - .into_iter() - .flatten() - .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }) - .collect(); - Ok(changesets) - } else { - // Perform checks on whether or not changesets exist for the block. - let provider = self.database.provider()?; - // No prune checkpoint means history should exist and we should `unwrap_or(true)` - let account_history_exists = provider - .get_prune_checkpoint(PruneSegment::AccountHistory)? - .and_then(|checkpoint| { - // return true if the block number is ahead of the prune checkpoint. - // - // The checkpoint stores the highest pruned block number, so we should make - // sure the block_number is strictly greater. - checkpoint.block_number.map(|checkpoint| block_number > checkpoint) - }) - .unwrap_or(true); - - if !account_history_exists { - return Err(ProviderError::StateAtBlockPruned(block_number)) - } - - provider.account_block_changeset(block_number) - } + self.consistent_provider()?.account_block_changeset(block_number) } } impl AccountReader for BlockchainProvider2 { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { - // use latest state provider - let state_provider = self.latest()?; - state_provider.basic_account(address) + self.consistent_provider()?.basic_account(address) } } @@ -1717,12 +762,7 @@ impl StateReader for BlockchainProvider2 { /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the /// first place. 
fn get_state(&self, block: BlockNumber) -> ProviderResult> { - if let Some(state) = self.canonical_in_memory_state.state_by_number(block) { - let state = state.block_ref().execution_outcome().clone(); - Ok(Some(state)) - } else { - self.get_state(block..=block) - } + StateReader::get_state(&self.consistent_provider()?, block) } } @@ -1768,8 +808,8 @@ mod tests { use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, ReceiptProvider, - ReceiptProviderIdExt, RequestsProvider, StateProviderFactory, TransactionVariant, - TransactionsProvider, WithdrawalsProvider, + ReceiptProviderIdExt, StateProviderFactory, TransactionVariant, TransactionsProvider, + WithdrawalsProvider, }; use reth_testing_utils::generators::{ self, random_block, random_block_range, random_changeset_range, random_eoa_accounts, @@ -1963,7 +1003,7 @@ mod tests { /// This simulates a RPC method having a different view than when its database transaction was /// created. fn persist_block_after_db_tx_creation( - provider: Arc>, + provider: BlockchainProvider2, block_number: BlockNumber, ) { let hook_provider = provider.clone(); @@ -2429,7 +1469,7 @@ mod tests { assert_eq!( provider .withdrawals_by_block( - reth_primitives::BlockHashOrNumber::Number(15), + alloy_eips::BlockHashOrNumber::Number(15), shainghai_timestamp ) .expect("could not call withdrawals by block"), @@ -2441,7 +1481,7 @@ mod tests { assert_eq!( provider .withdrawals_by_block( - reth_primitives::BlockHashOrNumber::Number(block.number), + alloy_eips::BlockHashOrNumber::Number(block.number), shainghai_timestamp )? .unwrap(), @@ -2879,37 +1919,6 @@ mod tests { Ok(()) } - #[test] - fn test_requests_provider() -> eyre::Result<()> { - let mut rng = generators::rng(); - let chain_spec = Arc::new(ChainSpecBuilder::mainnet().prague_activated().build()); - let (provider, database_blocks, in_memory_blocks, _) = - provider_with_chain_spec_and_random_blocks( - &mut rng, - chain_spec.clone(), - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams { requests_count: Some(1..2), ..Default::default() }, - )?; - - let database_block = database_blocks.first().unwrap().clone(); - let in_memory_block = in_memory_blocks.last().unwrap().clone(); - - let prague_timestamp = - chain_spec.hardforks.fork(EthereumHardfork::Prague).as_timestamp().unwrap(); - - assert_eq!( - provider.requests_by_block(database_block.number.into(), prague_timestamp,)?, - database_block.body.requests.clone() - ); - assert_eq!( - provider.requests_by_block(in_memory_block.number.into(), prague_timestamp,)?, - in_memory_block.body.requests.clone() - ); - - Ok(()) - } - #[test] fn test_state_provider_factory() -> eyre::Result<()> { let mut rng = generators::rng(); @@ -3172,7 +2181,6 @@ mod tests { ..Default::default() }, )?; - let provider = Arc::new(provider); $( // Since data moves for each tried method, need to recalculate everything @@ -3287,7 +2295,6 @@ mod tests { ..Default::default() }, )?; - let provider = Arc::new(provider); $( // Since data moves for each tried method, need to recalculate everything @@ -3413,7 +2420,6 @@ mod tests { ..Default::default() }, )?; - let provider = Arc::new(provider); let mut in_memory_blocks: std::collections::VecDeque<_> = in_memory_blocks.into(); @@ -3715,8 +2721,6 @@ mod tests { }, )?; - let provider = Arc::new(provider); - // Old implementation was querying the database first. 
This is problematic, if there are // changes AFTER the database transaction is created. let old_transaction_hash_fn = @@ -3769,7 +2773,7 @@ mod tests { correct_transaction_hash_fn( to_be_persisted_tx.hash(), provider.canonical_in_memory_state(), - provider.database.clone() + provider.database ), Ok(Some(to_be_persisted_tx)) ); diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index 296dae8c6a..be6549033c 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -43,7 +43,7 @@ impl BundleStateProvider account.storage.iter().map(|(slot, value)| (slot, &value.present_value)), ) }) - .unwrap_or_else(|| HashedStorage::new(false)) + .unwrap_or_default() } } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs new file mode 100644 index 0000000000..6d6738ca4b --- /dev/null +++ b/crates/storage/provider/src/providers/consistent.rs @@ -0,0 +1,1885 @@ +use super::{DatabaseProviderRO, ProviderFactory, ProviderNodeTypes}; +use crate::{ + providers::StaticFileProvider, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, + BlockReader, BlockReaderIdExt, BlockSource, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, + HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, + StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, + TransactionsProvider, WithdrawalsProvider, +}; +use alloy_eips::{ + eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber, +}; +use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use reth_chain_state::{BlockState, CanonicalInMemoryState, MemoryOverlayStateProviderRef}; +use reth_chainspec::{ChainInfo, EthereumHardforks}; +use reth_db::models::BlockNumberAddress; +use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; +use reth_evm::ConfigureEvmEnv; +use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; +use reth_primitives::{ + Account, BlobSidecars, Block, BlockWithSenders, Header, Receipt, SealedBlock, + SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, + TransactionSignedNoHash, Withdrawals, +}; +use reth_prune_types::{PruneCheckpoint, PruneSegment}; +use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_api::{ + DatabaseProviderFactory, SidecarsProvider, StateProvider, StorageChangeSetReader, +}; +use reth_storage_errors::provider::ProviderResult; +use revm::{ + db::states::PlainStorageRevert, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, +}; +use std::{ + collections::{hash_map, HashMap}, + ops::{Add, Bound, RangeBounds, RangeInclusive, Sub}, + sync::Arc, +}; +use tracing::trace; + +/// Type that interacts with a snapshot view of the blockchain (storage and in-memory) at time of +/// instantiation, EXCEPT for pending, safe and finalized block which might change while holding +/// this provider. +/// +/// CAUTION: Avoid holding this provider for too long or the inner database transaction will +/// time-out. +#[derive(Debug)] +pub struct ConsistentProvider { + /// Storage provider. + storage_provider: as DatabaseProviderFactory>::Provider, + /// Head block at time of [`Self`] creation + head_block: Option>, + /// In-memory canonical state. This is not a snapshot, and can change! 
Use with caution. + canonical_in_memory_state: CanonicalInMemoryState, +} + +impl ConsistentProvider { + /// Create a new provider using [`ProviderFactory`] and [`CanonicalInMemoryState`], + /// + /// Underneath it will take a snapshot by fetching [`CanonicalInMemoryState::head_state`] and + /// [`ProviderFactory::database_provider_ro`] effectively maintaining one single snapshotted + /// view of memory and database. + pub fn new( + storage_provider_factory: ProviderFactory, + state: CanonicalInMemoryState, + ) -> ProviderResult { + // Each one provides a snapshot at the time of instantiation, but its order matters. + // + // If we acquire first the database provider, it's possible that before the in-memory chain + // snapshot is instantiated, it will flush blocks to disk. This would + // mean that our database provider would not have access to the flushed blocks (since it's + // working under an older view), while the in-memory state may have deleted them + // entirely. Resulting in gaps on the range. + let head_block = state.head_state(); + let storage_provider = storage_provider_factory.database_provider_ro()?; + Ok(Self { storage_provider, head_block, canonical_in_memory_state: state }) + } + + // Helper function to convert range bounds + fn convert_range_bounds( + &self, + range: impl RangeBounds, + end_unbounded: impl FnOnce() -> T, + ) -> (T, T) + where + T: Copy + Add + Sub + From, + { + let start = match range.start_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n + T::from(1u8), + Bound::Unbounded => T::from(0u8), + }; + + let end = match range.end_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n - T::from(1u8), + Bound::Unbounded => end_unbounded(), + }; + + (start, end) + } + + /// Storage provider for latest block + fn latest_ref<'a>(&'a self) -> ProviderResult> { + trace!(target: "providers::blockchain", "Getting latest block state provider"); + + // use latest state provider if the head state exists + if let Some(state) = &self.head_block { + trace!(target: "providers::blockchain", "Using head state for latest state provider"); + Ok(self.block_state_provider_ref(state)?.boxed()) + } else { + trace!(target: "providers::blockchain", "Using database state for latest state provider"); + self.storage_provider.latest() + } + } + + fn history_by_block_hash_ref<'a>( + &'a self, + block_hash: BlockHash, + ) -> ProviderResult> { + trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); + + self.get_in_memory_or_storage_by_block( + block_hash.into(), + |_| self.storage_provider.history_by_block_hash(block_hash), + |block_state| { + let state_provider = self.block_state_provider_ref(block_state)?; + Ok(Box::new(state_provider)) + }, + ) + } + + /// Returns a state provider indexed by the given block number or tag. + fn state_by_block_number_ref<'a>( + &'a self, + number: BlockNumber, + ) -> ProviderResult> { + let hash = + self.block_hash(number)?.ok_or_else(|| ProviderError::HeaderNotFound(number.into()))?; + self.history_by_block_hash_ref(hash) + } + + /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. + /// + /// If the range is empty, or there are no blocks for the given range, then this returns `None`. + pub fn get_state( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + if range.is_empty() { + return Ok(None) + } + let start_block_number = *range.start(); + let end_block_number = *range.end(); + + // We are not removing block meta as it is used to get block changesets. 
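+        // Outline of the steps below: gather the body indices, account changesets and
+        // storage changesets for every block in the range, rebuild the changed accounts'
+        // old and new state via `populate_bundle_state`, then slice the flat receipt
+        // range back into per-block `Vec`s so that `ExecutionOutcome::new_init` can
+        // anchor everything at `start_block_number`.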
+ let mut block_bodies = Vec::new(); + for block_num in range.clone() { + let block_body = self + .block_body_indices(block_num)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(block_num))?; + block_bodies.push((block_num, block_body)) + } + + // get transaction receipts + let Some(from_transaction_num) = block_bodies.first().map(|body| body.1.first_tx_num()) + else { + return Ok(None) + }; + let Some(to_transaction_num) = block_bodies.last().map(|body| body.1.last_tx_num()) else { + return Ok(None) + }; + + let mut account_changeset = Vec::new(); + for block_num in range.clone() { + let changeset = + self.account_block_changeset(block_num)?.into_iter().map(|elem| (block_num, elem)); + account_changeset.extend(changeset); + } + + let mut storage_changeset = Vec::new(); + for block_num in range { + let changeset = self.storage_changeset(block_num)?; + storage_changeset.extend(changeset); + } + + let (state, reverts) = + self.populate_bundle_state(account_changeset, storage_changeset, end_block_number)?; + + let mut receipt_iter = + self.receipts_by_tx_range(from_transaction_num..=to_transaction_num)?.into_iter(); + + let mut receipts = Vec::with_capacity(block_bodies.len()); + // loop break if we are at the end of the blocks. + for (_, block_body) in block_bodies { + let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); + for tx_num in block_body.tx_num_range() { + let receipt = receipt_iter + .next() + .ok_or_else(|| ProviderError::ReceiptNotFound(tx_num.into()))?; + block_receipts.push(Some(receipt)); + } + receipts.push(block_receipts); + } + + Ok(Some(ExecutionOutcome::new_init( + state, + reverts, + // We skip new contracts since we never delete them from the database + Vec::new(), + receipts.into(), + start_block_number, + Vec::new(), + ))) + } + + /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the + /// [`reth_db::PlainAccountState`] and [`reth_db::PlainStorageState`] tables, based on the given + /// storage and account changesets. + fn populate_bundle_state( + &self, + account_changeset: Vec<(u64, AccountBeforeTx)>, + storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>, + block_range_end: BlockNumber, + ) -> ProviderResult<(BundleStateInit, RevertsInit)> { + let mut state: BundleStateInit = HashMap::new(); + let mut reverts: RevertsInit = HashMap::new(); + let state_provider = self.state_by_block_number_ref(block_range_end)?; + + // add account changeset changes + for (block_number, account_before) in account_changeset.into_iter().rev() { + let AccountBeforeTx { info: old_info, address } = account_before; + match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let new_info = state_provider.basic_account(address)?; + entry.insert((old_info, new_info, HashMap::new())); + } + hash_map::Entry::Occupied(mut entry) => { + // overwrite old account state. + entry.get_mut().0 = old_info; + } + } + // insert old info into reverts. + reverts.entry(block_number).or_default().entry(address).or_default().0 = Some(old_info); + } + + // add storage changeset changes + for (block_and_address, old_storage) in storage_changeset.into_iter().rev() { + let BlockNumberAddress((block_number, address)) = block_and_address; + // get account state or insert from plain state. 
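+            // If no account changeset touched this address within the range, seed the
+            // entry with the present plain-state info on both the "old" and "new" sides;
+            // the per-slot storage map starts empty and is filled in below.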
+ let account_state = match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let present_info = state_provider.basic_account(address)?; + entry.insert((present_info, present_info, HashMap::new())) + } + hash_map::Entry::Occupied(entry) => entry.into_mut(), + }; + + // match storage. + match account_state.2.entry(old_storage.key) { + hash_map::Entry::Vacant(entry) => { + let new_storage_value = + state_provider.storage(address, old_storage.key)?.unwrap_or_default(); + entry.insert((old_storage.value, new_storage_value)); + } + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().0 = old_storage.value; + } + }; + + reverts + .entry(block_number) + .or_default() + .entry(address) + .or_default() + .1 + .push(old_storage); + } + + Ok((state, reverts)) + } + + /// Fetches a range of data from both in-memory state and persistent storage while a predicate + /// is met. + /// + /// Creates a snapshot of the in-memory chain state and database provider to prevent + /// inconsistencies. Splits the range into in-memory and storage sections, prioritizing + /// recent in-memory blocks in case of overlaps. + /// + /// * `fetch_db_range` function (`F`) provides access to the database provider, allowing the + /// user to retrieve the required items from the database using [`RangeInclusive`]. + /// * `map_block_state_item` function (`G`) provides each block of the range in the in-memory + /// state, allowing for selection or filtering for the desired data. + fn get_in_memory_or_storage_by_block_range_while( + &self, + range: impl RangeBounds, + fetch_db_range: F, + map_block_state_item: G, + mut predicate: P, + ) -> ProviderResult> + where + F: FnOnce( + &DatabaseProviderRO, + RangeInclusive, + &mut P, + ) -> ProviderResult>, + G: Fn(&BlockState, &mut P) -> Option, + P: FnMut(&T) -> bool, + { + // Each one provides a snapshot at the time of instantiation, but its order matters. + // + // If we acquire first the database provider, it's possible that before the in-memory chain + // snapshot is instantiated, it will flush blocks to disk. This would + // mean that our database provider would not have access to the flushed blocks (since it's + // working under an older view), while the in-memory state may have deleted them + // entirely. Resulting in gaps on the range. + let mut in_memory_chain = + self.head_block.as_ref().map(|b| b.chain().collect::>()).unwrap_or_default(); + let db_provider = &self.storage_provider; + + let (start, end) = self.convert_range_bounds(range, || { + // the first block is the highest one. + in_memory_chain + .first() + .map(|b| b.number()) + .unwrap_or_else(|| db_provider.last_block_number().unwrap_or_default()) + }); + + if start > end { + return Ok(vec![]) + } + + // Split range into storage_range and in-memory range. If the in-memory range is not + // necessary drop it early. + // + // The last block of `in_memory_chain` is the lowest block number. + let (in_memory, storage_range) = match in_memory_chain.last().as_ref().map(|b| b.number()) { + Some(lowest_memory_block) if lowest_memory_block <= end => { + let highest_memory_block = + in_memory_chain.first().as_ref().map(|b| b.number()).expect("qed"); + + // Database will for a time overlap with in-memory-chain blocks. In + // case of a re-org, it can mean that the database blocks are of a forked chain, and + // so, we should prioritize the in-memory overlapped blocks. 
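+                // E.g. with blocks up to 100 persisted, blocks 95..=105 in memory and a
+                // request for 90..=102: storage serves 90..=94 and memory serves
+                // 95..=102, so the overlapping 95..=100 is read from memory, not disk.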
+ let in_memory_range = + lowest_memory_block.max(start)..=end.min(highest_memory_block); + + // If requested range is in the middle of the in-memory range, remove the necessary + // lowest blocks + in_memory_chain.truncate( + in_memory_chain + .len() + .saturating_sub(start.saturating_sub(lowest_memory_block) as usize), + ); + + let storage_range = + (lowest_memory_block > start).then(|| start..=lowest_memory_block - 1); + + (Some((in_memory_chain, in_memory_range)), storage_range) + } + _ => { + // Drop the in-memory chain so we don't hold blocks in memory. + drop(in_memory_chain); + + (None, Some(start..=end)) + } + }; + + let mut items = Vec::with_capacity((end - start + 1) as usize); + + if let Some(storage_range) = storage_range { + let mut db_items = fetch_db_range(db_provider, storage_range.clone(), &mut predicate)?; + items.append(&mut db_items); + + // The predicate was not met, if the number of items differs from the expected. So, we + // return what we have. + if items.len() as u64 != storage_range.end() - storage_range.start() + 1 { + return Ok(items) + } + } + + if let Some((in_memory_chain, in_memory_range)) = in_memory { + for (num, block) in in_memory_range.zip(in_memory_chain.into_iter().rev()) { + debug_assert!(num == block.number()); + if let Some(item) = map_block_state_item(block, &mut predicate) { + items.push(item); + } else { + break + } + } + } + + Ok(items) + } + + /// This uses a given [`BlockState`] to initialize a state provider for that block. + fn block_state_provider_ref( + &self, + state: &BlockState, + ) -> ProviderResult> { + let anchor_hash = state.anchor().hash; + let latest_historical = self.history_by_block_hash_ref(anchor_hash)?; + let in_memory = state.chain().map(|block_state| block_state.block()).collect(); + Ok(MemoryOverlayStateProviderRef::new(latest_historical, in_memory)) + } + + /// Fetches data from either in-memory state or persistent storage for a range of transactions. + /// + /// * `fetch_from_db`: has a `DatabaseProviderRO` and the storage specific range. + /// * `fetch_from_block_state`: has a [`RangeInclusive`] of elements that should be fetched from + /// [`BlockState`]. [`RangeInclusive`] is necessary to handle partial look-ups of a block. + fn get_in_memory_or_storage_by_tx_range( + &self, + range: impl RangeBounds, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult> + where + S: FnOnce( + &DatabaseProviderRO, + RangeInclusive, + ) -> ProviderResult>, + M: Fn(RangeInclusive, &BlockState) -> ProviderResult>, + { + let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); + let provider = &self.storage_provider; + + // Get the last block number stored in the storage which does NOT overlap with in-memory + // chain. + let last_database_block_number = in_mem_chain + .last() + .map(|b| Ok(b.anchor().number)) + .unwrap_or_else(|| provider.last_block_number())?; + + // Get the next tx number for the last block stored in the storage, which marks the start of + // the in-memory state. + let last_block_body_index = provider + .block_body_indices(last_database_block_number)? 
+ .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; + let mut in_memory_tx_num = last_block_body_index.next_tx_num(); + + let (start, end) = self.convert_range_bounds(range, || { + in_mem_chain + .iter() + .map(|b| b.block_ref().block().body.transactions.len() as u64) + .sum::() + + last_block_body_index.last_tx_num() + }); + + if start > end { + return Ok(vec![]) + } + + let mut tx_range = start..=end; + + // If the range is entirely before the first in-memory transaction number, fetch from + // storage + if *tx_range.end() < in_memory_tx_num { + return fetch_from_db(provider, tx_range); + } + + let mut items = Vec::with_capacity((tx_range.end() - tx_range.start() + 1) as usize); + + // If the range spans storage and memory, get elements from storage first. + if *tx_range.start() < in_memory_tx_num { + // Determine the range that needs to be fetched from storage. + let db_range = *tx_range.start()..=in_memory_tx_num.saturating_sub(1); + + // Set the remaining transaction range for in-memory + tx_range = in_memory_tx_num..=*tx_range.end(); + + items.extend(fetch_from_db(provider, db_range)?); + } + + // Iterate from the lowest block to the highest in-memory chain + for block_state in in_mem_chain.iter().rev() { + let block_tx_count = block_state.block_ref().block().body.transactions.len(); + let remaining = (tx_range.end() - tx_range.start() + 1) as usize; + + // If the transaction range start is equal or higher than the next block first + // transaction, advance + if *tx_range.start() >= in_memory_tx_num + block_tx_count as u64 { + in_memory_tx_num += block_tx_count as u64; + continue + } + + // This should only be more than 0 once, in case of a partial range inside a block. + let skip = (tx_range.start() - in_memory_tx_num) as usize; + + items.extend(fetch_from_block_state( + skip..=skip + (remaining.min(block_tx_count - skip) - 1), + block_state, + )?); + + in_memory_tx_num += block_tx_count as u64; + + // Break if the range has been fully processed + if in_memory_tx_num > *tx_range.end() { + break + } + + // Set updated range + tx_range = in_memory_tx_num..=*tx_range.end(); + } + + Ok(items) + } + + /// Fetches data from either in-memory state or persistent storage by transaction + /// [`HashOrNumber`]. + fn get_in_memory_or_storage_by_tx( + &self, + id: HashOrNumber, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult> + where + S: FnOnce(&DatabaseProviderRO) -> ProviderResult>, + M: Fn(usize, TxNumber, &BlockState) -> ProviderResult>, + { + let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); + let provider = &self.storage_provider; + + // Get the last block number stored in the database which does NOT overlap with in-memory + // chain. + let last_database_block_number = in_mem_chain + .last() + .map(|b| Ok(b.anchor().number)) + .unwrap_or_else(|| provider.last_block_number())?; + + // Get the next tx number for the last block stored in the database and consider it the + // first tx number of the in-memory state + let last_block_body_index = provider + .block_body_indices(last_database_block_number)? 
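// Worked example for the partial-block case above, with hypothetical numbers: if a
// block holds transactions #500..=#509 and the requested range starts at #503, then
// `skip = 3` and only in-block indices 3..=9 (capped by the remaining item count)
// are fetched from that block's state.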
+ .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; + let mut in_memory_tx_num = last_block_body_index.next_tx_num(); + + // If the transaction number is less than the first in-memory transaction number, make a + // database lookup + if let HashOrNumber::Number(id) = id { + if id < in_memory_tx_num { + return fetch_from_db(provider) + } + } + + // Iterate from the lowest block to the highest + for block_state in in_mem_chain.iter().rev() { + let executed_block = block_state.block_ref(); + let block = executed_block.block(); + + for tx_index in 0..block.body.transactions.len() { + match id { + HashOrNumber::Hash(tx_hash) => { + if tx_hash == block.body.transactions[tx_index].hash() { + return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) + } + } + HashOrNumber::Number(id) => { + if id == in_memory_tx_num { + return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) + } + } + } + + in_memory_tx_num += 1; + } + } + + // Not found in-memory, so check database. + if let HashOrNumber::Hash(_) = id { + return fetch_from_db(provider) + } + + Ok(None) + } + + /// Fetches data from either in-memory state or persistent storage by [`BlockHashOrNumber`]. + pub(crate) fn get_in_memory_or_storage_by_block( + &self, + id: BlockHashOrNumber, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult + where + S: FnOnce(&DatabaseProviderRO) -> ProviderResult, + M: Fn(&BlockState) -> ProviderResult, + { + if let Some(Some(block_state)) = self.head_block.as_ref().map(|b| b.block_on_chain(id)) { + return fetch_from_block_state(block_state) + } + fetch_from_db(&self.storage_provider) + } +} + +impl ConsistentProvider { + /// Ensures that the given block number is canonical (synced) + /// + /// This is a helper for guarding the `HistoricalStateProvider` against block numbers that are + /// out of range and would lead to invalid results, mainly during initial sync. + /// + /// Verifying the `block_number` would be expensive since we need to lookup sync table + /// Instead, we ensure that the `block_number` is within the range of the + /// [`Self::best_block_number`] which is updated when a block is synced. + #[inline] + pub(crate) fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { + let latest = self.best_block_number()?; + if block_number > latest { + Err(ProviderError::HeaderNotFound(block_number.into())) + } else { + Ok(()) + } + } +} + +impl StaticFileProviderFactory for ConsistentProvider { + fn static_file_provider(&self) -> StaticFileProvider { + self.storage_provider.static_file_provider() + } +} + +impl HeaderProvider for ConsistentProvider { + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + (*block_hash).into(), + |db_provider| db_provider.header(block_hash), + |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), + ) + } + + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + num.into(), + |db_provider| db_provider.header_by_number(num), + |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), + ) + } + + fn header_td(&self, hash: &BlockHash) -> ProviderResult> { + if let Some(num) = self.block_number(*hash)? 
{ + self.header_td_by_number(num) + } else { + Ok(None) + } + } + + fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { + let number = if self.head_block.as_ref().map(|b| b.block_on_chain(number.into())).is_some() + { + // If the block exists in memory, we should return a TD for it. + // + // The canonical in memory state should only store post-merge blocks. Post-merge blocks + // have zero difficulty. This means we can use the total difficulty for the last + // finalized block number if present (so that we are not affected by reorgs), if not the + // last number in the database will be used. + if let Some(last_finalized_num_hash) = + self.canonical_in_memory_state.get_finalized_num_hash() + { + last_finalized_num_hash.number + } else { + self.last_block_number()? + } + } else { + // Otherwise, return what we have on disk for the input block + number + }; + self.storage_provider.header_td_by_number(number) + } + + fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.headers_range(range), + |block_state, _| Some(block_state.block_ref().block().header.header().clone()), + |_| true, + ) + } + + fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.sealed_header(number), + |block_state| Ok(Some(block_state.block_ref().block().header.clone())), + ) + } + + fn sealed_headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.sealed_headers_range(range), + |block_state, _| Some(block_state.block_ref().block().header.clone()), + |_| true, + ) + } + + fn sealed_headers_while( + &self, + range: impl RangeBounds, + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), + |block_state, predicate| { + let header = &block_state.block_ref().block().header; + predicate(header).then(|| header.clone()) + }, + predicate, + ) + } +} + +impl BlockHashReader for ConsistentProvider { + fn block_hash(&self, number: u64) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.block_hash(number), + |block_state| Ok(Some(block_state.hash())), + ) + } + + fn canonical_hashes_range( + &self, + start: BlockNumber, + end: BlockNumber, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + start..end, + |db_provider, inclusive_range, _| { + db_provider + .canonical_hashes_range(*inclusive_range.start(), *inclusive_range.end() + 1) + }, + |block_state, _| Some(block_state.hash()), + |_| true, + ) + } +} + +impl BlockNumReader for ConsistentProvider { + fn chain_info(&self) -> ProviderResult { + let best_number = self.best_block_number()?; + Ok(ChainInfo { best_hash: self.block_hash(best_number)?.unwrap_or_default(), best_number }) + } + + fn best_block_number(&self) -> ProviderResult { + self.head_block.as_ref().map(|b| Ok(b.number())).unwrap_or_else(|| self.last_block_number()) + } + + fn last_block_number(&self) -> ProviderResult { + self.storage_provider.last_block_number() + } + + fn block_number(&self, hash: B256) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + hash.into(), + |db_provider| 
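// Following the total-difficulty reasoning above, with hypothetical numbers: since
// post-merge blocks contribute zero difficulty, the TD at finalized block #90 equals
// the TD at in-memory head #100, so querying the finalized number yields the same
// value while staying unaffected by re-orgs.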
db_provider.block_number(hash), + |block_state| Ok(Some(block_state.number())), + ) + } +} + +impl BlockIdReader for ConsistentProvider { + fn pending_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block_num_hash()) + } + + fn safe_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.get_safe_num_hash()) + } + + fn finalized_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.get_finalized_num_hash()) + } +} + +impl SidecarsProvider for ConsistentProvider { + fn sidecars(&self, block_hash: &BlockHash) -> ProviderResult> { + self.storage_provider.sidecars(block_hash) + } + + fn sidecars_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.storage_provider.sidecars_by_number(num) + } +} + +impl BlockReader for ConsistentProvider { + fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + match source { + BlockSource::Any | BlockSource::Canonical => { + // Note: it's fine to return the unsealed block because the caller already has + // the hash + self.get_in_memory_or_storage_by_block( + hash.into(), + |db_provider| db_provider.find_block_by_hash(hash, source), + |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), + ) + } + BlockSource::Pending => { + Ok(self.canonical_in_memory_state.pending_block().map(|block| block.unseal())) + } + } + } + + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.block(id), + |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), + ) + } + + fn pending_block(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block()) + } + + fn pending_block_with_senders(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block_with_senders()) + } + + fn pending_block_and_receipts(&self) -> ProviderResult)>> { + Ok(self.canonical_in_memory_state.pending_block_and_receipts()) + } + + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.ommers(id), + |block_state| { + if self.chain_spec().final_paris_total_difficulty(block_state.number()).is_some() { + return Ok(Some(Vec::new())) + } + + Ok(Some(block_state.block_ref().block().body.ommers.clone())) + }, + ) + } + + fn block_body_indices( + &self, + number: BlockNumber, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.block_body_indices(number), + |block_state| { + // Find the last block indices on database + let last_storage_block_number = block_state.anchor().number; + let mut stored_indices = self + .storage_provider + .block_body_indices(last_storage_block_number)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(last_storage_block_number))?; + + // Prepare our block indices + stored_indices.first_tx_num = stored_indices.next_tx_num(); + stored_indices.tx_count = 0; + + // Iterate from the lowest block in memory until our target block + for state in block_state.chain().collect::>().into_iter().rev() { + let block_tx_count = state.block_ref().block.body.transactions.len() as u64; + if state.block_ref().block().number == number { + stored_indices.tx_count = block_tx_count; + } else { + stored_indices.first_tx_num += block_tx_count; + } + } + + Ok(Some(stored_indices)) + }, + ) + } + + /// Returns the block with senders with matching number or hash from database. 
+ /// + /// **NOTE: If [`TransactionVariant::NoHash`] is provided then the transactions have invalid + /// hashes, since they would need to be calculated on the spot, and we want fast querying.** + /// + /// Returns `None` if block is not found. + fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.block_with_senders(id, transaction_kind), + |block_state| Ok(Some(block_state.block_with_senders())), + ) + } + + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), + |block_state| Ok(Some(block_state.sealed_block_with_senders())), + ) + } + + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.block_range(range), + |block_state, _| Some(block_state.block_ref().block().clone().unseal()), + |_| true, + ) + } + + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.block_with_senders_range(range), + |block_state, _| Some(block_state.block_with_senders()), + |_| true, + ) + } + + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), + |block_state, _| Some(block_state.sealed_block_with_senders()), + |_| true, + ) + } +} + +impl TransactionsProvider for ConsistentProvider { + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + tx_hash.into(), + |db_provider| db_provider.transaction_id(tx_hash), + |_, tx_number, _| Ok(Some(tx_number)), + ) + } + + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_by_id(id), + |tx_index, _, block_state| { + Ok(block_state.block_ref().block().body.transactions.get(tx_index).cloned()) + }, + ) + } + + fn transaction_by_id_no_hash( + &self, + id: TxNumber, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_by_id_no_hash(id), + |tx_index, _, block_state| { + Ok(block_state + .block_ref() + .block() + .body + .transactions + .get(tx_index) + .cloned() + .map(Into::into)) + }, + ) + } + + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + if let Some(tx) = self.head_block.as_ref().and_then(|b| b.transaction_on_chain(hash)) { + return Ok(Some(tx)) + } + + self.storage_provider.transaction_by_hash(hash) + } + + fn transaction_by_hash_with_meta( + &self, + tx_hash: TxHash, + ) -> ProviderResult> { + if let Some((tx, meta)) = + self.head_block.as_ref().and_then(|b| b.transaction_meta_on_chain(tx_hash)) + { + return Ok(Some((tx, meta))) + } + + self.storage_provider.transaction_by_hash_with_meta(tx_hash) + } + + fn transaction_block(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_block(id), + |_, _, block_state| Ok(Some(block_state.block_ref().block().number)), + ) + } + + fn transactions_by_block( + &self, + 
id: BlockHashOrNumber, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + id, + |provider| provider.transactions_by_block(id), + |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), + ) + } + + fn transactions_by_block_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.transactions_by_block_range(range), + |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), + |_| true, + ) + } + + fn transactions_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), + |index_range, block_state| { + Ok(block_state.block_ref().block().body.transactions[index_range] + .iter() + .cloned() + .map(Into::into) + .collect()) + }, + ) + } + + fn senders_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.senders_by_tx_range(db_range), + |index_range, block_state| Ok(block_state.block_ref().senders[index_range].to_vec()), + ) + } + + fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_sender(id), + |tx_index, _, block_state| Ok(block_state.block_ref().senders.get(tx_index).copied()), + ) + } +} + +impl ReceiptProvider for ConsistentProvider { + fn receipt(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.receipt(id), + |tx_index, _, block_state| { + Ok(block_state.executed_block_receipts().get(tx_index).cloned()) + }, + ) + } + + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + for block_state in self.head_block.iter().flat_map(|b| b.chain()) { + let executed_block = block_state.block_ref(); + let block = executed_block.block(); + let receipts = block_state.executed_block_receipts(); + + // assuming 1:1 correspondence between transactions and receipts + debug_assert_eq!( + block.body.transactions.len(), + receipts.len(), + "Mismatch between transaction and receipt count" + ); + + if let Some(tx_index) = block.body.transactions.iter().position(|tx| tx.hash() == hash) + { + // safe to use tx_index for receipts due to 1:1 correspondence + return Ok(receipts.get(tx_index).cloned()); + } + } + + self.storage_provider.receipt_by_hash(hash) + } + + fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + block, + |db_provider| db_provider.receipts_by_block(block), + |block_state| Ok(Some(block_state.executed_block_receipts())), + ) + } + + fn receipts_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.receipts_by_tx_range(db_range), + |index_range, block_state| { + Ok(block_state.executed_block_receipts().drain(index_range).collect()) + }, + ) + } +} + +impl ReceiptProviderIdExt for ConsistentProvider { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + match block { + BlockId::Hash(rpc_block_hash) => { + let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; + if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) { + if let Some(state) = self + .head_block 
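// Note the fallback above: when `require_canonical` is not `Some(true)`, a receipt
// query by block hash also consults the in-memory chain, while `Some(true)`
// restricts the lookup to canonical state.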
+ .as_ref() + .and_then(|b| b.block_on_chain(rpc_block_hash.block_hash.into())) + { + receipts = Some(state.executed_block_receipts()); + } + } + Ok(receipts) + } + BlockId::Number(num_tag) => match num_tag { + BlockNumberOrTag::Pending => Ok(self + .canonical_in_memory_state + .pending_state() + .map(|block_state| block_state.executed_block_receipts())), + _ => { + if let Some(num) = self.convert_block_number(num_tag)? { + self.receipts_by_block(num.into()) + } else { + Ok(None) + } + } + }, + } + } +} + +impl WithdrawalsProvider for ConsistentProvider { + fn withdrawals_by_block( + &self, + id: BlockHashOrNumber, + timestamp: u64, + ) -> ProviderResult> { + if !self.chain_spec().is_shanghai_active_at_timestamp(timestamp) { + return Ok(None) + } + + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.withdrawals_by_block(id, timestamp), + |block_state| Ok(block_state.block_ref().block().body.withdrawals.clone()), + ) + } + + fn latest_withdrawal(&self) -> ProviderResult> { + let best_block_num = self.best_block_number()?; + + self.get_in_memory_or_storage_by_block( + best_block_num.into(), + |db_provider| db_provider.latest_withdrawal(), + |block_state| { + Ok(block_state + .block_ref() + .block() + .body + .withdrawals + .clone() + .and_then(|mut w| w.pop())) + }, + ) + } +} + +impl StageCheckpointReader for ConsistentProvider { + fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { + self.storage_provider.get_stage_checkpoint(id) + } + + fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { + self.storage_provider.get_stage_checkpoint_progress(id) + } + + fn get_all_checkpoints(&self) -> ProviderResult> { + self.storage_provider.get_all_checkpoints() + } +} + +impl EvmEnvProvider for ConsistentProvider { + fn fill_env_at( + &self, + cfg: &mut CfgEnvWithHandlerCfg, + block_env: &mut BlockEnv, + at: BlockHashOrNumber, + evm_config: EvmConfig, + ) -> ProviderResult<()> + where + EvmConfig: ConfigureEvmEnv
<Header = Header>,
+    {
+        let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?;
+        let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?;
+        self.fill_env_with_header(cfg, block_env, &header, evm_config)
+    }
+
+    fn fill_env_with_header<EvmConfig>(
+        &self,
+        cfg: &mut CfgEnvWithHandlerCfg,
+        block_env: &mut BlockEnv,
+        header: &Header,
+        evm_config: EvmConfig,
+    ) -> ProviderResult<()>
+    where
+        EvmConfig: ConfigureEvmEnv<Header = Header>,
+    {
+        let total_difficulty = self
+            .header_td_by_number(header.number)?
+            .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?;
+        evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty);
+        Ok(())
+    }
+
+    fn fill_cfg_env_at<EvmConfig>(
+        &self,
+        cfg: &mut CfgEnvWithHandlerCfg,
+        at: BlockHashOrNumber,
+        evm_config: EvmConfig,
+    ) -> ProviderResult<()>
+    where
+        EvmConfig: ConfigureEvmEnv<Header = Header>,
+    {
+        let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?;
+        let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?;
+        self.fill_cfg_env_with_header(cfg, &header, evm_config)
+    }
+
+    fn fill_cfg_env_with_header<EvmConfig>(
+        &self,
+        cfg: &mut CfgEnvWithHandlerCfg,
+        header: &Header,
+        evm_config: EvmConfig,
+    ) -> ProviderResult<()>
+    where
+        EvmConfig: ConfigureEvmEnv<Header = Header>
, + { + let total_difficulty = self + .header_td_by_number(header.number)? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + evm_config.fill_cfg_env(cfg, header, total_difficulty); + Ok(()) + } +} + +impl PruneCheckpointReader for ConsistentProvider { + fn get_prune_checkpoint( + &self, + segment: PruneSegment, + ) -> ProviderResult> { + self.storage_provider.get_prune_checkpoint(segment) + } + + fn get_prune_checkpoints(&self) -> ProviderResult> { + self.storage_provider.get_prune_checkpoints() + } +} + +impl ChainSpecProvider for ConsistentProvider { + type ChainSpec = N::ChainSpec; + + fn chain_spec(&self) -> Arc { + ChainSpecProvider::chain_spec(&self.storage_provider) + } +} + +impl BlockReaderIdExt for ConsistentProvider { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { + match id { + BlockId::Number(num) => self.block_by_number_or_tag(num), + BlockId::Hash(hash) => { + // TODO: should we only apply this for the RPCs that are listed in EIP-1898? + // so not at the provider level? + // if we decide to do this at a higher level, then we can make this an automatic + // trait impl + if Some(true) == hash.require_canonical { + // check the database, canonical blocks are only stored in the database + self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) + } else { + self.block_by_hash(hash.block_hash) + } + } + } + } + + fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + Ok(match id { + BlockNumberOrTag::Latest => { + Some(self.canonical_in_memory_state.get_canonical_head().unseal()) + } + BlockNumberOrTag::Finalized => { + self.canonical_in_memory_state.get_finalized_header().map(|h| h.unseal()) + } + BlockNumberOrTag::Safe => { + self.canonical_in_memory_state.get_safe_header().map(|h| h.unseal()) + } + BlockNumberOrTag::Earliest => self.header_by_number(0)?, + BlockNumberOrTag::Pending => self.canonical_in_memory_state.pending_header(), + + BlockNumberOrTag::Number(num) => self.header_by_number(num)?, + }) + } + + fn sealed_header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { + match id { + BlockNumberOrTag::Latest => { + Ok(Some(self.canonical_in_memory_state.get_canonical_head())) + } + BlockNumberOrTag::Finalized => { + Ok(self.canonical_in_memory_state.get_finalized_header()) + } + BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), + BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else( + || Ok(None), + |h| { + let sealed = h.seal_slow(); + let (header, seal) = sealed.into_parts(); + Ok(Some(SealedHeader::new(header, seal))) + }, + ), + BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), + BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( + || Ok(None), + |h| { + let sealed = h.seal_slow(); + let (header, seal) = sealed.into_parts(); + Ok(Some(SealedHeader::new(header, seal))) + }, + ), + } + } + + fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { + Ok(match id { + BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { + let sealed = h.seal_slow(); + let (header, seal) = sealed.into_parts(); + SealedHeader::new(header, seal) + }), + }) + } + + fn header_by_id(&self, id: BlockId) -> ProviderResult> { + Ok(match id { + BlockId::Number(num) => self.header_by_number_or_tag(num)?, + BlockId::Hash(hash) => self.header(&hash.block_hash)?, + }) + } + + fn ommers_by_id(&self, id: BlockId) -> 
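// Cost note for the lookups above: `seal_slow()` recomputes the header hash, so
// resolving `Earliest` or an explicit number seals the header on the fly, whereas
// `Latest`, `Safe`, `Finalized`, and `Pending` return headers that are already
// sealed in memory.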
ProviderResult>> { + match id { + BlockId::Number(num) => self.ommers_by_number_or_tag(num), + BlockId::Hash(hash) => { + // TODO: EIP-1898 question, see above + // here it is not handled + self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) + } + } + } +} + +impl StorageChangeSetReader for ConsistentProvider { + fn storage_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + let changesets = state + .block() + .execution_output + .bundle + .reverts + .clone() + .into_plain_state_reverts() + .storage + .into_iter() + .flatten() + .flat_map(|revert: PlainStorageRevert| { + revert.storage_revert.into_iter().map(move |(key, value)| { + ( + BlockNumberAddress((block_number, revert.address)), + StorageEntry { key: key.into(), value: value.to_previous_value() }, + ) + }) + }) + .collect(); + Ok(changesets) + } else { + // Perform checks on whether or not changesets exist for the block. + + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let storage_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::StorageHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !storage_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + self.storage_provider.storage_changeset(block_number) + } + } +} + +impl ChangeSetReader for ConsistentProvider { + fn account_block_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + let changesets = state + .block_ref() + .execution_output + .bundle + .reverts + .clone() + .into_plain_state_reverts() + .accounts + .into_iter() + .flatten() + .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }) + .collect(); + Ok(changesets) + } else { + // Perform checks on whether or not changesets exist for the block. + + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let account_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::AccountHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !account_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + self.storage_provider.account_block_changeset(block_number) + } + } +} + +impl AccountReader for ConsistentProvider { + /// Get basic account information. + fn basic_account(&self, address: Address) -> ProviderResult> { + // use latest state provider + let state_provider = self.latest_ref()?; + state_provider.basic_account(address) + } +} + +impl StateReader for ConsistentProvider { + /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. + /// + /// If data for the block does not exist, this will return [`None`]. 
+ /// + /// NOTE: This cannot be called safely in a loop outside of the blockchain tree thread. This is + /// because the [`CanonicalInMemoryState`] could change during a reorg, causing results to be + /// inconsistent. Currently this can safely be called within the blockchain tree thread, + /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the + /// first place. + fn get_state(&self, block: BlockNumber) -> ProviderResult> { + if let Some(state) = self.head_block.as_ref().and_then(|b| b.block_on_chain(block.into())) { + let state = state.block_ref().execution_outcome().clone(); + Ok(Some(state)) + } else { + Self::get_state(self, block..=block) + } + } +} + +#[cfg(test)] +mod tests { + use crate::{ + providers::blockchain_provider::BlockchainProvider2, + test_utils::create_test_provider_factory, BlockWriter, + }; + use alloy_eips::BlockHashOrNumber; + use alloy_primitives::B256; + use itertools::Itertools; + use rand::Rng; + use reth_chain_state::{ExecutedBlock, NewCanonicalChain}; + use reth_db::models::AccountBeforeTx; + use reth_execution_types::ExecutionOutcome; + use reth_primitives::SealedBlock; + use reth_storage_api::{BlockReader, BlockSource, ChangeSetReader}; + use reth_testing_utils::generators::{ + self, random_block_range, random_changeset_range, random_eoa_accounts, BlockRangeParams, + }; + use revm::db::BundleState; + use std::{ + ops::{Bound, Range, RangeBounds}, + sync::Arc, + }; + + const TEST_BLOCKS_COUNT: usize = 5; + + fn random_blocks( + rng: &mut impl Rng, + database_blocks: usize, + in_memory_blocks: usize, + requests_count: Option>, + withdrawals_count: Option>, + tx_count: impl RangeBounds, + ) -> (Vec, Vec) { + let block_range = (database_blocks + in_memory_blocks - 1) as u64; + + let tx_start = match tx_count.start_bound() { + Bound::Included(&n) | Bound::Excluded(&n) => n, + Bound::Unbounded => u8::MIN, + }; + let tx_end = match tx_count.end_bound() { + Bound::Included(&n) | Bound::Excluded(&n) => n + 1, + Bound::Unbounded => u8::MAX, + }; + + let blocks = random_block_range( + rng, + 0..=block_range, + BlockRangeParams { + parent: Some(B256::ZERO), + tx_count: tx_start..tx_end, + requests_count, + withdrawals_count, + }, + ); + let (database_blocks, in_memory_blocks) = blocks.split_at(database_blocks); + (database_blocks.to_vec(), in_memory_blocks.to_vec()) + } + + #[test] + fn test_block_reader_find_block_by_hash() -> eyre::Result<()> { + // Initialize random number generator and provider factory + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks and split into database and in-memory blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + let (database_blocks, in_memory_blocks) = blocks.split_at(5); + + // Insert first 5 blocks into the database + let provider_rw = factory.provider_rw()?; + for block in database_blocks { + provider_rw.insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + )?; + } + provider_rw.commit()?; + + // Create a new provider + let provider = BlockchainProvider2::new(factory)?; + let consistent_provider = provider.consistent_provider()?; + + // Useful blocks + let first_db_block = database_blocks.first().unwrap(); + let first_in_mem_block = in_memory_blocks.first().unwrap(); + let last_in_mem_block = in_memory_blocks.last().unwrap(); + + // No block in memory before setting in 
memory state + assert_eq!( + consistent_provider.find_block_by_hash(first_in_mem_block.hash(), BlockSource::Any)?, + None + ); + assert_eq!( + consistent_provider + .find_block_by_hash(first_in_mem_block.hash(), BlockSource::Canonical)?, + None + ); + // No pending block in memory + assert_eq!( + consistent_provider + .find_block_by_hash(first_in_mem_block.hash(), BlockSource::Pending)?, + None + ); + + // Insert first block into the in-memory state + let in_memory_block_senders = + first_in_mem_block.senders().expect("failed to recover senders"); + let chain = NewCanonicalChain::Commit { + new: vec![ExecutedBlock::new( + Arc::new(first_in_mem_block.clone()), + Arc::new(in_memory_block_senders), + Default::default(), + Default::default(), + Default::default(), + )], + }; + consistent_provider.canonical_in_memory_state.update_chain(chain); + let consistent_provider = provider.consistent_provider()?; + + // Now the block should be found in memory + assert_eq!( + consistent_provider.find_block_by_hash(first_in_mem_block.hash(), BlockSource::Any)?, + Some(first_in_mem_block.clone().into()) + ); + assert_eq!( + consistent_provider + .find_block_by_hash(first_in_mem_block.hash(), BlockSource::Canonical)?, + Some(first_in_mem_block.clone().into()) + ); + + // Find the first block in database by hash + assert_eq!( + consistent_provider.find_block_by_hash(first_db_block.hash(), BlockSource::Any)?, + Some(first_db_block.clone().into()) + ); + assert_eq!( + consistent_provider + .find_block_by_hash(first_db_block.hash(), BlockSource::Canonical)?, + Some(first_db_block.clone().into()) + ); + + // No pending block in database + assert_eq!( + consistent_provider.find_block_by_hash(first_db_block.hash(), BlockSource::Pending)?, + None + ); + + // Insert the last block into the pending state + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + block: Arc::new(last_in_mem_block.clone()), + senders: Default::default(), + execution_output: Default::default(), + hashed_state: Default::default(), + trie: Default::default(), + }); + + // Now the last block should be found in memory + assert_eq!( + consistent_provider + .find_block_by_hash(last_in_mem_block.hash(), BlockSource::Pending)?, + Some(last_in_mem_block.clone().into()) + ); + + Ok(()) + } + + #[test] + fn test_block_reader_block() -> eyre::Result<()> { + // Initialize random number generator and provider factory + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks and split into database and in-memory blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + let (database_blocks, in_memory_blocks) = blocks.split_at(5); + + // Insert first 5 blocks into the database + let provider_rw = factory.provider_rw()?; + for block in database_blocks { + provider_rw.insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + )?; + } + provider_rw.commit()?; + + // Create a new provider + let provider = BlockchainProvider2::new(factory)?; + let consistent_provider = provider.consistent_provider()?; + + // First in memory block + let first_in_mem_block = in_memory_blocks.first().unwrap(); + // First database block + let first_db_block = database_blocks.first().unwrap(); + + // First in memory block should not be found yet as not integrated to the in-memory state + assert_eq!( + 
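// As the steps above show, a `ConsistentProvider` is a point-in-time snapshot: the
// test re-creates it after `update_chain`, since a provider obtained before the
// in-memory commit keeps reporting the block as absent.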
consistent_provider.block(BlockHashOrNumber::Hash(first_in_mem_block.hash()))?, + None + ); + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Number(first_in_mem_block.number))?, + None + ); + + // Insert first block into the in-memory state + let in_memory_block_senders = + first_in_mem_block.senders().expect("failed to recover senders"); + let chain = NewCanonicalChain::Commit { + new: vec![ExecutedBlock::new( + Arc::new(first_in_mem_block.clone()), + Arc::new(in_memory_block_senders), + Default::default(), + Default::default(), + Default::default(), + )], + }; + consistent_provider.canonical_in_memory_state.update_chain(chain); + + let consistent_provider = provider.consistent_provider()?; + + // First in memory block should be found + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Hash(first_in_mem_block.hash()))?, + Some(first_in_mem_block.clone().into()) + ); + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Number(first_in_mem_block.number))?, + Some(first_in_mem_block.clone().into()) + ); + + // First database block should be found + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Hash(first_db_block.hash()))?, + Some(first_db_block.clone().into()) + ); + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Number(first_db_block.number))?, + Some(first_db_block.clone().into()) + ); + + Ok(()) + } + + #[test] + fn test_changeset_reader() -> eyre::Result<()> { + let mut rng = generators::rng(); + + let (database_blocks, in_memory_blocks) = + random_blocks(&mut rng, TEST_BLOCKS_COUNT, 1, None, None, 0..1); + + let first_database_block = database_blocks.first().map(|block| block.number).unwrap(); + let last_database_block = database_blocks.last().map(|block| block.number).unwrap(); + let first_in_memory_block = in_memory_blocks.first().map(|block| block.number).unwrap(); + + let accounts = random_eoa_accounts(&mut rng, 2); + + let (database_changesets, database_state) = random_changeset_range( + &mut rng, + &database_blocks, + accounts.into_iter().map(|(address, account)| (address, (account, Vec::new()))), + 0..0, + 0..0, + ); + let (in_memory_changesets, in_memory_state) = random_changeset_range( + &mut rng, + &in_memory_blocks, + database_state + .iter() + .map(|(address, (account, storage))| (*address, (*account, storage.clone()))), + 0..0, + 0..0, + ); + + let factory = create_test_provider_factory(); + + let provider_rw = factory.provider_rw()?; + provider_rw.append_blocks_with_state( + database_blocks + .into_iter() + .map(|b| b.seal_with_senders().expect("failed to seal block with senders")) + .collect(), + ExecutionOutcome { + bundle: BundleState::new( + database_state.into_iter().map(|(address, (account, _))| { + (address, None, Some(account.into()), Default::default()) + }), + database_changesets + .iter() + .map(|block_changesets| { + block_changesets.iter().map(|(address, account, _)| { + (*address, Some(Some((*account).into())), []) + }) + }) + .collect::>(), + Vec::new(), + ), + first_block: first_database_block, + ..Default::default() + }, + Default::default(), + Default::default(), + )?; + provider_rw.commit()?; + + let provider = BlockchainProvider2::new(factory)?; + + let in_memory_changesets = in_memory_changesets.into_iter().next().unwrap(); + let chain = NewCanonicalChain::Commit { + new: vec![in_memory_blocks + .first() + .map(|block| { + let senders = block.senders().expect("failed to recover senders"); + ExecutedBlock::new( + Arc::new(block.clone()), + Arc::new(senders), + Arc::new(ExecutionOutcome { + bundle: 
BundleState::new( + in_memory_state.into_iter().map(|(address, (account, _))| { + (address, None, Some(account.into()), Default::default()) + }), + [in_memory_changesets.iter().map(|(address, account, _)| { + (*address, Some(Some((*account).into())), Vec::new()) + })], + [], + ), + first_block: first_in_memory_block, + ..Default::default() + }), + Default::default(), + Default::default(), + ) + }) + .unwrap()], + }; + provider.canonical_in_memory_state.update_chain(chain); + + let consistent_provider = provider.consistent_provider()?; + + assert_eq!( + consistent_provider.account_block_changeset(last_database_block).unwrap(), + database_changesets + .into_iter() + .last() + .unwrap() + .into_iter() + .sorted_by_key(|(address, _, _)| *address) + .map(|(address, account, _)| AccountBeforeTx { address, info: Some(account) }) + .collect::>() + ); + assert_eq!( + consistent_provider.account_block_changeset(first_in_memory_block).unwrap(), + in_memory_changesets + .into_iter() + .sorted_by_key(|(address, _, _)| *address) + .map(|(address, account, _)| AccountBeforeTx { address, info: Some(account) }) + .collect::>() + ); + + Ok(()) + } +} diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index 559e2ae90e..8d0a1706db 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -62,7 +62,6 @@ pub(crate) enum Action { InsertTransactionHashNumbers, InsertBlockWithdrawals, InsertBlockSidecars, - InsertBlockRequests, InsertBlockBodyIndices, InsertTransactionBlocks, GetNextTxNum, @@ -109,8 +108,6 @@ struct DatabaseProviderMetrics { insert_block_withdrawals: Histogram, /// Duration of insert block sidecars insert_block_sidecars: Histogram, - /// Duration of insert block requests - insert_block_requests: Histogram, /// Duration of insert block body indices insert_block_body_indices: Histogram, /// Duration of insert transaction blocks @@ -143,7 +140,6 @@ impl DatabaseProviderMetrics { Action::InsertTransactionHashNumbers => self.insert_tx_hash_numbers.record(duration), Action::InsertBlockWithdrawals => self.insert_block_withdrawals.record(duration), Action::InsertBlockSidecars => self.insert_block_sidecars.record(duration), - Action::InsertBlockRequests => self.insert_block_requests.record(duration), Action::InsertBlockBodyIndices => self.insert_block_body_indices.record(duration), Action::InsertTransactionBlocks => self.insert_tx_blocks.record(duration), Action::GetNextTxNum => self.get_next_tx_num.record(duration), diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 82b3b644f8..4e3a651ecc 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -4,11 +4,10 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, ParliaSnapshotReader, - ProviderError, PruneCheckpointReader, RequestsProvider, StageCheckpointReader, - StateProviderBox, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, - WithdrawalsProvider, + ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::BlockHashOrNumber; +use 
alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use core::fmt; use reth_chainspec::{ChainInfo, EthereumHardforks}; @@ -20,7 +19,7 @@ use reth_node_types::NodeTypesWithDB; use reth_primitives::{ parlia::Snapshot, BlobSidecars, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, + TransactionSignedNoHash, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -131,7 +130,7 @@ impl ProviderFactory { /// This sets the [`PruneModes`] to [`None`], because they should only be relevant for writing /// data. #[track_caller] - pub fn provider(&self) -> ProviderResult> { + pub fn provider(&self) -> ProviderResult> { Ok(DatabaseProvider::new( self.db.tx()?, self.chain_spec.clone(), @@ -145,7 +144,7 @@ impl ProviderFactory { /// [`BlockHashReader`]. This may fail if the inner read/write database transaction fails to /// open. #[track_caller] - pub fn provider_rw(&self) -> ProviderResult> { + pub fn provider_rw(&self) -> ProviderResult> { Ok(DatabaseProviderRW(DatabaseProvider::new_rw( self.db.tx_mut()?, self.chain_spec.clone(), @@ -187,8 +186,8 @@ impl ProviderFactory { impl DatabaseProviderFactory for ProviderFactory { type DB = N::DB; - type Provider = DatabaseProvider<::TX, N::ChainSpec>; - type ProviderRW = DatabaseProvider<::TXMut, N::ChainSpec>; + type Provider = DatabaseProvider<::TX, N>; + type ProviderRW = DatabaseProvider<::TXMut, N>; fn database_provider_ro(&self) -> ProviderResult { self.provider() @@ -535,16 +534,6 @@ impl SidecarsProvider for ProviderFactory { } } -impl RequestsProvider for ProviderFactory { - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - self.provider()?.requests_by_block(id, timestamp) - } -} - impl StageCheckpointReader for ProviderFactory { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.provider()?.get_stage_checkpoint(id) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 1ac6665002..4d35474f74 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -6,17 +6,17 @@ use crate::{ AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter, }, writer::UnifiedStorageWriter, - AccountReader, BlockExecutionReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, - BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, - DBProvider, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, - HeaderSyncGapProvider, HistoricalStateProvider, HistoryWriter, LatestStateProvider, + AccountReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, + BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, EvmEnvProvider, + HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, + HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ParliaSnapshotReader, ProviderError, PruneCheckpointReader, - PruneCheckpointWriter, RequestsProvider, RevertsInit, SidecarsProvider, StageCheckpointReader, - StateChangeWriter, 
StateProviderBox, StateReader, StateWriter, StaticFileProviderFactory, - StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, + PruneCheckpointWriter, RevertsInit, SidecarsProvider, StageCheckpointReader, StateChangeWriter, + StateProviderBox, StateReader, StateWriter, StaticFileProviderFactory, StatsReader, + StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; -use alloy_eips::BlockHashOrNumber; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use itertools::{izip, Itertools}; use rayon::slice::ParallelSliceMut; @@ -39,15 +39,16 @@ use reth_db_api::{ use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; +use reth_node_types::NodeTypes; use reth_primitives::{ parlia::Snapshot, Account, BlobSidecars, Block, BlockBody, BlockWithSenders, Bytecode, - GotExpected, Header, Receipt, Requests, SealedBlock, SealedBlockWithSenders, SealedHeader, + GotExpected, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, TransactionSigned, - TransactionSignedEcRecovered, TransactionSignedNoHash, Withdrawal, Withdrawals, + TransactionSignedEcRecovered, TransactionSignedNoHash, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{StorageChangeSetReader, TryIntoHistoricalStateProvider}; +use reth_storage_api::{StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, @@ -68,43 +69,43 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::watch; -use tracing::{debug, error, warn}; +use tracing::{debug, error, trace, warn}; /// A [`DatabaseProvider`] that holds a read-only database transaction. -pub type DatabaseProviderRO = DatabaseProvider<::TX, Spec>; +pub type DatabaseProviderRO = DatabaseProvider<::TX, N>; /// A [`DatabaseProvider`] that holds a read-write database transaction. /// /// Ideally this would be an alias type. However, there's some weird compiler error (), that forces us to wrap this in a struct instead. /// Once that issue is solved, we can probably revert back to being an alias type. #[derive(Debug)] -pub struct DatabaseProviderRW( - pub DatabaseProvider<::TXMut, Spec>, +pub struct DatabaseProviderRW( + pub DatabaseProvider<::TXMut, N>, ); -impl Deref for DatabaseProviderRW { - type Target = DatabaseProvider<::TXMut, Spec>; +impl Deref for DatabaseProviderRW { + type Target = DatabaseProvider<::TXMut, N>; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for DatabaseProviderRW { +impl DerefMut for DatabaseProviderRW { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl AsRef::TXMut, Spec>> - for DatabaseProviderRW +impl AsRef::TXMut, N>> + for DatabaseProviderRW { - fn as_ref(&self) -> &DatabaseProvider<::TXMut, Spec> { + fn as_ref(&self) -> &DatabaseProvider<::TXMut, N> { &self.0 } } -impl DatabaseProviderRW { +impl DatabaseProviderRW { /// Commit database transaction and static file if it exists. 
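// Sketch of the intended call pattern (hypothetical workflow): obtain the wrapper
// via `factory.provider_rw()?`, use it as a `DatabaseProvider` through `Deref`, and
// finish with `commit()?` so the database transaction and any static-file writes
// are persisted together.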
pub fn commit(self) -> ProviderResult { self.0.commit() @@ -116,10 +117,10 @@ impl DatabaseProviderRW { } } -impl From> - for DatabaseProvider<::TXMut, Spec> +impl From> + for DatabaseProvider<::TXMut, N> { - fn from(provider: DatabaseProviderRW) -> Self { + fn from(provider: DatabaseProviderRW) -> Self { provider.0 } } @@ -127,46 +128,104 @@ impl From> /// A provider struct that fetches data from the database. /// Wrapper around [`DbTx`] and [`DbTxMut`]. Example: [`HeaderProvider`] [`BlockHashReader`] #[derive(Debug)] -pub struct DatabaseProvider { +pub struct DatabaseProvider { /// Database transaction. tx: TX, /// Chain spec - chain_spec: Arc, + chain_spec: Arc, /// Static File provider static_file_provider: StaticFileProvider, /// Pruning configuration prune_modes: PruneModes, } -impl DatabaseProvider { +impl DatabaseProvider { /// Returns reference to prune modes. pub const fn prune_modes_ref(&self) -> &PruneModes { &self.prune_modes } } -impl StaticFileProviderFactory for DatabaseProvider { +impl DatabaseProvider { + /// State provider for latest block + pub fn latest<'a>(&'a self) -> ProviderResult> { + trace!(target: "providers::db", "Returning latest state provider"); + Ok(Box::new(LatestStateProviderRef::new(&self.tx, self.static_file_provider.clone()))) + } + + /// Storage provider for state at that given block hash + pub fn history_by_block_hash<'a>( + &'a self, + block_hash: BlockHash, + ) -> ProviderResult> { + let mut block_number = + self.block_number(block_hash)?.ok_or(ProviderError::BlockHashNotFound(block_hash))?; + if block_number == self.best_block_number().unwrap_or_default() && + block_number == self.last_block_number().unwrap_or_default() + { + return Ok(Box::new(LatestStateProviderRef::new( + &self.tx, + self.static_file_provider.clone(), + ))) + } + + // +1 as the changeset that we want is the one that was applied after this block. + block_number += 1; + + let account_history_prune_checkpoint = + self.get_prune_checkpoint(PruneSegment::AccountHistory)?; + let storage_history_prune_checkpoint = + self.get_prune_checkpoint(PruneSegment::StorageHistory)?; + + let mut state_provider = HistoricalStateProviderRef::new( + &self.tx, + block_number, + self.static_file_provider.clone(), + ); + + // If we pruned account or storage history, we can't return state on every historical block. + // Instead, we should cap it at the latest prune checkpoint for corresponding prune segment. + if let Some(prune_checkpoint_block_number) = + account_history_prune_checkpoint.and_then(|checkpoint| checkpoint.block_number) + { + state_provider = state_provider.with_lowest_available_account_history_block_number( + prune_checkpoint_block_number + 1, + ); + } + if let Some(prune_checkpoint_block_number) = + storage_history_prune_checkpoint.and_then(|checkpoint| checkpoint.block_number) + { + state_provider = state_provider.with_lowest_available_storage_history_block_number( + prune_checkpoint_block_number + 1, + ); + } + + Ok(Box::new(state_provider)) + } +} + +impl StaticFileProviderFactory for DatabaseProvider { /// Returns a static file provider fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } } -impl ChainSpecProvider - for DatabaseProvider +impl> ChainSpecProvider + for DatabaseProvider { - type ChainSpec = Spec; + type ChainSpec = N::ChainSpec; fn chain_spec(&self) -> Arc { self.chain_spec.clone() } } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-write transaction. 
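// Worked example for the pruning cap above, with hypothetical numbers: if the
// account-history prune checkpoint is at block #80, historical account state is
// only available from block #81 onward, so the returned provider has its lowest
// available account-history block set to #81.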
pub const fn new_rw( tx: TX, - chain_spec: Arc, + chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { @@ -174,15 +233,13 @@ impl DatabaseProvider { } } -impl AsRef for DatabaseProvider { +impl AsRef for DatabaseProvider { fn as_ref(&self) -> &Self { self } } -impl TryIntoHistoricalStateProvider - for DatabaseProvider -{ +impl TryIntoHistoricalStateProvider for DatabaseProvider { fn try_into_history_at_block( self, mut block_number: BlockNumber, @@ -225,8 +282,8 @@ impl TryIntoHistoricalStateProvider } } -impl - DatabaseProvider +impl + 'static> + DatabaseProvider { // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. // #[cfg(any(test, feature = "test-utils"))] @@ -308,11 +365,11 @@ where Ok(Vec::new()) } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-only transaction. pub const fn new( tx: TX, - chain_spec: Arc, + chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { @@ -335,7 +392,7 @@ impl DatabaseProvider { } /// Returns a reference to the chain specification. - pub fn chain_spec(&self) -> &Spec { + pub fn chain_spec(&self) -> &N::ChainSpec { &self.chain_spec } @@ -433,7 +490,7 @@ impl DatabaseProvider { construct_block: BF, ) -> ProviderResult> where - Spec: EthereumHardforks, + N::ChainSpec: EthereumHardforks, H: AsRef
, HF: FnOnce(BlockNumber) -> ProviderResult>, BF: FnOnce( @@ -442,7 +499,6 @@ impl DatabaseProvider { Vec
, Vec
, Option, - Option, ) -> ProviderResult>, { let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; @@ -451,7 +507,6 @@ impl DatabaseProvider { let ommers = self.ommers(block_number.into())?.unwrap_or_default(); let withdrawals = self.withdrawals_by_block(block_number.into(), header.as_ref().timestamp)?; - let requests = self.requests_by_block(block_number.into(), header.as_ref().timestamp)?; // Get the block body // @@ -483,7 +538,7 @@ impl DatabaseProvider { .collect(); // the sidecars will always be None as this is not needed - construct_block(header, body, senders, ommers, withdrawals, requests) + construct_block(header, body, senders, ommers, withdrawals) } /// Returns a range of blocks from the database. @@ -494,7 +549,6 @@ impl DatabaseProvider { /// - Range of transaction numbers /// – Ommers /// – Withdrawals - /// – Requests /// – Senders fn block_range( &self, @@ -503,7 +557,7 @@ impl DatabaseProvider { mut assemble_block: F, ) -> ProviderResult> where - Spec: EthereumHardforks, + N::ChainSpec: EthereumHardforks, H: AsRef
, HF: FnOnce(RangeInclusive) -> ProviderResult>, F: FnMut( @@ -512,7 +566,6 @@ impl DatabaseProvider { Vec
, Option, Option, - Option, ) -> ProviderResult, { if range.is_empty() { @@ -525,7 +578,6 @@ impl DatabaseProvider { let headers = headers_range(range)?; let mut ommers_cursor = self.tx.cursor_read::()?; let mut withdrawals_cursor = self.tx.cursor_read::()?; - let mut requests_cursor = self.tx.cursor_read::()?; let mut block_body_cursor = self.tx.cursor_read::()?; for header in headers { @@ -544,18 +596,11 @@ impl DatabaseProvider { // even if empty let withdrawals = if self.chain_spec.is_shanghai_active_at_timestamp(header_ref.timestamp) { - Some( - withdrawals_cursor - .seek_exact(header_ref.number)? - .map(|(_, w)| w.withdrawals) - .unwrap_or_default(), - ) - } else { - None - }; - let requests = - if self.chain_spec.is_prague_active_at_timestamp(header_ref.timestamp) { - Some(requests_cursor.seek_exact(header_ref.number)?.unwrap_or_default().1) + withdrawals_cursor + .seek_exact(header_ref.number)? + .map(|(_, w)| w.withdrawals) + .unwrap_or_default() + .into() } else { None }; @@ -569,10 +614,8 @@ impl DatabaseProvider { .unwrap_or_default() }; - let sidecars = Some(Default::default()); // no need to read sidecars - if let Ok(b) = - assemble_block(header, tx_range, ommers, withdrawals, sidecars, requests) + assemble_block(header, tx_range, ommers, withdrawals, Some(Default::default())) { blocks.push(b); } @@ -591,7 +634,6 @@ impl DatabaseProvider { /// - Transactions /// – Ommers /// – Withdrawals - /// – Requests /// – Senders fn block_with_senders_range( &self, @@ -600,7 +642,7 @@ impl DatabaseProvider { assemble_block: BF, ) -> ProviderResult> where - Spec: EthereumHardforks, + N::ChainSpec: EthereumHardforks, H: AsRef
, HF: Fn(RangeInclusive) -> ProviderResult>, BF: Fn( @@ -609,231 +651,46 @@ impl DatabaseProvider { Vec
, Option, Option, - Option, Vec
, ) -> ProviderResult, { let mut tx_cursor = self.tx.cursor_read::()?; let mut senders_cursor = self.tx.cursor_read::()?; - self.block_range( - range, - headers_range, - |header, tx_range, ommers, withdrawals, sidecars, requests| { - let (body, senders) = if tx_range.is_empty() { - (Vec::new(), Vec::new()) - } else { - let body = self - .transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect::>(); - // fetch senders from the senders table - let known_senders = senders_cursor + self.block_range(range, headers_range, |header, tx_range, ommers, withdrawals, sidecars| { + let (body, senders) = if tx_range.is_empty() { + (Vec::new(), Vec::new()) + } else { + let body = self + .transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? + .into_iter() + .map(Into::into) + .collect::>(); + // fetch senders from the senders table + let known_senders = + senders_cursor .walk_range(tx_range.clone())? .collect::, _>>()?; - let mut senders = Vec::with_capacity(body.len()); - for (tx_num, tx) in tx_range.zip(body.iter()) { - match known_senders.get(&tx_num) { - None => { - // recover the sender from the transaction if not found - let sender = tx - .recover_signer_unchecked() - .ok_or(ProviderError::SenderRecoveryError)?; - senders.push(sender); - } - Some(sender) => senders.push(*sender), + let mut senders = Vec::with_capacity(body.len()); + for (tx_num, tx) in tx_range.zip(body.iter()) { + match known_senders.get(&tx_num) { + None => { + // recover the sender from the transaction if not found + let sender = tx + .recover_signer_unchecked() + .ok_or(ProviderError::SenderRecoveryError)?; + senders.push(sender); } + Some(sender) => senders.push(*sender), } - - (body, senders) - }; - - assemble_block(header, body, ommers, withdrawals, sidecars, requests, senders) - }, - ) - } - - /// Get requested blocks transaction with senders - pub(crate) fn get_block_transaction_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult)>> { - // Raad range of block bodies to get all transactions id's of this range. - let block_bodies = self.get::(range)?; - - if block_bodies.is_empty() { - return Ok(Vec::new()) - } - - // Compute the first and last tx ID in the range - let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); - let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); - - // If this is the case then all of the blocks in the range are empty - if last_transaction < first_transaction { - return Ok(block_bodies.into_iter().map(|(n, _)| (n, Vec::new())).collect()) - } - - // Get transactions and senders - let transactions = self - .get::(first_transaction..=last_transaction)? 
- .into_iter() - .map(|(id, tx)| (id, tx.into())) - .collect::>(); - - let mut senders = - self.get::(first_transaction..=last_transaction)?; - - recover_block_senders(&mut senders, &transactions, first_transaction, last_transaction)?; - - // Merge transaction into blocks - let mut block_tx = Vec::with_capacity(block_bodies.len()); - let mut senders = senders.into_iter(); - let mut transactions = transactions.into_iter(); - for (block_number, block_body) in block_bodies { - let mut one_block_tx = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - let tx = transactions.next(); - let sender = senders.next(); - - let recovered = match (tx, sender) { - (Some((tx_id, tx)), Some((sender_tx_id, sender))) => { - if tx_id == sender_tx_id { - Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender)) - } else { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - } - (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - (None, None) => Err(ProviderError::BlockBodyTransactionCount), - }?; - one_block_tx.push(recovered) - } - block_tx.push((block_number, one_block_tx)); - } - - Ok(block_tx) - } - - /// Get the given range of blocks. - pub fn get_block_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult> - where - Spec: EthereumHardforks, - { - // For blocks we need: - // - // - Headers - // - Bodies (transactions) - // - Uncles/ommers - // - Withdrawals - // - Requests - // - Signers - - let block_headers = self.get::(range.clone())?; - if block_headers.is_empty() { - return Ok(Vec::new()) - } - - let block_header_hashes = self.get::(range.clone())?; - let block_ommers = self.get::(range.clone())?; - let block_withdrawals = self.get::(range.clone())?; - let block_requests = self.get::(range.clone())?; - let block_sidecars = self.get::(range.clone())?; - - let block_tx = self.get_block_transaction_range(range)?; - let mut blocks = Vec::with_capacity(block_headers.len()); - - // merge all into block - let block_header_iter = block_headers.into_iter(); - let block_header_hashes_iter = block_header_hashes.into_iter(); - let block_tx_iter = block_tx.into_iter(); - - // Ommers can be empty for some blocks - let mut block_ommers_iter = block_ommers.into_iter(); - let mut block_withdrawals_iter = block_withdrawals.into_iter(); - let mut block_requests_iter = block_requests.into_iter(); - let mut block_sidecars_iter = block_sidecars.into_iter(); - let mut block_ommers = block_ommers_iter.next(); - let mut block_withdrawals = block_withdrawals_iter.next(); - let mut block_requests = block_requests_iter.next(); - let mut block_sidecars = block_sidecars_iter.next(); - - for ((main_block_number, header), (_, header_hash), (_, tx)) in - izip!(block_header_iter, block_header_hashes_iter, block_tx_iter) - { - let header = SealedHeader::new(header, header_hash); - - let (transactions, senders) = tx.into_iter().map(|tx| tx.to_components()).unzip(); - - // Ommers can be missing - let mut ommers = Vec::new(); - if let Some((block_number, _)) = block_ommers.as_ref() { - if *block_number == main_block_number { - ommers = block_ommers.take().unwrap().1.ommers; - block_ommers = block_ommers_iter.next(); } - }; - // withdrawal can be missing - let shanghai_is_active = - self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp); - let mut withdrawals = Some(Withdrawals::default()); - if shanghai_is_active { - if let Some((block_number, _)) = 
block_withdrawals.as_ref() { - if *block_number == main_block_number { - withdrawals = Some(block_withdrawals.take().unwrap().1.withdrawals); - block_withdrawals = block_withdrawals_iter.next(); - } - } - } else { - withdrawals = None - } - - // requests can be missing - let prague_is_active = self.chain_spec.is_prague_active_at_timestamp(header.timestamp); - let mut requests = Some(Requests::default()); - if prague_is_active { - if let Some((block_number, _)) = block_requests.as_ref() { - if *block_number == main_block_number { - requests = Some(block_requests.take().unwrap().1); - block_requests = block_requests_iter.next(); - } - } - } else { - requests = None; - } - - // sidecars can be missing - let cancun_is_active = self.chain_spec.is_cancun_active_at_timestamp(header.timestamp); - let mut sidecars = Some(BlobSidecars::default()); - if cancun_is_active { - if let Some((block_number, _)) = block_sidecars.as_ref() { - if *block_number == main_block_number { - sidecars = Some(block_sidecars.take().unwrap().1); - block_sidecars = block_sidecars_iter.next(); - } - } - } else { - sidecars = None; + (body, senders) }; - blocks.push(SealedBlockWithSenders { - block: SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals, sidecars, requests }, - }, - senders, - }) - } - - Ok(blocks) + assemble_block(header, body, ommers, withdrawals, sidecars, senders) + }) } /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. @@ -1006,7 +863,7 @@ impl DatabaseProvider { } } -impl DatabaseProvider { +impl DatabaseProvider { /// Commit database transaction. pub fn commit(self) -> ProviderResult { Ok(self.tx.commit()?) @@ -1191,7 +1048,6 @@ impl DatabaseProvider { /// * [`CanonicalHeaders`](tables::CanonicalHeaders) /// * [`BlockOmmers`](tables::BlockOmmers) /// * [`BlockWithdrawals`](tables::BlockWithdrawals) - /// * [`BlockRequests`](tables::BlockRequests) /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) /// /// This will also remove transaction data according to @@ -1211,7 +1067,6 @@ impl DatabaseProvider { self.remove::(range.clone())?; self.remove::(range.clone())?; self.remove::(range.clone())?; - self.remove::(range.clone())?; self.remove_block_transaction_range(range.clone())?; self.remove::(range)?; @@ -1225,7 +1080,6 @@ impl DatabaseProvider { /// * [`CanonicalHeaders`](tables::CanonicalHeaders) /// * [`BlockOmmers`](tables::BlockOmmers) /// * [`BlockWithdrawals`](tables::BlockWithdrawals) - /// * [`BlockRequests`](tables::BlockRequests) /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) /// /// This will also remove transaction data according to @@ -1235,7 +1089,7 @@ impl DatabaseProvider { range: impl RangeBounds + Clone, ) -> ProviderResult> where - Spec: EthereumHardforks, + N::ChainSpec: EthereumHardforks, { // For blocks we need: // @@ -1243,7 +1097,6 @@ impl DatabaseProvider { // - Bodies (transactions) // - Uncles/ommers // - Withdrawals - // - Requests // - Signers let block_headers = self.take::(range.clone())?; @@ -1257,7 +1110,6 @@ impl DatabaseProvider { let block_header_hashes = self.take::(range.clone())?; let block_ommers = self.take::(range.clone())?; let block_withdrawals = self.take::(range.clone())?; - let block_requests = self.take::(range.clone())?; let block_tx = self.take_block_transaction_range(range.clone())?; let block_sidecars = self.take::(range.clone())?; @@ -1274,11 +1126,9 @@ impl DatabaseProvider { // Ommers can be empty for some blocks let mut block_ommers_iter = 
block_ommers.into_iter(); let mut block_withdrawals_iter = block_withdrawals.into_iter(); - let mut block_requests_iter = block_requests.into_iter(); let mut block_sidecars_iter = block_sidecars.into_iter(); let mut block_ommers = block_ommers_iter.next(); let mut block_withdrawals = block_withdrawals_iter.next(); - let mut block_requests = block_requests_iter.next(); let mut block_sidecars = block_sidecars_iter.next(); for ((main_block_number, header), (_, header_hash), (_, tx)) in @@ -1312,20 +1162,6 @@ impl DatabaseProvider { withdrawals = None } - // requests can be missing - let prague_is_active = self.chain_spec.is_prague_active_at_timestamp(header.timestamp); - let mut requests = Some(Requests::default()); - if prague_is_active { - if let Some((block_number, _)) = block_requests.as_ref() { - if *block_number == main_block_number { - requests = Some(block_requests.take().unwrap().1); - block_requests = block_requests_iter.next(); - } - } - } else { - requests = None; - } - // sidecars can be missing let cancun_is_active = self.chain_spec.is_cancun_active_at_timestamp(header.timestamp); let mut sidecars = Some(BlobSidecars::default()); @@ -1343,7 +1179,7 @@ impl DatabaseProvider { blocks.push(SealedBlockWithSenders { block: SealedBlock { header, - body: BlockBody { transactions, ommers, withdrawals, sidecars, requests }, + body: BlockBody { transactions, ommers, withdrawals, sidecars }, }, senders, }) @@ -1409,13 +1245,13 @@ impl DatabaseProvider { } } -impl AccountReader for DatabaseProvider { +impl AccountReader for DatabaseProvider { fn basic_account(&self, address: Address) -> ProviderResult> { Ok(self.tx.get::(address)?) } } -impl AccountExtReader for DatabaseProvider { +impl AccountExtReader for DatabaseProvider { fn changed_accounts_with_range( &self, range: impl RangeBounds, @@ -1459,7 +1295,7 @@ impl AccountExtReader for DatabaseProvider StorageChangeSetReader for DatabaseProvider { +impl StorageChangeSetReader for DatabaseProvider { fn storage_changeset( &self, block_number: BlockNumber, @@ -1474,7 +1310,7 @@ impl StorageChangeSetReader for DatabaseProvider ChangeSetReader for DatabaseProvider { +impl ChangeSetReader for DatabaseProvider { fn account_block_changeset( &self, block_number: BlockNumber, @@ -1491,7 +1327,7 @@ impl ChangeSetReader for DatabaseProvider } } -impl HeaderSyncGapProvider for DatabaseProvider { +impl HeaderSyncGapProvider for DatabaseProvider { fn sync_gap( &self, tip: watch::Receiver, @@ -1535,8 +1371,8 @@ impl HeaderSyncGapProvider for DatabaseProvider HeaderProvider - for DatabaseProvider +impl> HeaderProvider + for DatabaseProvider { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { if let Some(num) = self.block_number(*block_hash)? 
{ @@ -1635,7 +1471,7 @@ impl HeaderProvider } } -impl BlockHashReader for DatabaseProvider { +impl BlockHashReader for DatabaseProvider { fn block_hash(&self, number: u64) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, @@ -1662,7 +1498,7 @@ impl BlockHashReader for DatabaseProvider } } -impl BlockNumReader for DatabaseProvider { +impl BlockNumReader for DatabaseProvider { fn chain_info(&self) -> ProviderResult { let best_number = self.best_block_number()?; let best_hash = self.block_hash(best_number)?.unwrap_or_default(); @@ -1693,7 +1529,7 @@ impl BlockNumReader for DatabaseProvider } } -impl BlockReader for DatabaseProvider { +impl> BlockReader for DatabaseProvider { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { if source.is_canonical() { self.block(hash.into()) @@ -1712,7 +1548,6 @@ impl BlockReader for DatabasePr if let Some(header) = self.header_by_number(number)? { let withdrawals = self.withdrawals_by_block(number.into(), header.timestamp)?; let ommers = self.ommers(number.into())?.unwrap_or_default(); - let requests = self.requests_by_block(number.into(), header.timestamp)?; // If the body indices are not found, this means that the transactions either do not // exist in the database yet, or they do exit but are not indexed. // If they exist but are not indexed, we don't have enough @@ -1726,7 +1561,7 @@ impl BlockReader for DatabasePr return Ok(Some(Block { header, - body: BlockBody { transactions, ommers, withdrawals, sidecars, requests }, + body: BlockBody { transactions, ommers, withdrawals, sidecars }, })) } } @@ -1786,7 +1621,7 @@ impl BlockReader for DatabasePr id, transaction_kind, |block_number| self.header_by_number(block_number), - |header, transactions, senders, ommers, withdrawals, requests| { + |header, transactions, senders, ommers, withdrawals| { Block { header, body: BlockBody { @@ -1794,7 +1629,6 @@ impl BlockReader for DatabasePr ommers, withdrawals, sidecars: Some(Default::default()), - requests, }, } // Note: we're using unchecked here because we know the block contains valid txs @@ -1816,7 +1650,7 @@ impl BlockReader for DatabasePr id, transaction_kind, |block_number| self.sealed_header(block_number), - |header, transactions, senders, ommers, withdrawals, requests| { + |header, transactions, senders, ommers, withdrawals| { SealedBlock { header, body: BlockBody { @@ -1824,7 +1658,6 @@ impl BlockReader for DatabasePr ommers, withdrawals, sidecars: Some(Default::default()), - requests, }, } // Note: we're using unchecked here because we know the block contains valid txs @@ -1842,7 +1675,7 @@ impl BlockReader for DatabasePr self.block_range( range, |range| self.headers_range(range), - |header, tx_range, ommers, withdrawals, sidecars, requests| { + |header, tx_range, ommers, withdrawals, sidecars| { let transactions = if tx_range.is_empty() { Vec::new() } else { @@ -1853,7 +1686,7 @@ impl BlockReader for DatabasePr }; Ok(Block { header, - body: BlockBody { transactions, ommers, withdrawals, sidecars, requests }, + body: BlockBody { transactions, ommers, withdrawals, sidecars }, }) }, ) @@ -1866,13 +1699,10 @@ impl BlockReader for DatabasePr self.block_with_senders_range( range, |range| self.headers_range(range), - |header, transactions, ommers, withdrawals, sidecars, requests, senders| { - Block { - header, - body: BlockBody { transactions, ommers, withdrawals, sidecars, requests }, - } - .try_with_senders_unchecked(senders) - .map_err(|_| 
ProviderError::SenderRecoveryError) + |header, transactions, ommers, withdrawals, sidecars, senders| { + Block { header, body: BlockBody { transactions, ommers, withdrawals, sidecars } } + .try_with_senders_unchecked(senders) + .map_err(|_| ProviderError::SenderRecoveryError) }, ) } @@ -1884,11 +1714,11 @@ impl BlockReader for DatabasePr self.block_with_senders_range( range, |range| self.sealed_headers_range(range), - |header, transactions, ommers, withdrawals, sidecars, requests, senders| { + |header, transactions, ommers, withdrawals, sidecars, senders| { SealedBlockWithSenders::new( SealedBlock { header, - body: BlockBody { transactions, ommers, withdrawals, sidecars, requests }, + body: BlockBody { transactions, ommers, withdrawals, sidecars }, }, senders, ) @@ -1898,8 +1728,8 @@ impl BlockReader for DatabasePr } } -impl TransactionsProviderExt - for DatabaseProvider +impl> TransactionsProviderExt + for DatabaseProvider { /// Recovers transaction hashes by walking through `Transactions` table and /// calculating them in a parallel manner. Returned unsorted. @@ -1968,8 +1798,8 @@ impl TransactionsProviderExt } // Calculates the hash of the given transaction -impl TransactionsProvider - for DatabaseProvider +impl> TransactionsProvider + for DatabaseProvider { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { Ok(self.tx.get::(tx_hash)?) @@ -2128,8 +1958,8 @@ impl TransactionsProvider } } -impl ReceiptProvider - for DatabaseProvider +impl> ReceiptProvider + for DatabaseProvider { fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( @@ -2176,8 +2006,8 @@ impl ReceiptProvider } } -impl WithdrawalsProvider - for DatabaseProvider +impl> WithdrawalsProvider + for DatabaseProvider { fn withdrawals_by_block( &self, @@ -2206,7 +2036,9 @@ impl WithdrawalsProvider } } -impl SidecarsProvider for DatabaseProvider { +impl> SidecarsProvider + for DatabaseProvider +{ fn sidecars(&self, block_hash: &BlockHash) -> ProviderResult> { if let Some(num) = self.block_number(*block_hash)? { Ok(self.sidecars_by_number(num)?) @@ -2225,26 +2057,8 @@ impl SidecarsProvider for DatabaseProvider RequestsProvider - for DatabaseProvider -{ - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - if self.chain_spec.is_prague_active_at_timestamp(timestamp) { - if let Some(number) = self.convert_hash_or_number(id)? { - let requests = self.tx.get::(number)?; - return Ok(requests) - } - } - Ok(None) - } -} - -impl EvmEnvProvider - for DatabaseProvider +impl> EvmEnvProvider + for DatabaseProvider { fn fill_env_at( &self, @@ -2309,11 +2123,16 @@ impl EvmEnvProvider } } -impl StageCheckpointReader for DatabaseProvider { +impl StageCheckpointReader for DatabaseProvider { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { Ok(self.tx.get::(id.to_string())?) } + /// Get stage checkpoint progress. + fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { + Ok(self.tx.get::(id.to_string())?) + } + fn get_all_checkpoints(&self) -> ProviderResult> { self.tx .cursor_read::()? @@ -2321,14 +2140,9 @@ impl StageCheckpointReader for DatabaseProvider, _>>() .map_err(ProviderError::Database) } - - /// Get stage checkpoint progress. - fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { - Ok(self.tx.get::(id.to_string())?) - } } -impl StageCheckpointWriter for DatabaseProvider { +impl StageCheckpointWriter for DatabaseProvider { /// Save stage checkpoint. 
fn save_stage_checkpoint( &self, @@ -2369,7 +2183,7 @@ impl StageCheckpointWriter for DatabaseProvider< } } -impl StorageReader for DatabaseProvider { +impl StorageReader for DatabaseProvider { fn plain_state_storages( &self, addresses_with_keys: impl IntoIterator)>, @@ -2432,7 +2246,7 @@ impl StorageReader for DatabaseProvider { } } -impl StateChangeWriter for DatabaseProvider { +impl StateChangeWriter for DatabaseProvider { fn write_state_reverts( &self, reverts: PlainStateReverts, @@ -2809,7 +2623,7 @@ impl StateChangeWriter for DatabaseProvid } } -impl TrieWriter for DatabaseProvider { +impl TrieWriter for DatabaseProvider { /// Writes trie updates. Returns the number of entries modified. fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult { if trie_updates.is_empty() { @@ -2859,7 +2673,7 @@ impl TrieWriter for DatabaseProvider StorageTrieWriter for DatabaseProvider { +impl StorageTrieWriter for DatabaseProvider { /// Writes storage trie updates from the given storage trie map. First sorts the storage trie /// updates by the hashed address, writing in sorted order. fn write_storage_trie_updates( @@ -2896,20 +2710,18 @@ impl StorageTrieWriter for DatabaseProvid } } -impl HashingWriter for DatabaseProvider { - fn unwind_account_hashing( +impl HashingWriter for DatabaseProvider { + fn unwind_account_hashing<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, ) -> ProviderResult>> { // Aggregate all block changesets and make a list of accounts that have been changed. // Note that collecting and then reversing the order is necessary to ensure that the // changes are applied in the correct order. - let hashed_accounts = self - .tx - .cursor_read::()? - .walk_range(range)? - .map(|entry| entry.map(|(_, e)| (keccak256(e.address), e.info))) - .collect::, _>>()? + let hashed_accounts = changesets + .into_iter() + .map(|(_, e)| (keccak256(e.address), e.info)) + .collect::>() .into_iter() .rev() .collect::>(); @@ -2927,13 +2739,25 @@ impl HashingWriter for DatabaseProvider, + ) -> ProviderResult>> { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_account_hashing(changesets.iter()) + } + fn insert_account_for_hashing( &self, - accounts: impl IntoIterator)>, + changesets: impl IntoIterator)>, ) -> ProviderResult>> { let mut hashed_accounts_cursor = self.tx.cursor_write::()?; let hashed_accounts = - accounts.into_iter().map(|(ad, ac)| (keccak256(ad), ac)).collect::>(); + changesets.into_iter().map(|(ad, ac)| (keccak256(ad), ac)).collect::>(); for (hashed_address, account) in &hashed_accounts { if let Some(account) = account { hashed_accounts_cursor.upsert(*hashed_address, *account)?; @@ -2946,18 +2770,15 @@ impl HashingWriter for DatabaseProvider, + changesets: impl Iterator, ) -> ProviderResult>> { // Aggregate all block changesets and make list of accounts that have been changed. - let mut changesets = self.tx.cursor_read::()?; let mut hashed_storages = changesets - .walk_range(range)? - .map(|entry| { - entry.map(|(BlockNumberAddress((_, address)), storage_entry)| { - (keccak256(address), keccak256(storage_entry.key), storage_entry.value) - }) + .into_iter() + .map(|(BlockNumberAddress((_, address)), storage_entry)| { + (keccak256(address), keccak256(storage_entry.key), storage_entry.value) }) - .collect::, _>>()?; + .collect::>(); hashed_storages.sort_by_key(|(ha, hk, _)| (*ha, *hk)); // Apply values to HashedState, and remove the account if it's None. 
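// Reviewer sketch (not part of the patch): the iterator-taking signatures
// above let a caller buffer one changeset walk and reuse it for both the
// hashing unwind and the history-index unwind, instead of walking the
// changeset table twice as the old range-taking methods did. Assumes the
// imports of this file; `unwind_account_tables` is a hypothetical helper.
fn unwind_account_tables<TX: DbTxMut + DbTx, N: NodeTypes>(
    provider: &DatabaseProvider<TX, N>,
    range: RangeInclusive<BlockNumber>,
) -> ProviderResult<()> {
    // Walk the account changesets for the unwound range exactly once...
    let changesets = provider
        .tx_ref()
        .cursor_read::<tables::AccountChangeSets>()?
        .walk_range(range)?
        .collect::<Result<Vec<_>, _>>()?;
    // ...and feed the same buffer to both unwind paths, which now take
    // `impl Iterator<Item = &(BlockNumber, AccountBeforeTx)>`.
    provider.unwind_account_hashing(changesets.iter())?;
    provider.unwind_account_history_indices(changesets.iter())?;
    Ok(())
}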
@@ -2982,6 +2803,18 @@ impl HashingWriter for DatabaseProvider, + ) -> ProviderResult>> { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_storage_hashing(changesets.into_iter()) + } + fn insert_storage_for_hashing( &self, storages: impl IntoIterator)>, @@ -2997,10 +2830,10 @@ impl HashingWriter for DatabaseProvider()?; // Hash the address and key and apply them to HashedStorage (if Storage is None @@ -3102,17 +2935,15 @@ impl HashingWriter for DatabaseProvider HistoryWriter for DatabaseProvider { - fn unwind_account_history_indices( +impl HistoryWriter for DatabaseProvider { + fn unwind_account_history_indices<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, ) -> ProviderResult { - let mut last_indices = self - .tx - .cursor_read::()? - .walk_range(range)? - .map(|entry| entry.map(|(index, account)| (account.address, index))) - .collect::, _>>()?; + let mut last_indices = changesets + .into_iter() + .map(|(index, account)| (account.address, *index)) + .collect::>(); last_indices.sort_by_key(|(a, _)| *a); // Unwind the account history index. @@ -3139,6 +2970,18 @@ impl HistoryWriter for DatabaseProvider, + ) -> ProviderResult { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_account_history_indices(changesets.iter()) + } + fn insert_account_history_index( &self, account_transitions: impl IntoIterator)>, @@ -3151,16 +2994,12 @@ impl HistoryWriter for DatabaseProvider, + changesets: impl Iterator, ) -> ProviderResult { - let mut storage_changesets = self - .tx - .cursor_read::()? - .walk_range(range)? - .map(|entry| { - entry.map(|(BlockNumberAddress((bn, address)), storage)| (address, storage.key, bn)) - }) - .collect::, _>>()?; + let mut storage_changesets = changesets + .into_iter() + .map(|(BlockNumberAddress((bn, address)), storage)| (address, storage.key, bn)) + .collect::>(); storage_changesets.sort_by_key(|(address, key, _)| (*address, *key)); let mut cursor = self.tx.cursor_write::()?; @@ -3189,6 +3028,18 @@ impl HistoryWriter for DatabaseProvider, + ) -> ProviderResult { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_storage_history_indices(changesets.into_iter()) + } + fn insert_storage_history_index( &self, storage_transitions: impl IntoIterator)>, @@ -3218,40 +3069,27 @@ impl HistoryWriter for DatabaseProvider BlockExecutionReader - for DatabaseProvider -{ - fn get_block_and_execution_range( - &self, - range: RangeInclusive, - ) -> ProviderResult { - // get blocks - let blocks = self.get_block_range(range.clone())?; - - // get execution res - let execution_state = self.get_state(range)?.unwrap_or_default(); - - Ok(Chain::new(blocks, execution_state, None)) - } -} - -impl StateReader for DatabaseProvider { +impl StateReader for DatabaseProvider { fn get_state(&self, block: BlockNumber) -> ProviderResult> { self.get_state(block..=block) } } -impl - BlockExecutionWriter for DatabaseProvider +impl + 'static> + BlockExecutionWriter for DatabaseProvider { fn take_block_and_execution_range( &self, range: RangeInclusive, ) -> ProviderResult { - let storage_range = BlockNumberAddress::range(range.clone()); + let changed_accounts = self + .tx + .cursor_read::()? + .walk_range(range.clone())? + .collect::, _>>()?; // Unwind account hashes. Add changed accounts to account prefix set. 
- let hashed_addresses = self.unwind_account_hashing(range.clone())?; + let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); let mut destroyed_accounts = HashSet::default(); for (hashed_address, account) in hashed_addresses { @@ -3262,12 +3100,19 @@ impl()? + .walk_range(storage_range)? + .collect::, _>>()?; // Unwind storage hashes. Add changed account and storage keys to corresponding prefix // sets. let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(storage_range.clone())?; + let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; for (hashed_address, hashed_slots) in storage_entries { account_prefix_set.insert(Nibbles::unpack(hashed_address)); let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); @@ -3278,7 +3123,7 @@ impl, ) -> ProviderResult<()> { - let storage_range = BlockNumberAddress::range(range.clone()); + let changed_accounts = self + .tx + .cursor_read::()? + .walk_range(range.clone())? + .collect::, _>>()?; // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(range.clone())?; + let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); let mut destroyed_accounts = HashSet::default(); for (hashed_address, account) in hashed_addresses { @@ -3350,12 +3199,19 @@ impl()? + .walk_range(storage_range)? + .collect::, _>>()?; // Unwind storage hashes. Add changed account and storage keys to corresponding prefix // sets. let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(storage_range.clone())?; + let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; for (hashed_address, hashed_slots) in storage_entries { account_prefix_set.insert(Nibbles::unpack(hashed_address)); let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); @@ -3366,7 +3222,7 @@ impl BlockWriter - for DatabaseProvider +impl + 'static> BlockWriter + for DatabaseProvider { /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) @@ -3438,7 +3294,6 @@ impl(block_number, requests)?; - durations_recorder.record_relative(metrics::Action::InsertBlockRequests); - } - let sidecars = block.block.body.sidecars.unwrap_or_default(); self.tx.put::(block_number, sidecars)?; durations_recorder.record_relative(metrics::Action::InsertBlockSidecars); @@ -3644,7 +3494,7 @@ impl PruneCheckpointReader for DatabaseProvider { +impl PruneCheckpointReader for DatabaseProvider { fn get_prune_checkpoint( &self, segment: PruneSegment, @@ -3661,7 +3511,7 @@ impl PruneCheckpointReader for DatabaseProvider PruneCheckpointWriter for DatabaseProvider { +impl PruneCheckpointWriter for DatabaseProvider { fn save_prune_checkpoint( &self, segment: PruneSegment, @@ -3671,7 +3521,7 @@ impl PruneCheckpointWriter for DatabaseProvider< } } -impl StatsReader for DatabaseProvider { +impl StatsReader for DatabaseProvider { fn count_entries(&self) -> ProviderResult { let db_entries = self.tx.entries::()?; let static_file_entries = match self.static_file_provider.count_entries::() { @@ -3684,7 +3534,7 @@ impl StatsReader for DatabaseProvider { } } -impl ChainStateBlockReader for DatabaseProvider { +impl 
ChainStateBlockReader for DatabaseProvider { fn last_finalized_block_number(&self) -> ProviderResult> { let mut finalized_blocks = self .tx @@ -3710,7 +3560,7 @@ impl ChainStateBlockReader for DatabaseProvider ChainStateBlockWriter for DatabaseProvider { +impl ChainStateBlockWriter for DatabaseProvider { fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()> { Ok(self .tx @@ -3724,13 +3574,13 @@ impl ChainStateBlockWriter for DatabaseProvider< } } -impl ParliaSnapshotReader for DatabaseProvider { +impl ParliaSnapshotReader for DatabaseProvider { fn get_parlia_snapshot(&self, block_hash: B256) -> ProviderResult> { Ok(self.tx.get::(block_hash)?) } } -impl DBProvider for DatabaseProvider { +impl DBProvider for DatabaseProvider { type Tx = TX; fn tx_ref(&self) -> &Self::Tx { diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 20879b1bf2..22559ef155 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -4,11 +4,11 @@ use crate::{ CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, FullExecutionDataProvider, HeaderProvider, ParliaSnapshotReader, ProviderError, PruneCheckpointReader, ReceiptProvider, - ReceiptProviderIdExt, RequestsProvider, StageCheckpointReader, StateProviderBox, - StateProviderFactory, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, - TreeViewer, WithdrawalsProvider, + ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, + WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, @@ -23,7 +23,7 @@ use reth_node_types::NodeTypesWithDB; use reth_primitives::{ parlia::Snapshot, Account, BlobSidecars, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, + TransactionSignedNoHash, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -63,6 +63,9 @@ use reth_storage_api::SidecarsProvider; mod blockchain_provider; pub use blockchain_provider::BlockchainProvider2; +mod consistent; +pub use consistent::ConsistentProvider; + /// Helper trait keeping common requirements of providers for [`NodeTypesWithDB`]. pub trait ProviderNodeTypes: NodeTypesWithDB {} @@ -120,7 +123,7 @@ impl BlockchainProvider { /// the database to initialize the provider. pub fn new(database: ProviderFactory, tree: Arc) -> ProviderResult { let provider = database.provider()?; - let best: ChainInfo = provider.chain_info()?; + let best = provider.chain_info()?; let latest_header = provider .header_by_number(best.best_number)? 
.ok_or_else(|| ProviderError::HeaderNotFound(best.best_number.into()))?; @@ -516,16 +519,6 @@ impl SidecarsProvider for BlockchainProvider { } } -impl RequestsProvider for BlockchainProvider { - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - self.database.requests_by_block(id, timestamp) - } - - impl StageCheckpointReader for BlockchainProvider { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.database.provider()?.get_stage_checkpoint(id) @@ -620,35 +613,6 @@ impl StateProviderFactory for BlockchainProvider { self.database.latest() } - fn history_by_block_number( - &self, - block_number: BlockNumber, - ) -> ProviderResult { - trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); - self.ensure_canonical_block(block_number)?; - self.database.history_by_block_number(block_number) - } - - fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { - trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); - self.database.history_by_block_hash(block_hash) - } - - fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult { - trace!(target: "providers::blockchain", ?block, "Getting state by block hash"); - let mut state = self.history_by_block_hash(block); - - // we failed to get the state by hash, from disk, hash block be the pending block - if state.is_err() { - if let Ok(Some(pending)) = self.pending_state_by_hash(block) { - // we found pending block by hash - state = Ok(pending) - } - } - - state - } - /// Returns a [`StateProviderBox`] indexed by the given block number or tag. /// /// Note: if a number is provided this will only look at historical(canonical) state. @@ -681,6 +645,35 @@ impl StateProviderFactory for BlockchainProvider { } } + fn history_by_block_number( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); + self.ensure_canonical_block(block_number)?; + self.database.history_by_block_number(block_number) + } + + fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { + trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); + self.database.history_by_block_hash(block_hash) + } + + fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult { + trace!(target: "providers::blockchain", ?block, "Getting state by block hash"); + let mut state = self.history_by_block_hash(block); + + // we failed to get the state by hash from disk; the hash may belong to the pending block + if state.is_err() { + if let Ok(Some(pending)) = self.pending_state_by_hash(block) { + // we found the pending block by hash + state = Ok(pending) + } + } + + state + } + + /// Returns the state provider for pending state.
/// /// If there's no pending block available then the latest state provider is returned: diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 781a11f6de..56a1d057e7 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -2,6 +2,7 @@ use crate::{ providers::{state::macros::delegate_provider_impls, StaticFileProvider}, AccountReader, BlockHashReader, ProviderError, StateProvider, StateRootProvider, }; +use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{ map::{HashMap, HashSet}, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, @@ -13,7 +14,7 @@ use reth_db_api::{ table::Table, transaction::DbTx, }; -use reth_primitives::{constants::EPOCH_SLOTS, Account, Bytecode, StaticFileSegment}; +use reth_primitives::{Account, Bytecode, StaticFileSegment}; use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ @@ -227,6 +228,24 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { Ok(HistoryInfo::NotYetWritten) } } + + /// Set the lowest block number at which the account history is available. + pub const fn with_lowest_available_account_history_block_number( + mut self, + block_number: BlockNumber, + ) -> Self { + self.lowest_available_blocks.account_history_block_number = Some(block_number); + self + } + + /// Set the lowest block number at which the storage history is available. + pub const fn with_lowest_available_storage_history_block_number( + mut self, + block_number: BlockNumber, + ) -> Self { + self.lowest_available_blocks.storage_history_block_number = Some(block_number); + self + } } impl AccountReader for HistoricalStateProviderRef<'_, TX> { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 0ba8f7d280..2cabc9f026 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -4,10 +4,10 @@ use super::{ }; use crate::{ to_range, BlockHashReader, BlockNumReader, BlockReader, BlockSource, HeaderProvider, - ReceiptProvider, RequestsProvider, StageCheckpointReader, StatsReader, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, + ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, + TransactionsProviderExt, WithdrawalsProvider, }; -use alloy_eips::BlockHashOrNumber; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use dashmap::DashMap; use notify::{RecommendedWatcher, RecursiveMode, Watcher}; @@ -34,7 +34,7 @@ use reth_primitives::{ }, BlobSidecars, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, - Withdrawal, Withdrawals, + Withdrawals, }; use reth_stages_types::{PipelineTarget, StageId}; use reth_storage_api::{DBProvider, SidecarsProvider}; @@ -145,6 +145,7 @@ impl StaticFileProvider { // appending/truncating rows for segment in event.paths { // Ensure it's a file with the .conf extension + #[allow(clippy::nonminimal_bool)] if !segment .extension() .is_some_and(|s| s.to_str() == Some(CONFIG_FILE_EXTENSION)) @@ -224,7 +225,7 @@ impl 
StaticFileProviderInner { /// Creates a new [`StaticFileProviderInner`]. fn new(path: impl AsRef, access: StaticFileAccess) -> ProviderResult { let _lock_file = if access.is_read_write() { - Some(StorageLock::try_acquire(path.as_ref())?) + StorageLock::try_acquire(path.as_ref())?.into() } else { None }; @@ -1407,13 +1408,13 @@ impl TransactionsProviderExt for StaticFileProvider { // chunks are too big, there will be idle threads waiting for work. Choosing an // arbitrary smaller value to make sure it doesn't happen. let chunk_size = 100; - let mut channels = Vec::new(); // iterator over the chunks let chunks = tx_range .clone() .step_by(chunk_size) .map(|start| start..std::cmp::min(start + chunk_size as u64, tx_range.end)); + let mut channels = Vec::with_capacity(tx_range_size.div_ceil(chunk_size)); for chunk_range in chunks { let (channel_tx, channel_rx) = mpsc::channel(); @@ -1713,17 +1714,6 @@ impl SidecarsProvider for StaticFileProvider { } } -impl RequestsProvider for StaticFileProvider { - fn requests_by_block( - &self, - _id: BlockHashOrNumber, - _timestamp: u64, - ) -> ProviderResult> { - // Required data not present in static_files - Err(ProviderError::UnsupportedProvider) - } -} - impl StatsReader for StaticFileProvider { fn count_entries(&self) -> ProviderResult { match T::NAME { diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 8390eb43cd..049ce0a0ee 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -56,6 +56,7 @@ impl Deref for LoadedJar { mod tests { use super::*; use crate::{test_utils::create_test_provider_factory, HeaderProvider}; + use alloy_consensus::Transaction; use alloy_eips::eip4844::BlobTransactionSidecar; use alloy_primitives::{BlockHash, TxNumber, B256, U256}; use rand::seq::SliceRandom; diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 26e9f496cd..e535fe80bc 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -307,16 +307,16 @@ impl StaticFileProviderRW { // // If that expected block start is 0, then it means that there's no actual block data, and // there's no block data in static files. - let segment_max_block = match self.writer.user_header().block_range() { - Some(block_range) => Some(block_range.end()), - None => { - if self.writer.user_header().expected_block_start() > 0 { - Some(self.writer.user_header().expected_block_start() - 1) - } else { - None - } - } - }; + let segment_max_block = self + .writer + .user_header() + .block_range() + .as_ref() + .map(|block_range| block_range.end()) + .or_else(|| { + (self.writer.user_header().expected_block_start() > 0) + .then(|| self.writer.user_header().expected_block_start() - 1) + }); self.reader().update_index(self.writer.user_header().segment(), segment_max_block) } diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index daed906646..19f885e27a 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -1,24 +1,27 @@ //! 
Dummy blocks and data for tests use crate::{DatabaseProviderRW, ExecutionOutcome}; -use alloy_consensus::TxLegacy; +use alloy_consensus::{TxLegacy, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{ b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, Parity, Sealable, TxKind, B256, U256, }; -use once_cell::sync::Lazy; + +use alloy_eips::eip4895::Withdrawal; +use alloy_primitives::Signature; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; +use reth_node_types::NodeTypes; use reth_primitives::{ Account, BlockBody, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - Signature, Transaction, TransactionSigned, TxType, Withdrawal, Withdrawals, + Transaction, TransactionSigned, TxType, Withdrawals, }; use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; use revm::{db::BundleState, primitives::AccountInfo}; -use std::str::FromStr; +use std::{str::FromStr, sync::LazyLock}; /// Assert genesis block -pub fn assert_genesis_block( - provider: &DatabaseProviderRW, +pub fn assert_genesis_block( + provider: &DatabaseProviderRW, g: SealedBlock, ) { let n = g.number; @@ -40,7 +43,6 @@ pub fn assert_genesis_block( ); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); - assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); @@ -61,13 +63,12 @@ pub fn assert_genesis_block( // StageCheckpoints is not updated in tests } -pub(crate) static TEST_BLOCK: Lazy = Lazy::new(|| SealedBlock { +pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlock { header: SealedHeader::new( Header { parent_hash: hex!("c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94f") .into(), - ommers_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") - .into(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: hex!("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba").into(), state_root: hex!("50554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583d") .into(), @@ -201,20 +202,20 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, ExecutionOutcome) { .revert_account_info(number, account2, Some(None)) .state_storage(account1, HashMap::from_iter([(slot, (U256::ZERO, U256::from(10)))])) .build(), - vec![vec![Some(Receipt { - tx_type: TxType::Eip2930, - success: true, - cumulative_gas_used: 300, - logs: vec![Log::new_unchecked( - Address::new([0x60; 20]), - vec![B256::with_last_byte(1), B256::with_last_byte(2)], - Bytes::default(), - )], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - })]] + vec![vec![Some( + #[allow(clippy::needless_update)] // side-effect of optimism fields + Receipt { + tx_type: TxType::Eip2930, + success: true, + cumulative_gas_used: 300, + logs: vec![Log::new_unchecked( + Address::new([0x60; 20]), + vec![B256::with_last_byte(1), B256::with_last_byte(2)], + Bytes::default(), + )], + ..Default::default() + }, + )]] .into(), number, Vec::new(), @@ -263,20 +264,20 @@ fn block2( ) .revert_storage(number, account, Vec::from([(slot, U256::from(10))])) .build(), - vec![vec![Some(Receipt { - tx_type: TxType::Eip1559, - success: false, - cumulative_gas_used: 400, - logs: vec![Log::new_unchecked( - Address::new([0x61; 20]), - vec![B256::with_last_byte(3), B256::with_last_byte(4)], - Bytes::default(), - )], - #[cfg(feature = "optimism")] - deposit_nonce: None, - 
#[cfg(feature = "optimism")] - deposit_receipt_version: None, - })]] + vec![vec![Some( + #[allow(clippy::needless_update)] // side-effect of optimism fields + Receipt { + tx_type: TxType::Eip1559, + success: false, + cumulative_gas_used: 400, + logs: vec![Log::new_unchecked( + Address::new([0x61; 20]), + vec![B256::with_last_byte(3), B256::with_last_byte(4)], + Bytes::default(), + )], + ..Default::default() + }, + )]] .into(), number, Vec::new(), @@ -324,31 +325,30 @@ fn block3( ) .state_storage( address, - HashMap::from_iter( - slot_range - .clone() - .map(|slot| (U256::from(slot), (U256::ZERO, U256::from(slot)))), - ), + slot_range + .clone() + .map(|slot| (U256::from(slot), (U256::ZERO, U256::from(slot)))) + .collect(), ) .revert_account_info(number, address, Some(None)) .revert_storage(number, address, Vec::new()); } let execution_outcome = ExecutionOutcome::new( bundle_state_builder.build(), - vec![vec![Some(Receipt { - tx_type: TxType::Eip1559, - success: true, - cumulative_gas_used: 400, - logs: vec![Log::new_unchecked( - Address::new([0x61; 20]), - vec![B256::with_last_byte(3), B256::with_last_byte(4)], - Bytes::default(), - )], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - })]] + vec![vec![Some( + #[allow(clippy::needless_update)] // side-effect of optimism fields + Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 400, + logs: vec![Log::new_unchecked( + Address::new([0x61; 20]), + vec![B256::with_last_byte(3), B256::with_last_byte(4)], + Bytes::default(), + )], + ..Default::default() + }, + )]] .into(), number, Vec::new(), @@ -393,20 +393,18 @@ fn block4( ) .state_storage( address, - HashMap::from_iter( - slot_range.clone().map(|slot| { - (U256::from(slot), (U256::from(slot), U256::from(slot * 2))) - }), - ), + slot_range + .clone() + .map(|slot| (U256::from(slot), (U256::from(slot), U256::from(slot * 2)))) + .collect(), ) } else { bundle_state_builder.state_address(address).state_storage( address, - HashMap::from_iter( - slot_range - .clone() - .map(|slot| (U256::from(slot), (U256::from(slot), U256::ZERO))), - ), + slot_range + .clone() + .map(|slot| (U256::from(slot), (U256::from(slot), U256::ZERO))) + .collect(), ) }; // record previous account info @@ -423,25 +421,25 @@ fn block4( .revert_storage( number, address, - Vec::from_iter(slot_range.clone().map(|slot| (U256::from(slot), U256::from(slot)))), + slot_range.clone().map(|slot| (U256::from(slot), U256::from(slot))).collect(), ); } let execution_outcome = ExecutionOutcome::new( bundle_state_builder.build(), - vec![vec![Some(Receipt { - tx_type: TxType::Eip1559, - success: true, - cumulative_gas_used: 400, - logs: vec![Log::new_unchecked( - Address::new([0x61; 20]), - vec![B256::with_last_byte(3), B256::with_last_byte(4)], - Bytes::default(), - )], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - })]] + vec![vec![Some( + #[allow(clippy::needless_update)] // side-effect of optimism fields + Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 400, + logs: vec![Log::new_unchecked( + Address::new([0x61; 20]), + vec![B256::with_last_byte(3), B256::with_last_byte(4)], + Bytes::default(), + )], + ..Default::default() + }, + )]] .into(), number, Vec::new(), @@ -485,12 +483,11 @@ fn block5( ) .state_storage( address, - HashMap::from_iter( - slot_range - .clone() - .take(50) - .map(|slot| (U256::from(slot), (U256::from(slot), 
U256::from(slot * 4)))), - ), + slot_range + .clone() + .take(50) + .map(|slot| (U256::from(slot), (U256::from(slot), U256::from(slot * 4)))) + .collect(), ); bundle_state_builder = if idx % 2 == 0 { bundle_state_builder @@ -506,9 +503,10 @@ fn block5( .revert_storage( number, address, - Vec::from_iter( - slot_range.clone().map(|slot| (U256::from(slot), U256::from(slot * 2))), - ), + slot_range + .clone() + .map(|slot| (U256::from(slot), U256::from(slot * 2))) + .collect(), ) } else { bundle_state_builder.revert_address(number, address) @@ -516,20 +514,20 @@ fn block5( } let execution_outcome = ExecutionOutcome::new( bundle_state_builder.build(), - vec![vec![Some(Receipt { - tx_type: TxType::Eip1559, - success: true, - cumulative_gas_used: 400, - logs: vec![Log::new_unchecked( - Address::new([0x61; 20]), - vec![B256::with_last_byte(3), B256::with_last_byte(4)], - Bytes::default(), - )], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - })]] + vec![vec![Some( + #[allow(clippy::needless_update)] // side-effect of optimism fields + Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 400, + logs: vec![Log::new_unchecked( + Address::new([0x61; 20]), + vec![B256::with_last_byte(3), B256::with_last_byte(4)], + Bytes::default(), + )], + ..Default::default() + }, + )]] .into(), number, Vec::new(), diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 65e0fcffcc..bf9e9a407b 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,13 +1,12 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, - AccountReader, BlockExecutionReader, BlockHashReader, BlockIdReader, BlockNumReader, - BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, DatabaseProvider, - EvmEnvProvider, HeaderProvider, ReceiptProviderIdExt, RequestsProvider, StateProvider, - StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, - TransactionsProvider, WithdrawalsProvider, + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + ChainSpecProvider, ChangeSetReader, DatabaseProvider, EvmEnvProvider, HeaderProvider, + ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, StateReader, + StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_consensus::constants::EMPTY_ROOT_HASH; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, @@ -19,11 +18,12 @@ use reth_chainspec::{ChainInfo, ChainSpec}; use reth_db::mock::{DatabaseMock, TxMock}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_execution_types::ExecutionOutcome; +use reth_node_types::NodeTypes; use reth_primitives::{ Account, BlobSidecars, Block, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, + TransactionSignedNoHash, Withdrawals, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ @@ -35,6 +35,7 @@ use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, 
HashedStorage, MultiProof, StorageProof, TrieInput, }; +use reth_trie_db::MerklePatriciaTrie; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ collections::BTreeMap, @@ -151,10 +152,20 @@ impl MockEthProvider { } } +/// Mock node. +#[derive(Debug)] +pub struct MockNode; + +impl NodeTypes for MockNode { + type Primitives = (); + type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; +} + impl DatabaseProviderFactory for MockEthProvider { type DB = DatabaseMock; - type Provider = DatabaseProvider; - type ProviderRW = DatabaseProvider; + type Provider = DatabaseProvider; + type ProviderRW = DatabaseProvider; fn database_provider_ro(&self) -> ProviderResult { Err(ConsistentViewError::Syncing { best_block: GotExpected::new(0, 0) }.into()) @@ -345,13 +356,8 @@ impl TransactionsProvider for MockEthProvider { .values() .flat_map(|block| &block.body.transactions) .enumerate() - .filter_map(|(tx_number, tx)| { - if range.contains(&(tx_number as TxNumber)) { - Some(tx.clone().into()) - } else { - None - } - }) + .filter(|&(tx_number, _)| range.contains(&(tx_number as TxNumber))) + .map(|(_, tx)| tx.clone().into()) .collect(); Ok(transactions) @@ -367,11 +373,7 @@ impl TransactionsProvider for MockEthProvider { .flat_map(|block| &block.body.transactions) .enumerate() .filter_map(|(tx_number, tx)| { - if range.contains(&(tx_number as TxNumber)) { - Some(tx.recover_signer()?) - } else { - None - } + range.contains(&(tx_number as TxNumber)).then(|| tx.recover_signer()).flatten() }) .collect(); @@ -462,15 +464,15 @@ impl BlockNumReader for MockEthProvider { } impl BlockIdReader for MockEthProvider { - fn pending_block_num_hash(&self) -> ProviderResult> { + fn pending_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn safe_block_num_hash(&self) -> ProviderResult> { + fn safe_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn finalized_block_num_hash(&self) -> ProviderResult> { + fn finalized_block_num_hash(&self) -> ProviderResult> { Ok(None) } } @@ -759,18 +761,6 @@ impl StateProviderFactory for MockEthProvider { Ok(Box::new(self.clone())) } - fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { - Ok(Box::new(self.clone())) - } - - fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(self.clone())) - } - - fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(self.clone())) - } - fn state_by_block_number_or_tag( &self, number_or_tag: BlockNumberOrTag, @@ -797,6 +787,18 @@ impl StateProviderFactory for MockEthProvider { } } + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(self.clone())) + } + fn pending(&self) -> ProviderResult { Ok(Box::new(self.clone())) } @@ -829,16 +831,6 @@ impl SidecarsProvider for MockEthProvider { } } -impl RequestsProvider for MockEthProvider { - fn requests_by_block( - &self, - _id: BlockHashOrNumber, - _timestamp: u64, - ) -> ProviderResult> { - Ok(None) - } -} - impl ChangeSetReader for MockEthProvider { fn account_block_changeset( &self, @@ -848,15 +840,6 @@ impl ChangeSetReader for MockEthProvider { } } -impl BlockExecutionReader for MockEthProvider { - fn get_block_and_execution_range( - &self, - _range: RangeInclusive, - ) -> ProviderResult { - Ok(Chain::default()) - } -} - impl StateReader 
for MockEthProvider { fn get_state(&self, _block: BlockNumber) -> ProviderResult> { Ok(None) diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index 2200781096..c0e80930b3 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -25,6 +25,7 @@ pub type MockNodeTypes = reth_node_types::AnyNodeTypesWithEngine< (), reth_ethereum_engine_primitives::EthEngineTypes, reth_chainspec::ChainSpec, + reth_trie_db::MerklePatriciaTrie, >; /// Mock [`reth_node_types::NodeTypesWithDB`] for testing. diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 266a94aa76..a589fecd31 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -4,7 +4,7 @@ use std::{ sync::Arc, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{ map::{HashMap, HashSet}, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, @@ -20,7 +20,7 @@ use reth_evm::ConfigureEvmEnv; use reth_primitives::{ parlia::Snapshot, Account, BlobSidecars, Block, BlockWithSenders, Bytecode, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, + TransactionSignedNoHash, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -37,9 +37,9 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, ParliaSnapshotReader, - PruneCheckpointReader, ReceiptProviderIdExt, RequestsProvider, StageCheckpointReader, - StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, - StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + PruneCheckpointReader, ReceiptProviderIdExt, StageCheckpointReader, StateProvider, + StateProviderBox, StateProviderFactory, StateRootProvider, StaticFileProviderFactory, + TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; /// Supports various api interfaces for testing purposes. 
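The `BlockIdReader` hunks that follow lost their angle-bracketed return types to extraction; restored, the noop signatures plausibly read as below (a sketch assuming the `Option`-wrapped `alloy_eips::BlockNumHash`, consistent with the `block_id.rs` hunk later in this patch):

```rust
use alloy_eips::BlockNumHash;
use reth_storage_errors::provider::ProviderResult;

impl BlockIdReader for NoopProvider {
    // The noop provider tracks no chain, so no pending/safe/finalized block exists.
    fn pending_block_num_hash(&self) -> ProviderResult<Option<BlockNumHash>> {
        Ok(None)
    }

    fn safe_block_num_hash(&self) -> ProviderResult<Option<BlockNumHash>> {
        Ok(None)
    }

    fn finalized_block_num_hash(&self) -> ProviderResult<Option<BlockNumHash>> {
        Ok(None)
    }
}
```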
@@ -175,15 +175,15 @@ impl BlockReaderIdExt for NoopProvider { } impl BlockIdReader for NoopProvider { - fn pending_block_num_hash(&self) -> ProviderResult> { + fn pending_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn safe_block_num_hash(&self) -> ProviderResult> { + fn safe_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn finalized_block_num_hash(&self) -> ProviderResult> { + fn finalized_block_num_hash(&self) -> ProviderResult> { Ok(None) } } @@ -465,22 +465,6 @@ impl StateProviderFactory for NoopProvider { Ok(Box::new(*self)) } - fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { - Ok(Box::new(*self)) - } - - fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(*self)) - } - - fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(*self)) - } - - fn pending(&self) -> ProviderResult { - Ok(Box::new(*self)) - } - fn state_by_block_number_or_tag( &self, number_or_tag: BlockNumberOrTag, @@ -507,6 +491,22 @@ impl StateProviderFactory for NoopProvider { } } + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { + Ok(Box::new(*self)) + } + + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(*self)) + } + + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(*self)) + } + + fn pending(&self) -> ProviderResult { + Ok(Box::new(*self)) + } + fn pending_state_by_hash(&self, _block_hash: B256) -> ProviderResult> { Ok(Some(Box::new(*self))) } @@ -549,16 +549,6 @@ impl SidecarsProvider for NoopProvider { } } -impl RequestsProvider for NoopProvider { - fn requests_by_block( - &self, - _id: BlockHashOrNumber, - _timestamp: u64, - ) -> ProviderResult> { - Ok(None) - } -} - impl PruneCheckpointReader for NoopProvider { fn get_prune_checkpoint( &self, diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 8e3a54d86b..7202c405f0 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -2,7 +2,6 @@ use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::SealedBlockWithSenders; -use reth_storage_api::BlockReader; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; use std::ops::RangeInclusive; @@ -23,16 +22,6 @@ pub trait BlockExecutionWriter: BlockWriter + Send + Sync { ) -> ProviderResult<()>; } -/// BlockExecution Reader -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait BlockExecutionReader: BlockReader + Send + Sync { - /// Get range of blocks and its execution result - fn get_block_and_execution_range( - &self, - range: RangeInclusive, - ) -> ProviderResult; -} - /// This just receives state, or [`ExecutionOutcome`], from the provider #[auto_impl::auto_impl(&, Arc, Box)] pub trait StateReader: Send + Sync { diff --git a/crates/storage/provider/src/traits/hashing.rs b/crates/storage/provider/src/traits/hashing.rs index 2b759afa72..c6958aa4d6 100644 --- a/crates/storage/provider/src/traits/hashing.rs +++ b/crates/storage/provider/src/traits/hashing.rs @@ -1,11 +1,11 @@ use alloy_primitives::{Address, BlockNumber, B256}; use auto_impl::auto_impl; -use reth_db_api::models::BlockNumberAddress; +use reth_db::models::{AccountBeforeTx, BlockNumberAddress}; use reth_primitives::{Account, StorageEntry}; use reth_storage_errors::provider::ProviderResult; use 
std::{ collections::{BTreeMap, BTreeSet, HashMap}, - ops::{Range, RangeInclusive}, + ops::{RangeBounds, RangeInclusive}, }; /// Hashing Writer @@ -16,9 +16,19 @@ pub trait HashingWriter: Send + Sync { /// # Returns /// /// Set of hashed keys of updated accounts. - fn unwind_account_hashing( + fn unwind_account_hashing<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, + ) -> ProviderResult>>; + + /// Unwind and clear account hashing in a given block range. + /// + /// # Returns + /// + /// Set of hashed keys of updated accounts. + fn unwind_account_hashing_range( + &self, + range: impl RangeBounds, ) -> ProviderResult>>; /// Inserts all accounts into [reth_db::tables::AccountsHistory] table. @@ -38,7 +48,17 @@ pub trait HashingWriter: Send + Sync { /// Mapping of hashed keys of updated accounts to their respective updated hashed slots. fn unwind_storage_hashing( &self, - range: Range, + changesets: impl Iterator, + ) -> ProviderResult>>; + + /// Unwind and clear storage hashing in a given block range. + /// + /// # Returns + /// + /// Mapping of hashed keys of updated accounts to their respective updated hashed slots. + fn unwind_storage_hashing_range( + &self, + range: impl RangeBounds, ) -> ProviderResult>>; /// Iterates over storages and inserts them to hashing table. diff --git a/crates/storage/provider/src/traits/history.rs b/crates/storage/provider/src/traits/history.rs index cbf9bece4b..4eadd6031c 100644 --- a/crates/storage/provider/src/traits/history.rs +++ b/crates/storage/provider/src/traits/history.rs @@ -1,8 +1,9 @@ use alloy_primitives::{Address, BlockNumber, B256}; use auto_impl::auto_impl; -use reth_db_api::models::BlockNumberAddress; +use reth_db::models::{AccountBeforeTx, BlockNumberAddress}; +use reth_primitives::StorageEntry; use reth_storage_errors::provider::ProviderResult; -use std::ops::{Range, RangeInclusive}; +use std::ops::{RangeBounds, RangeInclusive}; /// History Writer #[auto_impl(&, Arc, Box)] @@ -10,9 +11,17 @@ pub trait HistoryWriter: Send + Sync { /// Unwind and clear account history indices. /// /// Returns number of changesets walked. - fn unwind_account_history_indices( + fn unwind_account_history_indices<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, + ) -> ProviderResult; + + /// Unwind and clear account history indices in a given block range. + /// + /// Returns number of changesets walked. + fn unwind_account_history_indices_range( + &self, + range: impl RangeBounds, ) -> ProviderResult; /// Insert account change index to database. Used inside AccountHistoryIndex stage @@ -26,7 +35,15 @@ pub trait HistoryWriter: Send + Sync { /// Returns number of changesets walked. fn unwind_storage_history_indices( &self, - range: Range, + changesets: impl Iterator, + ) -> ProviderResult; + + /// Unwind and clear storage history indices in a given block range. + /// + /// Returns number of changesets walked. + fn unwind_storage_history_indices_range( + &self, + range: impl RangeBounds, ) -> ProviderResult; /// Insert storage change index to database. 
Used inside StorageHistoryIndex stage diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 51d8eabfc4..0ae8b28458 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -26,5 +26,6 @@ reth-trie.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true auto_impl.workspace = true diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 3a073ad601..18afd3ebca 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -1,6 +1,6 @@ use crate::{ - BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, RequestsProvider, - SidecarsProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, SidecarsProvider, + TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, Sealable, B256}; @@ -52,7 +52,6 @@ pub trait BlockReader: + HeaderProvider + TransactionsProvider + ReceiptProvider - + RequestsProvider + WithdrawalsProvider + SidecarsProvider + Send diff --git a/crates/storage/storage-api/src/block_id.rs b/crates/storage/storage-api/src/block_id.rs index 3d9df2e329..00856d348a 100644 --- a/crates/storage/storage-api/src/block_id.rs +++ b/crates/storage/storage-api/src/block_id.rs @@ -82,11 +82,10 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync { BlockNumberOrTag::Pending => self .pending_block_num_hash() .map(|res_opt| res_opt.map(|num_hash| num_hash.hash)), - _ => self - .convert_block_number(num)? - .map(|num| self.block_hash(num)) - .transpose() - .map(|maybe_hash| maybe_hash.flatten()), + BlockNumberOrTag::Finalized => self.finalized_block_hash(), + BlockNumberOrTag::Safe => self.safe_block_hash(), + BlockNumberOrTag::Earliest => self.block_hash(0), + BlockNumberOrTag::Number(num) => self.block_hash(num), }, } } @@ -100,13 +99,13 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync { } /// Get the current pending block number and hash. - fn pending_block_num_hash(&self) -> ProviderResult>; + fn pending_block_num_hash(&self) -> ProviderResult>; /// Get the current safe block number and hash. - fn safe_block_num_hash(&self) -> ProviderResult>; + fn safe_block_num_hash(&self) -> ProviderResult>; /// Get the current finalized block number and hash. - fn finalized_block_num_hash(&self) -> ProviderResult>; + fn finalized_block_num_hash(&self) -> ProviderResult>; /// Get the safe block number. fn safe_block_number(&self) -> ProviderResult> { diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index ab98df1118..da69eaee90 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -31,9 +31,6 @@ pub use prune_checkpoint::*; mod receipts; pub use receipts::*; -mod requests; -pub use requests::*; - mod stage_checkpoint; pub use stage_checkpoint::*; diff --git a/crates/storage/storage-api/src/requests.rs b/crates/storage/storage-api/src/requests.rs deleted file mode 100644 index 916b2a6e88..0000000000 --- a/crates/storage/storage-api/src/requests.rs +++ /dev/null @@ -1,14 +0,0 @@ -use alloy_eips::BlockHashOrNumber; -use reth_primitives::Requests; -use reth_storage_errors::provider::ProviderResult; - -/// Client trait for fetching EIP-7685 [Requests] for blocks. 
-#[auto_impl::auto_impl(&, Arc)] -pub trait RequestsProvider: Send + Sync { - /// Get requests by block id. - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult>; -} diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 9a3b855ff1..d37940f047 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -2,11 +2,12 @@ use super::{ AccountReader, BlockHashReader, BlockIdReader, StateProofProvider, StateRootProvider, StorageRootProvider, }; +use alloy_consensus::constants::KECCAK_EMPTY; use alloy_eips::{BlockId, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, StorageValue, B256, U256}; use auto_impl::auto_impl; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{Bytecode, KECCAK_EMPTY}; +use reth_primitives::Bytecode; use reth_storage_errors::provider::{ProviderError, ProviderResult}; /// Type alias of boxed [`StateProvider`]. diff --git a/crates/storage/storage-api/src/withdrawals.rs b/crates/storage/storage-api/src/withdrawals.rs index 2de69b34eb..ba422a3b33 100644 --- a/crates/storage/storage-api/src/withdrawals.rs +++ b/crates/storage/storage-api/src/withdrawals.rs @@ -1,5 +1,5 @@ -use alloy_eips::BlockHashOrNumber; -use reth_primitives::{Withdrawal, Withdrawals}; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; +use reth_primitives::Withdrawals; use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching [Withdrawal] related data. diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index a0070698fc..28b5eaba9f 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -288,7 +288,7 @@ pub struct TaskExecutor { on_shutdown: Shutdown, /// Sender half for sending panic signals to this type panicked_tasks_tx: UnboundedSender, - // Task Executor Metrics + /// Task Executor Metrics metrics: TaskExecutorMetrics, /// How many [`GracefulShutdown`] tasks are currently active graceful_tasks: Arc, diff --git a/crates/tokio-util/src/event_sender.rs b/crates/tokio-util/src/event_sender.rs index a4e9815388..16208ee19c 100644 --- a/crates/tokio-util/src/event_sender.rs +++ b/crates/tokio-util/src/event_sender.rs @@ -40,3 +40,96 @@ impl EventSender { EventStream::new(self.sender.subscribe()) } } + +#[cfg(test)] +mod tests { + use super::*; + use tokio::{ + task, + time::{timeout, Duration}, + }; + use tokio_stream::StreamExt; + + #[tokio::test] + async fn test_event_broadcast_to_listener() { + let sender = EventSender::default(); + + // Create a listener for the events + let mut listener = sender.new_listener(); + + // Broadcast an event + sender.notify("event1"); + + // Check if the listener receives the event + let received_event = listener.next().await; + assert_eq!(received_event, Some("event1")); + } + + #[tokio::test] + async fn test_event_no_listener() { + let sender = EventSender::default(); + + // Broadcast an event with no listeners + sender.notify("event2"); + + // Ensure it doesn't panic or fail when no listeners are present + // (this test passes if it runs without errors). 
+ } + + #[tokio::test] + async fn test_multiple_listeners_receive_event() { + let sender = EventSender::default(); + + // Create two listeners + let mut listener1 = sender.new_listener(); + let mut listener2 = sender.new_listener(); + + // Broadcast an event + sender.notify("event3"); + + // Both listeners should receive the same event + let event1 = listener1.next().await; + let event2 = listener2.next().await; + + assert_eq!(event1, Some("event3")); + assert_eq!(event2, Some("event3")); + } + + #[tokio::test] + async fn test_bounded_channel_size() { + // Create a channel with size 2 + let sender = EventSender::new(2); + + // Create a listener + let mut listener = sender.new_listener(); + + // Broadcast 3 events, which exceeds the channel size + sender.notify("event4"); + sender.notify("event5"); + sender.notify("event6"); + + // Only the last two should be received due to the size limit + let received_event1 = listener.next().await; + let received_event2 = listener.next().await; + + assert_eq!(received_event1, Some("event5")); + assert_eq!(received_event2, Some("event6")); + } + + #[tokio::test] + async fn test_event_listener_timeout() { + let sender = EventSender::default(); + let mut listener = sender.new_listener(); + + // Broadcast an event asynchronously + task::spawn(async move { + tokio::time::sleep(Duration::from_millis(50)).await; + sender.notify("delayed_event"); + }); + + // Use a timeout to ensure that the event is received within a certain time + let result = timeout(Duration::from_millis(100), listener.next()).await; + assert!(result.is_ok()); + assert_eq!(result.unwrap(), Some("delayed_event")); + } +} diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 41abbb4b6b..1bfb10d86d 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -27,6 +27,7 @@ revm.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true +alloy-consensus.workspace = true # async/futures futures-util.workspace = true @@ -54,7 +55,6 @@ rand = { workspace = true, optional = true } paste = { workspace = true, optional = true } proptest = { workspace = true, optional = true } proptest-arbitrary-interop = { workspace = true, optional = true } -alloy-consensus = { workspace = true, optional = true } [dev-dependencies] reth-primitives = { workspace = true, features = ["arbitrary"] } @@ -69,13 +69,46 @@ pprof = { workspace = true, features = ["criterion", "flamegraph"] } assert_matches.workspace = true tempfile.workspace = true serde_json.workspace = true -alloy-consensus.workspace = true [features] default = ["serde"] -serde = ["dep:serde"] -test-utils = ["rand", "paste", "serde", "alloy-consensus"] -arbitrary = ["proptest", "reth-primitives/arbitrary", "proptest-arbitrary-interop"] +serde = [ + "dep:serde", + "reth-execution-types/serde", + "reth-eth-wire-types/serde", + "reth-provider/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bitflags/serde", + "parking_lot/serde", + "rand?/serde", + "revm/serde", + "smallvec/serde" +] +test-utils = [ + "rand", + "paste", + "serde", + "reth-chain-state/test-utils", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-provider/test-utils", + "revm/test-utils" +] +arbitrary = [ + "proptest", + "reth-primitives/arbitrary", + "proptest-arbitrary-interop", + "reth-chainspec/arbitrary", + "reth-eth-wire-types/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + 
"alloy-primitives/arbitrary", + "bitflags/arbitrary", + "revm/arbitrary", + "smallvec/arbitrary" +] [[bench]] name = "truncate" diff --git a/crates/transaction-pool/benches/reorder.rs b/crates/transaction-pool/benches/reorder.rs index 1836ce40c0..9fc2162975 100644 --- a/crates/transaction-pool/benches/reorder.rs +++ b/crates/transaction-pool/benches/reorder.rs @@ -206,10 +206,12 @@ mod implementations { self.base_fee = Some(base_fee); let drained = self.inner.drain(); - self.inner = BinaryHeap::from_iter(drained.map(|mock| { - let priority = mock.tx.effective_tip_per_gas(base_fee).expect("set"); - MockTransactionWithPriority { tx: mock.tx, priority } - })); + self.inner = drained + .map(|mock| { + let priority = mock.tx.effective_tip_per_gas(base_fee).expect("set"); + MockTransactionWithPriority { tx: mock.tx, priority } + }) + .collect(); } } } diff --git a/crates/transaction-pool/benches/truncate.rs b/crates/transaction-pool/benches/truncate.rs index 22e4576305..1ca6f98499 100644 --- a/crates/transaction-pool/benches/truncate.rs +++ b/crates/transaction-pool/benches/truncate.rs @@ -66,7 +66,7 @@ fn generate_many_transactions(senders: usize, max_depth: usize) -> Vec().new_tree(&mut runner).unwrap().current() % max_depth + 1; diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 96119a0f81..987264853d 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -3,7 +3,6 @@ use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize}; use alloy_eips::eip4844::BlobAndProofV1; use alloy_primitives::{TxHash, B256}; -use alloy_rlp::{Decodable, Encodable}; use parking_lot::{Mutex, RwLock}; use reth_primitives::BlobTransactionSidecar; use schnellru::{ByLength, LruMap}; @@ -104,7 +103,7 @@ impl BlobStore for DiskFileBlobStore { stat } - fn get(&self, tx: B256) -> Result, BlobStoreError> { + fn get(&self, tx: B256) -> Result>, BlobStoreError> { self.inner.get_one(tx) } @@ -115,14 +114,17 @@ impl BlobStore for DiskFileBlobStore { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { if txs.is_empty() { return Ok(Vec::new()) } self.inner.get_all(txs) } - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { if txs.is_empty() { return Ok(Vec::new()) } @@ -165,7 +167,7 @@ impl BlobStore for DiskFileBlobStore { struct DiskFileBlobStoreInner { blob_dir: PathBuf, - blob_cache: Mutex>, + blob_cache: Mutex, ByLength>>, size_tracker: BlobStoreSize, file_lock: RwLock<()>, txs_to_delete: RwLock>, @@ -204,9 +206,9 @@ impl DiskFileBlobStoreInner { /// Ensures blob is in the blob cache and written to the disk. 
fn insert_one(&self, tx: B256, data: BlobTransactionSidecar) -> Result<(), BlobStoreError> { - let mut buf = Vec::with_capacity(data.fields_len()); - data.encode(&mut buf); - self.blob_cache.lock().insert(tx, data); + let mut buf = Vec::with_capacity(data.rlp_encoded_fields_length()); + data.rlp_encode_fields(&mut buf); + self.blob_cache.lock().insert(tx, Arc::new(data)); let size = self.write_one_encoded(tx, &buf)?; self.size_tracker.add_size(size); @@ -219,8 +221,8 @@ impl DiskFileBlobStoreInner { let raw = txs .iter() .map(|(tx, data)| { - let mut buf = Vec::with_capacity(data.fields_len()); - data.encode(&mut buf); + let mut buf = Vec::with_capacity(data.rlp_encoded_fields_length()); + data.rlp_encode_fields(&mut buf); (self.blob_disk_file(*tx), buf) }) .collect::>(); @@ -228,7 +230,7 @@ impl DiskFileBlobStoreInner { { let mut cache = self.blob_cache.lock(); for (tx, data) in txs { - cache.insert(tx, data); + cache.insert(tx, Arc::new(data)); } } let mut add = 0; @@ -279,15 +281,19 @@ impl DiskFileBlobStoreInner { } /// Retrieves the blob for the given transaction hash from the blob cache or disk. - fn get_one(&self, tx: B256) -> Result, BlobStoreError> { + fn get_one(&self, tx: B256) -> Result>, BlobStoreError> { if let Some(blob) = self.blob_cache.lock().get(&tx) { return Ok(Some(blob.clone())) } let blob = self.read_one(tx)?; + if let Some(blob) = &blob { - self.blob_cache.lock().insert(tx, blob.clone()); + let blob_arc = Arc::new(blob.clone()); + self.blob_cache.lock().insert(tx, blob_arc.clone()); + return Ok(Some(blob_arc)) } - Ok(blob) + + Ok(None) } /// Returns the path to the blob file for the given transaction hash. @@ -312,7 +318,7 @@ impl DiskFileBlobStoreInner { } } }; - BlobTransactionSidecar::decode(&mut data.as_slice()) + BlobTransactionSidecar::rlp_decode_fields(&mut data.as_slice()) .map(Some) .map_err(BlobStoreError::DecodeError) } @@ -322,7 +328,7 @@ impl DiskFileBlobStoreInner { self.read_many_raw(txs) .into_iter() .filter_map(|(tx, data)| { - BlobTransactionSidecar::decode(&mut data.as_slice()) + BlobTransactionSidecar::rlp_decode_fields(&mut data.as_slice()) .map(|sidecar| (tx, sidecar)) .ok() }) @@ -375,7 +381,7 @@ impl DiskFileBlobStoreInner { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { let mut res = Vec::with_capacity(txs.len()); let mut cache_miss = Vec::new(); { @@ -397,8 +403,9 @@ impl DiskFileBlobStoreInner { } let mut cache = self.blob_cache.lock(); for (tx, data) in from_disk { - cache.insert(tx, data.clone()); - res.push((tx, data)); + let arc = Arc::new(data.clone()); + cache.insert(tx, arc.clone()); + res.push((tx, arc.clone())); } Ok(res) @@ -408,14 +415,13 @@ impl DiskFileBlobStoreInner { /// /// Returns an error if there are any missing blobs. 
#[inline] - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { - let mut res = Vec::with_capacity(txs.len()); - for tx in txs { - let blob = self.get_one(tx)?.ok_or_else(|| BlobStoreError::MissingSidecar(tx))?; - res.push(blob) - } - - Ok(res) + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { + txs.into_iter() + .map(|tx| self.get_one(tx)?.ok_or(BlobStoreError::MissingSidecar(tx))) + .collect() } } @@ -519,14 +525,17 @@ mod tests { let blobs = rng_blobs(10); let all_hashes = blobs.iter().map(|(tx, _)| *tx).collect::>(); store.insert_all(blobs.clone()).unwrap(); + // all cached for (tx, blob) in &blobs { assert!(store.is_cached(tx)); - assert_eq!(store.get(*tx).unwrap().unwrap(), *blob); + let b = store.get(*tx).unwrap().map(Arc::unwrap_or_clone).unwrap(); + assert_eq!(b, *blob); } + let all = store.get_all(all_hashes.clone()).unwrap(); for (tx, blob) in all { - assert!(blobs.contains(&(tx, blob)), "missing blob {tx:?}"); + assert!(blobs.contains(&(tx, Arc::unwrap_or_clone(blob))), "missing blob {tx:?}"); } assert!(store.contains(all_hashes[0]).unwrap()); diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index 15160c2c3f..cea1837bdc 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -15,7 +15,7 @@ pub struct InMemoryBlobStore { #[derive(Debug, Default)] struct InMemoryBlobStoreInner { /// Storage for all blob data. - store: RwLock>, + store: RwLock>>, size_tracker: BlobStoreSize, } @@ -75,43 +75,28 @@ impl BlobStore for InMemoryBlobStore { } // Retrieves the decoded blob data for the given transaction hash. - fn get(&self, tx: B256) -> Result, BlobStoreError> { - let store = self.inner.store.read(); - Ok(store.get(&tx).cloned()) + fn get(&self, tx: B256) -> Result>, BlobStoreError> { + Ok(self.inner.store.read().get(&tx).cloned()) } fn contains(&self, tx: B256) -> Result { - let store = self.inner.store.read(); - Ok(store.contains_key(&tx)) + Ok(self.inner.store.read().contains_key(&tx)) } fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError> { - let mut items = Vec::with_capacity(txs.len()); + ) -> Result)>, BlobStoreError> { let store = self.inner.store.read(); - for tx in txs { - if let Some(item) = store.get(&tx) { - items.push((tx, item.clone())); - } - } - - Ok(items) + Ok(txs.into_iter().filter_map(|tx| store.get(&tx).map(|item| (tx, item.clone()))).collect()) } - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { - let mut items = Vec::with_capacity(txs.len()); + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { let store = self.inner.store.read(); - for tx in txs { - if let Some(item) = store.get(&tx) { - items.push(item.clone()); - } else { - return Err(BlobStoreError::MissingSidecar(tx)) - } - } - - Ok(items) + Ok(txs.into_iter().filter_map(|tx| store.get(&tx).cloned()).collect()) } fn get_by_versioned_hashes( @@ -150,7 +135,7 @@ impl BlobStore for InMemoryBlobStore { /// Removes the given blob from the store and returns the size of the blob that was removed. #[inline] -fn remove_size(store: &mut HashMap, tx: &B256) -> usize { +fn remove_size(store: &mut HashMap>, tx: &B256) -> usize { store.remove(tx).map(|rem| rem.size()).unwrap_or_default() } @@ -159,11 +144,11 @@ fn remove_size(store: &mut HashMap, tx: &B256) -> /// We don't need to handle the size updates for replacements because transactions are unique. 
#[inline] fn insert_size( - store: &mut HashMap, + store: &mut HashMap>, tx: B256, blob: BlobTransactionSidecar, ) -> usize { let add = blob.size(); - store.insert(tx, blob); + store.insert(tx, Arc::new(blob)); add } diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index ee98e3eed8..f8d37bfcc0 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -8,7 +8,10 @@ pub use noop::NoopBlobStore; use reth_primitives::BlobTransactionSidecar; use std::{ fmt, - sync::atomic::{AtomicUsize, Ordering}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, }; pub use tracker::{BlobStoreCanonTracker, BlobStoreUpdates}; @@ -44,7 +47,7 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { fn cleanup(&self) -> BlobStoreCleanupStat; /// Retrieves the decoded blob data for the given transaction hash. - fn get(&self, tx: B256) -> Result, BlobStoreError>; + fn get(&self, tx: B256) -> Result>, BlobStoreError>; /// Checks if the given transaction hash is in the blob store. fn contains(&self, tx: B256) -> Result; @@ -58,13 +61,14 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError>; + ) -> Result)>, BlobStoreError>; /// Returns the exact [`BlobTransactionSidecar`] for the given transaction hashes in the exact /// order they were requested. /// /// Returns an error if any of the blobs are not found in the blob store. - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError>; + fn get_exact(&self, txs: Vec) + -> Result>, BlobStoreError>; /// Return the [`BlobTransactionSidecar`]s for a list of blob versioned hashes. fn get_by_versioned_hashes( diff --git a/crates/transaction-pool/src/blobstore/noop.rs b/crates/transaction-pool/src/blobstore/noop.rs index 0e99858bd6..0f29357355 100644 --- a/crates/transaction-pool/src/blobstore/noop.rs +++ b/crates/transaction-pool/src/blobstore/noop.rs @@ -1,6 +1,7 @@ use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobTransactionSidecar}; use alloy_eips::eip4844::BlobAndProofV1; use alloy_primitives::B256; +use std::sync::Arc; /// A blobstore implementation that does nothing #[derive(Clone, Copy, Debug, PartialOrd, PartialEq, Eq, Default)] @@ -28,7 +29,7 @@ impl BlobStore for NoopBlobStore { BlobStoreCleanupStat::default() } - fn get(&self, _tx: B256) -> Result, BlobStoreError> { + fn get(&self, _tx: B256) -> Result>, BlobStoreError> { Ok(None) } @@ -39,11 +40,14 @@ impl BlobStore for NoopBlobStore { fn get_all( &self, _txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { Ok(vec![]) } - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { if txs.is_empty() { return Ok(vec![]) } diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index e6041fa12e..f22dcf5706 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -81,6 +81,13 @@ pub enum BlobStoreUpdates { #[cfg(test)] mod tests { + use alloy_consensus::Header; + use reth_execution_types::Chain; + use reth_primitives::{ + BlockBody, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, + TransactionSigned, + }; + use super::*; #[test] @@ -101,4 +108,85 @@ mod tests { BlobStoreUpdates::Finalized(block2.into_iter().chain(block3).collect::>()) ); } + + #[test] + fn 
test_add_new_chain_blocks() { + let mut tracker = BlobStoreCanonTracker::default(); + + // Create sample transactions + let tx1_hash = B256::random(); // EIP-4844 transaction + let tx2_hash = B256::random(); // EIP-4844 transaction + let tx3_hash = B256::random(); // Non-EIP-4844 transaction + + // Creating a first block with EIP-4844 transactions + let block1 = SealedBlockWithSenders { + block: SealedBlock { + header: SealedHeader::new( + Header { number: 10, ..Default::default() }, + B256::random(), + ), + body: BlockBody { + transactions: vec![ + TransactionSigned { + hash: tx1_hash, + transaction: Transaction::Eip4844(Default::default()), + ..Default::default() + }, + TransactionSigned { + hash: tx2_hash, + transaction: Transaction::Eip4844(Default::default()), + ..Default::default() + }, + // Another transaction that is not EIP-4844 + TransactionSigned { + hash: B256::random(), + transaction: Transaction::Eip7702(Default::default()), + ..Default::default() + }, + ], + ..Default::default() + }, + }, + ..Default::default() + }; + + // Creating a second block with EIP-1559 and EIP-2930 transactions + // Note: This block does not contain any EIP-4844 transactions + let block2 = SealedBlockWithSenders { + block: SealedBlock { + header: SealedHeader::new( + Header { number: 11, ..Default::default() }, + B256::random(), + ), + body: BlockBody { + transactions: vec![ + TransactionSigned { + hash: tx3_hash, + transaction: Transaction::Eip1559(Default::default()), + ..Default::default() + }, + TransactionSigned { + hash: tx2_hash, + transaction: Transaction::Eip2930(Default::default()), + ..Default::default() + }, + ], + ..Default::default() + }, + }, + ..Default::default() + }; + + // Extract blocks from the chain + let chain = Chain::new(vec![block1, block2], Default::default(), None); + let blocks = chain.into_inner().0; + + // Add new chain blocks to the tracker + tracker.add_new_chain_blocks(&blocks); + + // Tx1 and tx2 should be in the block containing EIP-4844 transactions + assert_eq!(tracker.blob_txs_in_blocks.get(&10).unwrap(), &vec![tx1_hash, tx2_hash]); + // No transactions should be in the block containing non-EIP-4844 transactions + assert!(tracker.blob_txs_in_blocks.get(&11).unwrap().is_empty()); + } } diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index 1b4b010a8e..d451884625 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -2,11 +2,9 @@ use crate::{ pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE}, PoolSize, TransactionOrigin, }; +use alloy_consensus::constants::EIP4844_TX_TYPE_ID; +use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use alloy_primitives::Address; -use reth_primitives::{ - constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}, - EIP4844_TX_TYPE_ID, -}; use std::{collections::HashSet, ops::Mul}; /// Guarantees max transactions for one sender, compatible with geth/erigon diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index a4766a89d5..f71bf01880 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -1,7 +1,8 @@ //! Transaction pool errors +use alloy_eips::eip4844::BlobTransactionValidationError; use alloy_primitives::{Address, TxHash, U256}; -use reth_primitives::{BlobTransactionValidationError, InvalidTransactionError}; +use reth_primitives::InvalidTransactionError; /// Transaction pool result type. 
pub type PoolResult = Result; diff --git a/crates/transaction-pool/src/identifier.rs b/crates/transaction-pool/src/identifier.rs index 97d4bda8d0..2e5312a210 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -59,6 +59,11 @@ impl SenderId { pub const fn start_bound(self) -> std::ops::Bound { std::ops::Bound::Included(TransactionId::new(self, 0)) } + + /// Converts the sender to a [`TransactionId`] with the given nonce. + pub const fn into_transaction_id(self, nonce: u64) -> TransactionId { + TransactionId::new(self, nonce) + } } impl From for SenderId { diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index a5acd6edba..0203759943 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -430,13 +430,6 @@ where Box::new(self.pool.best_transactions()) } - fn best_transactions_with_base_fee( - &self, - base_fee: u64, - ) -> Box>>> { - self.pool.best_transactions_with_attributes(BestTransactionsAttributes::base_fee(base_fee)) - } - fn best_transactions_with_attributes( &self, best_transactions_attributes: BestTransactionsAttributes, @@ -463,6 +456,20 @@ where self.pool.remove_transactions(hashes) } + fn remove_transactions_and_descendants( + &self, + hashes: Vec, + ) -> Vec>> { + self.pool.remove_transactions_and_descendants(hashes) + } + + fn remove_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>> { + self.pool.remove_transactions_by_sender(sender) + } + fn retain_unknown(&self, announcement: &mut A) where A: HandleMempoolData, @@ -489,6 +496,27 @@ where self.pool.get_transactions_by_sender(sender) } + fn get_pending_transactions_with_predicate( + &self, + predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + self.pool.pending_transactions_with_predicate(predicate) + } + + fn get_pending_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>> { + self.pool.get_pending_transactions_by_sender(sender) + } + + fn get_queued_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>> { + self.pool.get_queued_transactions_by_sender(sender) + } + fn get_highest_transaction_by_sender( &self, sender: Address, @@ -496,6 +524,14 @@ where self.pool.get_highest_transaction_by_sender(sender) } + fn get_highest_consecutive_transaction_by_sender( + &self, + sender: Address, + on_chain_nonce: u64, + ) -> Option>> { + self.pool.get_highest_consecutive_transaction_by_sender(sender, on_chain_nonce) + } + fn get_transaction_by_sender_and_nonce( &self, sender: Address, @@ -525,21 +561,24 @@ where self.pool.unique_senders() } - fn get_blob(&self, tx_hash: TxHash) -> Result, BlobStoreError> { + fn get_blob( + &self, + tx_hash: TxHash, + ) -> Result>, BlobStoreError> { self.pool.blob_store().get(tx_hash) } fn get_all_blobs( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { self.pool.blob_store().get_all(tx_hashes) } fn get_all_blobs_exact( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result>, BlobStoreError> { self.pool.blob_store().get_exact(tx_hashes) } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 66b9861473..608f8d5745 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -7,6 +7,7 @@ use crate::{ traits::{CanonicalStateUpdate, TransactionPool, TransactionPoolExt}, BlockInfo, PoolTransaction, }; +use alloy_eips::BlockNumberOrTag; use alloy_primitives::{Address, BlockHash, BlockNumber, 
Sealable}; use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, @@ -17,7 +18,7 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives::{ - BlockNumberOrTag, PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, + PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, }; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; @@ -27,6 +28,7 @@ use std::{ collections::HashSet, hash::{Hash, Hasher}, path::{Path, PathBuf}, + sync::Arc, }; use tokio::sync::oneshot; use tracing::{debug, error, info, trace, warn}; @@ -328,6 +330,7 @@ pub async fn maintain_transaction_pool( pool.get_blob(tx.hash) .ok() .flatten() + .map(Arc::unwrap_or_clone) .and_then(|sidecar| { PooledTransactionsElementEcRecovered::try_from_blob_transaction( tx, sidecar, @@ -454,21 +457,11 @@ impl FinalizedBlockTracker { /// Updates the tracked finalized block and returns the new finalized block if it changed fn update(&mut self, finalized_block: Option) -> Option { - match (self.last_finalized_block, finalized_block) { - (Some(last), Some(finalized)) => { - self.last_finalized_block = Some(finalized); - if last < finalized { - Some(finalized) - } else { - None - } - } - (None, Some(finalized)) => { - self.last_finalized_block = Some(finalized); - Some(finalized) - } - _ => None, - } + let finalized = finalized_block?; + self.last_finalized_block + .replace(finalized) + .map_or(true, |last| last < finalized) + .then_some(finalized) } } @@ -490,7 +483,7 @@ impl MaintainedPoolState { } } -/// A unique `ChangedAccount` identified by its address that can be used for deduplication +/// A unique [`ChangedAccount`] identified by its address that can be used for deduplication #[derive(Eq)] struct ChangedAccountEntry(ChangedAccount); @@ -585,7 +578,7 @@ where // Filter out errors ::try_from_consensus(tx.into()).ok() }) - .collect::>(); + .collect(); let outcome = pool.add_transactions(crate::TransactionOrigin::Local, pool_transactions).await; diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index ddab4f6227..47a26ee29a 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -16,10 +16,10 @@ use crate::{ PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::{eip1559::ETHEREUM_BLOCK_GAS_LIMIT, eip4844::BlobAndProofV1}; use alloy_primitives::{Address, TxHash, B256, U256}; use reth_eth_wire_types::HandleMempoolData; -use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, BlobTransactionSidecar}; +use reth_primitives::BlobTransactionSidecar; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use tokio::sync::{mpsc, mpsc::Receiver}; @@ -150,13 +150,6 @@ impl TransactionPool for NoopTransactionPool { Box::new(std::iter::empty()) } - fn best_transactions_with_base_fee( - &self, - _: u64, - ) -> Box>>> { - Box::new(std::iter::empty()) - } - fn best_transactions_with_attributes( &self, _: BestTransactionsAttributes, @@ -183,6 +176,20 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn remove_transactions_and_descendants( + &self, + _hashes: Vec, + ) -> Vec>> { + vec![] + } + + fn remove_transactions_by_sender( + &self, + _sender: Address, + ) -> Vec>> { + 
vec![] + } + fn retain_unknown(&self, _announcement: &mut A) where A: HandleMempoolData, @@ -206,6 +213,27 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn get_pending_transactions_with_predicate( + &self, + _predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + vec![] + } + + fn get_pending_transactions_by_sender( + &self, + _sender: Address, + ) -> Vec>> { + vec![] + } + + fn get_queued_transactions_by_sender( + &self, + _sender: Address, + ) -> Vec>> { + vec![] + } + fn get_highest_transaction_by_sender( &self, _sender: Address, @@ -213,6 +241,14 @@ impl TransactionPool for NoopTransactionPool { None } + fn get_highest_consecutive_transaction_by_sender( + &self, + _sender: Address, + _on_chain_nonce: u64, + ) -> Option>> { + None + } + fn get_transaction_by_sender_and_nonce( &self, _sender: Address, @@ -239,21 +275,24 @@ impl TransactionPool for NoopTransactionPool { Default::default() } - fn get_blob(&self, _tx_hash: TxHash) -> Result, BlobStoreError> { + fn get_blob( + &self, + _tx_hash: TxHash, + ) -> Result>, BlobStoreError> { Ok(None) } fn get_all_blobs( &self, _tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { Ok(vec![]) } fn get_all_blobs_exact( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result>, BlobStoreError> { if tx_hashes.is_empty() { return Ok(vec![]) } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 5880a73f51..36a14edaa2 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -1,14 +1,14 @@ use crate::{ - identifier::TransactionId, pool::pending::PendingTransaction, PoolTransaction, - TransactionOrdering, ValidPoolTransaction, + identifier::{SenderId, TransactionId}, + pool::pending::PendingTransaction, + PoolTransaction, TransactionOrdering, ValidPoolTransaction, }; -use alloy_primitives::B256 as TxHash; +use alloy_primitives::Address; use core::fmt; use std::{ - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet, HashSet, VecDeque}, sync::Arc, }; - use tokio::sync::broadcast::{error::TryRecvError, Receiver}; use tracing::debug; @@ -80,7 +80,7 @@ pub(crate) struct BestTransactions { /// then can be moved from the `all` set to the `independent` set. pub(crate) independent: BTreeSet>, /// There might be the case where a yielded transactions is invalid, this will track it. - pub(crate) invalid: HashSet, + pub(crate) invalid: HashSet, /// Used to receive any new pending transactions that have been added to the pool after this /// iterator was static fileted /// @@ -94,7 +94,7 @@ pub(crate) struct BestTransactions { impl BestTransactions { /// Mark the transaction and it's descendants as invalid. pub(crate) fn mark_invalid(&mut self, tx: &Arc>) { - self.invalid.insert(*tx.hash()); + self.invalid.insert(tx.sender_id()); } /// Returns the ancestor the given transaction, the transaction with `nonce - 1`. 
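The `best.rs` hunk above switches the iterator's `invalid` set from transaction hashes to sender ids: because a sender's pool transactions are nonce-ordered, skipping everything from an invalidated sender is exactly what drops the marked transaction's descendants. A minimal sketch of the restored shapes (the generics were stripped by extraction):

```rust
use std::{collections::HashSet, sync::Arc};

pub(crate) struct BestTransactions<T: TransactionOrdering> {
    // ... other fields as in the hunk above ...
    /// Senders whose yielded transaction turned out to be invalid; every
    /// higher-nonce transaction from these senders is skipped as a descendant.
    pub(crate) invalid: HashSet<SenderId>,
}

impl<T: TransactionOrdering> BestTransactions<T> {
    /// Mark the transaction and its descendants as invalid.
    pub(crate) fn mark_invalid(&mut self, tx: &Arc<ValidPoolTransaction<T::Transaction>>) {
        self.invalid.insert(tx.sender_id());
    }
}
```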
@@ -132,9 +132,8 @@ impl BestTransactions { /// created and inserts them fn add_new_transactions(&mut self) { while let Some(pending_tx) = self.try_recv() { - let tx = pending_tx.transaction.clone(); // same logic as PendingPool::add_transaction/PendingPool::best_with_unlocked - let tx_id = *tx.id(); + let tx_id = *pending_tx.transaction.id(); if self.ancestor(&tx_id).is_none() { self.independent.insert(pending_tx.clone()); } @@ -169,14 +168,14 @@ impl Iterator for BestTransactions { self.add_new_transactions(); // Remove the next independent tx with the highest priority let best = self.independent.pop_last()?; - let hash = best.transaction.hash(); + let sender_id = best.transaction.sender_id(); - // skip transactions that were marked as invalid - if self.invalid.contains(hash) { + // skip transactions for which sender was marked as invalid + if self.invalid.contains(&sender_id) { debug!( target: "txpool", "[{:?}] skipping invalid transaction", - hash + best.transaction.hash() ); continue } @@ -187,7 +186,7 @@ impl Iterator for BestTransactions { } if self.skip_blobs && best.transaction.transaction.is_eip4844() { - // blobs should be skipped, marking the as invalid will ensure that no dependent + // blobs should be skipped, marking them as invalid will ensure that no dependent // transactions are returned self.mark_invalid(&best.transaction) } else { @@ -197,7 +196,7 @@ impl Iterator for BestTransactions { } } -/// A[`BestTransactions`](crate::traits::BestTransactions) implementation that filters the +/// A [`BestTransactions`](crate::traits::BestTransactions) implementation that filters the /// transactions of iter with predicate. /// /// Filter out transactions are marked as invalid: @@ -209,7 +208,7 @@ pub struct BestTransactionFilter { impl BestTransactionFilter { /// Create a new [`BestTransactionFilter`] with the given predicate. - pub(crate) const fn new(best: I, predicate: P) -> Self { + pub const fn new(best: I, predicate: P) -> Self { Self { best, predicate } } } @@ -260,6 +259,88 @@ impl fmt::Debug for BestTransactionFilter { } } +/// Wrapper over [`crate::traits::BestTransactions`] that prioritizes transactions of certain +/// senders capping total gas used by such transactions. +#[derive(Debug)] +pub struct BestTransactionsWithPrioritizedSenders { + /// Inner iterator + inner: I, + /// A set of senders which transactions should be prioritized + prioritized_senders: HashSet
<Address>, + /// Maximum total gas limit of prioritized transactions + max_prioritized_gas: u64, + /// Buffer with transactions that are not being prioritized. Those will be the first to be + /// included after the prioritized transactions + buffer: VecDeque<I::Item>, + /// Tracker of total gas limit of prioritized transactions. Once it reaches + /// `max_prioritized_gas` no more transactions will be prioritized + prioritized_gas: u64, +} + +impl<I: Iterator> BestTransactionsWithPrioritizedSenders<I> { + /// Constructs a new [`BestTransactionsWithPrioritizedSenders`]. + pub fn new(prioritized_senders: HashSet<Address>
, max_prioritized_gas: u64, inner: I) -> Self { + Self { + inner, + prioritized_senders, + max_prioritized_gas, + buffer: Default::default(), + prioritized_gas: Default::default(), + } + } +} + +impl<I, T> Iterator for BestTransactionsWithPrioritizedSenders<I> +where + I: crate::traits::BestTransactions<Item = Arc<ValidPoolTransaction<T>>>, + T: PoolTransaction, +{ + type Item = <I as Iterator>::Item; + + fn next(&mut self) -> Option<Self::Item> { + // If we have space, try prioritizing transactions + if self.prioritized_gas < self.max_prioritized_gas { + for item in &mut self.inner { + if self.prioritized_senders.contains(&item.transaction.sender()) && + self.prioritized_gas + item.transaction.gas_limit() <= + self.max_prioritized_gas + { + self.prioritized_gas += item.transaction.gas_limit(); + return Some(item) + } + self.buffer.push_back(item); + } + } + + if let Some(item) = self.buffer.pop_front() { + Some(item) + } else { + self.inner.next() + } + } +} + +impl<I, T> crate::traits::BestTransactions for BestTransactionsWithPrioritizedSenders<I> +where + I: crate::traits::BestTransactions<Item = Arc<ValidPoolTransaction<T>>>, + T: PoolTransaction, +{ + fn mark_invalid(&mut self, tx: &Self::Item) { + self.inner.mark_invalid(tx) + } + + fn no_updates(&mut self) { + self.inner.no_updates() + } + + fn set_skip_blobs(&mut self, skip_blobs: bool) { + if skip_blobs { + self.buffer.retain(|tx| !tx.transaction.is_eip4844()) + } + self.inner.set_skip_blobs(skip_blobs) + } +} + #[cfg(test)] mod tests { use super::*; @@ -320,6 +401,29 @@ mod tests { assert!(best.next().is_none()); } + #[test] + fn test_best_transactions_iter_invalid() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_tx = 10; + // insert 10 gapless tx + let tx = MockTransaction::eip1559(); + for nonce in 0..num_tx { + let tx = tx.clone().rng_hash().with_nonce(nonce); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best: Box< + dyn crate::traits::BestTransactions<Item = Arc<ValidPoolTransaction<MockTransaction>>>, + > = Box::new(pool.best()); + + let tx = best.next().unwrap(); + best.mark_invalid(&tx); + assert!(best.next().is_none()); + } + #[test] fn test_best_with_fees_iter_base_fee_satisfied() { let mut pool = PendingPool::new(MockOrdering::default()); diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs index cb09e82340..ac39c6ab78 100644 --- a/crates/transaction-pool/src/pool/blob.rs +++ b/crates/transaction-pool/src/pool/blob.rs @@ -11,7 +11,7 @@ use std::{ /// A set of validated blob transactions in the pool that are __not pending__. /// -/// The purpose of this pool is keep track of blob transactions that are queued and to evict the +/// The purpose of this pool is to keep track of blob transactions that are queued and to evict the /// worst blob transactions once the sub-pool is full.
/// /// This expects that certain constraints are met: @@ -198,14 +198,13 @@ impl BlobTransactions { &mut self, pending_fees: &PendingFees, ) -> Vec>> { - let to_remove = self.satisfy_pending_fee_ids(pending_fees); - - let mut removed = Vec::with_capacity(to_remove.len()); - for id in to_remove { - removed.push(self.remove_transaction(&id).expect("transaction exists")); - } + let removed = self + .satisfy_pending_fee_ids(pending_fees) + .into_iter() + .map(|id| self.remove_transaction(&id).expect("transaction exists")) + .collect(); - // set pending fees and reprioritize / resort + // Update pending fees and reprioritize self.pending_fees = pending_fees.clone(); self.reprioritize(); diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 090b92fb65..77446a5237 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -106,7 +106,7 @@ use crate::{ traits::{GetPooledTransactionLimit, NewBlobSidecar, TransactionListenerKind}, validate::ValidTransaction, }; -pub use best::BestTransactionFilter; +pub use best::{BestTransactionFilter, BestTransactionsWithPrioritizedSenders}; pub use blob::{blob_tx_priority, fee_delta}; pub use events::{FullTransactionEvent, TransactionEvent}; pub use listener::{AllTransactionsEvents, TransactionEvents}; @@ -195,7 +195,7 @@ where pub(crate) fn block_info(&self) -> BlockInfo { self.get_pool_data().block_info() } - /// Returns the currently tracked block + /// Sets the currently tracked block pub(crate) fn set_block_info(&self, info: BlockInfo) { self.pool.write().set_block_info(info) } @@ -307,7 +307,9 @@ where /// Caution: this assumes the given transaction is eip-4844 fn get_blob_transaction(&self, transaction: TransactionSigned) -> Option { if let Ok(Some(sidecar)) = self.blob_store.get(transaction.hash()) { - if let Ok(blob) = BlobTransaction::try_from_signed(transaction, sidecar) { + if let Ok(blob) = + BlobTransaction::try_from_signed(transaction, Arc::unwrap_or_clone(sidecar)) + { return Some(blob) } } @@ -715,6 +717,38 @@ where removed } + /// Removes and returns all matching transactions and their dependent transactions from the + /// pool. + pub(crate) fn remove_transactions_and_descendants( + &self, + hashes: Vec, + ) -> Vec>> { + if hashes.is_empty() { + return Vec::new() + } + let removed = self.pool.write().remove_transactions_and_descendants(hashes); + + let mut listener = self.event_listener.write(); + + removed.iter().for_each(|tx| listener.discarded(tx.hash())); + + removed + } + + pub(crate) fn remove_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>> { + let sender_id = self.get_sender_id(sender); + let removed = self.pool.write().remove_transactions_by_sender(sender_id); + + let mut listener = self.event_listener.write(); + + removed.iter().for_each(|tx| listener.discarded(tx.hash())); + + removed + } + /// Removes and returns all transactions that are present in the pool. 
pub(crate) fn retain_unknown<A>(&self, announcement: &mut A) where @@ -744,6 +778,32 @@ where self.get_pool_data().get_transactions_by_sender(sender_id) } + /// Returns all queued transactions of the given sender address + pub(crate) fn get_queued_transactions_by_sender( + &self, + sender: Address, + ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> { + let sender_id = self.get_sender_id(sender); + self.get_pool_data().queued_txs_by_sender(sender_id) + } + + /// Returns all pending transactions filtered by predicate + pub(crate) fn pending_transactions_with_predicate( + &self, + predicate: impl FnMut(&ValidPoolTransaction<T::Transaction>) -> bool, + ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> { + self.get_pool_data().pending_transactions_with_predicate(predicate) + } + + /// Returns all pending transactions of the given sender address + pub(crate) fn get_pending_transactions_by_sender( + &self, + sender: Address, + ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> { + let sender_id = self.get_sender_id(sender); + self.get_pool_data().pending_txs_by_sender(sender_id) + } + /// Returns the highest transaction of the address pub(crate) fn get_highest_transaction_by_sender( &self, @@ -753,6 +813,18 @@ where self.get_pool_data().get_highest_transaction_by_sender(sender_id) } + /// Returns the transaction with the highest nonce that is executable given the on chain nonce. + pub(crate) fn get_highest_consecutive_transaction_by_sender( + &self, + sender: Address, + on_chain_nonce: u64, + ) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> { + let sender_id = self.get_sender_id(sender); + self.get_pool_data().get_highest_consecutive_transaction_by_sender( + sender_id.into_transaction_id(on_chain_nonce), + ) + } + /// Returns all transactions that were submitted with the given [`TransactionOrigin`] pub(crate) fn get_transactions_by_origin( &self, @@ -1174,7 +1246,8 @@ mod tests { validate::ValidTransaction, BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, TransactionValidationOutcome, U256, }; - use reth_primitives::{kzg::Blob, transaction::generate_blob_sidecar}; + use alloy_eips::eip4844::BlobTransactionSidecar; + use reth_primitives::kzg::Blob; use std::{fs, path::PathBuf}; #[test] @@ -1209,7 +1282,7 @@ mod tests { .unwrap()]; // Generate a BlobTransactionSidecar from the blobs. - let sidecar = generate_blob_sidecar(blobs); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Create an in-memory blob store. let blob_store = InMemoryBlobStore::default(); diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index b591fdb539..407f04fd5b 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -35,8 +35,8 @@ pub struct ParkedPool<T: ParkedOrd> { best: BTreeSet<ParkedPoolTransaction<T>>, /// Keeps track of last submission id for each sender. /// - /// This are sorted in Reverse order, so the last (highest) submission id is first, and the - /// lowest(oldest) is the last. + /// These are sorted in reverse order, so the last (highest) submission id is first, and the + /// lowest (oldest) is the last. last_sender_submission: BTreeSet<SubmissionSenderId>, /// Keeps track of the number of transactions in the pool by the sender and the last submission /// id.
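A usage sketch of the sender-scoped pool helpers added above; `pool` and `sender` are hypothetical stand-ins for a running `TransactionPool` handle and a tracked address:

```rust
use alloy_primitives::Address;

let sender = Address::ZERO; // placeholder for a real sender address

// Pending transactions are executable with the current on-chain nonce;
// queued transactions still have a nonce gap in front of them.
let pending = pool.get_pending_transactions_by_sender(sender);
let queued = pool.get_queued_transactions_by_sender(sender);

// Evict everything from a misbehaving sender; discard listeners are notified.
let dropped = pool.remove_transactions_by_sender(sender);
```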
@@ -856,4 +856,64 @@ mod tests { assert_eq!(submission_info2.sender_id, sender2); assert_eq!(submission_info2.submission_id, 2); } + + #[test] + fn test_remove_sender_count() { + // Initialize a mock transaction factory + let mut f = MockTransactionFactory::default(); + // Create an empty transaction pool + let mut pool = ParkedPool::>::default(); + // Generate two validated transactions and add them to the pool + let tx1 = f.validated_arc(MockTransaction::eip1559().inc_price()); + let tx2 = f.validated_arc(MockTransaction::eip1559().inc_price()); + pool.add_transaction(tx1); + pool.add_transaction(tx2); + + // Define two different sender IDs and their corresponding submission IDs + let sender1: SenderId = 11.into(); + let sender2: SenderId = 22.into(); + + // Add the sender counts to the pool + pool.add_sender_count(sender1, 1); + + // We add sender 2 multiple times to test the removal of sender counts + pool.add_sender_count(sender2, 2); + pool.add_sender_count(sender2, 3); + + // Before removing the sender count we should have 4 sender transaction counts + assert_eq!(pool.sender_transaction_count.len(), 4); + assert!(pool.sender_transaction_count.contains_key(&sender1)); + + // We should have 1 sender transaction count for sender 1 before removing the sender count + assert_eq!(pool.sender_transaction_count.get(&sender1).unwrap().count, 1); + + // Remove the sender count for sender 1 + pool.remove_sender_count(sender1); + + // After removing the sender count we should have 3 sender transaction counts remaining + assert_eq!(pool.sender_transaction_count.len(), 3); + assert!(!pool.sender_transaction_count.contains_key(&sender1)); + + // Check the sender transaction count for sender 2 before removing the sender count + assert_eq!( + *pool.sender_transaction_count.get(&sender2).unwrap(), + SenderTransactionCount { count: 2, last_submission_id: 3 } + ); + + // Remove the sender count for sender 2 + pool.remove_sender_count(sender2); + + // After removing the sender count for sender 2, we still have 3 sender transaction counts + // remaining. + // + // This is because we added sender 2 multiple times and we only removed the last submission. + assert_eq!(pool.sender_transaction_count.len(), 3); + assert!(pool.sender_transaction_count.contains_key(&sender2)); + + // Sender transaction count for sender 2 should be updated correctly + assert_eq!( + *pool.sender_transaction_count.get(&sender2).unwrap(), + SenderTransactionCount { count: 1, last_submission_id: 3 } + ); + } } diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index ff3ecf65a4..ff5269014c 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -197,7 +197,7 @@ impl PendingPool { } } else { self.size_of += tx.transaction.size(); - self.update_independents_and_highest_nonces(&tx, &id); + self.update_independents_and_highest_nonces(&tx); self.all.insert(tx.clone()); self.by_id.insert(id, tx); } @@ -243,7 +243,7 @@ impl PendingPool { tx.priority = self.ordering.priority(&tx.transaction.transaction, base_fee); self.size_of += tx.transaction.size(); - self.update_independents_and_highest_nonces(&tx, &id); + self.update_independents_and_highest_nonces(&tx); self.all.insert(tx.clone()); self.by_id.insert(id, tx); } @@ -254,12 +254,8 @@ impl PendingPool { /// Updates the independent transaction and highest nonces set, assuming the given transaction /// is being _added_ to the pool. 
- fn update_independents_and_highest_nonces( - &mut self, - tx: &PendingTransaction, - tx_id: &TransactionId, - ) { - let ancestor_id = tx_id.unchecked_ancestor(); + fn update_independents_and_highest_nonces(&mut self, tx: &PendingTransaction) { + let ancestor_id = tx.transaction.id().unchecked_ancestor(); if let Some(ancestor) = ancestor_id.and_then(|id| self.by_id.get(&id)) { // the transaction already has an ancestor, so we only need to ensure that the // highest nonces set actually contains the highest nonce for that sender @@ -305,7 +301,7 @@ impl PendingPool { let priority = self.ordering.priority(&tx.transaction, base_fee); let tx = PendingTransaction { submission_id, transaction: tx, priority }; - self.update_independents_and_highest_nonces(&tx, &tx_id); + self.update_independents_and_highest_nonces(&tx); self.all.insert(tx.clone()); // send the new transaction to any existing pendingpool static file iterators diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 10605565c8..8679a4318b 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -18,14 +18,15 @@ use crate::{ PoolConfig, PoolResult, PoolTransaction, PriceBumpConfig, TransactionOrdering, ValidPoolTransaction, U256, }; -use alloy_primitives::{Address, TxHash, B256}; -use reth_primitives::{ - constants::{ - eip4844::BLOB_TX_MIN_BLOB_GASPRICE, ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE, - }, +use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; +use alloy_eips::{ + eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}, + eip4844::BLOB_TX_MIN_BLOB_GASPRICE, +}; +use alloy_primitives::{Address, TxHash, B256}; use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ @@ -108,6 +109,35 @@ impl TxPool { self.all().txs_iter(sender).last().map(|(_, tx)| Arc::clone(&tx.transaction)) } + /// Returns the transaction with the highest nonce that is executable given the on chain nonce. + /// + /// If the pool already tracks a higher nonce for the given sender, then this nonce is used + /// instead. + /// + /// Note: The next pending pooled transaction must have the on chain nonce. + pub(crate) fn get_highest_consecutive_transaction_by_sender( + &self, + mut on_chain: TransactionId, + ) -> Option>> { + let mut last_consecutive_tx = None; + + // ensure this operates on the most recent + if let Some(current) = self.sender_info.get(&on_chain.sender) { + on_chain.nonce = on_chain.nonce.max(current.state_nonce); + } + + let mut next_expected_nonce = on_chain.nonce; + for (id, tx) in self.all().descendant_txs_inclusive(&on_chain) { + if next_expected_nonce != id.nonce { + break + } + next_expected_nonce = id.next_nonce(); + last_consecutive_tx = Some(tx); + } + + last_consecutive_tx.map(|tx| Arc::clone(&tx.transaction)) + } + /// Returns access to the [`AllTransactions`] container. 
pub(crate) const fn all(&self) -> &AllTransactions { &self.all_transactions @@ -334,11 +364,34 @@ impl TxPool { self.pending_pool.all() } + /// Returns all pending transactions filtered by predicate + pub(crate) fn pending_transactions_with_predicate( + &self, + mut predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + self.pending_transactions_iter().filter(|tx| predicate(tx)).collect() + } + + /// Returns all pending transactions for the specified sender + pub(crate) fn pending_txs_by_sender( + &self, + sender: SenderId, + ) -> Vec>> { + self.pending_transactions_iter().filter(|tx| tx.sender_id() == sender).collect() + } + /// Returns all transactions from parked pools pub(crate) fn queued_transactions(&self) -> Vec>> { self.basefee_pool.all().chain(self.queued_pool.all()).collect() } + /// Returns an iterator over all transactions from parked pools + pub(crate) fn queued_transactions_iter( + &self, + ) -> impl Iterator>> + '_ { + self.basefee_pool.all().chain(self.queued_pool.all()) + } + /// Returns queued and pending transactions for the specified sender pub fn queued_and_pending_txs_by_sender( &self, @@ -347,6 +400,14 @@ impl TxPool { (self.queued_pool.get_txs_by_sender(sender), self.pending_pool.get_txs_by_sender(sender)) } + /// Returns all queued transactions for the specified sender + pub(crate) fn queued_txs_by_sender( + &self, + sender: SenderId, + ) -> Vec>> { + self.queued_transactions_iter().filter(|tx| tx.sender_id() == sender).collect() + } + /// Returns `true` if the transaction with the given hash is already included in this pool. pub(crate) fn contains(&self, tx_hash: &TxHash) -> bool { self.all_transactions.contains(tx_hash) @@ -575,8 +636,8 @@ impl TxPool { *transaction.hash(), PoolErrorKind::InvalidTransaction( InvalidPoolTransactionError::ExceedsGasLimit( - block_gas_limit, tx_gas_limit, + block_gas_limit, ), ), )), @@ -663,6 +724,38 @@ impl TxPool { txs } + /// Removes and returns all matching transactions and their descendants from the pool. + pub(crate) fn remove_transactions_and_descendants( + &mut self, + hashes: Vec, + ) -> Vec>> { + let mut removed = Vec::new(); + for hash in hashes { + if let Some(tx) = self.remove_transaction_by_hash(&hash) { + removed.push(tx.clone()); + self.remove_descendants(tx.id(), &mut removed); + } + } + self.update_size_metrics(); + removed + } + + /// Removes all transactions from the given sender. + pub(crate) fn remove_transactions_by_sender( + &mut self, + sender_id: SenderId, + ) -> Vec>> { + let mut removed = Vec::new(); + let txs = self.get_transactions_by_sender(sender_id); + for tx in txs { + if let Some(tx) = self.remove_transaction(tx.id()) { + removed.push(tx); + } + } + self.update_size_metrics(); + removed + } + /// Remove the transaction from the __entire__ pool. /// /// This includes the total set of transaction and the subpool it currently resides in. @@ -1344,12 +1437,9 @@ impl AllTransactions { /// Caution: This assumes that mutually exclusive invariant is always true for the same sender. #[inline] fn contains_conflicting_transaction(&self, tx: &ValidPoolTransaction) -> bool { - let mut iter = self.txs_iter(tx.transaction_id.sender); - if let Some((_, existing)) = iter.next() { - return tx.tx_type_conflicts_with(&existing.transaction) - } - // no existing transaction for this sender - false + self.txs_iter(tx.transaction_id.sender) + .next() + .map_or(false, |(_, existing)| tx.tx_type_conflicts_with(&existing.transaction)) } /// Additional checks for a new transaction. 
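The consecutive-nonce walk in `get_highest_consecutive_transaction_by_sender` stops at the first nonce gap. A self-contained sketch of the same scan over a plain sorted map (illustrative types only, not the pool's `descendant_txs_inclusive` machinery):

```rust
use std::collections::BTreeMap;

/// Returns the highest nonce reachable from `on_chain_nonce` without a gap.
fn highest_consecutive(txs: &BTreeMap<u64, &str>, on_chain_nonce: u64) -> Option<u64> {
    let mut next_expected = on_chain_nonce;
    let mut last = None;
    // Walk all pooled nonces >= the on-chain nonce in ascending order.
    for (&nonce, _tx) in txs.range(on_chain_nonce..) {
        if nonce != next_expected {
            break; // first gap found, stop here
        }
        next_expected = nonce + 1;
        last = Some(nonce);
    }
    last
}

fn main() {
    let txs = BTreeMap::from([(0, "a"), (1, "b"), (2, "c"), (4, "d")]);
    assert_eq!(highest_consecutive(&txs, 0), Some(2)); // stops before the gap at 3
    assert_eq!(highest_consecutive(&txs, 4), Some(4));
    assert_eq!(highest_consecutive(&txs, 3), None); // nonce 3 itself is missing
}
```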
@@ -1363,11 +1453,15 @@ impl AllTransactions { fn ensure_valid( &self, transaction: ValidPoolTransaction, + on_chain_nonce: u64, ) -> Result, InsertErr> { if !self.local_transactions_config.is_local(transaction.origin, transaction.sender()) { let current_txs = self.tx_counter.get(&transaction.sender_id()).copied().unwrap_or_default(); - if current_txs >= self.max_account_slots { + + // Reject transactions if sender's capacity is exceeded. + // If transaction's nonce matches on-chain nonce always let it through + if current_txs >= self.max_account_slots && transaction.nonce() > on_chain_nonce { return Err(InsertErr::ExceededSenderTransactionsCapacity { transaction: Arc::new(transaction), }) @@ -1449,52 +1543,6 @@ impl AllTransactions { Ok(new_blob_tx) } - /// Returns true if the replacement candidate is underpriced and can't replace the existing - /// transaction. - #[inline] - fn is_underpriced( - existing_transaction: &ValidPoolTransaction, - maybe_replacement: &ValidPoolTransaction, - price_bumps: &PriceBumpConfig, - ) -> bool { - let price_bump = price_bumps.price_bump(existing_transaction.tx_type()); - - if maybe_replacement.max_fee_per_gas() <= - existing_transaction.max_fee_per_gas() * (100 + price_bump) / 100 - { - return true - } - - let existing_max_priority_fee_per_gas = - existing_transaction.transaction.max_priority_fee_per_gas().unwrap_or(0); - let replacement_max_priority_fee_per_gas = - maybe_replacement.transaction.max_priority_fee_per_gas().unwrap_or(0); - - if replacement_max_priority_fee_per_gas <= - existing_max_priority_fee_per_gas * (100 + price_bump) / 100 && - existing_max_priority_fee_per_gas != 0 && - replacement_max_priority_fee_per_gas != 0 - { - return true - } - - // check max blob fee per gas - if let Some(existing_max_blob_fee_per_gas) = - existing_transaction.transaction.max_fee_per_blob_gas() - { - // this enforces that blob txs can only be replaced by blob txs - let replacement_max_blob_fee_per_gas = - maybe_replacement.transaction.max_fee_per_blob_gas().unwrap_or(0); - if replacement_max_blob_fee_per_gas <= - existing_max_blob_fee_per_gas * (100 + price_bump) / 100 - { - return true - } - } - - false - } - /// Inserts a new _valid_ transaction into the pool. /// /// If the transaction already exists, it will be replaced if not underpriced. 
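The `on_chain_nonce` parameter added to `ensure_valid` relaxes the slot-capacity check: a sender already at `max_account_slots` can still submit the transaction whose nonce equals its on-chain nonce, so the account's executable head is never locked out (the new `unblocked_tx` assertion in the test below exercises exactly this). A condensed sketch of the rule, with illustrative names; the real check also exempts local transactions entirely:

```rust
/// True if the sender is over its slot limit *and* the incoming transaction
/// is not the next executable one (i.e. its nonce is above the on-chain nonce).
fn exceeds_capacity(
    current_txs: usize,
    max_account_slots: usize,
    tx_nonce: u64,
    on_chain_nonce: u64,
) -> bool {
    current_txs >= max_account_slots && tx_nonce > on_chain_nonce
}

fn main() {
    // Sender at the limit: a later-nonce (queued) transaction is rejected...
    assert!(exceeds_capacity(16, 16, 5, 0));
    // ...but the transaction matching the on-chain nonce is still admitted.
    assert!(!exceeds_capacity(16, 16, 0, 0));
}
```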
@@ -1534,7 +1582,7 @@ impl AllTransactions { ) -> InsertResult { assert!(on_chain_nonce <= transaction.nonce(), "Invalid transaction"); - let mut transaction = self.ensure_valid(transaction)?; + let mut transaction = self.ensure_valid(transaction, on_chain_nonce)?; let inserted_tx_id = *transaction.id(); let mut state = TxState::default(); @@ -1609,8 +1657,7 @@ impl AllTransactions { let maybe_replacement = transaction.as_ref(); // Ensure the new transaction is not underpriced - if Self::is_underpriced(existing_transaction, maybe_replacement, &self.price_bumps) - { + if existing_transaction.is_underpriced(maybe_replacement, &self.price_bumps) { return Err(InsertErr::Underpriced { transaction: pool_tx.transaction, existing: *entry.get().transaction.hash(), @@ -2573,6 +2620,7 @@ mod tests { let mut pool = AllTransactions::default(); let mut tx = MockTransaction::eip1559(); + let unblocked_tx = tx.clone(); for _ in 0..pool.max_account_slots { tx = tx.next(); pool.insert_tx(f.validated(tx.clone()), on_chain_balance, on_chain_nonce).unwrap(); } @@ -2586,6 +2634,10 @@ let err = pool.insert_tx(f.validated(tx.next()), on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::ExceededSenderTransactionsCapacity { .. })); + + assert!(pool + .insert_tx(f.validated(unblocked_tx), on_chain_balance, on_chain_nonce) + .is_ok()); } #[test] @@ -2723,6 +2775,47 @@ mod tests { assert_eq!(highest_tx.as_ref().transaction, tx1); } + #[test] + fn get_highest_consecutive_transaction_by_sender() { + // Set up a mock transaction factory and a new transaction pool. + let mut pool = TxPool::new(MockOrdering::default(), PoolConfig::default()); + let mut f = MockTransactionFactory::default(); + + // Create transactions with nonces 0, 1, 2, 4, 5, 8, 9. + let sender = Address::random(); + let txs: Vec<_> = vec![0, 1, 2, 4, 5, 8, 9]; + for nonce in txs { + let mut mock_tx = MockTransaction::eip1559(); + mock_tx.set_sender(sender); + mock_tx.set_nonce(nonce); + + let validated_tx = f.validated(mock_tx); + pool.add_transaction(validated_tx, U256::from(1000), 0).unwrap(); + } + + // Get last consecutive transaction + let sender_id = f.ids.sender_id(&sender).unwrap(); + let next_tx = + pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(0)); + assert_eq!(next_tx.map(|tx| tx.nonce()), Some(2), "Expected nonce 2 for on-chain nonce 0"); + + let next_tx = + pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(4)); + assert_eq!(next_tx.map(|tx| tx.nonce()), Some(5), "Expected nonce 5 for on-chain nonce 4"); + + let next_tx = + pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(5)); + assert_eq!(next_tx.map(|tx| tx.nonce()), Some(5), "Expected nonce 5 for on-chain nonce 5"); + + // Update the tracked nonce + let mut info = SenderInfo::default(); + info.update(8, U256::ZERO); + pool.sender_info.insert(sender_id, info); + let next_tx = + pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(5)); + assert_eq!(next_tx.map(|tx| tx.nonce()), Some(9), "Expected nonce 9 for on-chain nonce 8"); + } + #[test] fn discard_nonce_too_low() { let mut f = MockTransactionFactory::default(); @@ -2963,6 +3056,148 @@ mod tests { assert_eq!(vec![v1.nonce()], pool_txs); } #[test] + fn test_remove_transactions() { + let on_chain_balance = U256::from(10_000); + let on_chain_nonce = 0; + let mut f = MockTransactionFactory::default(); + let mut pool = TxPool::new(MockOrdering::default(), Default::default()); + + let 
tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit(); + let tx_1 = tx_0.next(); + let tx_2 = MockTransaction::eip1559().set_gas_price(100).inc_limit(); + let tx_3 = tx_2.next(); + + // Create 4 transactions + let v0 = f.validated(tx_0); + let v1 = f.validated(tx_1); + let v2 = f.validated(tx_2); + let v3 = f.validated(tx_3); + + // Add them to the pool + let _res = pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v1.clone(), on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v2.clone(), on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v3.clone(), on_chain_balance, on_chain_nonce).unwrap(); + + assert_eq!(0, pool.queued_transactions().len()); + assert_eq!(4, pool.pending_transactions().len()); + + pool.remove_transactions(vec![*v0.hash(), *v2.hash()]); + + assert_eq!(0, pool.queued_transactions().len()); + assert_eq!(2, pool.pending_transactions().len()); + assert!(pool.contains(v1.hash())); + assert!(pool.contains(v3.hash())); + } + + #[test] + fn test_remove_transactions_and_descendants() { + let on_chain_balance = U256::from(10_000); + let on_chain_nonce = 0; + let mut f = MockTransactionFactory::default(); + let mut pool = TxPool::new(MockOrdering::default(), Default::default()); + + let tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit(); + let tx_1 = tx_0.next(); + let tx_2 = MockTransaction::eip1559().set_gas_price(100).inc_limit(); + let tx_3 = tx_2.next(); + let tx_4 = tx_3.next(); + + // Create 5 transactions + let v0 = f.validated(tx_0); + let v1 = f.validated(tx_1); + let v2 = f.validated(tx_2); + let v3 = f.validated(tx_3); + let v4 = f.validated(tx_4); + + // Add them to the pool + let _res = pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v1, on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v2.clone(), on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v3, on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v4, on_chain_balance, on_chain_nonce).unwrap(); + + assert_eq!(0, pool.queued_transactions().len()); + assert_eq!(5, pool.pending_transactions().len()); + + pool.remove_transactions_and_descendants(vec![*v0.hash(), *v2.hash()]); + + assert_eq!(0, pool.queued_transactions().len()); + assert_eq!(0, pool.pending_transactions().len()); + } + #[test] + fn test_remove_descendants() { + let on_chain_balance = U256::from(10_000); + let on_chain_nonce = 0; + let mut f = MockTransactionFactory::default(); + let mut pool = TxPool::new(MockOrdering::default(), Default::default()); + + let tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit(); + let tx_1 = tx_0.next(); + let tx_2 = tx_1.next(); + let tx_3 = tx_2.next(); + + // Create 4 transactions + let v0 = f.validated(tx_0); + let v1 = f.validated(tx_1); + let v2 = f.validated(tx_2); + let v3 = f.validated(tx_3); + + // Add them to the pool + let _res = pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v1, on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v2, on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v3, on_chain_balance, on_chain_nonce).unwrap(); + + assert_eq!(0, pool.queued_transactions().len()); + assert_eq!(4, pool.pending_transactions().len()); + + let mut removed = Vec::new(); + 
pool.remove_transaction(v0.id()); + pool.remove_descendants(v0.id(), &mut removed); + + assert_eq!(0, pool.queued_transactions().len()); + assert_eq!(0, pool.pending_transactions().len()); + assert_eq!(3, removed.len()); + } + #[test] + fn test_remove_transactions_by_sender() { + let on_chain_balance = U256::from(10_000); + let on_chain_nonce = 0; + let mut f = MockTransactionFactory::default(); + let mut pool = TxPool::new(MockOrdering::default(), Default::default()); + + let tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit(); + let tx_1 = tx_0.next(); + let tx_2 = MockTransaction::eip1559().set_gas_price(100).inc_limit(); + let tx_3 = tx_2.next(); + let tx_4 = tx_3.next(); + + // Create 5 transactions + let v0 = f.validated(tx_0); + let v1 = f.validated(tx_1); + let v2 = f.validated(tx_2); + let v3 = f.validated(tx_3); + let v4 = f.validated(tx_4); + + // Add them to the pool + let _res = pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v1.clone(), on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v2.clone(), on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v3, on_chain_balance, on_chain_nonce).unwrap(); + let _res = pool.add_transaction(v4, on_chain_balance, on_chain_nonce).unwrap(); + + assert_eq!(0, pool.queued_transactions().len()); + assert_eq!(5, pool.pending_transactions().len()); + + pool.remove_transactions_by_sender(v2.sender_id()); + + assert_eq!(0, pool.queued_transactions().len()); + assert_eq!(2, pool.pending_transactions().len()); + assert!(pool.contains(v0.hash())); + assert!(pool.contains(v1.hash())); + } + #[test] fn wrong_best_order_of_transactions() { let on_chain_balance = U256::from(10_000); let mut on_chain_nonce = 0; diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index d51bf80270..858098ec91 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -1,12 +1,10 @@ use crate::EthPooledTransaction; use alloy_consensus::{TxEip1559, TxEip4844, TxLegacy}; -use alloy_eips::{eip2718::Encodable2718, eip2930::AccessList}; +use alloy_eips::{eip1559::MIN_PROTOCOL_BASE_FEE, eip2718::Encodable2718, eip2930::AccessList}; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use rand::Rng; use reth_chainspec::MAINNET; -use reth_primitives::{ - constants::MIN_PROTOCOL_BASE_FEE, sign_message, Transaction, TransactionSigned, -}; +use reth_primitives::{sign_message, Transaction, TransactionSigned}; /// A generator for transactions for testing purposes. 
#[derive(Debug)] diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 6b470bb6fb..a272e8d00e 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -7,20 +7,25 @@ use crate::{ CoinbaseTipOrdering, EthBlobTransactionSidecar, EthPoolTransaction, PoolTransaction, ValidPoolTransaction, }; -use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}; -use alloy_eips::eip2930::AccessList; -use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256}; +use alloy_consensus::{ + constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID}, + TxEip1559, TxEip2930, TxEip4844, TxLegacy, +}; +use alloy_eips::{ + eip1559::MIN_PROTOCOL_BASE_FEE, + eip2930::AccessList, + eip4844::{BlobTransactionValidationError, DATA_GAS_PER_BLOB}, +}; +use alloy_primitives::{Address, Bytes, ChainId, Signature, TxHash, TxKind, B256, U256}; use paste::paste; use rand::{ distributions::{Uniform, WeightedIndex}, prelude::Distribution, }; use reth_primitives::{ - constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, - transaction::TryFromRecoveredTransactionError, - BlobTransactionSidecar, BlobTransactionValidationError, PooledTransactionsElementEcRecovered, - Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, + transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, + PooledTransactionsElementEcRecovered, Transaction, TransactionSigned, + TransactionSignedEcRecovered, TxType, }; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; @@ -760,7 +765,7 @@ impl EthPoolTransaction for MockTransaction { &self, _blob: &BlobTransactionSidecar, _settings: &reth_primitives::kzg::KzgSettings, - ) -> Result<(), reth_primitives::BlobTransactionValidationError> { + ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { match &self { Self::Eip4844 { .. 
} => Ok(()), _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())), @@ -1338,10 +1343,8 @@ impl MockTransactionDistribution { nonce_range: Range, rng: &mut impl rand::Rng, ) -> MockTransactionSet { - let mut txs = Vec::new(); - for nonce in nonce_range { - txs.push(self.tx(nonce, rng).with_sender(sender)); - } + let txs = + nonce_range.map(|nonce| self.tx(nonce, rng).with_sender(sender)).collect::>(); MockTransactionSet::new(txs) } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index adae238e46..512e3e31f1 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -2,21 +2,28 @@ use crate::{ blobstore::BlobStoreError, - error::PoolResult, + error::{InvalidPoolTransactionError, PoolResult}, pool::{state::SubPool, BestTransactionFilter, TransactionEvents}, validate::ValidPoolTransaction, AllTransactionsEvents, }; -use alloy_eips::{eip2718::Encodable2718, eip2930::AccessList, eip4844::BlobAndProofV1}; +use alloy_consensus::{ + constants::{EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, + Transaction as _, +}; +use alloy_eips::{ + eip2718::Encodable2718, + eip2930::AccessList, + eip4844::{BlobAndProofV1, BlobTransactionValidationError}, +}; use alloy_primitives::{Address, TxHash, TxKind, B256, U256}; use futures_util::{ready, Stream}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, - BlobTransactionValidationError, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered, - EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + PooledTransactionsElement, PooledTransactionsElementEcRecovered, SealedBlock, Transaction, + TransactionSignedEcRecovered, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -69,7 +76,6 @@ pub trait TransactionPool: Send + Sync + Clone { /// Imports all _external_ transactions /// - /// /// Consumer: Utility fn add_external_transactions( &self, @@ -80,7 +86,7 @@ pub trait TransactionPool: Send + Sync + Clone { /// Adds an _unvalidated_ transaction into the pool and subscribe to state changes. /// - /// This is the same as [TransactionPool::add_transaction] but returns an event stream for the + /// This is the same as [`TransactionPool::add_transaction`] but returns an event stream for the /// given transaction. /// /// Consumer: Custom @@ -246,16 +252,6 @@ pub trait TransactionPool: Send + Sync + Clone { &self, ) -> Box>>>; - /// Returns an iterator that yields transactions that are ready for block production with the - /// given base fee. - /// - /// Consumer: Block production - #[deprecated(note = "Use best_transactions_with_attributes instead.")] - fn best_transactions_with_base_fee( - &self, - base_fee: u64, - ) -> Box>>>; - /// Returns an iterator that yields transactions that are ready for block production with the /// given base fee and optional blob fee attributes. /// @@ -291,16 +287,32 @@ pub trait TransactionPool: Send + Sync + Clone { /// Consumer: RPC fn all_transactions(&self) -> AllPoolTransactions; + /// Removes all transactions corresponding to the given hashes. + /// + /// Consumer: Utility + fn remove_transactions( + &self, + hashes: Vec, + ) -> Vec>>; + /// Removes all transactions corresponding to the given hashes. /// /// Also removes all _dependent_ transactions. 
/// /// Consumer: Utility - fn remove_transactions( + fn remove_transactions_and_descendants( &self, hashes: Vec, ) -> Vec>>; + /// Removes all transactions from the given sender + /// + /// Consumer: Utility + fn remove_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>>; + /// Retains only those hashes that are unknown to the pool. /// In other words, removes all transactions from the given set that are currently present in /// the pool. Returns hashes already known to the pool. @@ -334,12 +346,45 @@ pub trait TransactionPool: Send + Sync + Clone { sender: Address, ) -> Vec>>; + /// Returns all pending transactions filtered by predicate + fn get_pending_transactions_with_predicate( + &self, + predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>>; + + /// Returns all pending transactions sent by a given user + fn get_pending_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>>; + + /// Returns all queued transactions sent by a given user + fn get_queued_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>>; + /// Returns the highest transaction sent by a given user fn get_highest_transaction_by_sender( &self, sender: Address, ) -> Option>>; + /// Returns the transaction with the highest nonce that is executable given the on chain nonce. + /// In other words the highest non nonce gapped transaction. + /// + /// Note: The next pending pooled transaction must have the on chain nonce. + /// + /// For example, for a given on chain nonce of `5`, the next transaction must have that nonce. + /// If the pool contains txs `[5,6,7]` this returns tx `7`. + /// If the pool contains txs `[6,7]` this returns `None` because the next valid nonce (5) is + /// missing, which means txs `[6,7]` are nonce gapped. + fn get_highest_consecutive_transaction_by_sender( + &self, + sender: Address, + on_chain_nonce: u64, + ) -> Option>>; + /// Returns a transaction sent by a given user and a nonce fn get_transaction_by_sender_and_nonce( &self, @@ -398,7 +443,10 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns the [BlobTransactionSidecar] for the given transaction hash if it exists in the blob /// store. - fn get_blob(&self, tx_hash: TxHash) -> Result, BlobStoreError>; + fn get_blob( + &self, + tx_hash: TxHash, + ) -> Result>, BlobStoreError>; /// Returns all [BlobTransactionSidecar] for the given transaction hashes if they exists in the /// blob store. @@ -408,7 +456,7 @@ pub trait TransactionPool: Send + Sync + Clone { fn get_all_blobs( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError>; + ) -> Result)>, BlobStoreError>; /// Returns the exact [BlobTransactionSidecar] for the given transaction hashes in the order /// they were requested. @@ -417,7 +465,7 @@ pub trait TransactionPool: Send + Sync + Clone { fn get_all_blobs_exact( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError>; + ) -> Result>, BlobStoreError>; /// Return the [`BlobTransactionSidecar`]s for a list of blob versioned hashes. fn get_blobs_for_versioned_hashes( @@ -714,6 +762,15 @@ pub trait BestTransactions: Iterator + Send { /// listen to pool updates. fn no_updates(&mut self); + /// Convenience function for [`Self::no_updates`] that returns the iterator again. + fn without_updates(mut self) -> Self + where + Self: Sized, + { + self.no_updates(); + self + } + /// Skip all blob transactions. 
/// /// There's only limited blob space available in a block, once exhausted, EIP-4844 transactions @@ -722,12 +779,36 @@ pub trait BestTransactions: Iterator + Send { /// If called then the iterator will no longer yield blob transactions. /// /// Note: this will also exclude any transactions that depend on blob transactions. - fn skip_blobs(&mut self); + fn skip_blobs(&mut self) { + self.set_skip_blobs(true); + } /// Controls whether the iterator skips blob transactions or not. /// /// If set to true, no blob transactions will be returned. fn set_skip_blobs(&mut self, skip_blobs: bool); + + /// Convenience function for [`Self::skip_blobs`] that returns the iterator again. + fn without_blobs(mut self) -> Self + where + Self: Sized, + { + self.skip_blobs(); + self + } + + /// Creates an iterator which uses a closure to determine whether a transaction should be + /// returned by the iterator. + /// + /// All items the closure returns false for are marked as invalid via [`Self::mark_invalid`] and + /// descendant transactions will be skipped. + fn filter_transactions
(self, predicate: P) -> BestTransactionFilter + where + P: FnMut(&Self::Item) -> bool, + Self: Sized, + { + BestTransactionFilter::new(self, predicate) + } } impl BestTransactions for Box @@ -751,25 +832,6 @@ where } } -/// A subtrait on the [`BestTransactions`] trait that allows to filter transactions. -pub trait BestTransactionsFilter: BestTransactions { - /// Creates an iterator which uses a closure to determine if a transaction should be yielded. - /// - /// Given an element the closure must return true or false. The returned iterator will yield - /// only the elements for which the closure returns true. - /// - /// Descendant transactions will be skipped. - fn filter
(self, predicate: P) -> BestTransactionFilter - where - P: FnMut(&Self::Item) -> bool, - Self: Sized, - { - BestTransactionFilter::new(self, predicate) - } -} - -impl BestTransactionsFilter for T where T: BestTransactions {} - /// A no-op implementation that yields no transactions. impl BestTransactions for std::iter::Empty { fn mark_invalid(&mut self, _tx: &T) {} @@ -781,6 +843,36 @@ impl BestTransactions for std::iter::Empty { fn set_skip_blobs(&mut self, _skip_blobs: bool) {} } +/// A filter that allows to check if a transaction satisfies a set of conditions +pub trait TransactionFilter { + /// The type of the transaction to check. + type Transaction; + + /// Returns true if the transaction satisfies the conditions. + fn is_valid(&self, transaction: &Self::Transaction) -> bool; +} + +/// A no-op implementation of [`TransactionFilter`] which +/// marks all transactions as valid. +#[derive(Debug, Clone)] +pub struct NoopTransactionFilter(std::marker::PhantomData); + +// We can't derive Default because this forces T to be +// Default as well, which isn't necessary. +impl Default for NoopTransactionFilter { + fn default() -> Self { + Self(std::marker::PhantomData) + } +} + +impl TransactionFilter for NoopTransactionFilter { + type Transaction = T; + + fn is_valid(&self, _transaction: &Self::Transaction) -> bool { + true + } +} + /// A Helper type that bundles the best transactions attributes together. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct BestTransactionsAttributes { @@ -928,6 +1020,26 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Returns `chain_id` fn chain_id(&self) -> Option; + + /// Ensures that the transaction's code size does not exceed the provided `max_init_code_size`. + /// + /// This is specifically relevant for contract creation transactions ([`TxKind::Create`]), + /// where the input data contains the initialization code. If the input code size exceeds + /// the configured limit, an [`InvalidPoolTransactionError::ExceedsMaxInitCodeSize`] error is + /// returned. + fn ensure_max_init_code_size( + &self, + max_init_code_size: usize, + ) -> Result<(), InvalidPoolTransactionError> { + if self.kind().is_create() && self.input().len() > max_init_code_size { + Err(InvalidPoolTransactionError::ExceedsMaxInitCodeSize( + self.size(), + max_init_code_size, + )) + } else { + Ok(()) + } + } } /// Super trait for transactions that can be converted to and from Eth transactions @@ -1151,7 +1263,7 @@ impl PoolTransaction for EthPooledTransaction { /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`. /// For legacy transactions: `gas_price - base_fee`. fn effective_tip_per_gas(&self, base_fee: u64) -> Option { - self.transaction.effective_tip_per_gas(Some(base_fee)) + self.transaction.effective_tip_per_gas(base_fee) } /// Returns the max priority fee per gas if the transaction is an EIP-1559 transaction, and @@ -1167,7 +1279,7 @@ impl PoolTransaction for EthPooledTransaction { } fn input(&self) -> &[u8] { - self.transaction.input().as_ref() + self.transaction.input() } /// Returns a measurement of the heap usage of this type and all its internals. 
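A usage sketch for the new `TransactionFilter` trait above: implementors only provide `is_valid`. The trait is restated here so the example compiles standalone, and `DemoTx`/`MaxGasFilter` are hypothetical:

```rust
/// Local restatement of the pool's `TransactionFilter` trait.
trait TransactionFilter {
    type Transaction;
    fn is_valid(&self, transaction: &Self::Transaction) -> bool;
}

/// Hypothetical transaction carrying just the field the filter inspects.
struct DemoTx {
    gas_limit: u64,
}

/// Rejects transactions whose gas limit exceeds a configured maximum.
struct MaxGasFilter {
    max_gas: u64,
}

impl TransactionFilter for MaxGasFilter {
    type Transaction = DemoTx;

    fn is_valid(&self, tx: &Self::Transaction) -> bool {
        tx.gas_limit <= self.max_gas
    }
}

fn main() {
    let filter = MaxGasFilter { max_gas: 1_000_000 };
    assert!(filter.is_valid(&DemoTx { gas_limit: 21_000 }));
    assert!(!filter.is_valid(&DemoTx { gas_limit: 30_000_000 }));
}
```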
@@ -1237,7 +1349,7 @@ impl TryFrom for EthPooledTransaction { } EIP4844_TX_TYPE_ID => { // doesn't have a blob sidecar - return Err(TryFromRecoveredTransactionError::BlobSidecarMissing) + return Err(TryFromRecoveredTransactionError::BlobSidecarMissing); } unsupported => { // unsupported transaction type @@ -1389,7 +1501,9 @@ impl Stream for NewSubpoolTransactionStream { mod tests { use super::*; use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; - use reth_primitives::{constants::eip4844::DATA_GAS_PER_BLOB, Signature, TransactionSigned}; + use alloy_eips::eip4844::DATA_GAS_PER_BLOB; + use alloy_primitives::Signature; + use reth_primitives::TransactionSigned; #[test] fn test_pool_size_invariants() { @@ -1541,4 +1655,27 @@ mod tests { assert_eq!(pooled_tx.blob_sidecar, EthBlobTransactionSidecar::None); assert_eq!(pooled_tx.cost, U256::from(100) + U256::from(10 * 1000)); } + + #[test] + fn test_pooled_transaction_limit() { + // No limit should never exceed + let limit_none = GetPooledTransactionLimit::None; + // Any size should return false + assert!(!limit_none.exceeds(1000)); + + // Size limit of 2MB (2 * 1024 * 1024 bytes) + let size_limit_2mb = GetPooledTransactionLimit::ResponseSizeSoftLimit(2 * 1024 * 1024); + + // Test with size below the limit + // 1MB is below 2MB, should return false + assert!(!size_limit_2mb.exceeds(1024 * 1024)); + + // Test with size exactly at the limit + // 2MB equals the limit, should return false + assert!(!size_limit_2mb.exceeds(2 * 1024 * 1024)); + + // Test with size exceeding the limit + // 3MB is above the 2MB limit, should return true + assert!(size_limit_2mb.exceeds(3 * 1024 * 1024)); + } } diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 49165f189a..62e9f3f291 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -8,15 +8,16 @@ use crate::{ }, traits::TransactionOrigin, validate::{ValidTransaction, ValidationTask, MAX_INIT_CODE_BYTE_SIZE}, - EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, PoolTransaction, + EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, }; -use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_primitives::{ - constants::eip4844::MAX_BLOBS_PER_BLOCK, GotExpected, InvalidTransactionError, SealedBlock, +use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; +use alloy_eips::eip4844::MAX_BLOBS_PER_BLOCK; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_primitives::{GotExpected, InvalidTransactionError, SealedBlock}; use reth_storage_api::{AccountReader, StateProviderFactory}; use reth_tasks::TaskSpawner; use revm::{ @@ -221,7 +222,7 @@ where // Check whether the init code size has been exceeded. 
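+ // The cap is `MAX_INIT_CODE_BYTE_SIZE`, the EIP-3860 init code size limit enforced from Shanghai onward.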
if self.fork_tracker.is_shanghai_activated() { - if let Err(err) = ensure_max_init_code_size(&transaction, MAX_INIT_CODE_BYTE_SIZE) { + if let Err(err) = transaction.ensure_max_init_code_size(MAX_INIT_CODE_BYTE_SIZE) { return TransactionValidationOutcome::Invalid(transaction, err) } } @@ -639,6 +640,7 @@ impl EthTransactionValidatorBuilder { pub fn with_head_timestamp(mut self, timestamp: u64) -> Self { self.cancun = self.chain_spec.is_cancun_active_at_timestamp(timestamp); self.shanghai = self.chain_spec.is_shanghai_active_at_timestamp(timestamp); + self.prague = self.chain_spec.is_prague_active_at_timestamp(timestamp); self } @@ -708,7 +710,7 @@ impl EthTransactionValidatorBuilder { EthTransactionValidator { inner: Arc::new(inner) } } - /// Builds a the [`EthTransactionValidator`] and spawns validation tasks via the + /// Builds a [`EthTransactionValidator`] and spawns validation tasks via the /// [`TransactionValidationTaskExecutor`] /// /// The validator will spawn `additional_tasks` additional tasks for validation. @@ -780,22 +782,6 @@ impl ForkTracker { } } -/// Ensure that the code size is not greater than `max_init_code_size`. -/// `max_init_code_size` should be configurable so this will take it as an argument. -pub fn ensure_max_init_code_size( - transaction: &T, - max_init_code_size: usize, -) -> Result<(), InvalidPoolTransactionError> { - if transaction.kind().is_create() && transaction.input().len() > max_init_code_size { - Err(InvalidPoolTransactionError::ExceedsMaxInitCodeSize( - transaction.size(), - max_init_code_size, - )) - } else { - Ok(()) - } -} - /// Ensures that gas limit of the transaction exceeds the intrinsic gas of the transaction. /// /// Caution: This only checks past the Merge hardfork. @@ -830,8 +816,8 @@ pub fn ensure_intrinsic_gas( mod tests { use super::*; use crate::{ - blobstore::InMemoryBlobStore, error::PoolErrorKind, CoinbaseTipOrdering, - EthPooledTransaction, Pool, TransactionPool, + blobstore::InMemoryBlobStore, error::PoolErrorKind, traits::PoolTransaction, + CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionPool, }; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, U256}; diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 4395cc9790..4a82a1a148 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -4,6 +4,7 @@ use crate::{ error::InvalidPoolTransactionError, identifier::{SenderId, TransactionId}, traits::{PoolTransaction, TransactionOrigin}, + PriceBumpConfig, }; use alloy_primitives::{Address, TxHash, B256, U256}; use futures_util::future::Either; @@ -372,6 +373,58 @@ impl ValidPoolTransaction { pub(crate) fn tx_type_conflicts_with(&self, other: &Self) -> bool { self.is_eip4844() != other.is_eip4844() } + + /// Determines whether a candidate transaction (`maybe_replacement`) is underpriced compared to + /// an existing transaction in the pool. + /// + /// A transaction is considered underpriced if it doesn't meet the required fee bump threshold. + /// This applies to both standard gas fees and, for blob-carrying transactions (EIP-4844), + /// the blob-specific fees. + #[inline] + pub(crate) fn is_underpriced( + &self, + maybe_replacement: &Self, + price_bumps: &PriceBumpConfig, + ) -> bool { + // Retrieve the required price bump percentage for this type of transaction. + // + // The bump is different for EIP-4844 and other transactions. See `PriceBumpConfig`. 
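+ // + // For example, with a 10% bump an existing max fee of 100 makes any + // replacement paying 110 or less underpriced; it must pay at least 111.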
+ let price_bump = price_bumps.price_bump(self.tx_type()); + + // Check if the max fee per gas is underpriced. + if maybe_replacement.max_fee_per_gas() <= self.max_fee_per_gas() * (100 + price_bump) / 100 + { + return true + } + + let existing_max_priority_fee_per_gas = + self.transaction.max_priority_fee_per_gas().unwrap_or_default(); + let replacement_max_priority_fee_per_gas = + maybe_replacement.transaction.max_priority_fee_per_gas().unwrap_or_default(); + + // Check max priority fee per gas (relevant for EIP-1559 transactions only) + if existing_max_priority_fee_per_gas != 0 && + replacement_max_priority_fee_per_gas != 0 && + replacement_max_priority_fee_per_gas <= + existing_max_priority_fee_per_gas * (100 + price_bump) / 100 + { + return true + } + + // Check max blob fee per gas + if let Some(existing_max_blob_fee_per_gas) = self.transaction.max_fee_per_blob_gas() { + // This enforces that blob txs can only be replaced by blob txs + let replacement_max_blob_fee_per_gas = + maybe_replacement.transaction.max_fee_per_blob_gas().unwrap_or_default(); + if replacement_max_blob_fee_per_gas <= + existing_max_blob_fee_per_gas * (100 + price_bump) / 100 + { + return true + } + } + + false + } } impl>> ValidPoolTransaction { diff --git a/crates/transaction-pool/tests/it/best.rs b/crates/transaction-pool/tests/it/best.rs new file mode 100644 index 0000000000..20e8336764 --- /dev/null +++ b/crates/transaction-pool/tests/it/best.rs @@ -0,0 +1,11 @@ +//! Best transaction and filter testing + +use reth_transaction_pool::{noop::NoopTransactionPool, BestTransactions, TransactionPool}; + +#[test] +fn test_best_transactions() { + let noop = NoopTransactionPool::default(); + let mut best = + noop.best_transactions().filter_transactions(|_| true).without_blobs().without_updates(); + assert!(best.next().is_none()); +} diff --git a/crates/transaction-pool/tests/it/evict.rs b/crates/transaction-pool/tests/it/evict.rs index c7438c9964..fea50962fd 100644 --- a/crates/transaction-pool/tests/it/evict.rs +++ b/crates/transaction-pool/tests/it/evict.rs @@ -1,8 +1,8 @@ //! Transaction pool eviction tests. 
+use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use alloy_primitives::{Address, B256}; use rand::distributions::Uniform; -use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use reth_transaction_pool::{ error::PoolErrorKind, test_utils::{ diff --git a/crates/transaction-pool/tests/it/main.rs b/crates/transaction-pool/tests/it/main.rs index ead33a328d..7db2b14c95 100644 --- a/crates/transaction-pool/tests/it/main.rs +++ b/crates/transaction-pool/tests/it/main.rs @@ -9,4 +9,6 @@ mod listeners; #[cfg(feature = "test-utils")] mod pending; +mod best; + const fn main() {} diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 0bd28140f4..0616e25971 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -41,8 +41,20 @@ hash-db = "=0.15.2" plain_hasher = "0.2" [features] -test-utils = ["dep:plain_hasher", "dep:hash-db", "arbitrary"] +test-utils = [ + "dep:plain_hasher", + "dep:hash-db", + "arbitrary", + "reth-primitives-traits/test-utils", + "reth-codecs/test-utils" +] arbitrary = [ - "alloy-trie/arbitrary", - "dep:arbitrary", + "alloy-trie/arbitrary", + "dep:arbitrary", + "reth-primitives-traits/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "nybbles/arbitrary", + "revm-primitives/arbitrary", + "reth-codecs/arbitrary" ] diff --git a/crates/trie/common/src/hash_builder/state.rs b/crates/trie/common/src/hash_builder/state.rs index 4679316727..c5cae21a1a 100644 --- a/crates/trie/common/src/hash_builder/state.rs +++ b/crates/trie/common/src/hash_builder/state.rs @@ -1,5 +1,5 @@ use crate::TrieMask; -use alloy_trie::{hash_builder::HashBuilderValue, HashBuilder}; +use alloy_trie::{hash_builder::HashBuilderValue, nodes::RlpNode, HashBuilder}; use bytes::Buf; use nybbles::Nibbles; use reth_codecs::Compact; @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; /// The hash builder state for storing in the database. /// Check the `reth-trie` crate for more info on hash builder. -#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] #[cfg_attr( feature = "arbitrary", derive(arbitrary::Arbitrary), @@ -16,10 +16,10 @@ use serde::{Deserialize, Serialize}; pub struct HashBuilderState { /// The current key. pub key: Vec, - /// The builder stack. - pub stack: Vec>, /// The current node value. pub value: HashBuilderValue, + /// The builder stack. + pub stack: Vec, /// Group masks. pub groups: Vec, @@ -112,7 +112,7 @@ impl Compact for HashBuilderState { let mut stack = Vec::with_capacity(stack_len); for _ in 0..stack_len { let item_len = buf.get_u16() as usize; - stack.push(Vec::from(&buf[..item_len])); + stack.push(RlpNode::from_raw(&buf[..item_len]).unwrap()); buf.advance(item_len); } @@ -154,7 +154,7 @@ mod tests { #[test] fn hash_builder_state_regression() { let mut state = HashBuilderState::default(); - state.stack.push(vec![]); + state.stack.push(Default::default()); let mut buf = vec![]; let len = state.clone().to_compact(&mut buf); let (decoded, _) = HashBuilderState::from_compact(&buf, len); diff --git a/crates/trie/common/src/key.rs b/crates/trie/common/src/key.rs new file mode 100644 index 0000000000..9e440d199f --- /dev/null +++ b/crates/trie/common/src/key.rs @@ -0,0 +1,18 @@ +use alloy_primitives::B256; +use revm_primitives::keccak256; + +/// Trait for hashing keys in state. 
+pub trait KeyHasher: Default + Clone + Send + Sync + 'static { + /// Hashes the given bytes into a 256-bit hash. + fn hash_key>(bytes: T) -> B256; +} + +/// A key hasher that uses the Keccak-256 hash function. +#[derive(Clone, Debug, Default)] +pub struct KeccakKeyHasher; + +impl KeyHasher for KeccakKeyHasher { + fn hash_key>(bytes: T) -> B256 { + keccak256(bytes) + } +} diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index bdec36028b..7645ebd3a1 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -14,6 +14,9 @@ pub mod hash_builder; mod account; pub use account::TrieAccount; +mod key; +pub use key::{KeccakKeyHasher, KeyHasher}; + mod nibbles; pub use nibbles::{Nibbles, StoredNibbles, StoredNibblesSubKey}; diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs index 991fb68f3c..cf94f135f5 100644 --- a/crates/trie/common/src/nibbles.rs +++ b/crates/trie/common/src/nibbles.rs @@ -19,6 +19,7 @@ pub use nybbles::Nibbles; Deserialize, derive_more::Index, )] +#[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))] pub struct StoredNibbles(pub Nibbles); impl From for StoredNibbles { @@ -74,6 +75,7 @@ impl Compact for StoredNibbles { /// The representation of nibbles of the merkle trie stored in the database. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord, Hash, Deref)] +#[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))] pub struct StoredNibblesSubKey(pub Nibbles); impl From for StoredNibblesSubKey { diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 8aca67f8d1..a94b2b96fb 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -1,6 +1,7 @@ //! Merkle trie proofs. use crate::{Nibbles, TrieAccount}; +use alloy_consensus::constants::KECCAK_EMPTY; use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; use alloy_rlp::{encode_fixed_size, Decodable, EMPTY_STRING_CODE}; use alloy_trie::{ @@ -9,13 +10,13 @@ use alloy_trie::{ EMPTY_ROOT_HASH, }; use itertools::Itertools; -use reth_primitives_traits::{constants::KECCAK_EMPTY, Account}; +use reth_primitives_traits::Account; use serde::{Deserialize, Serialize}; use std::collections::HashMap; /// The state multiproof of target accounts and multiproofs of their storage tries. /// Multiproof is effectively a state subtrie that only contains the nodes -/// in the paths of target accounts. +/// in the paths of target accounts. #[derive(Clone, Default, Debug)] pub struct MultiProof { /// State trie multiproof for requested accounts. 
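For illustration, a non-Keccak `KeyHasher` could be plugged in as follows. `Sha256KeyHasher` and the `sha2` dependency are hypothetical and not part of this change; the generic bound on `hash_key` spells out the `T: AsRef<[u8]>` signature from the trait definition above:

```rust
use alloy_primitives::B256;
use reth_trie_common::KeyHasher;
use sha2::{Digest, Sha256};

/// Hypothetical alternative hasher; this change itself only ships `KeccakKeyHasher`.
#[derive(Clone, Debug, Default)]
pub struct Sha256KeyHasher;

impl KeyHasher for Sha256KeyHasher {
    fn hash_key<T: AsRef<[u8]>>(bytes: T) -> B256 {
        // Hash the raw key bytes with SHA-256 and wrap the 32-byte digest.
        B256::from_slice(&Sha256::digest(bytes.as_ref()))
    }
}
```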
diff --git a/crates/trie/common/src/subnode.rs b/crates/trie/common/src/subnode.rs index 98ce76a323..c64b2317cf 100644 --- a/crates/trie/common/src/subnode.rs +++ b/crates/trie/common/src/subnode.rs @@ -51,16 +51,14 @@ impl Compact for StoredSubNode { buf.advance(key_len); let nibbles_exists = buf.get_u8() != 0; - let nibble = if nibbles_exists { Some(buf.get_u8()) } else { None }; + let nibble = nibbles_exists.then(|| buf.get_u8()); let node_exists = buf.get_u8() != 0; - let node = if node_exists { + let node = node_exists.then(|| { let (node, rest) = BranchNodeCompact::from_compact(buf, 0); buf = rest; - Some(node) - } else { - None - }; + node + }); (Self { key, nibble, node }, buf) } diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml index 6d322ba3ff..55fa9a851b 100644 --- a/crates/trie/db/Cargo.toml +++ b/crates/trie/db/Cargo.toml @@ -17,7 +17,6 @@ reth-primitives.workspace = true reth-execution-errors.workspace = true reth-db.workspace = true reth-db-api.workspace = true -reth-stages-types.workspace = true reth-storage-errors.workspace = true reth-trie-common.workspace = true reth-trie.workspace = true @@ -32,10 +31,7 @@ alloy-primitives.workspace = true tracing.workspace = true # misc -rayon.workspace = true derive_more.workspace = true -auto_impl.workspace = true -itertools.workspace = true # `metrics` feature reth-metrics = { workspace = true, optional = true } @@ -56,7 +52,8 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-storage-errors.workspace = true reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } reth-trie = { workspace = true, features = ["test-utils"] } -reth-node-types.workspace = true + +alloy-consensus.workspace = true # trie triehash = "0.8" @@ -64,16 +61,28 @@ triehash = "0.8" # misc proptest.workspace = true proptest-arbitrary-interop.workspace = true -tokio = { workspace = true, default-features = false, features = [ - "sync", - "rt", - "macros", -] } -tokio-stream.workspace = true serde_json.workspace = true similar-asserts.workspace = true [features] metrics = ["reth-metrics", "reth-trie/metrics", "dep:metrics"] -serde = ["dep:serde"] -test-utils = ["triehash", "reth-trie-common/test-utils"] +serde = [ + "dep:serde", + "reth-provider/serde", + "reth-trie/serde", + "alloy-consensus/serde", + "alloy-primitives/serde", + "revm/serde", + "similar-asserts/serde" +] +test-utils = [ + "triehash", + "reth-trie-common/test-utils", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-db/test-utils", + "reth-db-api/test-utils", + "reth-provider/test-utils", + "reth-trie/test-utils", + "revm/test-utils" +] diff --git a/crates/trie/db/src/commitment.rs b/crates/trie/db/src/commitment.rs new file mode 100644 index 0000000000..c608aefff8 --- /dev/null +++ b/crates/trie/db/src/commitment.rs @@ -0,0 +1,39 @@ +use crate::{ + DatabaseHashedCursorFactory, DatabaseProof, DatabaseStateRoot, DatabaseStorageRoot, + DatabaseTrieCursorFactory, DatabaseTrieWitness, +}; +use reth_db::transaction::DbTx; +use reth_trie::{ + proof::Proof, witness::TrieWitness, KeccakKeyHasher, KeyHasher, StateRoot, StorageRoot, +}; + +/// The `StateCommitment` trait provides associated types for state commitment operations. +pub trait StateCommitment: std::fmt::Debug + Send + Sync + Unpin + 'static { + /// The state root type. + type StateRoot<'a, TX: DbTx + 'a>: DatabaseStateRoot<'a, TX>; + /// The storage root type. + type StorageRoot<'a, TX: DbTx + 'a>: DatabaseStorageRoot<'a, TX>; + /// The state proof type. 
+ type StateProof<'a, TX: DbTx + 'a>: DatabaseProof<'a, TX>; + /// The state witness type. + type StateWitness<'a, TX: DbTx + 'a>: DatabaseTrieWitness<'a, TX>; + /// The key hasher type. + type KeyHasher: KeyHasher; +} + +/// The state commitment type for Ethereum's Merkle Patricia Trie. +#[derive(Debug)] +#[non_exhaustive] +pub struct MerklePatriciaTrie; + +impl StateCommitment for MerklePatriciaTrie { + type StateRoot<'a, TX: DbTx + 'a> = + StateRoot, DatabaseHashedCursorFactory<'a, TX>>; + type StorageRoot<'a, TX: DbTx + 'a> = + StorageRoot, DatabaseHashedCursorFactory<'a, TX>>; + type StateProof<'a, TX: DbTx + 'a> = + Proof, DatabaseHashedCursorFactory<'a, TX>>; + type StateWitness<'a, TX: DbTx + 'a> = + TrieWitness, DatabaseHashedCursorFactory<'a, TX>>; + type KeyHasher = KeccakKeyHasher; +} diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs index 3a9b1e3282..27c18af6cb 100644 --- a/crates/trie/db/src/lib.rs +++ b/crates/trie/db/src/lib.rs @@ -1,5 +1,6 @@ //! An integration of [`reth-trie`] with [`reth-db`]. +mod commitment; mod hashed_cursor; mod prefix_set; mod proof; @@ -8,6 +9,7 @@ mod storage; mod trie_cursor; mod witness; +pub use commitment::{MerklePatriciaTrie, StateCommitment}; pub use hashed_cursor::{ DatabaseHashedAccountCursor, DatabaseHashedCursorFactory, DatabaseHashedStorageCursor, }; diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs index 079fe39376..cd50503bc7 100644 --- a/crates/trie/db/src/prefix_set.rs +++ b/crates/trie/db/src/prefix_set.rs @@ -36,13 +36,13 @@ impl PrefixSetLoader<'_, TX> { // Walk account changeset and insert account prefixes. let mut account_changeset_cursor = self.cursor_read::()?; - let mut account_plain_state_cursor = self.cursor_read::()?; + let mut account_hashed_state_cursor = self.cursor_read::()?; for account_entry in account_changeset_cursor.walk_range(range.clone())? { let (_, AccountBeforeTx { address, .. }) = account_entry?; let hashed_address = keccak256(address); account_prefix_set.insert(Nibbles::unpack(hashed_address)); - if account_plain_state_cursor.seek_exact(address)?.is_none() { + if account_hashed_state_cursor.seek_exact(hashed_address)?.is_none() { destroyed_accounts.insert(hashed_address); } } diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 5acb9e0d1b..4d46183dfd 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -244,22 +244,24 @@ impl DatabaseHashedPostState for HashedPostState { } } - let hashed_accounts = HashMap::from_iter( - accounts.into_iter().map(|(address, info)| (keccak256(address), info)), - ); + let hashed_accounts = + accounts.into_iter().map(|(address, info)| (keccak256(address), info)).collect(); - let hashed_storages = HashMap::from_iter(storages.into_iter().map(|(address, storage)| { - ( - keccak256(address), - HashedStorage::from_iter( - // The `wiped` flag indicates only whether previous storage entries - // should be looked up in db or not. For reverts it's a noop since all - // wiped changes had been written as storage reverts. - false, - storage.into_iter().map(|(slot, value)| (keccak256(slot), value)), - ), - ) - })); + let hashed_storages = storages + .into_iter() + .map(|(address, storage)| { + ( + keccak256(address), + HashedStorage::from_iter( + // The `wiped` flag indicates only whether previous storage entries + // should be looked up in db or not. For reverts it's a noop since all + // wiped changes had been written as storage reverts. 
diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs
index 3a9b1e3282..27c18af6cb 100644
--- a/crates/trie/db/src/lib.rs
+++ b/crates/trie/db/src/lib.rs
@@ -1,5 +1,6 @@
 //! An integration of [`reth-trie`] with [`reth-db`].
 
+mod commitment;
 mod hashed_cursor;
 mod prefix_set;
 mod proof;
@@ -8,6 +9,7 @@ mod storage;
 mod trie_cursor;
 mod witness;
 
+pub use commitment::{MerklePatriciaTrie, StateCommitment};
 pub use hashed_cursor::{
     DatabaseHashedAccountCursor, DatabaseHashedCursorFactory, DatabaseHashedStorageCursor,
 };
diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs
index 079fe39376..cd50503bc7 100644
--- a/crates/trie/db/src/prefix_set.rs
+++ b/crates/trie/db/src/prefix_set.rs
@@ -36,13 +36,13 @@ impl<TX: DbTx> PrefixSetLoader<'_, TX> {
         // Walk account changeset and insert account prefixes.
         let mut account_changeset_cursor = self.cursor_read::<tables::AccountChangeSets>()?;
-        let mut account_plain_state_cursor = self.cursor_read::<tables::PlainAccountState>()?;
+        let mut account_hashed_state_cursor = self.cursor_read::<tables::HashedAccounts>()?;
         for account_entry in account_changeset_cursor.walk_range(range.clone())? {
             let (_, AccountBeforeTx { address, .. }) = account_entry?;
             let hashed_address = keccak256(address);
             account_prefix_set.insert(Nibbles::unpack(hashed_address));
-            if account_plain_state_cursor.seek_exact(address)?.is_none() {
+            if account_hashed_state_cursor.seek_exact(hashed_address)?.is_none() {
                 destroyed_accounts.insert(hashed_address);
             }
         }
diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs
index 5acb9e0d1b..4d46183dfd 100644
--- a/crates/trie/db/src/state.rs
+++ b/crates/trie/db/src/state.rs
@@ -244,22 +244,24 @@ impl DatabaseHashedPostState for HashedPostState {
             }
         }
 
-        let hashed_accounts = HashMap::from_iter(
-            accounts.into_iter().map(|(address, info)| (keccak256(address), info)),
-        );
+        let hashed_accounts =
+            accounts.into_iter().map(|(address, info)| (keccak256(address), info)).collect();
 
-        let hashed_storages = HashMap::from_iter(storages.into_iter().map(|(address, storage)| {
-            (
-                keccak256(address),
-                HashedStorage::from_iter(
-                    // The `wiped` flag indicates only whether previous storage entries
-                    // should be looked up in db or not. For reverts it's a noop since all
-                    // wiped changes had been written as storage reverts.
-                    false,
-                    storage.into_iter().map(|(slot, value)| (keccak256(slot), value)),
-                ),
-            )
-        }));
+        let hashed_storages = storages
+            .into_iter()
+            .map(|(address, storage)| {
+                (
+                    keccak256(address),
+                    HashedStorage::from_iter(
+                        // The `wiped` flag indicates only whether previous storage entries
+                        // should be looked up in db or not. For reverts it's a noop since all
+                        // wiped changes had been written as storage reverts.
+                        false,
+                        storage.into_iter().map(|(slot, value)| (keccak256(slot), value)),
+                    ),
+                )
+            })
+            .collect();
 
         Ok(Self { accounts: hashed_accounts, storages: hashed_storages })
     }
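The test changes that follow replace `Vec::from_iter`/`BTreeMap::from_iter` with `collect` plus a turbofish. Both spellings drive the same `FromIterator` machinery; a standalone check, with types borrowed from these tests:

    use alloy_primitives::{B256, U256};
    use std::collections::BTreeMap;

    fn main() {
        // `from_iter` and `collect` are interchangeable; the diff standardizes on the latter.
        let a = Vec::from_iter((1..11).map(B256::with_last_byte));
        let b = (1..11).map(B256::with_last_byte).collect::<Vec<_>>();
        assert_eq!(a, b);

        let m = (0..10).map(|k| (B256::with_last_byte(k), U256::from(k))).collect::<BTreeMap<_, _>>();
        assert_eq!(m.len(), 10);
    }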
diff --git a/crates/trie/db/tests/post_state.rs b/crates/trie/db/tests/post_state.rs
index ceadf7cde5..ce6f10d76a 100644
--- a/crates/trie/db/tests/post_state.rs
+++ b/crates/trie/db/tests/post_state.rs
@@ -55,7 +55,7 @@ fn assert_storage_cursor_order(
 #[test]
 fn post_state_only_accounts() {
     let accounts =
-        Vec::from_iter((1..11).map(|key| (B256::with_last_byte(key), Account::default())));
+        (1..11).map(|key| (B256::with_last_byte(key), Account::default())).collect::<Vec<_>>();
 
     let mut hashed_post_state = HashedPostState::default();
     for (hashed_address, account) in &accounts {
@@ -73,7 +73,7 @@ fn post_state_only_accounts() {
 #[test]
 fn db_only_accounts() {
     let accounts =
-        Vec::from_iter((1..11).map(|key| (B256::with_last_byte(key), Account::default())));
+        (1..11).map(|key| (B256::with_last_byte(key), Account::default())).collect::<Vec<_>>();
 
     let db = create_test_rw_db();
     db.update(|tx| {
@@ -96,7 +96,7 @@ fn account_cursor_correct_order() {
     // odd keys are in post state, even keys are in db
     let accounts =
-        Vec::from_iter((1..111).map(|key| (B256::with_last_byte(key), Account::default())));
+        (1..111).map(|key| (B256::with_last_byte(key), Account::default())).collect::<Vec<_>>();
 
     let db = create_test_rw_db();
     db.update(|tx| {
@@ -121,9 +121,9 @@ fn removed_accounts_are_discarded() {
     // odd keys are in post state, even keys are in db
     let accounts =
-        Vec::from_iter((1..111).map(|key| (B256::with_last_byte(key), Account::default())));
+        (1..111).map(|key| (B256::with_last_byte(key), Account::default())).collect::<Vec<_>>();
     // accounts 5, 9, 11 should be considered removed from post state
-    let removed_keys = Vec::from_iter([5, 9, 11].into_iter().map(B256::with_last_byte));
+    let removed_keys = [5, 9, 11].into_iter().map(B256::with_last_byte).collect::<Vec<_>>();
 
     let db = create_test_rw_db();
     db.update(|tx| {
@@ -150,9 +150,9 @@ fn removed_accounts_are_discarded() {
 #[test]
 fn post_state_accounts_take_precedence() {
-    let accounts = Vec::from_iter((1..10).map(|key| {
-        (B256::with_last_byte(key), Account { nonce: key as u64, ..Default::default() })
-    }));
+    let accounts = (1..10)
+        .map(|key| (B256::with_last_byte(key), Account { nonce: key as u64, ..Default::default() }))
+        .collect::<Vec<_>>();
 
     let db = create_test_rw_db();
     db.update(|tx| {
@@ -224,7 +224,7 @@ fn storage_is_empty() {
     }
 
     let db_storage =
-        BTreeMap::from_iter((0..10).map(|key| (B256::with_last_byte(key), U256::from(key))));
+        (0..10).map(|key| (B256::with_last_byte(key), U256::from(key))).collect::<BTreeMap<_, _>>();
     db.update(|tx| {
         for (slot, value) in &db_storage {
             // insert zero value accounts to the database
@@ -299,9 +299,10 @@ fn storage_is_empty() {
 fn storage_cursor_correct_order() {
     let address = B256::random();
     let db_storage =
-        BTreeMap::from_iter((1..11).map(|key| (B256::with_last_byte(key), U256::from(key))));
-    let post_state_storage =
-        BTreeMap::from_iter((11..21).map(|key| (B256::with_last_byte(key), U256::from(key))));
+        (1..11).map(|key| (B256::with_last_byte(key), U256::from(key))).collect::<BTreeMap<_, _>>();
+    let post_state_storage = (11..21)
+        .map(|key| (B256::with_last_byte(key), U256::from(key)))
+        .collect::<BTreeMap<_, _>>();
 
     let db = create_test_rw_db();
     db.update(|tx| {
@@ -334,10 +335,12 @@ fn storage_cursor_correct_order() {
 fn zero_value_storage_entries_are_discarded() {
     let address = B256::random();
     let db_storage =
-        BTreeMap::from_iter((0..10).map(|key| (B256::with_last_byte(key), U256::from(key))));
-    // every even number is changed to zero value
-    let post_state_storage = BTreeMap::from_iter((0..10).map(|key| {
-        (B256::with_last_byte(key), if key % 2 == 0 { U256::ZERO } else { U256::from(key) })
-    }));
+        (0..10).map(|key| (B256::with_last_byte(key), U256::from(key))).collect::<BTreeMap<_, _>>();
+    // every even number is changed to zero value
+    let post_state_storage = (0..10)
+        .map(|key| {
+            (B256::with_last_byte(key), if key % 2 == 0 { U256::ZERO } else { U256::from(key) })
+        })
+        .collect::<BTreeMap<_, _>>();
 
     let db = create_test_rw_db();
     db.update(|tx| {
@@ -371,9 +374,10 @@ fn wiped_storage_is_discarded() {
     let address = B256::random();
     let db_storage =
-        BTreeMap::from_iter((1..11).map(|key| (B256::with_last_byte(key), U256::from(key))));
-    let post_state_storage =
-        BTreeMap::from_iter((11..21).map(|key| (B256::with_last_byte(key), U256::from(key))));
+        (1..11).map(|key| (B256::with_last_byte(key), U256::from(key))).collect::<BTreeMap<_, _>>();
+    let post_state_storage = (11..21)
+        .map(|key| (B256::with_last_byte(key), U256::from(key)))
+        .collect::<BTreeMap<_, _>>();
 
     let db = create_test_rw_db();
     db.update(|tx| {
@@ -404,7 +408,7 @@ fn post_state_storages_take_precedence() {
     let address = B256::random();
     let storage =
-        BTreeMap::from_iter((1..10).map(|key| (B256::with_last_byte(key), U256::from(key))));
+        (1..10).map(|key| (B256::with_last_byte(key), U256::from(key))).collect::<BTreeMap<_, _>>();
 
     let db = create_test_rw_db();
     db.update(|tx| {
diff --git a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs
index 5ffa6729b4..79a2ce96fc 100644
--- a/crates/trie/db/tests/proof.rs
+++ b/crates/trie/db/tests/proof.rs
@@ -1,9 +1,10 @@
 #![allow(missing_docs)]
 
+use alloy_consensus::EMPTY_ROOT_HASH;
 use alloy_primitives::{keccak256, Address, Bytes, B256, U256};
 use alloy_rlp::EMPTY_STRING_CODE;
 use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET};
-use reth_primitives::{constants::EMPTY_ROOT_HASH, Account};
+use reth_primitives::Account;
 use reth_provider::test_utils::{create_test_provider_factory, insert_genesis};
 use reth_trie::{proof::Proof, Nibbles};
 use reth_trie_common::{AccountProof, StorageProof};
diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs
index 59fffec58d..aee2643647 100644
--- a/crates/trie/db/tests/trie.rs
+++ b/crates/trie/db/tests/trie.rs
@@ -1,5 +1,6 @@
 #![allow(missing_docs)]
 
+use alloy_consensus::EMPTY_ROOT_HASH;
 use alloy_primitives::{hex_literal::hex, keccak256, Address, B256, U256};
 use proptest::{prelude::ProptestConfig, proptest};
 use proptest_arbitrary_interop::arb;
@@ -8,9 +9,10 @@ use reth_db_api::{
     cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
     transaction::DbTxMut,
 };
-use reth_primitives::{constants::EMPTY_ROOT_HASH, Account, StorageEntry};
+use reth_primitives::{Account, StorageEntry};
 use reth_provider::{
-    test_utils::create_test_provider_factory, DatabaseProviderRW, StorageTrieWriter, TrieWriter,
+    providers::ProviderNodeTypes, test_utils::create_test_provider_factory, DatabaseProviderRW,
+    StorageTrieWriter, TrieWriter,
 };
 use reth_trie::{
     prefix_set::PrefixSetMut,
@@ -692,8 +694,8 @@ fn storage_trie_around_extension_node() {
     assert_trie_updates(updates.storage_nodes_ref());
 }
 
-fn extension_node_storage_trie(
-    tx: &DatabaseProviderRW<Arc<TempDatabase<DatabaseEnv>>, Spec>,
+fn extension_node_storage_trie<N: ProviderNodeTypes>(
+    tx: &DatabaseProviderRW<Arc<TempDatabase<DatabaseEnv>>, N>,
     hashed_address: B256,
 ) -> (B256, StorageTrieUpdates) {
     let value = U256::from(1);
@@ -720,8 +722,8 @@ fn extension_node_storage_trie(
     (root, trie_updates)
 }
 
-fn extension_node_trie(
-    tx: &DatabaseProviderRW<Arc<TempDatabase<DatabaseEnv>>, Spec>,
+fn extension_node_trie<N: ProviderNodeTypes>(
+    tx: &DatabaseProviderRW<Arc<TempDatabase<DatabaseEnv>>, N>,
 ) -> B256 {
     let a = Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(B256::random()) };
     let val = encode_account(a, None);
diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs
index 20f8cfbb90..8e00472b47 100644
--- a/crates/trie/db/tests/witness.rs
+++ b/crates/trie/db/tests/witness.rs
@@ -1,5 +1,6 @@
 #![allow(missing_docs)]
 
+use alloy_consensus::EMPTY_ROOT_HASH;
 use alloy_primitives::{
     keccak256,
     map::{HashMap, HashSet},
@@ -8,7 +9,7 @@ use alloy_primitives::{
 use alloy_rlp::EMPTY_STRING_CODE;
 use reth_db::{cursor::DbCursorRW, tables};
 use reth_db_api::transaction::DbTxMut;
-use reth_primitives::{constants::EMPTY_ROOT_HASH, Account, StorageEntry};
+use reth_primitives::{Account, StorageEntry};
 use reth_provider::{test_utils::create_test_provider_factory, HashingWriter};
 use reth_trie::{proof::Proof, witness::TrieWitness, HashedPostState, HashedStorage, StateRoot};
 use reth_trie_db::{DatabaseProof, DatabaseStateRoot, DatabaseTrieWitness};
diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml
index 1ffb21dc1d..809055549b 100644
--- a/crates/trie/parallel/Cargo.toml
+++ b/crates/trie/parallel/Cargo.toml
@@ -15,7 +15,6 @@ workspace = true
 # reth
 reth-primitives.workspace = true
 reth-db.workspace = true
-reth-db-api.workspace = true
 reth-trie.workspace = true
 reth-trie-db.workspace = true
 reth-execution-errors.workspace = true
@@ -47,7 +46,11 @@ reth-trie = { workspace = true, features = ["test-utils"] }
 
 # misc
 rand.workspace = true
-tokio = { workspace = true, default-features = false, features = ["sync", "rt", "macros"] }
+tokio = { workspace = true, default-features = false, features = [
+    "sync",
+    "rt",
+    "macros",
+] }
 rayon.workspace = true
 criterion = { workspace = true, features = ["async_tokio"] }
 proptest.workspace = true
diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/parallel_root.rs
index 796191e913..bd70935031 100644
--- a/crates/trie/parallel/src/parallel_root.rs
+++ b/crates/trie/parallel/src/parallel_root.rs
@@ -341,7 +341,7 @@ mod tests {
                 hashed_state
                     .storages
                     .entry(hashed_address)
-                    .or_insert_with(|| HashedStorage::new(false))
+                    .or_insert_with(HashedStorage::default)
                     .storage
                     .insert(hashed_slot, *value);
             }
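The `or_insert_with(HashedStorage::default)` change above passes a function reference instead of a closure. It relies on `HashedStorage`'s `Default` producing the same empty, non-wiped storage as `new(false)`; a quick sanity check under that assumption:

    use reth_trie::HashedStorage;

    fn main() {
        // Both construct empty storage with `wiped == false`.
        assert_eq!(HashedStorage::new(false), HashedStorage::default());
    }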
diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml
new file mode 100644
index 0000000000..1c5bb7d8a3
--- /dev/null
+++ b/crates/trie/sparse/Cargo.toml
@@ -0,0 +1,47 @@
+[package]
+name = "reth-trie-sparse"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+description = "Sparse MPT implementation"
+
+[lints]
+workspace = true
+
+[dependencies]
+# reth
+reth-tracing.workspace = true
+reth-trie-common.workspace = true
+reth-trie.workspace = true
+
+# alloy
+alloy-primitives.workspace = true
+alloy-rlp.workspace = true
+
+# misc
+smallvec = { workspace = true, features = ["const_new"] }
+thiserror.workspace = true
+
+[dev-dependencies]
+reth-testing-utils.workspace = true
+reth-trie = { workspace = true, features = ["test-utils"] }
+reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] }
+
+assert_matches.workspace = true
+criterion.workspace = true
+itertools.workspace = true
+pretty_assertions = "1.4"
+proptest.workspace = true
+rand.workspace = true
+
+[[bench]]
+name = "root"
+harness = false
+
+[[bench]]
+name = "rlp_node"
+harness = false
diff --git a/crates/trie/sparse/benches/rlp_node.rs b/crates/trie/sparse/benches/rlp_node.rs
new file mode 100644
index 0000000000..57ab52978b
--- /dev/null
+++ b/crates/trie/sparse/benches/rlp_node.rs
@@ -0,0 +1,78 @@
+#![allow(missing_docs, unreachable_pub)]
+
+use std::time::{Duration, Instant};
+
+use alloy_primitives::{B256, U256};
+use criterion::{criterion_group, criterion_main, Criterion};
+use prop::strategy::ValueTree;
+use proptest::{prelude::*, test_runner::TestRunner};
+use rand::seq::IteratorRandom;
+use reth_testing_utils::generators;
+use reth_trie::Nibbles;
+use reth_trie_sparse::RevealedSparseTrie;
+
+pub fn update_rlp_node_level(c: &mut Criterion) {
+    let mut rng = generators::rng();
+
+    let mut group = c.benchmark_group("update rlp node level");
+    group.sample_size(20);
+
+    for size in [100_000] {
+        let mut runner = TestRunner::new(ProptestConfig::default());
+        let state = proptest::collection::hash_map(any::<B256>(), any::<U256>(), size)
+            .new_tree(&mut runner)
+            .unwrap()
+            .current();
+
+        // Create a sparse trie with `size` leaves
+        let mut sparse = RevealedSparseTrie::default();
+        for (key, value) in &state {
+            sparse
+                .update_leaf(Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec())
+                .unwrap();
+        }
+        sparse.root();
+
+        for updated_leaves in [0.1, 1.0] {
+            for key in state
+                .keys()
+                .choose_multiple(&mut rng, (size as f64 * (updated_leaves / 100.0)) as usize)
+            {
+                sparse
+                    .update_leaf(
+                        Nibbles::unpack(key),
+                        alloy_rlp::encode_fixed_size(&rng.gen::<U256>()).to_vec(),
+                    )
+                    .unwrap();
+            }
+
+            // Calculate the maximum depth of the trie for the given number of leaves
+            let max_depth = (size as f64).log(16.0).ceil() as usize;
+
+            for depth in 0..=max_depth {
+                group.bench_function(
+                    format!("size {size} | updated {updated_leaves}% | depth {depth}"),
+                    |b| {
+                        // Use `iter_custom` to avoid measuring clones and drops
+                        b.iter_custom(|iters| {
+                            let mut elapsed = Duration::ZERO;
+
+                            let mut cloned = sparse.clone();
+                            for _ in 0..iters {
+                                let start = Instant::now();
+                                cloned.update_rlp_node_level(depth);
+                                elapsed += start.elapsed();
+                                cloned = sparse.clone();
+                            }
+
+                            elapsed
+                        })
+                    },
+                );
+            }
+        }
+    }
+}
+
+criterion_group!(rlp_node, update_rlp_node_level);
+criterion_main!(rlp_node);
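The `max_depth` bound in the benchmark above comes from the fan-out of a hexary trie: with `size` random keys the trie is roughly log16(size) levels deep, so sweeping `0..=max_depth` covers root to leaves. The same arithmetic in isolation:

    fn main() {
        let size = 100_000f64;
        // Each level branches over 16 nibble values, hence the base-16 logarithm.
        let max_depth = size.log(16.0).ceil() as usize;
        assert_eq!(max_depth, 5); // log16(100_000) ≈ 4.15, rounded up
    }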
diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs
new file mode 100644
index 0000000000..30ce566fb5
--- /dev/null
+++ b/crates/trie/sparse/benches/root.rs
@@ -0,0 +1,207 @@
+#![allow(missing_docs, unreachable_pub)]
+
+use alloy_primitives::{map::HashMap, B256, U256};
+use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
+use itertools::Itertools;
+use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner};
+use reth_trie::{
+    hashed_cursor::{noop::NoopHashedStorageCursor, HashedPostStateStorageCursor},
+    node_iter::{TrieElement, TrieNodeIter},
+    trie_cursor::{noop::NoopStorageTrieCursor, InMemoryStorageTrieCursor},
+    updates::StorageTrieUpdates,
+    walker::TrieWalker,
+    HashedStorage,
+};
+use reth_trie_common::{HashBuilder, Nibbles};
+use reth_trie_sparse::SparseTrie;
+
+pub fn calculate_root_from_leaves(c: &mut Criterion) {
+    let mut group = c.benchmark_group("calculate root from leaves");
+    group.sample_size(20);
+
+    for size in [1_000, 5_000, 10_000, 100_000] {
+        let state = generate_test_data(size);
+
+        // hash builder
+        group.bench_function(BenchmarkId::new("hash builder", size), |b| {
+            b.iter_with_setup(HashBuilder::default, |mut hb| {
+                for (key, value) in state.iter().sorted_by_key(|(key, _)| *key) {
+                    hb.add_leaf(Nibbles::unpack(key), &alloy_rlp::encode_fixed_size(value));
+                }
+                hb.root();
+            })
+        });
+
+        // sparse trie
+        group.bench_function(BenchmarkId::new("sparse trie", size), |b| {
+            b.iter_with_setup(SparseTrie::revealed_empty, |mut sparse| {
+                for (key, value) in &state {
+                    sparse
+                        .update_leaf(
+                            Nibbles::unpack(key),
+                            alloy_rlp::encode_fixed_size(value).to_vec(),
+                        )
+                        .unwrap();
+                }
+                sparse.root().unwrap();
+            })
+        });
+    }
+}
+
+pub fn calculate_root_from_leaves_repeated(c: &mut Criterion) {
+    let mut group = c.benchmark_group("calculate root from leaves repeated");
+    group.sample_size(20);
+
+    for init_size in [1_000, 10_000, 100_000] {
+        let init_state = generate_test_data(init_size);
+
+        for update_size in [100, 1_000, 5_000, 10_000] {
+            for num_updates in [1, 3, 5, 10] {
+                let updates =
+                    (0..num_updates).map(|_| generate_test_data(update_size)).collect::<Vec<_>>();
+
+                // hash builder
+                let benchmark_id = BenchmarkId::new(
+                    "hash builder",
+                    format!("init size {init_size} | update size {update_size} | num updates {num_updates}"),
+                );
+                group.bench_function(benchmark_id, |b| {
+                    b.iter_with_setup(
+                        || {
+                            let init_storage = HashedStorage::from_iter(false, init_state.clone());
+                            let storage_updates = updates
+                                .clone()
+                                .into_iter()
+                                .map(|update| HashedStorage::from_iter(false, update))
+                                .collect::<Vec<_>>();
+
+                            let mut hb = HashBuilder::default().with_updates(true);
+                            for (key, value) in init_state.iter().sorted_by_key(|(key, _)| *key) {
+                                hb.add_leaf(
+                                    Nibbles::unpack(key),
+                                    &alloy_rlp::encode_fixed_size(value),
+                                );
+                            }
+                            hb.root();
+
+                            let (_, updates) = hb.split();
+                            let trie_updates = StorageTrieUpdates::new(updates);
+                            (init_storage, storage_updates, trie_updates)
+                        },
+                        |(init_storage, storage_updates, mut trie_updates)| {
+                            let mut storage = init_storage;
+                            let mut storage_updates = storage_updates.into_iter().peekable();
+                            while let Some(update) = storage_updates.next() {
+                                storage.extend(&update);
+
+                                let prefix_set = update.construct_prefix_set().freeze();
+                                let (storage_sorted, trie_updates_sorted) =
+                                    if storage_updates.peek().is_some() {
+                                        (
+                                            storage.clone().into_sorted(),
+                                            trie_updates.clone().into_sorted(),
+                                        )
+                                    } else {
+                                        (
+                                            std::mem::take(&mut storage).into_sorted(),
+                                            std::mem::take(&mut trie_updates).into_sorted(),
+                                        )
+                                    };
+
+                                let walker = TrieWalker::new(
+                                    InMemoryStorageTrieCursor::new(
+                                        B256::ZERO,
+                                        NoopStorageTrieCursor::default(),
+                                        Some(&trie_updates_sorted),
+                                    ),
+                                    prefix_set,
+                                );
+                                let mut node_iter = TrieNodeIter::new(
+                                    walker,
+                                    HashedPostStateStorageCursor::new(
+                                        NoopHashedStorageCursor::default(),
+                                        Some(&storage_sorted),
+                                    ),
+                                );
+
+                                let mut hb = HashBuilder::default().with_updates(true);
+                                while let Some(node) = node_iter.try_next().unwrap() {
+                                    match node {
+                                        TrieElement::Branch(node) => {
+                                            hb.add_branch(
+                                                node.key,
+                                                node.value,
+                                                node.children_are_in_trie,
+                                            );
+                                        }
+                                        TrieElement::Leaf(hashed_slot, value) => {
+                                            hb.add_leaf(
+                                                Nibbles::unpack(hashed_slot),
+                                                alloy_rlp::encode_fixed_size(&value).as_ref(),
+                                            );
+                                        }
+                                    }
+                                }
+                                hb.root();
+
+                                if storage_updates.peek().is_some() {
+                                    trie_updates.finalize(node_iter.walker, hb);
+                                }
+                            }
+                        },
+                    )
+                });
+
+                // sparse trie
+                let benchmark_id = BenchmarkId::new(
+                    "sparse trie",
+                    format!("init size {init_size} | update size {update_size} | num updates {num_updates}"),
+                );
+                group.bench_function(benchmark_id, |b| {
+                    b.iter_with_setup(
+                        || {
+                            let mut sparse = SparseTrie::revealed_empty();
+                            for (key, value) in &init_state {
+                                sparse
+                                    .update_leaf(
+                                        Nibbles::unpack(key),
+                                        alloy_rlp::encode_fixed_size(value).to_vec(),
+                                    )
+                                    .unwrap();
+                            }
+                            sparse.root().unwrap();
+                            sparse
+                        },
+                        |mut sparse| {
+                            for update in &updates {
+                                for (key, value) in update {
+                                    sparse
+                                        .update_leaf(
+                                            Nibbles::unpack(key),
+                                            alloy_rlp::encode_fixed_size(value).to_vec(),
+                                        )
+                                        .unwrap();
+                                }
+                                sparse.root().unwrap();
+                            }
+                        },
+                    )
+                });
+            }
+        }
+    }
+}
+
+fn generate_test_data(size: usize) -> HashMap<B256, U256> {
+    let mut runner = TestRunner::new(ProptestConfig::default());
+    proptest::collection::hash_map(any::<B256>(), any::<U256>(), size)
+        .new_tree(&mut runner)
+        .unwrap()
+        .current()
+        .into_iter()
+        .collect()
+}
+
+criterion_group!(root, calculate_root_from_leaves, calculate_root_from_leaves_repeated);
+criterion_main!(root);
diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs
new file mode 100644
index 0000000000..506b206fdd
--- /dev/null
+++ b/crates/trie/sparse/src/errors.rs
@@ -0,0 +1,59 @@
+//! Errors for sparse trie.
+
+use alloy_primitives::{Bytes, B256};
+use reth_trie::Nibbles;
+use thiserror::Error;
+
+use crate::SparseNode;
+
+/// Result type with [`SparseStateTrieError`] as error.
+pub type SparseStateTrieResult<Ok> = Result<Ok, SparseStateTrieError>;
+
+/// Error encountered in [`crate::SparseStateTrie`].
+#[derive(Error, Debug)]
+pub enum SparseStateTrieError {
+    /// Encountered invalid root node.
+    #[error("invalid root node at {path:?}: {node:?}")]
+    InvalidRootNode {
+        /// Path to first proof node.
+        path: Nibbles,
+        /// Encoded first proof node.
+        node: Bytes,
+    },
+    /// Sparse trie error.
+    #[error(transparent)]
+    Sparse(#[from] SparseTrieError),
+    /// RLP error.
+    #[error(transparent)]
+    Rlp(#[from] alloy_rlp::Error),
+}
+
+/// Result type with [`SparseTrieError`] as error.
+pub type SparseTrieResult<Ok> = Result<Ok, SparseTrieError>;
+
+/// Error encountered in [`crate::SparseTrie`].
+#[derive(Error, Debug)]
+pub enum SparseTrieError {
+    /// Sparse trie is still blind. Thrown on attempt to update it.
+    #[error("sparse trie is blind")]
+    Blind,
+    /// Encountered blinded node on update.
+    #[error("attempted to update blind node at {path:?}: {hash}")]
+    BlindedNode {
+        /// Blind node path.
+        path: Nibbles,
+        /// Node hash.
+        hash: B256,
+    },
+    /// Encountered unexpected node at path when revealing.
+    #[error("encountered an invalid node at path {path:?} when revealing: {node:?}")]
+    Reveal {
+        /// Path to the node.
+        path: Nibbles,
+        /// Node that was at the path when revealing.
+        node: Box<SparseNode>,
+    },
+    /// RLP error.
+    #[error(transparent)]
+    Rlp(#[from] alloy_rlp::Error),
+}
diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs
new file mode 100644
index 0000000000..b3cb2c5fdf
--- /dev/null
+++ b/crates/trie/sparse/src/lib.rs
@@ -0,0 +1,10 @@
+//! The implementation of a sparse MPT.
+
+mod state;
+pub use state::*;
+
+mod trie;
+pub use trie::*;
+
+mod errors;
+pub use errors::*;
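Because `SparseStateTrieError` wraps `SparseTrieError` via `#[from]`, state-trie code can apply `?` directly to inner trie results. A minimal sketch of that conversion (the `demo` function is illustrative only):

    use reth_trie::Nibbles;
    use reth_trie_sparse::{SparseStateTrieResult, SparseTrie};

    fn demo(trie: &mut SparseTrie) -> SparseStateTrieResult<()> {
        // `?` converts `SparseTrieError` into `SparseStateTrieError::Sparse` via `#[from]`.
        trie.update_leaf(Nibbles::default(), vec![0x80])?;
        Ok(())
    }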
diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs
new file mode 100644
index 0000000000..cfb17ef36f
--- /dev/null
+++ b/crates/trie/sparse/src/state.rs
@@ -0,0 +1,131 @@
+use crate::{SparseStateTrieError, SparseStateTrieResult, SparseTrie};
+use alloy_primitives::{
+    map::{HashMap, HashSet},
+    Bytes, B256,
+};
+use alloy_rlp::Decodable;
+use reth_trie::{Nibbles, TrieNode};
+
+/// Sparse state trie representing a lazy-loaded Ethereum state trie.
+#[derive(Default, Debug)]
+pub struct SparseStateTrie {
+    /// Sparse account trie.
+    pub(crate) state: SparseTrie,
+    /// Sparse storage tries.
+    #[allow(dead_code)]
+    pub(crate) storages: HashMap<B256, SparseTrie>,
+    /// Collection of revealed account and storage keys.
+    #[allow(dead_code)]
+    pub(crate) revealed: HashMap<B256, HashSet<B256>>,
+}
+
+impl SparseStateTrie {
+    /// Create new sparse state trie from the given sparse account trie.
+    pub fn from_state(state: SparseTrie) -> Self {
+        Self { state, ..Default::default() }
+    }
+
+    /// Returns `true` if account was already revealed.
+    pub fn is_account_revealed(&self, account: &B256) -> bool {
+        self.revealed.contains_key(account)
+    }
+
+    /// Returns `true` if storage slot for account was already revealed.
+    pub fn is_storage_slot_revealed(&self, account: &B256, slot: &B256) -> bool {
+        self.revealed.get(account).map_or(false, |slots| slots.contains(slot))
+    }
+
+    /// Reveal unknown trie paths from the provided account proof.
+    /// NOTE: This method does not extensively validate the proof.
+    pub fn reveal_account(
+        &mut self,
+        account: B256,
+        proof: impl IntoIterator<Item = (Nibbles, Bytes)>,
+    ) -> SparseStateTrieResult<()> {
+        let mut proof = proof.into_iter().peekable();
+
+        // reveal root and initialize the trie if not already
+        let Some((path, node)) = proof.next() else { return Ok(()) };
+        if !path.is_empty() {
+            return Err(SparseStateTrieError::InvalidRootNode { path, node })
+        }
+
+        // Decode root node and perform sanity check.
+        let root_node = TrieNode::decode(&mut &node[..])?;
+        if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() {
+            return Err(SparseStateTrieError::InvalidRootNode { path, node })
+        }
+
+        // Reveal root node if it wasn't already.
+        let trie = self.state.reveal_root(root_node)?;
+
+        // add the remaining proof nodes
+        for (path, bytes) in proof {
+            let node = TrieNode::decode(&mut &bytes[..])?;
+            trie.reveal_node(path, node)?;
+        }
+
+        // Mark leaf path as revealed.
+        self.revealed.entry(account).or_default();
+
+        Ok(())
+    }
+
+    /// Update the leaf node.
+    pub fn update_leaf(&mut self, path: Nibbles, value: Vec<u8>) -> SparseStateTrieResult<()> {
+        self.state.update_leaf(path, value)?;
+        Ok(())
+    }
+
+    /// Returns sparse trie root if the trie has been revealed.
+    pub fn root(&mut self) -> Option<B256> {
+        self.state.root()
+    }
+}
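Usage-wise, `reveal_account` expects the proof root-first: the first element must sit at the empty path, mirroring what `ProofRetainer` emits. A hedged sketch for the degenerate empty-trie case (`0x80` is the RLP empty string, i.e. an empty root; compare the `sparse_trie_reveal_empty` test below):

    use alloy_primitives::{Bytes, B256};
    use alloy_rlp::EMPTY_STRING_CODE;
    use reth_trie::Nibbles;
    use reth_trie_sparse::SparseStateTrie;

    fn main() {
        let mut sparse = SparseStateTrie::default();
        // Root node at the empty path, nothing after it.
        let proof = [(Nibbles::default(), Bytes::from([EMPTY_STRING_CODE]))];
        sparse.reveal_account(B256::ZERO, proof).unwrap();
        assert!(sparse.is_account_revealed(&B256::ZERO));
    }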
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use alloy_primitives::Bytes;
+    use alloy_rlp::EMPTY_STRING_CODE;
+    use assert_matches::assert_matches;
+    use reth_trie::HashBuilder;
+    use reth_trie_common::proof::ProofRetainer;
+
+    #[test]
+    fn sparse_trie_reveal_empty() {
+        let retainer = ProofRetainer::from_iter([Nibbles::default()]);
+        let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer);
+        hash_builder.root();
+        let proofs = hash_builder.take_proof_nodes();
+        assert_eq!(proofs.len(), 1);
+
+        let mut sparse = SparseStateTrie::default();
+        assert_eq!(sparse.state, SparseTrie::Blind);
+        sparse.reveal_account(Default::default(), proofs.into_inner()).unwrap();
+        assert_eq!(sparse.state, SparseTrie::revealed_empty());
+    }
+
+    #[test]
+    fn reveal_first_node_not_root() {
+        let mut sparse = SparseStateTrie::default();
+        let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))];
+        assert_matches!(
+            sparse.reveal_account(Default::default(), proof),
+            Err(SparseStateTrieError::InvalidRootNode { .. })
+        );
+    }
+
+    #[test]
+    fn reveal_invalid_proof_with_empty_root() {
+        let mut sparse = SparseStateTrie::default();
+        let proof = [
+            (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])),
+            (Nibbles::from_nibbles([0x1]), Bytes::new()),
+        ];
+        assert_matches!(
+            sparse.reveal_account(Default::default(), proof),
+            Err(SparseStateTrieError::InvalidRootNode { .. })
+        );
+    }
+}
diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs
new file mode 100644
index 0000000000..9db1dff531
--- /dev/null
+++ b/crates/trie/sparse/src/trie.rs
@@ -0,0 +1,1674 @@
+use crate::{SparseTrieError, SparseTrieResult};
+use alloy_primitives::{hex, keccak256, map::HashMap, B256};
+use alloy_rlp::Decodable;
+use reth_tracing::tracing::debug;
+use reth_trie::{
+    prefix_set::{PrefixSet, PrefixSetMut},
+    RlpNode,
+};
+use reth_trie_common::{
+    BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, TrieMask, TrieNode, CHILD_INDEX_RANGE,
+    EMPTY_ROOT_HASH,
+};
+use smallvec::SmallVec;
+use std::fmt;
+
+/// Inner representation of the sparse trie.
+/// Sparse trie is blind by default until nodes are revealed.
+#[derive(PartialEq, Eq, Default, Debug)]
+pub enum SparseTrie {
+    /// None of the trie nodes are known.
+    #[default]
+    Blind,
+    /// The trie nodes have been revealed.
+    Revealed(RevealedSparseTrie),
+}
+
+impl SparseTrie {
+    /// Creates new revealed empty trie.
+    pub fn revealed_empty() -> Self {
+        Self::Revealed(RevealedSparseTrie::default())
+    }
+
+    /// Returns `true` if the sparse trie has no revealed nodes.
+    pub const fn is_blind(&self) -> bool {
+        matches!(self, Self::Blind)
+    }
+
+    /// Returns mutable reference to revealed sparse trie if the trie is not blind.
+    pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie> {
+        if let Self::Revealed(revealed) = self {
+            Some(revealed)
+        } else {
+            None
+        }
+    }
+
+    /// Reveals the root node if the trie is blinded.
+    ///
+    /// # Returns
+    ///
+    /// Mutable reference to [`RevealedSparseTrie`].
+    pub fn reveal_root(&mut self, root: TrieNode) -> SparseTrieResult<&mut RevealedSparseTrie> {
+        if self.is_blind() {
+            *self = Self::Revealed(RevealedSparseTrie::from_root(root)?)
+        }
+        Ok(self.as_revealed_mut().unwrap())
+    }
+
+    /// Update the leaf node.
+    pub fn update_leaf(&mut self, path: Nibbles, value: Vec<u8>) -> SparseTrieResult<()> {
+        let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?;
+        revealed.update_leaf(path, value)?;
+        Ok(())
+    }
+
+    /// Calculates and returns the trie root if the trie has been revealed.
+    pub fn root(&mut self) -> Option<B256> {
+        Some(self.as_revealed_mut()?.root())
+    }
+}
+
+/// The representation of a revealed sparse trie.
+///
+/// ## Invariants
+///
+/// - The root node is always present in `nodes` collection.
+/// - Each leaf entry in `nodes` collection must have a corresponding entry in `values` collection.
+///   The opposite is also true.
+/// - All keys in `values` collection are full leaf paths.
+#[derive(Clone, PartialEq, Eq)]
+pub struct RevealedSparseTrie {
+    /// All trie nodes.
+    nodes: HashMap<Nibbles, SparseNode>,
+    /// All leaf values.
+    values: HashMap<Nibbles, Vec<u8>>,
+    /// Prefix set.
+    prefix_set: PrefixSetMut,
+    /// Reusable buffer for RLP encoding of nodes.
+    rlp_buf: Vec<u8>,
+}
+
+impl fmt::Debug for RevealedSparseTrie {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("RevealedSparseTrie")
+            .field("nodes", &self.nodes)
+            .field("values", &self.values)
+            .field("prefix_set", &self.prefix_set)
+            .field("rlp_buf", &hex::encode(&self.rlp_buf))
+            .finish()
+    }
+}
+
+impl Default for RevealedSparseTrie {
+    fn default() -> Self {
+        Self {
+            nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]),
+            values: HashMap::default(),
+            prefix_set: PrefixSetMut::default(),
+            rlp_buf: Vec::new(),
+        }
+    }
+}
+
+impl RevealedSparseTrie {
+    /// Create new revealed sparse trie from the given root node.
+    pub fn from_root(node: TrieNode) -> SparseTrieResult<Self> {
+        let mut this = Self {
+            nodes: HashMap::default(),
+            values: HashMap::default(),
+            prefix_set: PrefixSetMut::default(),
+            rlp_buf: Vec::new(),
+        };
+        this.reveal_node(Nibbles::default(), node)?;
+        Ok(this)
+    }
+
+    /// Reveal the trie node only if it was not known already.
+    pub fn reveal_node(&mut self, path: Nibbles, node: TrieNode) -> SparseTrieResult<()> {
+        // TODO: revise all inserts to not overwrite existing entries
+        match node {
+            TrieNode::EmptyRoot => {
+                debug_assert!(path.is_empty());
+                self.nodes.insert(path, SparseNode::Empty);
+            }
+            TrieNode::Branch(branch) => {
+                let mut stack_ptr = branch.as_ref().first_child_index();
+                for idx in CHILD_INDEX_RANGE {
+                    if branch.state_mask.is_bit_set(idx) {
+                        let mut child_path = path.clone();
+                        child_path.push_unchecked(idx);
+                        self.reveal_node_or_hash(child_path, &branch.stack[stack_ptr])?;
+                        stack_ptr += 1;
+                    }
+                }
+
+                match self.nodes.get(&path) {
+                    // Blinded and non-existent nodes can be replaced.
+                    Some(SparseNode::Hash(_)) | None => {
+                        self.nodes.insert(
+                            path,
+                            SparseNode::Branch { state_mask: branch.state_mask, hash: None },
+                        );
+                    }
+                    // Branch node already exists, or an extension node was placed where a
+                    // branch node was before.
+                    Some(SparseNode::Branch { .. } | SparseNode::Extension { .. }) => {}
+                    // All other node types can't be handled.
+                    Some(node @ (SparseNode::Empty | SparseNode::Leaf { .. })) => {
+                        return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) })
+                    }
+                }
+            }
+            TrieNode::Extension(ext) => match self.nodes.get(&path) {
+                Some(SparseNode::Hash(_)) | None => {
+                    let mut child_path = path.clone();
+                    child_path.extend_from_slice_unchecked(&ext.key);
+                    self.reveal_node_or_hash(child_path, &ext.child)?;
+                    self.nodes.insert(path, SparseNode::Extension { key: ext.key, hash: None });
+                }
+                // Extension node already exists, or an extension node was placed where a branch
+                // node was before.
+                Some(SparseNode::Extension { .. } | SparseNode::Branch { .. }) => {}
+                // All other node types can't be handled.
+                Some(node @ (SparseNode::Empty | SparseNode::Leaf { .. })) => {
+                    return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) })
+                }
+            },
+            TrieNode::Leaf(leaf) => match self.nodes.get(&path) {
+                Some(SparseNode::Hash(_)) | None => {
+                    let mut full = path.clone();
+                    full.extend_from_slice_unchecked(&leaf.key);
+                    self.values.insert(full, leaf.value);
+                    self.nodes.insert(path, SparseNode::new_leaf(leaf.key));
+                }
+                // Leaf node already exists.
+                Some(SparseNode::Leaf { .. }) => {}
+                // All other node types can't be handled.
+                Some(
+                    node @ (SparseNode::Empty |
+                    SparseNode::Extension { .. } |
+                    SparseNode::Branch { .. }),
+                ) => return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }),
+            },
+        }
+
+        Ok(())
+    }
+
+    fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> {
+        if child.len() == B256::len_bytes() + 1 {
+            let hash = B256::from_slice(&child[1..]);
+            match self.nodes.get(&path) {
+                // Hash node with a different hash can't be handled.
+                Some(node @ SparseNode::Hash(previous_hash)) if previous_hash != &hash => {
+                    return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) })
+                }
+                None => {
+                    self.nodes.insert(path, SparseNode::Hash(hash));
+                }
+                // All other node types mean that it has already been revealed.
+                Some(_) => {}
+            }
+            return Ok(())
+        }
+
+        self.reveal_node(path, TrieNode::decode(&mut &child[..])?)
+    }
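`reveal_node_or_hash` distinguishes the two legal encodings of a child by length alone: a hashed child is exactly 33 bytes (the `0xa0` RLP string header plus the 32-byte hash), while anything shorter is a node inlined by the under-32-byte rule and is decoded recursively. The invariant, standalone (assuming `RlpNode` dereferences to its byte slice, as in `reth_trie`):

    use alloy_primitives::B256;
    use reth_trie::RlpNode;

    fn main() {
        let rlp = RlpNode::word_rlp(&B256::ZERO);
        // One header byte (0x80 + 32 = 0xa0) followed by the 32-byte word.
        assert_eq!(rlp.len(), B256::len_bytes() + 1);
        assert_eq!(rlp[0], 0xa0);
    }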
+
+    /// Update the leaf node with provided value.
+    pub fn update_leaf(&mut self, path: Nibbles, value: Vec<u8>) -> SparseTrieResult<()> {
+        self.prefix_set.insert(path.clone());
+        let existing = self.values.insert(path.clone(), value);
+        if existing.is_some() {
+            // trie structure unchanged, return immediately
+            return Ok(())
+        }
+
+        let mut current = Nibbles::default();
+        while let Some(node) = self.nodes.get_mut(&current) {
+            match node {
+                SparseNode::Empty => {
+                    *node = SparseNode::new_leaf(path);
+                    break
+                }
+                SparseNode::Hash(hash) => {
+                    return Err(SparseTrieError::BlindedNode { path: current, hash: *hash })
+                }
+                SparseNode::Leaf { key: current_key, .. } => {
+                    current.extend_from_slice_unchecked(current_key);
+
+                    // this leaf is being updated
+                    if current == path {
+                        unreachable!("we already checked leaf presence in the beginning");
+                    }
+
+                    // find the common prefix
+                    let common = current.common_prefix_length(&path);
+
+                    // update existing node
+                    let new_ext_key = current.slice(current.len() - current_key.len()..common);
+                    *node = SparseNode::new_ext(new_ext_key);
+
+                    // create a branch node and corresponding leaves
+                    self.nodes.insert(
+                        current.slice(..common),
+                        SparseNode::new_split_branch(current[common], path[common]),
+                    );
+                    self.nodes.insert(
+                        path.slice(..=common),
+                        SparseNode::new_leaf(path.slice(common + 1..)),
+                    );
+                    self.nodes.insert(
+                        current.slice(..=common),
+                        SparseNode::new_leaf(current.slice(common + 1..)),
+                    );
+
+                    break;
+                }
+                SparseNode::Extension { key, .. } => {
+                    current.extend_from_slice(key);
+                    if !path.starts_with(&current) {
+                        // find the common prefix
+                        let common = current.common_prefix_length(&path);
+
+                        *key = current.slice(current.len() - key.len()..common);
+
+                        // create state mask for new branch node
+                        // NOTE: this might overwrite the current extension node
+                        let branch = SparseNode::new_split_branch(current[common], path[common]);
+                        self.nodes.insert(current.slice(..common), branch);
+
+                        // create new leaf
+                        let new_leaf = SparseNode::new_leaf(path.slice(common + 1..));
+                        self.nodes.insert(path.slice(..=common), new_leaf);
+
+                        // recreate extension to previous child if needed
+                        let key = current.slice(common + 1..);
+                        if !key.is_empty() {
+                            self.nodes.insert(current.slice(..=common), SparseNode::new_ext(key));
+                        }
+
+                        break;
+                    }
+                }
+                SparseNode::Branch { state_mask, .. } => {
+                    let nibble = path[current.len()];
+                    current.push_unchecked(nibble);
+                    if !state_mask.is_bit_set(nibble) {
+                        state_mask.set_bit(nibble);
+                        let new_leaf = SparseNode::new_leaf(path.slice(current.len()..));
+                        self.nodes.insert(current, new_leaf);
+                        break;
+                    }
+                }
+            };
+        }
+
+        Ok(())
+    }
+
+    /// Remove leaf node from the trie.
+    pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> {
+        self.prefix_set.insert(path.clone());
+        self.values.remove(path);
+
+        // If the path wasn't present in `values`, we still need to walk the trie and ensure that
+        // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry
+        // in `nodes`, but not in the `values`.
+
+        let mut removed_nodes = self.take_nodes_for_path(path)?;
+        debug!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path");
+        // Pop the first node from the stack which is the leaf node we want to remove.
+        let mut child = removed_nodes.pop().expect("leaf exists");
+        #[cfg(debug_assertions)]
+        {
+            let mut child_path = child.path.clone();
+            let SparseNode::Leaf { key, .. } = &child.node else { panic!("expected leaf node") };
+            child_path.extend_from_slice_unchecked(key);
+            assert_eq!(&child_path, path);
+        }
+
+        // If we don't have any other removed nodes, insert an empty node at the root.
+        if removed_nodes.is_empty() {
+            debug_assert!(self.nodes.is_empty());
+            self.nodes.insert(Nibbles::default(), SparseNode::Empty);
+
+            return Ok(())
+        }
+
+        // Walk the stack of removed nodes from the back and re-insert them back into the trie,
+        // adjusting the node type as needed.
+        while let Some(removed_node) = removed_nodes.pop() {
+            let removed_path = removed_node.path;
+
+            let new_node = match &removed_node.node {
+                SparseNode::Empty => return Err(SparseTrieError::Blind),
+                SparseNode::Hash(hash) => {
+                    return Err(SparseTrieError::BlindedNode { path: removed_path, hash: *hash })
+                }
+                SparseNode::Leaf { .. } => {
+                    unreachable!("we already popped the leaf node")
+                }
+                SparseNode::Extension { key, .. } => {
+                    // If the node is an extension node, we need to look at its child to see if we
+                    // need to merge them.
+                    match &child.node {
+                        SparseNode::Empty => return Err(SparseTrieError::Blind),
+                        SparseNode::Hash(hash) => {
+                            return Err(SparseTrieError::BlindedNode {
+                                path: child.path,
+                                hash: *hash,
+                            })
+                        }
+                        // For a leaf node, we collapse the extension node into a leaf node,
+                        // extending the key. While it's impossible to encounter an extension node
+                        // followed by a leaf node in a complete trie, it's possible here because we
+                        // could have downgraded the extension node's child into a leaf node from
+                        // another node type.
+                        SparseNode::Leaf { key: leaf_key, .. } => {
+                            self.nodes.remove(&child.path);
+
+                            let mut new_key = key.clone();
+                            new_key.extend_from_slice_unchecked(leaf_key);
+                            SparseNode::new_leaf(new_key)
+                        }
+                        // For an extension node, we collapse them into one extension node,
+                        // extending the key
+                        SparseNode::Extension { key: extension_key, .. } => {
+                            self.nodes.remove(&child.path);
+
+                            let mut new_key = key.clone();
+                            new_key.extend_from_slice_unchecked(extension_key);
+                            SparseNode::new_ext(new_key)
+                        }
+                        // For a branch node, we just leave the extension node as-is.
+                        SparseNode::Branch { .. } => removed_node.node,
+                    }
+                }
+                SparseNode::Branch { mut state_mask, hash: _ } => {
+                    // If the node is a branch node, we need to check the number of children left
+                    // after deleting the child at the given nibble.
+                    if let Some(removed_nibble) = removed_node.unset_branch_nibble {
+                        state_mask.unset_bit(removed_nibble);
+                    }
+
+                    // If only one child is left set in the branch node, we need to collapse it.
+                    if state_mask.count_bits() == 1 {
+                        let child_nibble =
+                            state_mask.first_set_bit_index().expect("state mask is not empty");
+
+                        // Get full path of the only child node left.
+                        let mut child_path = removed_path.clone();
+                        child_path.push_unchecked(child_nibble);
+
+                        // Remove the only child node.
+                        let child = self.nodes.get(&child_path).unwrap();
+
+                        debug!(target: "trie::sparse", ?removed_path, ?child_path, ?child, "Branch node has only one child");
+
+                        let mut delete_child = false;
+                        let new_node = match child {
+                            SparseNode::Empty => return Err(SparseTrieError::Blind),
+                            SparseNode::Hash(hash) => {
+                                return Err(SparseTrieError::BlindedNode {
+                                    path: child_path,
+                                    hash: *hash,
+                                })
+                            }
+                            // If the only child is a leaf node, we downgrade the branch node into a
+                            // leaf node, prepending the nibble to the key, and delete the old
+                            // child.
+                            SparseNode::Leaf { key, .. } => {
+                                delete_child = true;
+
+                                let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]);
+                                new_key.extend_from_slice_unchecked(key);
+                                SparseNode::new_leaf(new_key)
+                            }
+                            // If the only child node is an extension node, we downgrade the branch
+                            // node into an even longer extension node, prepending the nibble to the
+                            // key, and delete the old child.
+                            SparseNode::Extension { key, .. } => {
+                                delete_child = true;
+
+                                let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]);
+                                new_key.extend_from_slice_unchecked(key);
+                                SparseNode::new_ext(new_key)
+                            }
+                            // If the only child is a branch node, we downgrade the current branch
+                            // node into a one-nibble extension node.
+                            SparseNode::Branch { .. } => {
+                                SparseNode::new_ext(Nibbles::from_nibbles_unchecked([child_nibble]))
+                            }
+                        };
+
+                        if delete_child {
+                            self.nodes.remove(&child_path);
+                        }
+
+                        new_node
+                    }
+                    // If more than one child is left set in the branch, we just re-insert it
+                    // as-is.
+                    else {
+                        SparseNode::new_branch(state_mask)
+                    }
+                }
+            };
+
+            child = RemovedSparseNode {
+                path: removed_path.clone(),
+                node: new_node.clone(),
+                unset_branch_nibble: None,
+            };
+            debug!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node");
+            self.nodes.insert(removed_path, new_node);
+        }
+
+        Ok(())
+    }
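The collapse rules above in miniature: with exactly two leaves under a split branch, removing one leaves a lone child, so the branch is downgraded and merged into the surviving leaf (compare the `sparse_trie_remove_leaf` test below):

    use alloy_primitives::U256;
    use reth_trie::Nibbles;
    use reth_trie_sparse::RevealedSparseTrie;

    fn main() {
        let value = alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec();
        let mut sparse = RevealedSparseTrie::default();
        sparse.update_leaf(Nibbles::from_nibbles([0x1, 0x0]), value.clone()).unwrap();
        sparse.update_leaf(Nibbles::from_nibbles([0x1, 0x1]), value).unwrap();
        // Deleting one leaf collapses the split branch; the root becomes a single
        // leaf carrying the surviving full key.
        sparse.remove_leaf(&Nibbles::from_nibbles([0x1, 0x0])).unwrap();
        sparse.root();
    }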
+
+    /// Traverse trie nodes down to the leaf node and collect all nodes along the path.
+    fn take_nodes_for_path(&mut self, path: &Nibbles) -> SparseTrieResult<Vec<RemovedSparseNode>> {
+        let mut current = Nibbles::default(); // Start traversal from the root
+        let mut nodes = Vec::new(); // Collect traversed nodes
+
+        while let Some(node) = self.nodes.remove(&current) {
+            match &node {
+                SparseNode::Empty => return Err(SparseTrieError::Blind),
+                SparseNode::Hash(hash) => {
+                    return Err(SparseTrieError::BlindedNode { path: current, hash: *hash })
+                }
+                SparseNode::Leaf { key: _key, .. } => {
+                    // Leaf node is always the one that we're deleting, and no other leaf nodes can
+                    // be found during traversal.
+
+                    #[cfg(debug_assertions)]
+                    {
+                        let mut current = current.clone();
+                        current.extend_from_slice_unchecked(_key);
+                        assert_eq!(&current, path);
+                    }
+
+                    nodes.push(RemovedSparseNode {
+                        path: current.clone(),
+                        node,
+                        unset_branch_nibble: None,
+                    });
+                    break
+                }
+                SparseNode::Extension { key, .. } => {
+                    #[cfg(debug_assertions)]
+                    {
+                        let mut current = current.clone();
+                        current.extend_from_slice_unchecked(key);
+                        assert!(path.starts_with(&current));
+                    }
+
+                    let path = current.clone();
+                    current.extend_from_slice_unchecked(key);
+                    nodes.push(RemovedSparseNode { path, node, unset_branch_nibble: None });
+                }
+                SparseNode::Branch { state_mask, .. } => {
+                    let nibble = path[current.len()];
+                    debug_assert!(state_mask.is_bit_set(nibble));
+
+                    // If the branch node has a child that is a leaf node that we're removing,
+                    // we need to unset this nibble.
+                    // Any other branch nodes will not require unsetting the nibble, because
+                    // deleting one leaf node can not remove the whole path
+                    // where the branch node is located.
+                    let mut child_path =
+                        Nibbles::from_nibbles([current.as_slice(), &[nibble]].concat());
+                    let unset_branch_nibble = self
+                        .nodes
+                        .get(&child_path)
+                        .map_or(false, move |node| match node {
+                            SparseNode::Leaf { key, .. } => {
+                                // Get full path of the leaf node
+                                child_path.extend_from_slice_unchecked(key);
+                                &child_path == path
+                            }
+                            _ => false,
+                        })
+                        .then_some(nibble);
+
+                    nodes.push(RemovedSparseNode {
+                        path: current.clone(),
+                        node,
+                        unset_branch_nibble,
+                    });
+
+                    current.push_unchecked(nibble);
+                }
+            }
+        }
+
+        Ok(nodes)
+    }
+
+    /// Return the root of the sparse trie.
+    /// Updates all remaining dirty nodes before calculating the root.
+    pub fn root(&mut self) -> B256 {
+        // take the current prefix set.
+        let mut prefix_set = std::mem::take(&mut self.prefix_set).freeze();
+        let root_rlp = self.rlp_node_allocate(Nibbles::default(), &mut prefix_set);
+        if let Some(root_hash) = root_rlp.as_hash() {
+            root_hash
+        } else {
+            keccak256(root_rlp)
+        }
+    }
+
+    /// Update hashes of the nodes that are located at a level deeper than or equal to the provided
+    /// depth. Root node has a level of 0.
+    pub fn update_rlp_node_level(&mut self, depth: usize) {
+        let mut prefix_set = self.prefix_set.clone().freeze();
+        let mut buffers = RlpNodeBuffers::default();
+
+        let targets = self.get_changed_nodes_at_depth(&mut prefix_set, depth);
+        for target in targets {
+            buffers.path_stack.push((target, Some(true)));
+            self.rlp_node(&mut prefix_set, &mut buffers);
+        }
+    }
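A usage sketch for `update_rlp_node_level`: after a burst of leaf updates, hashes can be refreshed for the deep subtrees first in a bounded pass, leaving `root` to finish only the cheap top levels (values illustrative):

    use alloy_primitives::{B256, U256};
    use reth_trie::Nibbles;
    use reth_trie_sparse::RevealedSparseTrie;

    fn main() {
        let mut sparse = RevealedSparseTrie::default();
        let value = alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec();
        for byte in 0..=255u8 {
            sparse.update_leaf(Nibbles::unpack(B256::repeat_byte(byte)), value.clone()).unwrap();
        }
        // Re-hash changed nodes at depth >= 2, then finish at the root.
        sparse.update_rlp_node_level(2);
        let _root = sparse.root();
    }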
+
+    /// Returns a list of paths to the nodes that were changed according to the prefix set and are
+    /// located at the provided depth when counting from the root node. If there's a leaf at a
+    /// depth less than the provided depth, it will be included in the result.
+    fn get_changed_nodes_at_depth(&self, prefix_set: &mut PrefixSet, depth: usize) -> Vec<Nibbles> {
+        let mut paths = Vec::from([(Nibbles::default(), 0)]);
+        let mut targets = Vec::new();
+
+        while let Some((mut path, level)) = paths.pop() {
+            match self.nodes.get(&path).unwrap() {
+                SparseNode::Empty | SparseNode::Hash(_) => {}
+                SparseNode::Leaf { hash, .. } => {
+                    if hash.is_some() && !prefix_set.contains(&path) {
+                        continue
+                    }
+
+                    targets.push(path);
+                }
+                SparseNode::Extension { key, hash } => {
+                    if hash.is_some() && !prefix_set.contains(&path) {
+                        continue
+                    }
+
+                    if level >= depth {
+                        targets.push(path);
+                    } else {
+                        path.extend_from_slice_unchecked(key);
+                        paths.push((path, level + 1));
+                    }
+                }
+                SparseNode::Branch { state_mask, hash } => {
+                    if hash.is_some() && !prefix_set.contains(&path) {
+                        continue
+                    }
+
+                    if level >= depth {
+                        targets.push(path);
+                    } else {
+                        for bit in CHILD_INDEX_RANGE.rev() {
+                            if state_mask.is_bit_set(bit) {
+                                let mut child_path = path.clone();
+                                child_path.push_unchecked(bit);
+                                paths.push((child_path, level + 1));
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        targets
+    }
+
+    fn rlp_node_allocate(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode {
+        let mut buffers = RlpNodeBuffers::new_with_path(path);
+        self.rlp_node(prefix_set, &mut buffers)
+    }
+
+    fn rlp_node(&mut self, prefix_set: &mut PrefixSet, buffers: &mut RlpNodeBuffers) -> RlpNode {
+        'main: while let Some((path, mut is_in_prefix_set)) = buffers.path_stack.pop() {
+            // Check if the path is in the prefix set.
+            // First, check the cached value. If it's `None`, then check the prefix set, and update
+            // the cached value.
+            let mut prefix_set_contains =
+                |path: &Nibbles| *is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path));
+
+            let rlp_node = match self.nodes.get_mut(&path).unwrap() {
+                SparseNode::Empty => RlpNode::word_rlp(&EMPTY_ROOT_HASH),
+                SparseNode::Hash(hash) => RlpNode::word_rlp(hash),
+                SparseNode::Leaf { key, hash } => {
+                    self.rlp_buf.clear();
+                    let mut path = path.clone();
+                    path.extend_from_slice_unchecked(key);
+                    if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) {
+                        RlpNode::word_rlp(&hash)
+                    } else {
+                        let value = self.values.get(&path).unwrap();
+                        let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.rlp_buf);
+                        *hash = rlp_node.as_hash();
+                        rlp_node
+                    }
+                }
+                SparseNode::Extension { key, hash } => {
+                    let mut child_path = path.clone();
+                    child_path.extend_from_slice_unchecked(key);
+                    if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) {
+                        RlpNode::word_rlp(&hash)
+                    } else if buffers.rlp_node_stack.last().map_or(false, |e| e.0 == child_path) {
+                        let (_, child) = buffers.rlp_node_stack.pop().unwrap();
+                        self.rlp_buf.clear();
+                        let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf);
+                        *hash = rlp_node.as_hash();
+                        rlp_node
+                    } else {
+                        // need to get rlp node for child first
+                        buffers.path_stack.extend([(path, is_in_prefix_set), (child_path, None)]);
+                        continue
+                    }
+                }
+                SparseNode::Branch { state_mask, hash } => {
+                    if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) {
+                        buffers.rlp_node_stack.push((path, RlpNode::word_rlp(&hash)));
+                        continue
+                    }
+
+                    buffers.branch_child_buf.clear();
+                    // Walk children in a reverse order from `f` to `0`, so we pop the `0` first
+                    // from the stack.
+                    for bit in CHILD_INDEX_RANGE.rev() {
+                        if state_mask.is_bit_set(bit) {
+                            let mut child = path.clone();
+                            child.push_unchecked(bit);
+                            buffers.branch_child_buf.push(child);
+                        }
+                    }
+
+                    buffers
+                        .branch_value_stack_buf
+                        .resize(buffers.branch_child_buf.len(), Default::default());
+                    let mut added_children = false;
+                    for (i, child_path) in buffers.branch_child_buf.iter().enumerate() {
+                        if buffers.rlp_node_stack.last().map_or(false, |e| &e.0 == child_path) {
+                            let (_, child) = buffers.rlp_node_stack.pop().unwrap();
+                            // Insert children in the resulting buffer in a normal order, because
+                            // initially we iterated in reverse.
+                            buffers.branch_value_stack_buf
+                                [buffers.branch_child_buf.len() - i - 1] = child;
+                            added_children = true;
+                        } else {
+                            debug_assert!(!added_children);
+                            buffers.path_stack.push((path, is_in_prefix_set));
+                            buffers
+                                .path_stack
+                                .extend(buffers.branch_child_buf.drain(..).map(|p| (p, None)));
+                            continue 'main
+                        }
+                    }
+
+                    self.rlp_buf.clear();
+                    let rlp_node = BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask)
+                        .rlp(&mut self.rlp_buf);
+                    *hash = rlp_node.as_hash();
+                    rlp_node
+                }
+            };
+            buffers.rlp_node_stack.push((path, rlp_node));
+        }
+
+        buffers.rlp_node_stack.pop().unwrap().1
+    }
+}
+
+/// Enum representing trie nodes in sparse trie.
+#[derive(PartialEq, Eq, Clone, Debug)]
+pub enum SparseNode {
+    /// Empty trie node.
+    Empty,
+    /// The hash of the node that was not revealed.
+    Hash(B256),
+    /// Sparse leaf node with remaining key suffix.
+    Leaf {
+        /// Remaining key suffix for the leaf node.
+        key: Nibbles,
+        /// Pre-computed hash of the sparse node.
+        /// Can be reused unless this trie path has been updated.
+        hash: Option<B256>,
+    },
+    /// Sparse extension node with key.
+    Extension {
+        /// The key slice stored by this extension node.
+        key: Nibbles,
+        /// Pre-computed hash of the sparse node.
+        /// Can be reused unless this trie path has been updated.
+        hash: Option<B256>,
+    },
+    /// Sparse branch node with state mask.
+    Branch {
+        /// The bitmask representing children present in the branch node.
+        state_mask: TrieMask,
+        /// Pre-computed hash of the sparse node.
+        /// Can be reused unless this trie path has been updated.
+        hash: Option<B256>,
+    },
+}
+
+impl SparseNode {
+    /// Create new sparse node from [`TrieNode`].
+    pub fn from_node(node: TrieNode) -> Self {
+        match node {
+            TrieNode::EmptyRoot => Self::Empty,
+            TrieNode::Leaf(leaf) => Self::new_leaf(leaf.key),
+            TrieNode::Extension(ext) => Self::new_ext(ext.key),
+            TrieNode::Branch(branch) => Self::new_branch(branch.state_mask),
+        }
+    }
+
+    /// Create new [`SparseNode::Branch`] from state mask.
+    pub const fn new_branch(state_mask: TrieMask) -> Self {
+        Self::Branch { state_mask, hash: None }
+    }
+
+    /// Create new [`SparseNode::Branch`] with two bits set.
+    pub const fn new_split_branch(bit_a: u8, bit_b: u8) -> Self {
+        let state_mask = TrieMask::new(
+            // set bits for both children
+            (1u16 << bit_a) | (1u16 << bit_b),
+        );
+        Self::Branch { state_mask, hash: None }
+    }
+
+    /// Create new [`SparseNode::Extension`] from the key slice.
+    pub const fn new_ext(key: Nibbles) -> Self {
+        Self::Extension { key, hash: None }
+    }
+
+    /// Create new [`SparseNode::Leaf`] from leaf key and value.
+    pub const fn new_leaf(key: Nibbles) -> Self {
+        Self::Leaf { key, hash: None }
+    }
+}
+
+#[derive(Debug)]
+struct RemovedSparseNode {
+    path: Nibbles,
+    node: SparseNode,
+    unset_branch_nibble: Option<u8>,
+}
+
+/// Collection of reusable buffers for [`RevealedSparseTrie::rlp_node`].
+#[derive(Debug, Default)]
+struct RlpNodeBuffers {
+    /// Stack of paths we need rlp nodes for and whether the path is in the prefix set.
+    path_stack: Vec<(Nibbles, Option<bool>)>,
+    /// Stack of rlp nodes.
+    rlp_node_stack: Vec<(Nibbles, RlpNode)>,
+    /// Reusable branch child path.
+    branch_child_buf: SmallVec<[Nibbles; 16]>,
+    /// Reusable branch value stack.
+    branch_value_stack_buf: SmallVec<[RlpNode; 16]>,
+}
+
+impl RlpNodeBuffers {
+    /// Creates a new instance of buffers with the given path on the stack.
+    fn new_with_path(path: Nibbles) -> Self {
+        Self {
+            path_stack: vec![(path, None)],
+            rlp_node_stack: Vec::new(),
+            branch_child_buf: SmallVec::<[Nibbles; 16]>::new_const(),
+            branch_value_stack_buf: SmallVec::<[RlpNode; 16]>::new_const(),
+        }
+    }
+}
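A worked example of the split-branch mask construction above: `new_split_branch(0x2, 0xa)` ORs the two child bits together, `(1 << 2) | (1 << 10)`:

    use reth_trie_common::TrieMask;
    use reth_trie_sparse::SparseNode;

    fn main() {
        assert_eq!(
            SparseNode::new_split_branch(0x2, 0xa),
            // 0b0000_0100_0000_0100 == (1 << 2) | (1 << 10)
            SparseNode::Branch { state_mask: TrieMask::new(0b0000_0100_0000_0100), hash: None }
        );
    }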
+
+#[cfg(test)]
+mod tests {
+    use std::collections::BTreeMap;
+
+    use super::*;
+    use alloy_primitives::{map::HashSet, U256};
+    use assert_matches::assert_matches;
+    use itertools::Itertools;
+    use prop::sample::SizeRange;
+    use proptest::prelude::*;
+    use rand::seq::IteratorRandom;
+    use reth_trie::{BranchNode, ExtensionNode, LeafNode};
+    use reth_trie_common::{
+        proof::{ProofNodes, ProofRetainer},
+        HashBuilder,
+    };
+
+    /// Calculate the state root by feeding the provided state to the hash builder and retaining
+    /// the proofs for the provided targets.
+    ///
+    /// Returns the state root and the retained proof nodes.
+    fn hash_builder_root_with_proofs<V: AsRef<[u8]>>(
+        state: impl IntoIterator<Item = (Nibbles, V)>,
+        proof_targets: impl IntoIterator<Item = Nibbles>,
+    ) -> (B256, ProofNodes) {
+        let mut hash_builder =
+            HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter(proof_targets));
+        for (key, value) in state {
+            hash_builder.add_leaf(key, value.as_ref());
+        }
+        (hash_builder.root(), hash_builder.take_proof_nodes())
+    }
+
+    /// Assert that the sparse trie nodes and the proof nodes from the hash builder are equal.
+    fn assert_eq_sparse_trie_proof_nodes(
+        sparse_trie: &RevealedSparseTrie,
+        proof_nodes: ProofNodes,
+    ) {
+        let proof_nodes = proof_nodes
+            .into_nodes_sorted()
+            .into_iter()
+            .map(|(path, node)| (path, TrieNode::decode(&mut node.as_ref()).unwrap()));
+
+        let sparse_nodes = sparse_trie.nodes.iter().sorted_by_key(|(path, _)| *path);
+
+        for ((proof_node_path, proof_node), (sparse_node_path, sparse_node)) in
+            proof_nodes.zip(sparse_nodes)
+        {
+            assert_eq!(&proof_node_path, sparse_node_path);
+
+            let equals = match (&proof_node, &sparse_node) {
+                // Both nodes are empty
+                (TrieNode::EmptyRoot, SparseNode::Empty) => true,
+                // Both nodes are branches and have the same state mask
+                (
+                    TrieNode::Branch(BranchNode { state_mask: proof_state_mask, .. }),
+                    SparseNode::Branch { state_mask: sparse_state_mask, .. },
+                ) => proof_state_mask == sparse_state_mask,
+                // Both nodes are extensions and have the same key
+                (
+                    TrieNode::Extension(ExtensionNode { key: proof_key, .. }),
+                    SparseNode::Extension { key: sparse_key, .. },
+                ) |
+                // Both nodes are leaves and have the same key
+                (
+                    TrieNode::Leaf(LeafNode { key: proof_key, .. }),
+                    SparseNode::Leaf { key: sparse_key, .. },
+                ) => proof_key == sparse_key,
+                // Empty and hash nodes are specific to the sparse trie, skip them
+                (_, SparseNode::Empty | SparseNode::Hash(_)) => continue,
+                _ => false,
+            };
+            assert!(equals, "proof node: {:?}, sparse node: {:?}", proof_node, sparse_node);
+        }
+    }
+
+    #[test]
+    fn sparse_trie_is_blind() {
+        assert!(SparseTrie::default().is_blind());
+        assert!(!SparseTrie::revealed_empty().is_blind());
+    }
+
+    #[test]
+    fn sparse_trie_empty_update_one() {
+        let path = Nibbles::unpack(B256::with_last_byte(42));
+        let value = alloy_rlp::encode_fixed_size(&U256::from(1));
+
+        let (hash_builder_root, hash_builder_proof_nodes) =
+            hash_builder_root_with_proofs([(path.clone(), &value)], [path.clone()]);
+
+        let mut sparse = RevealedSparseTrie::default();
+        sparse.update_leaf(path, value.to_vec()).unwrap();
+        let sparse_root = sparse.root();
+
+        assert_eq!(sparse_root, hash_builder_root);
+        assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+    }
+
+    #[test]
+    fn sparse_trie_empty_update_multiple_lower_nibbles() {
+        let paths =
+            (0..=16).map(|b| Nibbles::unpack(B256::with_last_byte(b))).collect::<Vec<_>>();
+        let value = alloy_rlp::encode_fixed_size(&U256::from(1));
+
+        let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs(
+            paths.iter().cloned().zip(std::iter::repeat_with(|| value.clone())),
+            paths.clone(),
+        );
+
+        let mut sparse = RevealedSparseTrie::default();
+        for path in &paths {
+            sparse.update_leaf(path.clone(), value.to_vec()).unwrap();
+        }
+        let sparse_root = sparse.root();
+
+        assert_eq!(sparse_root, hash_builder_root);
+        assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+    }
+
+    #[test]
+    fn sparse_trie_empty_update_multiple_upper_nibbles() {
+        let paths = (239..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::<Vec<_>>();
+        let value = alloy_rlp::encode_fixed_size(&U256::from(1));
+
+        let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs(
+            paths.iter().cloned().zip(std::iter::repeat_with(|| value.clone())),
+            paths.clone(),
+        );
+
+        let mut sparse = RevealedSparseTrie::default();
+        for path in &paths {
+            sparse.update_leaf(path.clone(), value.to_vec()).unwrap();
+        }
+        let sparse_root = sparse.root();
+
+        assert_eq!(sparse_root, hash_builder_root);
+        assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+    }
+
+    #[test]
+    fn sparse_trie_empty_update_multiple() {
+        let paths = (0..=255)
+            .map(|b| {
+                Nibbles::unpack(if b % 2 == 0 {
+                    B256::repeat_byte(b)
+                } else {
+                    B256::with_last_byte(b)
+                })
+            })
+            .collect::<Vec<_>>();
+        let value = alloy_rlp::encode_fixed_size(&U256::from(1));
+
+        let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs(
+            paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(|| value.clone())),
+            paths.clone(),
+        );
+
+        let mut sparse = RevealedSparseTrie::default();
+        for path in &paths {
+            sparse.update_leaf(path.clone(), value.to_vec()).unwrap();
+        }
+        let sparse_root = sparse.root();
+
+        assert_eq!(sparse_root, hash_builder_root);
+        assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+    }
+
+    #[test]
+    fn sparse_trie_empty_update_repeated() {
+        let paths = (0..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::<Vec<_>>();
+        let old_value = alloy_rlp::encode_fixed_size(&U256::from(1));
+        let new_value = alloy_rlp::encode_fixed_size(&U256::from(2));
+
+        let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs(
+            paths.iter().cloned().zip(std::iter::repeat_with(|| old_value.clone())),
+            paths.clone(),
+        );
+
+        let mut sparse = RevealedSparseTrie::default();
+        for path in &paths {
+            sparse.update_leaf(path.clone(), old_value.to_vec()).unwrap();
+        }
+        let sparse_root = sparse.root();
+
+        assert_eq!(sparse_root, hash_builder_root);
+        assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+
+        let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs(
+            paths.iter().cloned().zip(std::iter::repeat_with(|| new_value.clone())),
+            paths.clone(),
+        );
+
+        for path in &paths {
+            sparse.update_leaf(path.clone(), new_value.to_vec()).unwrap();
+        }
+        let sparse_root = sparse.root();
+
+        assert_eq!(sparse_root, hash_builder_root);
+        assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+    }
+
+    #[test]
+    fn sparse_trie_remove_leaf() {
+        reth_tracing::init_test_tracing();
+
+        let mut sparse = RevealedSparseTrie::default();
+
+        let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec();
+
+        sparse
+            .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone())
+            .unwrap();
+        sparse
+            .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone())
+            .unwrap();
+        sparse
+            .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone())
+            .unwrap();
+        sparse
+            .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone())
+            .unwrap();
+        sparse
+            .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone())
+            .unwrap();
+        sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap();
+
+        // Extension (Key = 5)
+        // └── Branch (Mask = 1011)
+        //     ├── 0 -> Extension (Key = 23)
+        //     │        └── Branch (Mask = 0101)
+        //     │              ├── 1 -> Leaf (Key = 1, Path = 50231)
+        //     │              └── 3 -> Leaf (Key = 3, Path = 50233)
+        //     ├── 2 -> Leaf (Key = 013, Path = 52013)
+        //     └── 3 -> Branch (Mask = 0101)
+        //                ├── 1 -> Leaf (Key = 3102, Path = 53102)
+        //                └── 3 -> Branch (Mask = 1010)
+        //                       ├── 0 -> Leaf (Key = 3302, Path = 53302)
+        //                       └── 2 -> Leaf (Key = 3320, Path = 53320)
+        pretty_assertions::assert_eq!(
+            sparse.nodes.clone().into_iter().collect::<BTreeMap<_, _>>(),
+            BTreeMap::from_iter([
+                (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))),
+                (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1101.into())),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0]),
+                    SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3]))
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]),
+                    SparseNode::new_branch(0b1010.into())
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]),
+                    SparseNode::new_leaf(Nibbles::default())
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]),
+                    SparseNode::new_leaf(Nibbles::default())
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x2]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x1, 0x3]))
+                ),
+                (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3, 0x1]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2]))
+                ),
+                (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x2]))
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x0]))
+                )
+            ])
+        );
+
+        sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3])).unwrap();
+
+        // Extension (Key = 5)
+        // └── Branch (Mask = 1001)
+        //     ├── 0 -> Extension (Key = 23)
+        //     │        └── Branch (Mask = 0101)
+        //     │              ├── 1 -> Leaf (Key = 0231, Path = 50231)
+        //     │              └── 3 -> Leaf (Key = 0233, Path = 50233)
+        //     └── 3 -> Branch (Mask = 0101)
+        //                ├── 1 -> Leaf (Key = 3102, Path = 53102)
+        //                └── 3 -> Branch (Mask = 1010)
+        //                       ├── 0 -> Leaf (Key = 3302, Path = 53302)
+        //                       └── 2 -> Leaf (Key = 3320, Path = 53320)
+        pretty_assertions::assert_eq!(
+            sparse.nodes.clone().into_iter().collect::<BTreeMap<_, _>>(),
+            BTreeMap::from_iter([
+                (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))),
+                (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0]),
+                    SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3]))
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]),
+                    SparseNode::new_branch(0b1010.into())
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]),
+                    SparseNode::new_leaf(Nibbles::default())
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]),
+                    SparseNode::new_leaf(Nibbles::default())
+                ),
+                (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3, 0x1]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2]))
+                ),
+                (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x2]))
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x0]))
+                )
+            ])
+        );
+
+        sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1])).unwrap();
+
+        // Extension (Key = 5)
+        // └── Branch (Mask = 1001)
+        //     ├── 0 -> Leaf (Key = 0233, Path = 50233)
+        //     └── 3 -> Branch (Mask = 0101)
+        //                ├── 1 -> Leaf (Key = 3102, Path = 53102)
+        //                └── 3 -> Branch (Mask = 1010)
+        //                       ├── 0 -> Leaf (Key = 3302, Path = 53302)
+        //                       └── 2 -> Leaf (Key = 3320, Path = 53320)
+        pretty_assertions::assert_eq!(
+            sparse.nodes.clone().into_iter().collect::<BTreeMap<_, _>>(),
+            BTreeMap::from_iter([
+                (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))),
+                (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3]))
+                ),
+                (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3, 0x1]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2]))
+                ),
+                (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x2]))
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x0]))
+                )
+            ])
+        );
+
+        sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2])).unwrap();
+
+        // Extension (Key = 5)
+        // └── Branch (Mask = 1001)
+        //     ├── 0 -> Leaf (Key = 0233, Path = 50233)
+        //     └── 3 -> Branch (Mask = 1010)
+        //            ├── 0 -> Leaf (Key = 3302, Path = 53302)
+        //            └── 2 -> Leaf (Key = 3320, Path = 53320)
+        pretty_assertions::assert_eq!(
+            sparse.nodes.clone().into_iter().collect::<BTreeMap<_, _>>(),
+            BTreeMap::from_iter([
+                (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))),
+                (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3]))
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3]),
+                    SparseNode::new_ext(Nibbles::from_nibbles([0x3]))
+                ),
+                (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x2]))
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x0]))
+                )
+            ])
+        );
+
+        sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0])).unwrap();
+
+        // Extension (Key = 5)
+        // └── Branch (Mask = 1001)
+        //     ├── 0 -> Leaf (Key = 0233, Path = 50233)
+        //     └── 3 -> Leaf (Key = 3302, Path = 53302)
+        pretty_assertions::assert_eq!(
+            sparse.nodes.clone().into_iter().collect::<BTreeMap<_, _>>(),
+            BTreeMap::from_iter([
+                (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))),
+                (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3]))
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x0, 0x2]))
+                ),
+            ])
+        );
+
+        sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3])).unwrap();
+
+        // Leaf (Key = 53302)
+        pretty_assertions::assert_eq!(
+            sparse.nodes.clone().into_iter().collect::<BTreeMap<_, _>>(),
+            BTreeMap::from_iter([(
+                Nibbles::default(),
+                SparseNode::new_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]))
+            ),])
+        );
+
+        sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])).unwrap();
+
+        // Empty
+        pretty_assertions::assert_eq!(
+            sparse.nodes.clone().into_iter().collect::<BTreeMap<_, _>>(),
+            BTreeMap::from_iter([(Nibbles::default(), SparseNode::Empty)])
+        );
+    }
+
+    #[test]
+    fn sparse_trie_remove_leaf_blinded() {
+        let leaf = LeafNode::new(
+            Nibbles::default(),
+            alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec(),
+        );
+        let branch = TrieNode::Branch(BranchNode::new(
+            vec![
+                RlpNode::word_rlp(&B256::repeat_byte(1)),
+                RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(),
+            ],
+            TrieMask::new(0b11),
+        ));
+
+        let mut sparse = RevealedSparseTrie::from_root(branch.clone()).unwrap();
+
+        // Reveal a branch node and one of its children
+        //
+        // Branch (Mask = 11)
+        // ├── 0 -> Hash (Path = 0)
+        // └── 1 -> Leaf (Path = 1)
+        sparse.reveal_node(Nibbles::default(), branch).unwrap();
+        sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf)).unwrap();
+
+        // Removing a blinded leaf should result in an error
+        assert_matches!(
+            sparse.remove_leaf(&Nibbles::from_nibbles([0x0])),
+            Err(SparseTrieError::BlindedNode { path, hash }) if path == Nibbles::from_nibbles([0x0]) && hash == B256::repeat_byte(1)
+        );
+    }
+
+    #[allow(clippy::type_complexity)]
+    #[test]
+    fn sparse_trie_fuzz() {
+        // Having only the first 3 nibbles set, we narrow down the range of keys to
+        // 4096 (16^3) different hashes, which makes collisions more likely and
+        // exercises sparse trie updates more thoroughly.
+        const KEY_NIBBLES_LEN: usize = 3;
+
+        fn test(updates: Vec<(HashMap<Nibbles, Vec<u8>>, HashSet<Nibbles>)>) {
+            {
+                let mut state = BTreeMap::default();
+                let mut sparse = RevealedSparseTrie::default();
+
+                for (update, keys_to_delete) in updates {
+                    // Insert state updates into the sparse trie and calculate the root
+                    for (key, value) in update.clone() {
+                        sparse.update_leaf(key, value).unwrap();
+                    }
+                    let sparse_root = sparse.root();
+
+                    // Insert state updates into the hash builder and calculate the root
+                    state.extend(update);
+                    let (hash_builder_root, hash_builder_proof_nodes) =
+                        hash_builder_root_with_proofs(
+                            state.clone(),
+                            state.keys().cloned().collect::<Vec<_>>(),
+                        );
+
+                    // Assert that the sparse trie root matches the hash builder root
+                    assert_eq!(sparse_root, hash_builder_root);
+                    // Assert that the sparse trie nodes match the hash builder proof nodes
+                    assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+
+                    // Delete some keys from both the hash builder and the sparse trie and check
+                    // that the sparse trie root still matches the hash builder root
+                    for key in keys_to_delete {
+                        state.remove(&key).unwrap();
+                        sparse.remove_leaf(&key).unwrap();
+                    }
+
+                    let sparse_root = sparse.root();
+
+                    let (hash_builder_root, hash_builder_proof_nodes) =
+                        hash_builder_root_with_proofs(
+                            state.clone(),
+                            state.keys().cloned().collect::<Vec<_>>(),
+                        );
+
+                    // Assert that the sparse trie root matches the hash builder root
+                    assert_eq!(sparse_root, hash_builder_root);
+                    // Assert that the sparse trie nodes match the hash builder proof nodes
+                    assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+                }
+            }
+        }
+
+        /// Pad nibbles of length [`KEY_NIBBLES_LEN`] with zeros to the length of a B256 hash.
+        fn pad_nibbles(nibbles: Nibbles) -> Nibbles {
+            let mut base =
+                Nibbles::from_nibbles_unchecked([0; { B256::len_bytes() / 2 - KEY_NIBBLES_LEN }]);
+            base.extend_from_slice_unchecked(&nibbles);
+            base
+        }
+
+        fn transform_updates(
+            updates: Vec<HashMap<Nibbles, Vec<u8>>>,
+            mut rng: impl Rng,
+        ) -> Vec<(HashMap<Nibbles, Vec<u8>>, HashSet<Nibbles>)> {
+            let mut keys = HashSet::new();
+            updates
+                .into_iter()
+                .map(|update| {
+                    keys.extend(update.keys().cloned());
+
+                    let keys_to_delete_len = update.len() / 2;
+                    let keys_to_delete = (0..keys_to_delete_len)
+                        .map(|_| {
+                            let key = keys.iter().choose(&mut rng).unwrap().clone();
+                            keys.take(&key).unwrap()
+                        })
+                        .collect();
+
+                    (update, keys_to_delete)
+                })
+                .collect::<Vec<_>>()
+        }
+
+        proptest!(ProptestConfig::with_cases(10), |(
+            updates in proptest::collection::vec(
+                proptest::collection::hash_map(
+                    any_with::<Nibbles>(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles),
+                    any::<Vec<u8>>(),
+                    1..100,
+                ).prop_map(HashMap::from_iter),
+                1..100,
+            ).prop_perturb(transform_updates)
+        )| {
+            test(updates)
+        });
+    }
+
+    /// We have three leaves that share the same prefix: 0x00, 0x01 and 0x02. Hash builder trie has
+    /// only leaves 0x00 and 0x02, and we have proofs for them. Leaf 0x01 is new and inserted in the
+    /// sparse trie first.
+    ///
+    /// 1. Reveal the hash builder proof to leaf 0x00 in the sparse trie.
+    /// 2. Insert leaf 0x01 into the sparse trie.
+    /// 3. Reveal the hash builder proof to leaf 0x02 in the sparse trie.
+    ///
+    /// The hash builder proof to the leaf 0x02 didn't have the leaf 0x01 at the corresponding
+    /// nibble of the branch node, so we need to adjust the branch node instead of fully
+    /// replacing it.
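+    ///
+    /// A condensed sketch of the sequence, with `reveal(proof_for(..))` as hypothetical
+    /// shorthand for the proof-generation and `reveal_node` calls in the test body:
+    ///
+    /// ```text
+    /// reveal(proof_for(0x00));           // root branch mask: 0b101 (children 0x0 and 0x2)
+    /// sparse.update_leaf(0x01, value);   // mask grows to 0b111
+    /// reveal(proof_for(0x02));           // mask must stay 0b111, not reset to 0b101
+    /// ```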
+    #[test]
+    fn sparse_trie_reveal_node_1() {
+        let key1 = || Nibbles::from_nibbles_unchecked([0x00]);
+        let key2 = || Nibbles::from_nibbles_unchecked([0x01]);
+        let key3 = || Nibbles::from_nibbles_unchecked([0x02]);
+        let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1));
+
+        // Generate the proof for the root node and initialize the sparse trie with it
+        let (_, proof_nodes) = hash_builder_root_with_proofs(
+            [(key1(), value()), (key3(), value())],
+            [Nibbles::default()],
+        );
+        let mut sparse = RevealedSparseTrie::from_root(
+            TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(),
+        )
+        .unwrap();
+
+        // Generate the proof for the first key and reveal it in the sparse trie
+        let (_, proof_nodes) =
+            hash_builder_root_with_proofs([(key1(), value()), (key3(), value())], [key1()]);
+        for (path, node) in proof_nodes.nodes_sorted() {
+            sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap();
+        }
+
+        // Check that the branch node exists with only two nibbles set
+        assert_eq!(
+            sparse.nodes.get(&Nibbles::default()),
+            Some(&SparseNode::new_branch(0b101.into()))
+        );
+
+        // Insert the leaf for the second key
+        sparse.update_leaf(key2(), value().to_vec()).unwrap();
+
+        // Check that the branch node was updated and another nibble was set
+        assert_eq!(
+            sparse.nodes.get(&Nibbles::default()),
+            Some(&SparseNode::new_branch(0b111.into()))
+        );
+
+        // Generate the proof for the third key and reveal it in the sparse trie
+        let (_, proof_nodes_3) =
+            hash_builder_root_with_proofs([(key1(), value()), (key3(), value())], [key3()]);
+        for (path, node) in proof_nodes_3.nodes_sorted() {
+            sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap();
+        }
+
+        // Check that nothing changed in the branch node
+        assert_eq!(
+            sparse.nodes.get(&Nibbles::default()),
+            Some(&SparseNode::new_branch(0b111.into()))
+        );
+
+        // Generate the nodes for the full trie with all three keys using the hash builder, and
+        // compare them to the sparse trie
+        let (_, proof_nodes) = hash_builder_root_with_proofs(
+            [(key1(), value()), (key2(), value()), (key3(), value())],
+            [key1(), key2(), key3()],
+        );
+
+        assert_eq_sparse_trie_proof_nodes(&sparse, proof_nodes);
+    }
+
+    /// We have three leaves: 0x0000, 0x0101, and 0x0102. Hash builder trie has all nodes, and we
+    /// have proofs for them.
+    ///
+    /// 1. Reveal the hash builder proof to leaf 0x0000 in the sparse trie.
+    /// 2. Remove leaf 0x0000 from the sparse trie (that will remove the branch node and create an
+    ///    extension node with the key 0x01).
+    /// 3. Reveal the hash builder proof to leaf 0x0101 in the sparse trie.
+    ///
+    /// The hash builder proof to the leaf 0x0101 had a branch node in the path, but we turned it
+    /// into an extension node, so the reveal should ignore this node.
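+    ///
+    /// Sketched with the same hypothetical `reveal`/`proof_for` shorthand:
+    ///
+    /// ```text
+    /// reveal(proof_for(0x0000));    // root is a branch with mask 0b11
+    /// sparse.remove_leaf(0x0000);   // root collapses into Extension { key: 0x01 }
+    /// reveal(proof_for(0x0101));    // stale branch in the proof is ignored
+    /// ```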
+    #[test]
+    fn sparse_trie_reveal_node_2() {
+        let key1 = || Nibbles::from_nibbles_unchecked([0x00, 0x00]);
+        let key2 = || Nibbles::from_nibbles_unchecked([0x01, 0x01]);
+        let key3 = || Nibbles::from_nibbles_unchecked([0x01, 0x02]);
+        let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1));
+
+        // Generate the proof for the root node and initialize the sparse trie with it
+        let (_, proof_nodes) = hash_builder_root_with_proofs(
+            [(key1(), value()), (key2(), value()), (key3(), value())],
+            [Nibbles::default()],
+        );
+        let mut sparse = RevealedSparseTrie::from_root(
+            TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(),
+        )
+        .unwrap();
+
+        // Generate the proof for the children of the root branch node and reveal it in the sparse
+        // trie
+        let (_, proof_nodes) = hash_builder_root_with_proofs(
+            [(key1(), value()), (key2(), value()), (key3(), value())],
+            [key1(), Nibbles::from_nibbles_unchecked([0x01])],
+        );
+        for (path, node) in proof_nodes.nodes_sorted() {
+            sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap();
+        }
+
+        // Check that the branch node exists
+        assert_eq!(
+            sparse.nodes.get(&Nibbles::default()),
+            Some(&SparseNode::new_branch(0b11.into()))
+        );
+
+        // Remove the leaf for the first key
+        sparse.remove_leaf(&key1()).unwrap();
+
+        // Check that the branch node was turned into an extension node
+        assert_eq!(
+            sparse.nodes.get(&Nibbles::default()),
+            Some(&SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x01])))
+        );
+
+        // Generate the proof for the second key and reveal it in the sparse trie
+        let (_, proof_nodes) = hash_builder_root_with_proofs(
+            [(key1(), value()), (key2(), value()), (key3(), value())],
+            [key2()],
+        );
+        for (path, node) in proof_nodes.nodes_sorted() {
+            sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap();
+        }
+
+        // Check that nothing changed in the extension node
+        assert_eq!(
+            sparse.nodes.get(&Nibbles::default()),
+            Some(&SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x01])))
+        );
+    }
+
+    /// We have two leaves that share the same prefix: 0x0001 and 0x0002, and a leaf with a
+    /// different prefix: 0x0100. Hash builder trie has only the first two leaves, and we have
+    /// proofs for them.
+    ///
+    /// 1. Insert the leaf 0x0100 into the sparse trie, and check that the root extension node was
+    ///    turned into a branch node.
+    /// 2. Reveal the leaf 0x0001 in the sparse trie, and check that the root branch node wasn't
+    ///    overwritten with the extension node from the proof.
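+    ///
+    /// Sketched with the same hypothetical `reveal`/`proof_for` shorthand:
+    ///
+    /// ```text
+    /// // root starts as Extension { key: 0x00 }, revealed from the initial proof
+    /// sparse.update_leaf(0x0100, value);   // root becomes a branch with mask 0b11
+    /// reveal(proof_for(0x0001));           // the proof's extension must not overwrite it
+    /// ```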
+ #[test] + fn sparse_trie_reveal_node_3() { + let key1 = || Nibbles::from_nibbles_unchecked([0x00, 0x01]); + let key2 = || Nibbles::from_nibbles_unchecked([0x00, 0x02]); + let key3 = || Nibbles::from_nibbles_unchecked([0x01, 0x00]); + let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key2(), value())], + [Nibbles::default()], + ); + let mut sparse = RevealedSparseTrie::from_root( + TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + ) + .unwrap(); + + // Check that the root extension node exists + assert_matches!( + sparse.nodes.get(&Nibbles::default()), + Some(SparseNode::Extension { key, hash: None }) if *key == Nibbles::from_nibbles([0x00]) + ); + + // Insert the leaf with a different prefix + sparse.update_leaf(key3(), value().to_vec()).unwrap(); + + // Check that the extension node was turned into a branch node + assert_matches!( + sparse.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) + ); + + // Generate the proof for the first key and reveal it in the sparse trie + let (_, proof_nodes) = + hash_builder_root_with_proofs([(key1(), value()), (key2(), value())], [key1()]); + for (path, node) in proof_nodes.nodes_sorted() { + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + } + + // Check that the branch node wasn't overwritten by the extension node in the proof + assert_matches!( + sparse.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) + ); + } + + #[test] + fn sparse_trie_get_changed_nodes_at_depth() { + let mut sparse = RevealedSparseTrie::default(); + + let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); + + // Extension (Key = 5) – Level 0 + // └── Branch (Mask = 1011) – Level 1 + // ├── 0 -> Extension (Key = 23) – Level 2 + // │ └── Branch (Mask = 0101) – Level 3 + // │ ├── 1 -> Leaf (Key = 1, Path = 50231) – Level 4 + // │ └── 3 -> Leaf (Key = 3, Path = 50233) – Level 4 + // ├── 2 -> Leaf (Key = 013, Path = 52013) – Level 2 + // └── 3 -> Branch (Mask = 0101) – Level 2 + // ├── 1 -> Leaf (Key = 3102, Path = 53102) – Level 3 + // └── 3 -> Branch (Mask = 1010) – Level 3 + // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 + // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); + + assert_eq!( + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 0), + vec![Nibbles::default()] + ); + assert_eq!( + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 1), + vec![Nibbles::from_nibbles_unchecked([0x5])] + ); + assert_eq!( + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 2), + vec![ + Nibbles::from_nibbles_unchecked([0x5, 0x0]), + Nibbles::from_nibbles_unchecked([0x5, 0x2]), + 
Nibbles::from_nibbles_unchecked([0x5, 0x3])
+            ]
+        );
+        assert_eq!(
+            sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 3),
+            vec![
+                Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3]),
+                Nibbles::from_nibbles_unchecked([0x5, 0x2]),
+                Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x1]),
+                Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3])
+            ]
+        );
+        assert_eq!(
+            sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 4),
+            vec![
+                Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x1]),
+                Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x3]),
+                Nibbles::from_nibbles_unchecked([0x5, 0x2]),
+                Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x1]),
+                Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3, 0x0]),
+                Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3, 0x2])
+            ]
+        );
+    }
+}
diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml
index d0f0fa092a..134a3055c2 100644
--- a/crates/trie/trie/Cargo.toml
+++ b/crates/trie/trie/Cargo.toml
@@ -24,13 +24,13 @@ revm.workspace = true
 # alloy
 alloy-rlp.workspace = true
 alloy-primitives.workspace = true
+alloy-consensus.workspace = true
 # tracing
 tracing.workspace = true
 # misc
 rayon.workspace = true
-derive_more.workspace = true
 auto_impl.workspace = true
 itertools.workspace = true
@@ -49,7 +49,6 @@ serde_with = { workspace = true, optional = true }
 [dev-dependencies]
 # reth
-reth-chainspec.workspace = true
 reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] }
 reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] }
@@ -59,20 +58,30 @@ triehash = "0.8"
 # misc
 proptest.workspace = true
 proptest-arbitrary-interop.workspace = true
-tokio = { workspace = true, default-features = false, features = [
-    "sync",
-    "rt",
-    "macros",
-] }
 serde_json.workspace = true
 criterion.workspace = true
 bincode.workspace = true
 [features]
 metrics = ["reth-metrics", "dep:metrics"]
-serde = ["dep:serde"]
-serde-bincode-compat = ["serde_with"]
-test-utils = ["triehash", "reth-trie-common/test-utils"]
+serde = [
+    "dep:serde",
+    "alloy-consensus/serde",
+    "alloy-primitives/serde",
+    "revm/serde"
+]
+serde-bincode-compat = [
+    "serde_with",
+    "reth-primitives/serde-bincode-compat",
+    "alloy-consensus/serde-bincode-compat"
+]
+test-utils = [
+    "triehash",
+    "reth-trie-common/test-utils",
+    "reth-primitives/test-utils",
+    "revm/test-utils",
+    "reth-stages-types/test-utils"
+]
 [[bench]]
 name = "prefix_set"
diff --git a/crates/trie/trie/src/hashed_cursor/noop.rs b/crates/trie/trie/src/hashed_cursor/noop.rs
index 4783d5afd9..a21e1026b3 100644
--- a/crates/trie/trie/src/hashed_cursor/noop.rs
+++ b/crates/trie/trie/src/hashed_cursor/noop.rs
@@ -32,11 +32,11 @@ pub struct NoopHashedAccountCursor;
 impl HashedCursor for NoopHashedAccountCursor {
     type Value = Account;
-    fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+    fn seek(&mut self, _key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
         Ok(None)
     }
-    fn seek(&mut self, _key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+    fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
         Ok(None)
     }
 }
@@ -49,11 +49,11 @@ pub struct NoopHashedStorageCursor;
 impl HashedCursor for NoopHashedStorageCursor {
     type Value = U256;
-    fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+    fn seek(&mut self, _key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
         Ok(None)
     }
-    fn seek(&mut self, _key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+    fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
         Ok(None)
     }
 }
diff --git a/crates/trie/trie/src/metrics.rs b/crates/trie/trie/src/metrics.rs
index 7582f37418..006dc7e365 100644
--- 
a/crates/trie/trie/src/metrics.rs +++ b/crates/trie/trie/src/metrics.rs @@ -1,5 +1,5 @@ use crate::stats::TrieStats; -use metrics::Histogram; +use metrics::{Counter, Histogram}; use reth_metrics::Metrics; /// Wrapper for state root metrics. @@ -63,3 +63,23 @@ impl TrieType { } } } + +/// Metrics for trie walker +#[derive(Clone, Metrics)] +#[metrics(scope = "trie.walker")] +pub struct WalkerMetrics { + /// The number of subnodes out of order due to wrong tree mask. + out_of_order_subnode: Counter, +} + +impl WalkerMetrics { + /// Create new metrics for the given trie type. + pub fn new(ty: TrieType) -> Self { + Self::new_with_labels(&[("type", ty.as_str())]) + } + + /// Increment `out_of_order_subnode`. + pub fn inc_out_of_order_subnode(&self, amount: u64) { + self.out_of_order_subnode.increment(amount); + } +} diff --git a/crates/trie/trie/src/prefix_set.rs b/crates/trie/trie/src/prefix_set.rs index af0fb173d9..d904ef38fd 100644 --- a/crates/trie/trie/src/prefix_set.rs +++ b/crates/trie/trie/src/prefix_set.rs @@ -82,7 +82,7 @@ pub struct TriePrefixSets { /// assert!(prefix_set.contains(&[0xa, 0xb])); /// assert!(prefix_set.contains(&[0xa, 0xb, 0xc])); /// ``` -#[derive(Clone, Default, Debug)] +#[derive(PartialEq, Eq, Clone, Default, Debug)] pub struct PrefixSetMut { /// Flag indicating that any entry should be considered changed. /// If set, the keys will be discarded. @@ -168,8 +168,7 @@ pub struct PrefixSet { } impl PrefixSet { - /// Returns `true` if any of the keys in the set has the given prefix or - /// if the given prefix is a prefix of any key in the set. + /// Returns `true` if any of the keys in the set has the given prefix #[inline] pub fn contains(&mut self, prefix: &[u8]) -> bool { if self.all { @@ -212,8 +211,8 @@ impl PrefixSet { } impl<'a> IntoIterator for &'a PrefixSet { - type IntoIter = std::slice::Iter<'a, reth_trie_common::Nibbles>; type Item = &'a reth_trie_common::Nibbles; + type IntoIter = std::slice::Iter<'a, reth_trie_common::Nibbles>; fn into_iter(self) -> Self::IntoIter { self.iter() } diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index d31d63fd9a..e99d686aca 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -100,7 +100,7 @@ where let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); // Create a hash builder to rebuild the root node since it is not available in the database. - let retainer = ProofRetainer::from_iter(targets.keys().map(Nibbles::unpack)); + let retainer = targets.keys().map(Nibbles::unpack).collect(); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); let mut storages = HashMap::default(); diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index 573f4a84a3..287f5d0fbb 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -368,6 +368,15 @@ impl HashedStorageSorted { #[cfg(test)] mod tests { + use alloy_primitives::Bytes; + use revm::{ + db::{ + states::{plain_account::PlainStorage, StorageSlot}, + PlainAccount, StorageWithOriginalValues, + }, + primitives::{AccountInfo, Bytecode}, + }; + use super::*; #[test] @@ -443,4 +452,143 @@ mod tests { ); assert_eq!(account_storage.map(|st| st.wiped), Some(true)); } + + #[test] + fn test_hashed_post_state_from_bundle_state() { + // Prepare a random Ethereum address as a key for the account. + let address = Address::random(); + + // Create a mock account info object. 
+ let account_info = AccountInfo { + balance: U256::from(123), + nonce: 42, + code_hash: B256::random(), + code: Some(Bytecode::LegacyRaw(Bytes::from(vec![1, 2]))), + }; + + let mut storage = StorageWithOriginalValues::default(); + storage.insert( + U256::from(1), + StorageSlot { present_value: U256::from(4), ..Default::default() }, + ); + + // Create a `BundleAccount` struct to represent the account and its storage. + let account = BundleAccount { + status: AccountStatus::Changed, + info: Some(account_info.clone()), + storage, + original_info: None, + }; + + // Create a vector of tuples representing the bundle state. + let state = vec![(&address, &account)]; + + // Convert the bundle state into a hashed post state. + let hashed_state = HashedPostState::from_bundle_state(state); + + // Validate the hashed post state. + assert_eq!(hashed_state.accounts.len(), 1); + assert_eq!(hashed_state.storages.len(), 1); + + // Validate the account info. + assert_eq!( + *hashed_state.accounts.get(&keccak256(address)).unwrap(), + Some(account_info.into()) + ); + } + + #[test] + fn test_hashed_post_state_from_cache_state() { + // Prepare a random Ethereum address. + let address = Address::random(); + + // Create mock account info. + let account_info = AccountInfo { + balance: U256::from(500), + nonce: 5, + code_hash: B256::random(), + code: None, + }; + + let mut storage = PlainStorage::default(); + storage.insert(U256::from(1), U256::from(35636)); + + // Create a `CacheAccount` with the mock account info. + let account = CacheAccount { + account: Some(PlainAccount { info: account_info.clone(), storage }), + status: AccountStatus::Changed, + }; + + // Create a vector of tuples representing the cache state. + let state = vec![(&address, &account)]; + + // Convert the cache state into a hashed post state. + let hashed_state = HashedPostState::from_cache_state(state); + + // Validate the hashed post state. + assert_eq!(hashed_state.accounts.len(), 1); + assert_eq!(hashed_state.storages.len(), 1); + + // Validate the account info. + assert_eq!( + *hashed_state.accounts.get(&keccak256(address)).unwrap(), + Some(account_info.into()) + ); + } + + #[test] + fn test_hashed_post_state_with_accounts() { + // Prepare random addresses and mock account info. + let address_1 = Address::random(); + let address_2 = Address::random(); + + let account_info_1 = AccountInfo { + balance: U256::from(1000), + nonce: 1, + code_hash: B256::random(), + code: None, + }; + + // Create hashed accounts with addresses. + let account_1 = (keccak256(address_1), Some(account_info_1.into())); + let account_2 = (keccak256(address_2), None); + + // Add accounts to the hashed post state. + let hashed_state = HashedPostState::default().with_accounts(vec![account_1, account_2]); + + // Validate the hashed post state. + assert_eq!(hashed_state.accounts.len(), 2); + assert!(hashed_state.accounts.contains_key(&keccak256(address_1))); + assert!(hashed_state.accounts.contains_key(&keccak256(address_2))); + } + + #[test] + fn test_hashed_post_state_with_storages() { + // Prepare random addresses and mock storage entries. + let address_1 = Address::random(); + let address_2 = Address::random(); + + let storage_1 = (keccak256(address_1), HashedStorage::new(false)); + let storage_2 = (keccak256(address_2), HashedStorage::new(true)); + + // Add storages to the hashed post state. + let hashed_state = HashedPostState::default().with_storages(vec![storage_1, storage_2]); + + // Validate the hashed post state. 
+        assert_eq!(hashed_state.storages.len(), 2);
+        assert!(hashed_state.storages.contains_key(&keccak256(address_1)));
+        assert!(hashed_state.storages.contains_key(&keccak256(address_2)));
+    }
+
+    #[test]
+    fn test_hashed_post_state_is_empty() {
+        // Create an empty hashed post state and validate it's empty.
+        let empty_state = HashedPostState::default();
+        assert!(empty_state.is_empty());
+
+        // Add an account and validate the state is no longer empty.
+        let non_empty_state = HashedPostState::default()
+            .with_accounts(vec![(keccak256(Address::random()), Some(Account::default()))]);
+        assert!(!non_empty_state.is_empty());
+    }
 }
diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs
index 914f6ce601..2452cc5e1c 100644
--- a/crates/trie/trie/src/trie.rs
+++ b/crates/trie/trie/src/trie.rs
@@ -9,10 +9,10 @@ use crate::{
     walker::TrieWalker,
     HashBuilder, Nibbles, TrieAccount,
 };
+use alloy_consensus::EMPTY_ROOT_HASH;
 use alloy_primitives::{keccak256, Address, B256};
 use alloy_rlp::{BufMut, Encodable};
 use reth_execution_errors::{StateRootError, StorageRootError};
-use reth_primitives::constants::EMPTY_ROOT_HASH;
 use tracing::trace;
 #[cfg(feature = "metrics")]
diff --git a/crates/trie/trie/src/trie_cursor/subnode.rs b/crates/trie/trie/src/trie_cursor/subnode.rs
index c2ba839ebf..9d5a2770b2 100644
--- a/crates/trie/trie/src/trie_cursor/subnode.rs
+++ b/crates/trie/trie/src/trie_cursor/subnode.rs
@@ -49,7 +49,7 @@ impl From<StoredSubNode> for CursorSubNode {
 impl From<CursorSubNode> for StoredSubNode {
     fn from(value: CursorSubNode) -> Self {
-        let nibble = if value.nibble >= 0 { Some(value.nibble as u8) } else { None };
+        let nibble = (value.nibble >= 0).then_some(value.nibble as u8);
         Self { key: value.key.to_vec(), nibble, node: value.node }
     }
 }
diff --git a/crates/trie/trie/src/updates.rs b/crates/trie/trie/src/updates.rs
index 03e80cf52e..6d1bcab63d 100644
--- a/crates/trie/trie/src/updates.rs
+++ b/crates/trie/trie/src/updates.rs
@@ -236,7 +236,7 @@ mod serde_nibbles_set {
         S: Serializer,
     {
         let mut storage_nodes =
-            Vec::from_iter(map.iter().map(|elem| alloy_primitives::hex::encode(elem.pack())));
+            map.iter().map(|elem| alloy_primitives::hex::encode(elem.pack())).collect::<Vec<_>>();
         storage_nodes.sort_unstable();
         storage_nodes.serialize(serializer)
     }
diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs
index e75a96d0f1..aaff293b37 100644
--- a/crates/trie/trie/src/walker.rs
+++ b/crates/trie/trie/src/walker.rs
@@ -7,6 +7,9 @@ use alloy_primitives::B256;
 use reth_storage_errors::db::DatabaseError;
 use std::collections::HashSet;
+#[cfg(feature = "metrics")]
+use crate::metrics::WalkerMetrics;
+
 /// `TrieWalker` is a structure that enables traversal of a Merkle trie.
 /// It allows moving through the trie in a depth-first manner, skipping certain branches
 /// if they have not changed.
@@ -24,13 +27,23 @@ pub struct TrieWalker<C> {
     pub changes: PrefixSet,
     /// The retained trie node keys that need to be removed.
     removed_keys: Option<HashSet<Nibbles>>,
+    #[cfg(feature = "metrics")]
+    /// Walker metrics.
+    metrics: WalkerMetrics,
 }
 impl<C> TrieWalker<C> {
     /// Constructs a new `TrieWalker` from existing stack and a cursor.
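+    ///
+    /// # Example (illustrative sketch)
+    ///
+    /// Starting from the default root subnode mirrors what `TrieWalker::new` sets up
+    /// internally; `cursor` and `changes` stand for an existing trie cursor and a frozen
+    /// prefix set and are assumptions of this sketch, not values defined here:
+    ///
+    /// ```ignore
+    /// let stack = vec![CursorSubNode::default()];
+    /// let walker = TrieWalker::from_stack(cursor, stack, changes);
+    /// ```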
     pub fn from_stack(cursor: C, stack: Vec<CursorSubNode>, changes: PrefixSet) -> Self {
-        let mut this =
-            Self { cursor, changes, stack, can_skip_current_node: false, removed_keys: None };
+        let mut this = Self {
+            cursor,
+            changes,
+            stack,
+            can_skip_current_node: false,
+            removed_keys: None,
+            #[cfg(feature = "metrics")]
+            metrics: WalkerMetrics::default(),
+        };
         this.update_skip_node();
         this
     }
@@ -113,6 +126,8 @@ impl<C: TrieCursor> TrieWalker<C> {
             stack: vec![CursorSubNode::default()],
             can_skip_current_node: false,
             removed_keys: None,
+            #[cfg(feature = "metrics")]
+            metrics: WalkerMetrics::default(),
         };
         // Set up the root node of the trie in the stack, if it exists.
@@ -179,6 +194,19 @@ impl<C: TrieCursor> TrieWalker<C> {
             self.stack[0].set_nibble(key[0] as i8);
         }
+        // The current tree mask might have been set incorrectly.
+        // Sanity check that the newly retrieved trie node key is the child of the last item
+        // on the stack. If not, advance to the next sibling instead of adding the node to the
+        // stack.
+        if let Some(subnode) = self.stack.last() {
+            if !key.starts_with(subnode.full_key()) {
+                #[cfg(feature = "metrics")]
+                self.metrics.inc_out_of_order_subnode(1);
+                self.move_to_next_sibling(false)?;
+                return Ok(())
+            }
+        }
+
         // Create a new CursorSubNode and push it to the stack.
         let subnode = CursorSubNode::new(key, Some(node));
         let nibble = subnode.nibble();
diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs
index 3238047c7b..39d82a7bda 100644
--- a/crates/trie/trie/src/witness.rs
+++ b/crates/trie/trie/src/witness.rs
@@ -1,5 +1,3 @@
-use std::collections::BTreeMap;
-
 use crate::{
     hashed_cursor::{HashedCursor, HashedCursorFactory},
     prefix_set::TriePrefixSetsMut,
@@ -7,6 +5,7 @@ use crate::{
     trie_cursor::TrieCursorFactory,
     HashedPostState,
 };
+use alloy_consensus::EMPTY_ROOT_HASH;
 use alloy_primitives::{
     keccak256,
     map::{HashMap, HashSet},
@@ -15,10 +14,10 @@ use alloy_primitives::{
 use alloy_rlp::{BufMut, Decodable, Encodable};
 use itertools::{Either, Itertools};
 use reth_execution_errors::{StateProofError, TrieWitnessError};
-use reth_primitives::constants::EMPTY_ROOT_HASH;
 use reth_trie_common::{
     BranchNode, HashBuilder, Nibbles, StorageMultiProof, TrieAccount, TrieNode, CHILD_INDEX_RANGE,
 };
+use std::collections::BTreeMap;
 /// State transition witness for the trie. 
#[derive(Debug)] @@ -111,14 +110,13 @@ where .accounts .get(&hashed_address) .ok_or(TrieWitnessError::MissingAccount(hashed_address))?; - let value = if account.is_some() || storage_multiproof.root != EMPTY_ROOT_HASH { - account_rlp.clear(); - TrieAccount::from((account.unwrap_or_default(), storage_multiproof.root)) - .encode(&mut account_rlp as &mut dyn BufMut); - Some(account_rlp.clone()) - } else { - None - }; + let value = + (account.is_some() || storage_multiproof.root != EMPTY_ROOT_HASH).then(|| { + account_rlp.clear(); + TrieAccount::from((account.unwrap_or_default(), storage_multiproof.root)) + .encode(&mut account_rlp as &mut dyn BufMut); + account_rlp.clone() + }); let key = Nibbles::unpack(hashed_address); account_trie_nodes.extend( self.target_nodes( @@ -235,7 +233,10 @@ where TrieNode::Leaf(leaf) => { next_path.extend_from_slice(&leaf.key); if next_path != key { - trie_nodes.insert(next_path.clone(), Either::Right(leaf.value.clone())); + trie_nodes.insert( + next_path.clone(), + Either::Right(leaf.value.as_slice().to_vec()), + ); } } TrieNode::EmptyRoot => { diff --git a/docs/crates/db.md b/docs/crates/db.md index 3ccfb72e34..79eeae5ee4 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -61,7 +61,6 @@ There are many tables within the node, all used to store different types of data - StageCheckpointProgresses - PruneCheckpoints - VersionHistory -- BlockRequests - ChainState
@@ -283,7 +282,6 @@ fn unwind(&mut self, provider: &DatabaseProviderRW, input: UnwindInput) {
     let mut body_cursor = tx.cursor_write::<tables::BlockBodyIndices>()?;
     let mut ommers_cursor = tx.cursor_write::<tables::BlockOmmers>()?;
     let mut withdrawals_cursor = tx.cursor_write::<tables::BlockWithdrawals>()?;
-    let mut requests_cursor = tx.cursor_write::<tables::BlockRequests>()?;
     // Cursors to unwind transitions
     let mut tx_block_cursor = tx.cursor_write::<tables::TransactionBlocks>()?;
@@ -322,7 +320,7 @@ fn unwind(&mut self, provider: &DatabaseProviderRW, input: UnwindInput) {
 }
 ```
-This function first grabs a mutable cursor for the `BlockBodyIndices`, `BlockOmmers`, `BlockWithdrawals`, `BlockRequests`, `TransactionBlocks` tables.
+This function first grabs mutable cursors for the `BlockBodyIndices`, `BlockOmmers`, `BlockWithdrawals`, and `TransactionBlocks` tables.
 Then it gets a walker of the block body cursor, and then walks backwards through the cursor to delete the block body entries from the last block number to the block number specified in the `UnwindInput` struct.
diff --git a/docs/repo/ci.md b/docs/repo/ci.md
index da84e001f3..863a18f9c3 100644
--- a/docs/repo/ci.md
+++ b/docs/repo/ci.md
@@ -7,8 +7,7 @@ The CI runs a couple of workflows:
 - **[unit]**: Runs unit tests (tests in `src/`) and doc tests
 - **[integration]**: Runs integration tests (tests in `tests/` and sync tests)
 - **[bench]**: Runs benchmarks
-- **[eth-sync]**: Runs Ethereum mainnet sync tests
-- **[op-sync]**: Runs base mainnet sync tests for Optimism
+- **[sync]**: Runs sync tests
 - **[stage]**: Runs all `stage run` commands
 ### Docs
@@ -26,7 +25,7 @@ The CI runs a couple of workflows:
 ### Integration Testing
-- **[assertoor]**: Runs Assertoor tests on Reth pairs.
+- **[kurtosis]**: Spins up a Kurtosis testnet and runs Assertoor tests on Reth pairs.
 - **[hive]**: Runs `ethereum/hive` tests.
 ### Linting and Checks
@@ -38,8 +37,7 @@ The CI runs a couple of workflows:
 [unit]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/unit.yml
 [integration]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/integration.yml
 [bench]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/bench.yml
-[eth-sync]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/eth-sync.yml
-[op-sync]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/op-sync.yml
+[sync]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/sync.yml
 [stage]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/stage.yml
 [book]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/book.yml
 [deny]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/deny.yml
@@ -48,7 +46,7 @@ The CI runs a couple of workflows:
 [dependencies]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/dependencies.yml
 [stale]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/stale.yml
 [docker]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/docker.yml
-[assertoor]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/assertoor.yml
+[kurtosis]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/kurtosis.yml
 [hive]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/hive.yml
 [lint]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/lint.yml
 [lint-actions]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/lint-actions.yml
diff --git a/etc/README.md b/etc/README.md
index f80b5b774b..4f4ce7f20e 100644
--- a/etc/README.md
+++ b/etc/README.md
@@ -2,7 +2,8 @@ This directory contains miscellaneous files, 
such as example Grafana dashboards and Prometheus configuration. -The files in this directory may undergo a lot of changes while reth is unstable, so do not expect them to necessarily be up to date. +The files in this directory may undergo a lot of changes while reth is unstable, so do not expect them to necessarily be +up to date. ### Overview @@ -11,8 +12,67 @@ The files in this directory may undergo a lot of changes while reth is unstable, ### Docker Compose -To run Reth, Grafana or Prometheus with Docker Compose, refer to the [docker docs](/book/installation/docker.md#using-docker-compose). +To run Reth, Grafana or Prometheus with Docker Compose, refer to +the [docker docs](/book/installation/docker.md#using-docker-compose). -### Import Grafana dashboards +### Grafana -Running Grafana in Docker makes it possible to import existing dashboards, refer to [docs on how to run only Grafana in Docker](/book/installation/docker.md#using-docker-compose#run-only-grafana-in-docker). \ No newline at end of file +#### Adding a new metric to Grafana + +To set up a new metric in Reth and its Grafana dashboard (this assumes running Reth and Grafana instances): + +1. Add the metric to the codebase following the [metrics section](../docs/design/metrics.md#creating-metrics) + documentation. + +1. Access Grafana: + + - Open `http://localhost:3000/` in a browser + - Log in with username and password `admin` + - Navigate to the `Dashboards` tab + +1. Create or modify a dashboard: + + - Select an existing dashboard or create a new one + - Click `Add` > `Visualization` to create a new panel + +1. Configure your metric panel: + + - Set a panel title and description + - Select metric(s) from the `Metrics browser` or use the `PromQL` terminal + - Document your metric(s) by setting units, legends, etc. + - When adding multiple metrics, use field overwrites if needed + +1. Save and arrange: + + - Click `Apply` to save the panel + - Drag the panel to desired position on the dashboard + +1. Export the dashboard: + + - Click `Share` > `Export` + - Toggle `Export for sharing externally` + - Click `Save to file` + +1. Update dashboard file: + - Replace the content of the corresponding file in the [dashboards folder](./grafana/dashboards) with the exported + JSON + +Your new metric is now integrated into the Reth Grafana dashboard. + +#### Import Grafana dashboards + +If you are running Reth and Grafana outside of docker, and wish to import new Grafana dashboards or update a dashboard: + +1. Go to `Home` > `Dashboards` + +1. Click `New` > `Import` + +1. Drag the JSON dashboard file to import it + +1. If updating an existing dashboard, you will need to change the name and UID of the imported dashboard in order to + avoid conflict + +1. Delete the old dashboard + +If you are running Reth and Grafana using docker, after having pulled the updated dashboards from `main`, restart the +Grafana service. This will update all dashboards. \ No newline at end of file diff --git a/etc/docker-compose.yml b/etc/docker-compose.yml index 618aa6f5ae..cd7dd6dd26 100644 --- a/etc/docker-compose.yml +++ b/etc/docker-compose.yml @@ -65,7 +65,7 @@ services: sh -c "cp -r /etc/grafana/provisioning_temp/dashboards/. 
/etc/grafana/provisioning/dashboards && find /etc/grafana/provisioning/dashboards/ -name '*.json' -exec sed -i 's/$${DS_PROMETHEUS}/Prometheus/g' {} \+ && /run.sh" - + volumes: mainnet_data: driver: local diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 15786764f4..25cc280fe0 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -1007,13 +1007,242 @@ "title": "Sync progress (stage progress as highest block number reached)", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the number of critical tasks currently ran by the executor.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "semi-dark-red", + "value": 0 + } + ] + }, + "unit": "tasks" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 20 + }, + "id": 248, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_executor_spawn_critical_tasks_total{instance=\"$instance\"}- reth_executor_spawn_finished_critical_tasks_total{instance=\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", + "range": true, + "refId": "C" + } + ], + "title": "Task Executor critical tasks", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the number of regular tasks currently ran by the executor.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "semi-dark-red", + "value": 80 + } + ] + }, + "unit": "tasks/s" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "unit", + "value": "tasks" + } + 
] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 20 + }, + "id": 247, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "rate(reth_executor_spawn_regular_tasks_total{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Tasks started", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_executor_spawn_regular_tasks_total{instance=\"$instance\"}- reth_executor_spawn_finished_regular_tasks_total{instance=\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", + "range": true, + "refId": "C" + } + ], + "title": "Task Executor regular tasks", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 20 + "y": 28 }, "id": 38, "panels": [], @@ -1085,7 +1314,7 @@ "h": 8, "w": 12, "x": 0, - "y": 21 + "y": 29 }, "id": 40, "options": { @@ -1145,7 +1374,7 @@ "h": 8, "w": 12, "x": 12, - "y": 21 + "y": 29 }, "id": 42, "maxDataPoints": 25, @@ -1273,7 +1502,7 @@ "h": 8, "w": 12, "x": 0, - "y": 29 + "y": 37 }, "id": 117, "options": { @@ -1370,7 +1599,7 @@ "h": 8, "w": 12, "x": 12, - "y": 29 + "y": 37 }, "id": 116, "options": { @@ -1471,7 +1700,7 @@ "h": 8, "w": 12, "x": 0, - "y": 37 + "y": 45 }, "id": 119, "options": { @@ -1572,7 +1801,7 @@ "h": 8, "w": 12, "x": 12, - "y": 37 + "y": 45 }, "id": 118, "options": { @@ -1634,7 +1863,7 @@ "h": 8, "w": 12, "x": 0, - "y": 45 + "y": 53 }, "id": 48, "options": { @@ -1724,6 +1953,7 @@ "mode": "off" } }, + "decimals": 4, "mappings": [], "thresholds": { "mode": "absolute", @@ -1746,7 +1976,7 @@ "h": 8, "w": 12, "x": 12, - "y": 45 + "y": 53 }, "id": 52, "options": { @@ -1804,7 +2034,7 @@ "h": 8, "w": 12, "x": 0, - "y": 53 + "y": 61 }, "id": 50, "options": { @@ -1972,7 +2202,7 @@ "h": 8, "w": 12, "x": 12, - "y": 53 + "y": 61 }, "id": 58, "options": { @@ -2073,7 +2303,7 @@ "h": 8, "w": 12, "x": 0, - "y": 61 + "y": 69 }, "id": 113, "options": { @@ -2110,7 +2340,7 @@ "h": 1, "w": 24, "x": 0, - "y": 69 + "y": 77 }, "id": 203, "panels": [], @@ -2144,7 +2374,7 @@ "h": 8, "w": 8, "x": 0, - "y": 70 + "y": 78 }, "id": 202, "options": { @@ -2305,7 +2535,7 @@ "h": 8, "w": 8, "x": 8, - "y": 70 + "y": 78 }, "id": 204, "options": { @@ -2455,7 +2685,7 @@ "h": 8, "w": 8, "x": 16, - "y": 70 + "y": 78 }, "id": 205, "options": { @@ -2556,7 +2786,7 @@ "h": 8, "w": 12, "x": 0, - "y": 78 + "y": 86 }, "id": 206, "options": { @@ -2653,7 +2883,7 @@ "h": 8, "w": 12, "x": 12, - "y": 78 + "y": 86 }, "id": 207, "options": { @@ -2690,7 +2920,7 @@ "h": 1, "w": 24, "x": 0, - "y": 86 + "y": 94 }, "id": 46, "panels": [], @@ -2761,7 +2991,7 @@ "h": 8, "w": 24, "x": 0, - "y": 87 + "y": 95 }, "id": 56, "options": { @@ -2857,7 +3087,7 @@ "h": 11, "w": 24, "x": 0, - "y": 95 + "y": 103 }, "id": 240, "options": { @@ -2916,7 +3146,7 @@ "h": 1, "w": 24, "x": 0, - "y": 106 + "y": 114 }, "id": 24, "panels": [], @@ -3014,7 +3244,7 @@ "h": 8, "w": 12, "x": 0, - "y": 107 + "y": 115 }, "id": 26, "options": { @@ -3148,7 +3378,7 @@ "h": 8, "w": 12, "x": 12, - "y": 107 + "y": 115 }, "id": 33, 
"options": { @@ -3268,7 +3498,7 @@ "h": 8, "w": 12, "x": 0, - "y": 115 + "y": 123 }, "id": 36, "options": { @@ -3317,7 +3547,7 @@ "h": 1, "w": 24, "x": 0, - "y": 123 + "y": 131 }, "id": 32, "panels": [], @@ -3425,7 +3655,7 @@ "h": 8, "w": 12, "x": 0, - "y": 124 + "y": 132 }, "id": 30, "options": { @@ -3591,7 +3821,7 @@ "h": 8, "w": 12, "x": 12, - "y": 124 + "y": 132 }, "id": 28, "options": { @@ -3711,7 +3941,7 @@ "h": 8, "w": 12, "x": 0, - "y": 132 + "y": 140 }, "id": 35, "options": { @@ -3837,7 +4067,7 @@ "h": 8, "w": 12, "x": 12, - "y": 132 + "y": 140 }, "id": 73, "options": { @@ -3964,7 +4194,7 @@ "h": 8, "w": 12, "x": 0, - "y": 140 + "y": 148 }, "id": 102, "options": { @@ -4027,7 +4257,7 @@ "h": 1, "w": 24, "x": 0, - "y": 148 + "y": 156 }, "id": 79, "panels": [], @@ -4101,7 +4331,7 @@ "h": 8, "w": 12, "x": 0, - "y": 149 + "y": 157 }, "id": 74, "options": { @@ -4198,7 +4428,7 @@ "h": 8, "w": 12, "x": 12, - "y": 149 + "y": 157 }, "id": 80, "options": { @@ -4295,7 +4525,7 @@ "h": 8, "w": 12, "x": 0, - "y": 157 + "y": 165 }, "id": 81, "options": { @@ -4392,7 +4622,7 @@ "h": 8, "w": 12, "x": 12, - "y": 157 + "y": 165 }, "id": 114, "options": { @@ -4489,7 +4719,7 @@ "h": 8, "w": 12, "x": 12, - "y": 165 + "y": 173 }, "id": 190, "options": { @@ -4527,7 +4757,7 @@ "h": 1, "w": 24, "x": 0, - "y": 173 + "y": 181 }, "id": 87, "panels": [], @@ -4601,7 +4831,7 @@ "h": 8, "w": 12, "x": 0, - "y": 174 + "y": 182 }, "id": 83, "options": { @@ -4697,7 +4927,7 @@ "h": 8, "w": 12, "x": 12, - "y": 174 + "y": 182 }, "id": 84, "options": { @@ -4805,7 +5035,7 @@ "h": 8, "w": 12, "x": 0, - "y": 182 + "y": 190 }, "id": 85, "options": { @@ -4902,7 +5132,7 @@ "h": 8, "w": 12, "x": 12, - "y": 182 + "y": 190 }, "id": 210, "options": { @@ -5227,7 +5457,7 @@ "h": 8, "w": 12, "x": 0, - "y": 190 + "y": 198 }, "id": 211, "options": { @@ -5552,7 +5782,7 @@ "h": 8, "w": 12, "x": 12, - "y": 190 + "y": 198 }, "id": 212, "options": { @@ -5775,9 +6005,9 @@ "h": 8, "w": 24, "x": 0, - "y": 198 + "y": 206 }, - "id": 213, + "id": 213, "options": { "legend": { "calcs": [], @@ -5811,7 +6041,7 @@ "h": 1, "w": 24, "x": 0, - "y": 198 + "y": 214 }, "id": 214, "panels": [], @@ -5883,7 +6113,7 @@ "h": 8, "w": 12, "x": 0, - "y": 199 + "y": 215 }, "id": 215, "options": { @@ -5979,7 +6209,7 @@ "h": 8, "w": 12, "x": 12, - "y": 199 + "y": 215 }, "id": 216, "options": { @@ -6030,7 +6260,7 @@ "h": 1, "w": 24, "x": 0, - "y": 207 + "y": 223 }, "id": 68, "panels": [], @@ -6104,7 +6334,7 @@ "h": 8, "w": 12, "x": 0, - "y": 208 + "y": 224 }, "id": 60, "options": { @@ -6200,7 +6430,7 @@ "h": 8, "w": 12, "x": 12, - "y": 208 + "y": 224 }, "id": 62, "options": { @@ -6296,7 +6526,7 @@ "h": 8, "w": 12, "x": 0, - "y": 216 + "y": 232 }, "id": 64, "options": { @@ -6333,7 +6563,7 @@ "h": 1, "w": 24, "x": 0, - "y": 224 + "y": 240 }, "id": 97, "panels": [], @@ -6418,7 +6648,7 @@ "h": 8, "w": 12, "x": 0, - "y": 225 + "y": 241 }, "id": 98, "options": { @@ -6581,7 +6811,7 @@ "h": 8, "w": 12, "x": 12, - "y": 225 + "y": 241 }, "id": 101, "options": { @@ -6679,7 +6909,7 @@ "h": 8, "w": 12, "x": 0, - "y": 233 + "y": 249 }, "id": 99, "options": { @@ -6777,7 +7007,7 @@ "h": 8, "w": 12, "x": 12, - "y": 233 + "y": 249 }, "id": 100, "options": { @@ -6815,7 +7045,7 @@ "h": 1, "w": 24, "x": 0, - "y": 241 + "y": 257 }, "id": 105, "panels": [], @@ -6888,7 +7118,7 @@ "h": 8, "w": 12, "x": 0, - "y": 242 + "y": 258 }, "id": 106, "options": { @@ -6986,7 +7216,7 @@ "h": 8, "w": 12, "x": 12, - "y": 242 + "y": 258 }, "id": 107, "options": { @@ -7083,7 +7313,7 @@ "h": 8, "w": 12, "x": 0, 
- "y": 250 + "y": 266 }, "id": 217, "options": { @@ -7121,7 +7351,7 @@ "h": 1, "w": 24, "x": 0, - "y": 258 + "y": 274 }, "id": 108, "panels": [], @@ -7219,7 +7449,7 @@ "h": 8, "w": 12, "x": 0, - "y": 259 + "y": 275 }, "id": 109, "options": { @@ -7281,7 +7511,7 @@ "h": 8, "w": 12, "x": 12, - "y": 259 + "y": 275 }, "id": 111, "maxDataPoints": 25, @@ -7411,7 +7641,7 @@ "h": 8, "w": 12, "x": 0, - "y": 267 + "y": 283 }, "id": 120, "options": { @@ -7469,7 +7699,7 @@ "h": 8, "w": 12, "x": 12, - "y": 267 + "y": 283 }, "id": 112, "maxDataPoints": 25, @@ -7623,7 +7853,7 @@ "h": 8, "w": 12, "x": 0, - "y": 275 + "y": 291 }, "id": 198, "options": { @@ -7809,9 +8039,9 @@ "h": 8, "w": 12, "x": 12, - "y": 275 + "y": 291 }, - "id": 213, + "id": 246, "options": { "legend": { "calcs": [], @@ -7848,7 +8078,7 @@ "h": 1, "w": 24, "x": 0, - "y": 283 + "y": 299 }, "id": 236, "panels": [], @@ -7920,7 +8150,7 @@ "h": 8, "w": 12, "x": 0, - "y": 284 + "y": 300 }, "id": 237, "options": { @@ -8017,7 +8247,7 @@ "h": 8, "w": 12, "x": 12, - "y": 284 + "y": 300 }, "id": 238, "options": { @@ -8114,7 +8344,7 @@ "h": 8, "w": 12, "x": 0, - "y": 292 + "y": 308 }, "id": 239, "options": { @@ -8223,7 +8453,7 @@ "h": 8, "w": 12, "x": 12, - "y": 292 + "y": 308 }, "id": 219, "options": { @@ -8288,7 +8518,7 @@ "h": 8, "w": 12, "x": 0, - "y": 300 + "y": 316 }, "id": 220, "options": { @@ -8332,7 +8562,7 @@ "h": 1, "w": 24, "x": 0, - "y": 308 + "y": 324 }, "id": 241, "panels": [], @@ -8405,7 +8635,7 @@ "h": 8, "w": 12, "x": 0, - "y": 309 + "y": 325 }, "id": 243, "options": { @@ -8517,7 +8747,7 @@ "h": 8, "w": 12, "x": 12, - "y": 309 + "y": 325 }, "id": 244, "options": { @@ -8630,7 +8860,7 @@ "h": 8, "w": 12, "x": 0, - "y": 317 + "y": 333 }, "id": 245, "options": { @@ -8669,7 +8899,7 @@ "h": 1, "w": 24, "x": 0, - "y": 325 + "y": 341 }, "id": 226, "panels": [], @@ -8767,7 +8997,7 @@ "h": 8, "w": 12, "x": 0, - "y": 326 + "y": 342 }, "id": 225, "options": { @@ -8896,7 +9126,7 @@ "h": 8, "w": 12, "x": 12, - "y": 326 + "y": 342 }, "id": 227, "options": { @@ -9025,7 +9255,7 @@ "h": 8, "w": 12, "x": 0, - "y": 334 + "y": 350 }, "id": 235, "options": { @@ -9154,7 +9384,7 @@ "h": 8, "w": 12, "x": 12, - "y": 334 + "y": 350 }, "id": 234, "options": { diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index cc761aa98a..2436ee0210 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -13,6 +13,7 @@ use serde::{Deserialize, Serialize}; use std::{ collections::VecDeque, pin::Pin, + sync::Arc, task::{Context, Poll}, }; use thiserror::Error; @@ -109,9 +110,11 @@ where match self.pool.get_all_blobs_exact(txs.iter().map(|(tx, _)| tx.hash()).collect()) { Ok(blobs) => { - for ((tx, _), sidecar) in txs.iter().zip(blobs.iter()) { - let transaction = BlobTransaction::try_from_signed(tx.clone(), sidecar.clone()) - .expect("should not fail to convert blob tx if it is already eip4844"); + actions_to_queue.reserve_exact(txs.len()); + for ((tx, _), sidecar) in txs.iter().zip(blobs.into_iter()) { + let transaction = + BlobTransaction::try_from_signed(tx.clone(), Arc::unwrap_or_clone(sidecar)) + .expect("should not fail to convert blob tx if it is already eip4844"); let block_metadata = BlockMetadata { block_hash: block.hash(), diff --git a/examples/custom-beacon-withdrawals/Cargo.toml b/examples/custom-beacon-withdrawals/Cargo.toml new file mode 100644 index 0000000000..9c3341e6bb --- /dev/null +++ 
b/examples/custom-beacon-withdrawals/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "example-custom-beacon-withdrawals" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth.workspace = true +reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true +reth-chainspec.workspace = true +reth-evm.workspace = true +reth-primitives.workspace = true + +alloy-sol-macro = "0.8.9" +alloy-sol-types.workspace = true +alloy-eips.workspace = true +alloy-consensus.workspace = true + +eyre.workspace = true + +tokio = { workspace = true, features = ["sync", "time"] } + +[features] +optimism = [ + "reth-primitives/optimism", + "reth-chainspec/optimism" +] +bsc = [ + "reth-primitives/bsc" +] diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs new file mode 100644 index 0000000000..1d2b3dec0c --- /dev/null +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -0,0 +1,290 @@ +//! Example of how to modify a block post-execution step. It credits beacon withdrawals with a +//! custom mechanism instead of minting native tokens. + +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; +use alloy_sol_macro::sol; +use alloy_sol_types::SolCall; +#[cfg(feature = "optimism")] +use reth::revm::primitives::OptimismFields; +use reth::{ + api::{ConfigureEvm, ConfigureEvmEnv, NodeTypesWithEngine}, + builder::{components::ExecutorBuilder, BuilderContext, FullNodeTypes}, + cli::Cli, + providers::ProviderError, + revm::{ + interpreter::Host, + primitives::{Env, EvmState, TransactTo, TxEnv}, + Database, DatabaseCommit, Evm, State, + }, +}; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_evm::execute::{ + BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, ExecuteOutput, + InternalBlockExecutionError, +}; +use reth_evm_ethereum::EthEvmConfig; +use reth_node_ethereum::{node::EthereumAddOns, BasicBlockExecutorProvider, EthereumNode}; +use reth_primitives::{ + revm_primitives::{ + address, Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, U256, + }, + BlockWithSenders, Receipt, +}; +use std::{fmt::Display, sync::Arc}; +use tokio::sync::mpsc::UnboundedSender; + +pub const SYSTEM_ADDRESS: Address = address!("fffffffffffffffffffffffffffffffffffffffe"); +pub const WITHDRAWALS_ADDRESS: Address = address!("4200000000000000000000000000000000000000"); + +fn main() { + Cli::parse_args() + .run(|builder, _| async move { + let handle = builder + // use the default ethereum node types + .with_types::<EthereumNode>() + // Configure the components of the node + // use default ethereum components but use our custom executor + .with_components( + EthereumNode::components().executor(CustomExecutorBuilder::default()), + ) + .with_add_ons(EthereumAddOns::default()) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) + .unwrap(); +} + +/// A custom executor builder +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct CustomExecutorBuilder; + +impl<Types, Node> ExecutorBuilder<Node> for CustomExecutorBuilder +where + Types: NodeTypesWithEngine<ChainSpec = ChainSpec>, + Node: FullNodeTypes<Types = Types>, +{ + type EVM = EthEvmConfig; + type Executor = BasicBlockExecutorProvider<CustomExecutorStrategyFactory>; + + async fn build_evm( + self, + ctx: &BuilderContext<Node>, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + let chain_spec = ctx.chain_spec(); + let evm_config = EthEvmConfig::new(ctx.chain_spec()); + let strategy_factory = + CustomExecutorStrategyFactory { chain_spec,
evm_config: evm_config.clone() }; + let executor = BasicBlockExecutorProvider::new(strategy_factory); + + Ok((evm_config, executor)) + } +} + +#[derive(Clone)] +pub struct CustomExecutorStrategyFactory { + /// The chainspec + chain_spec: Arc<ChainSpec>, + /// How to create an EVM. + evm_config: EthEvmConfig, +} + +impl BlockExecutionStrategyFactory for CustomExecutorStrategyFactory { + type Strategy<DB: Database<Error: Into<ProviderError> + Display>> = CustomExecutorStrategy<DB>; + + fn create_strategy<DB>(&self, db: DB) -> Self::Strategy<DB> + where + DB: Database<Error: Into<ProviderError> + Display>, + { + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + CustomExecutorStrategy { + state, + chain_spec: self.chain_spec.clone(), + evm_config: self.evm_config.clone(), + } + } +} + +pub struct CustomExecutorStrategy<DB> +where + DB: Database<Error: Into<ProviderError> + Display>, +{ + /// The chainspec + chain_spec: Arc<ChainSpec>, + /// How to create an EVM. + evm_config: EthEvmConfig, + /// Current state for block execution. + state: State<DB>, +} + +impl<DB> CustomExecutorStrategy<DB> +where + DB: Database<Error: Into<ProviderError> + Display>, +{ + /// Configures a new evm configuration and block environment for the given block. + /// + /// # Caution + /// + /// This does not initialize the tx environment. + fn evm_env_for_block( + &self, + header: &alloy_consensus::Header, + total_difficulty: U256, + ) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl<DB> BlockExecutionStrategy<DB> for CustomExecutorStrategy<DB> +where + DB: Database<Error: Into<ProviderError> + Display>, +{ + type Error = BlockExecutionError; + + fn apply_pre_execution_changes( + &mut self, + block: &BlockWithSenders, + _total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); + + Ok(()) + } + + fn execute_transactions( + &mut self, + _block: &BlockWithSenders, + _total_difficulty: U256, + _tx: Option<&UnboundedSender<EvmState>>, + ) -> Result<ExecuteOutput, Self::Error> { + Ok(ExecuteOutput { receipts: vec![], gas_used: 0 }) + } + + fn apply_post_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + _receipts: &[Receipt], + ) -> Result<Requests, Self::Error> { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + if let Some(withdrawals) = block.body.withdrawals.as_ref() { + apply_withdrawals_contract_call(withdrawals, &mut evm)?; + } + + Ok(Requests::default()) + } + + fn state_ref(&self) -> &State<DB> { + &self.state + } + + fn state_mut(&mut self) -> &mut State<DB> { + &mut self.state + } +} + +sol!( + function withdrawals( + uint64[] calldata amounts, + address[] calldata addresses + ); +); + +/// Applies the post-block call to the withdrawal / deposit contract, using the given block, +/// [`ChainSpec`], EVM.
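+///
+/// Note that the credit happens via an EVM system call rather than a real transaction: the
+/// current tx environment is saved, replaced by one built with
+/// `fill_tx_env_with_system_contract_call` (defined at the bottom of this file), the call is
+/// transacted, the touched `SYSTEM_ADDRESS` and coinbase entries are stripped from the resulting
+/// state before it is committed, and the previous environment is restored.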
+pub fn apply_withdrawals_contract_call<EXT, DB: Database + DatabaseCommit>( + withdrawals: &[Withdrawal], + evm: &mut Evm<'_, EXT, DB>, +) -> Result<(), BlockExecutionError> +where + DB::Error: std::fmt::Display, +{ + // get previous env + let previous_env = Box::new(evm.context.env().clone()); + + // modify env for pre block call + fill_tx_env_with_system_contract_call( + &mut evm.context.evm.env, + SYSTEM_ADDRESS, + WITHDRAWALS_ADDRESS, + withdrawalsCall { + amounts: withdrawals.iter().map(|w| w.amount).collect::<Vec<_>>(), + addresses: withdrawals.iter().map(|w| w.address).collect::<Vec<_>>(), + } + .abi_encode() + .into(), + ); + + let mut state = match evm.transact() { + Ok(res) => res.state, + Err(e) => { + evm.context.evm.env = previous_env; + return Err(BlockExecutionError::Internal(InternalBlockExecutionError::Other( + format!("withdrawal contract system call revert: {}", e).into(), + ))) + } + }; + + // Clean-up post system tx context + state.remove(&SYSTEM_ADDRESS); + state.remove(&evm.block().coinbase); + evm.context.evm.db.commit(state); + // re-set the previous env + evm.context.evm.env = previous_env; + + Ok(()) +} + +fn fill_tx_env_with_system_contract_call( + env: &mut Env, + caller: Address, + contract: Address, + data: Bytes, +) { + env.tx = TxEnv { + caller, + transact_to: TransactTo::Call(contract), + // Explicitly set nonce to None so revm does not do any nonce checks + nonce: None, + gas_limit: 30_000_000, + value: U256::ZERO, + data, + // Setting the gas price to zero enforces that no value is transferred as part of the call, + // and that the call will not count against the block's gas limit + gas_price: U256::ZERO, + // The chain ID check is not relevant here and is disabled if set to None + chain_id: None, + // Setting the gas priority fee to None ensures the effective gas price is derived from the + // `gas_price` field, which we need to be zero + gas_priority_fee: None, + access_list: Vec::new(), + // blob fields can be None for this tx + blob_hashes: Vec::new(), + max_fee_per_blob_gas: None, + authorization_list: None, + #[cfg(feature = "optimism")] + optimism: OptimismFields::default(), + #[cfg(feature = "bsc")] + bsc: Default::default(), + }; + + // ensure the block gas limit is >= the tx + env.block.gas_limit = U256::from(env.tx.gas_limit); + + // disable the base fee check for this call by setting the base fee to zero + env.block.basefee = U256::ZERO; +} diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index f826451d20..1fbb3c4947 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -16,6 +16,7 @@ reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true +reth-trie-db.workspace = true alloy-genesis.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } alloy-primitives.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 697b5e3f95..0498802750 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -34,11 +34,15 @@ use alloy_rpc_types::{ use reth::{ api::PayloadTypes, builder::{ - components::{ComponentsBuilder, EngineValidatorBuilder, PayloadServiceBuilder}, + components::{ComponentsBuilder, PayloadServiceBuilder}, node::{NodeTypes, NodeTypesWithEngine}, - BuilderContext, FullNodeTypes, Node, NodeBuilder, PayloadBuilderConfig, +
rpc::{EngineValidatorBuilder, RpcAddOns}, + BuilderContext, FullNodeTypes, Node, NodeAdapter, NodeBuilder, NodeComponentsBuilder, + PayloadBuilderConfig, }, + network::NetworkHandle, providers::{CanonStateSubscriptions, StateProviderFactory}, + rpc::eth::EthApi, tasks::TaskManager, transaction_pool::TransactionPool, }; @@ -49,13 +53,13 @@ use reth_basic_payload_builder::{ use reth_chainspec::{Chain, ChainSpec, ChainSpecProvider}; use reth_node_api::{ payload::{EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes}, - validate_version_specific_fields, EngineTypes, EngineValidator, PayloadAttributes, - PayloadBuilderAttributes, + validate_version_specific_fields, AddOnsContext, EngineTypes, EngineValidator, + FullNodeComponents, PayloadAttributes, PayloadBuilderAttributes, }; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::{ node::{ - EthereumAddOns, EthereumConsensusBuilder, EthereumExecutorBuilder, EthereumNetworkBuilder, + EthereumConsensusBuilder, EthereumExecutorBuilder, EthereumNetworkBuilder, EthereumParliaBuilder, EthereumPoolBuilder, }, EthEvmConfig, @@ -66,6 +70,7 @@ use reth_payload_builder::{ }; use reth_primitives::Withdrawals; use reth_tracing::{RethTracer, Tracer}; +use reth_trie_db::MerklePatriciaTrie; /// A custom payload attributes type. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -106,7 +111,7 @@ impl PayloadBuilderAttributes for CustomPayloadBuilderAttributes { type RpcPayloadAttributes = CustomPayloadAttributes; type Error = Infallible; - fn try_new(parent: B256, attributes: CustomPayloadAttributes) -> Result<Self, Self::Error> { + fn try_new( + parent: B256, + attributes: CustomPayloadAttributes, + _version: u8, + ) -> Result<Self, Self::Error> { Ok(Self(EthPayloadBuilderAttributes::new(parent, attributes.inner))) } @@ -152,10 +161,10 @@ impl PayloadTypes for CustomEngineTypes { } impl EngineTypes for CustomEngineTypes { - type ExecutionPayloadV1 = ExecutionPayloadV1; - type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; - type ExecutionPayloadV3 = ExecutionPayloadEnvelopeV3; - type ExecutionPayloadV4 = ExecutionPayloadEnvelopeV4; + type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; + type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; + type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; + type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; } /// Custom engine validator @@ -201,12 +210,14 @@ pub struct CustomEngineValidatorBuilder; impl<N> EngineValidatorBuilder<N> for CustomEngineValidatorBuilder where - N: FullNodeTypes<Types: NodeTypesWithEngine<Engine = CustomEngineTypes, ChainSpec = ChainSpec>>, + N: FullNodeComponents< + Types: NodeTypesWithEngine<Engine = CustomEngineTypes, ChainSpec = ChainSpec>, + >, { type Validator = CustomEngineValidator; - async fn build_validator(self, ctx: &BuilderContext<N>) -> eyre::Result<Self::Validator> { - Ok(CustomEngineValidator { chain_spec: ctx.chain_spec() }) + async fn build(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result<Self::Validator> { + Ok(CustomEngineValidator { chain_spec: ctx.config.chain.clone() }) } } @@ -218,6 +229,7 @@ struct MyCustomNode; impl NodeTypes for MyCustomNode { type Primitives = (); type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; } /// Configure the node types with the custom engine types @@ -225,6 +237,18 @@ impl NodeTypesWithEngine for MyCustomNode { type Engine = CustomEngineTypes; } +/// Custom addons configuring RPC types +pub type MyNodeAddOns<N> = RpcAddOns< + N, + EthApi< + <N as FullNodeTypes>::Provider, + <N as FullNodeComponents>::Pool, + NetworkHandle, + <N as FullNodeComponents>::Evm, + >, + CustomEngineValidatorBuilder, +>; + /// Implement the Node trait for the custom node /// /// This provides a preset configuration
for the node @@ -239,10 +263,11 @@ where EthereumNetworkBuilder, EthereumExecutorBuilder, EthereumConsensusBuilder, - CustomEngineValidatorBuilder, EthereumParliaBuilder, >; - type AddOns = EthereumAddOns; + type AddOns = MyNodeAddOns< + NodeAdapter<N, <Self::ComponentsBuilder as NodeComponentsBuilder<N>>::Components>, + >; fn components_builder(&self) -> Self::ComponentsBuilder { ComponentsBuilder::default() @@ -252,12 +277,11 @@ where .network(EthereumNetworkBuilder::default()) .executor(EthereumExecutorBuilder::default()) .consensus(EthereumConsensusBuilder::default()) - .engine_validator(CustomEngineValidatorBuilder::default()) .parlia(EthereumParliaBuilder::default()) } fn add_ons(&self) -> Self::AddOns { - EthereumAddOns::default() + MyNodeAddOns::default() } } @@ -321,7 +345,7 @@ where args: BuildArguments<Pool, Client, Self::Attributes, Self::BuiltPayload>, ) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> { let BuildArguments { client, pool, cached_reads, config, cancel, best_payload } = args; - let PayloadConfig { parent_block, extra_data, attributes } = config; + let PayloadConfig { parent_header, extra_data, attributes } = config; let chain_spec = client.chain_spec(); @@ -334,7 +358,7 @@ where client, pool, cached_reads, - config: PayloadConfig { parent_block, extra_data, attributes: attributes.0 }, + config: PayloadConfig { parent_header, extra_data, attributes: attributes.0 }, cancel, best_payload, }) @@ -345,10 +369,10 @@ where client: &Client, config: PayloadConfig<Self::Attributes>, ) -> Result<Self::BuiltPayload, PayloadBuilderError> { - let PayloadConfig { parent_block, extra_data, attributes } = config; + let PayloadConfig { parent_header, extra_data, attributes } = config; let chain_spec = client.chain_spec(); <reth_ethereum_payload_builder::EthereumPayloadBuilder as PayloadBuilder<Pool, Client>>::build_empty_payload(&reth_ethereum_payload_builder::EthereumPayloadBuilder::new(EthEvmConfig::new(chain_spec.clone())),client, - PayloadConfig { parent_block, extra_data, attributes: attributes.0}) + PayloadConfig { parent_header, extra_data, attributes: attributes.0}) } } diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 9c421f9c6a..16aad63c09 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -31,14 +31,14 @@ use reth_node_api::{ use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::{ node::{EthereumAddOns, EthereumPayloadBuilder}, - EthExecutorProvider, EthereumNode, + BasicBlockExecutorProvider, EthExecutionStrategyFactory, EthereumNode, }; use reth_primitives::{ revm_primitives::{CfgEnvWithHandlerCfg, TxEnv}, Header, TransactionSigned, }; use reth_tracing::{RethTracer, Tracer}; -use std::sync::Arc; +use std::{convert::Infallible, sync::Arc}; /// Custom EVM configuration #[derive(Debug, Clone)] @@ -87,6 +87,7 @@ impl MyEvmConfig { impl ConfigureEvmEnv for MyEvmConfig { type Header = Header; + type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { self.inner.fill_tx_env(tx_env, transaction, sender); @@ -115,7 +116,7 @@ impl ConfigureEvmEnv for MyEvmConfig { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { self.inner.next_cfg_and_block_env(parent, attributes) } } @@ -158,7 +159,7 @@ where Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec>>, { type EVM = MyEvmConfig; - type Executor = EthExecutorProvider<Self::EVM>; + type Executor = BasicBlockExecutorProvider<EthExecutionStrategyFactory<Self::EVM>>; async fn build_evm( self, @@ -166,7 +167,10 @@ where ) -> eyre::Result<(Self::EVM, Self::Executor)> { Ok(( MyEvmConfig::new(ctx.chain_spec()), - EthExecutorProvider::new(ctx.chain_spec(), MyEvmConfig::new(ctx.chain_spec())), +
BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::new( + ctx.chain_spec(), + MyEvmConfig::new(ctx.chain_spec()), + )), )) } } diff --git a/examples/custom-inspector/Cargo.toml b/examples/custom-inspector/Cargo.toml index 18629556c4..e92e90fb9d 100644 --- a/examples/custom-inspector/Cargo.toml +++ b/examples/custom-inspector/Cargo.toml @@ -12,3 +12,4 @@ alloy-rpc-types.workspace = true clap = { workspace = true, features = ["derive"] } futures-util.workspace = true alloy-primitives.workspace = true +alloy-eips.workspace = true diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs index 12b7620f4a..272da63a9b 100644 --- a/examples/custom-inspector/src/main.rs +++ b/examples/custom-inspector/src/main.rs @@ -10,6 +10,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_eips::BlockNumberOrTag; use alloy_primitives::Address; use alloy_rpc_types::state::EvmOverrides; use clap::Parser; @@ -18,7 +19,6 @@ use reth::{ builder::NodeHandle, chainspec::EthereumChainSpecParser, cli::Cli, - primitives::BlockNumberOrTag, revm::{ inspector_handle_register, interpreter::{Interpreter, OpCode}, diff --git a/examples/custom-payload-builder/Cargo.toml b/examples/custom-payload-builder/Cargo.toml index 1c160fe5ec..b77a3f2945 100644 --- a/examples/custom-payload-builder/Cargo.toml +++ b/examples/custom-payload-builder/Cargo.toml @@ -16,6 +16,7 @@ reth-node-ethereum.workspace = true reth-ethereum-payload-builder.workspace = true alloy-primitives.workspace = true +alloy-eips.workspace = true tracing.workspace = true futures-util.workspace = true diff --git a/examples/custom-payload-builder/src/generator.rs b/examples/custom-payload-builder/src/generator.rs index f5d64e41cd..2e264d017a 100644 --- a/examples/custom-payload-builder/src/generator.rs +++ b/examples/custom-payload-builder/src/generator.rs @@ -1,4 +1,5 @@ use crate::job::EmptyBlockPayloadJob; +use alloy_eips::BlockNumberOrTag; use alloy_primitives::Bytes; use reth::{ providers::{BlockReaderIdExt, BlockSource, StateProviderFactory}, @@ -8,7 +9,7 @@ use reth::{ use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, PayloadBuilder, PayloadConfig}; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::{PayloadBuilderError, PayloadJobGenerator}; -use reth_primitives::BlockNumberOrTag; +use reth_primitives::SealedHeader; use std::sync::Arc; /// The generator type that creates new jobs that builds empty blocks. 
@@ -77,7 +78,10 @@ where // we already know the hash, so we can seal it block.seal(attributes.parent()) }; - let config = PayloadConfig::new(Arc::new(parent_block), Bytes::default(), attributes); + let hash = parent_block.hash(); + let header = SealedHeader::new(parent_block.header().clone(), hash); + + let config = PayloadConfig::new(Arc::new(header), Bytes::default(), attributes); Ok(EmptyBlockPayloadJob { client: self.client.clone(), _pool: self.pool.clone(), diff --git a/examples/custom-payload-builder/src/job.rs b/examples/custom-payload-builder/src/job.rs index 26b594be94..0141982595 100644 --- a/examples/custom-payload-builder/src/job.rs +++ b/examples/custom-payload-builder/src/job.rs @@ -3,6 +3,7 @@ use reth::{ providers::StateProviderFactory, tasks::TaskSpawner, transaction_pool::TransactionPool, }; use reth_basic_payload_builder::{PayloadBuilder, PayloadConfig}; +use reth_node_api::PayloadKind; use reth_payload_builder::{KeepPayloadJobAlive, PayloadBuilderError, PayloadJob}; use std::{ @@ -52,7 +53,10 @@ where Ok(self.config.attributes.clone()) } - fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + fn resolve_kind( + &mut self, + _kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { let payload = self.best_payload(); (futures_util::future::ready(payload), KeepPayloadJobAlive::No) } diff --git a/examples/custom-rlpx-subprotocol/Cargo.toml b/examples/custom-rlpx-subprotocol/Cargo.toml index d59d16f35c..18c136671c 100644 --- a/examples/custom-rlpx-subprotocol/Cargo.toml +++ b/examples/custom-rlpx-subprotocol/Cargo.toml @@ -13,8 +13,6 @@ reth-eth-wire.workspace = true reth-network.workspace = true reth-network-api.workspace = true reth-node-ethereum.workspace = true -reth-provider = { workspace = true, features = ["test-utils"] } -reth-primitives.workspace = true reth.workspace = true tokio-stream.workspace = true eyre.workspace = true diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index ab018a0b07..5772461bd7 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -1,6 +1,7 @@ use alloy_primitives::{Address, Sealable, B256}; use alloy_rpc_types::{Filter, FilteredParams}; use reth_chainspec::ChainSpecBuilder; +use reth_db::{open_db_read_only, DatabaseEnv}; use reth_node_ethereum::EthereumNode; use reth_node_types::NodeTypesWithDBAdapter; use reth_primitives::SealedHeader; @@ -8,7 +9,7 @@ use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider, StateProvider, TransactionsProvider, }; -use std::path::Path; +use std::{path::Path, sync::Arc}; // Providers are zero cost abstractions on top of an opened MDBX Transaction // exposing a familiar API to query the chain's information without requiring knowledge @@ -20,17 +21,16 @@ fn main() -> eyre::Result<()> { // Opens a RO handle to the database file. let db_path = std::env::var("RETH_DB_PATH")?; let db_path = Path::new(&db_path); + let db = open_db_read_only(db_path.join("db").as_path(), Default::default())?; // Instantiate a provider factory for Ethereum mainnet using the provided DB. // TODO: Should the DB version include the spec so that you do not need to specify it here? 
let spec = ChainSpecBuilder::mainnet().build(); - let factory = - ProviderFactory::<NodeTypesWithDBAdapter<EthereumNode, DatabaseEnv>>::new_with_database_path( - db_path, - spec.into(), - Default::default(), - StaticFileProvider::read_only(db_path.join("static_files"), false)?, - )?; + let factory = ProviderFactory::<NodeTypesWithDBAdapter<EthereumNode, Arc<DatabaseEnv>>>::new( + db.into(), + spec.into(), + StaticFileProvider::read_only(db_path.join("static_files"), true)?, + ); // This call opens a RO transaction on the database. To write to the DB you'd need to call // the `provider_rw` function and look for the `Writer` variants of the traits. diff --git a/examples/manual-p2p/Cargo.toml b/examples/manual-p2p/Cargo.toml index 2303bfbfea..b1642f66ca 100644 --- a/examples/manual-p2p/Cargo.toml +++ b/examples/manual-p2p/Cargo.toml @@ -14,6 +14,8 @@ reth-eth-wire.workspace = true reth-ecies.workspace = true reth-network-peers.workspace = true +alloy-consensus.workspace = true + secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } futures.workspace = true diff --git a/examples/manual-p2p/src/main.rs b/examples/manual-p2p/src/main.rs index edaaf85848..79a2ff26a2 100644 --- a/examples/manual-p2p/src/main.rs +++ b/examples/manual-p2p/src/main.rs @@ -8,6 +8,7 @@ use std::time::Duration; +use alloy_consensus::constants::MAINNET_GENESIS_HASH; use futures::StreamExt; use reth_chainspec::{Chain, MAINNET}; use reth_discv4::{DiscoveryUpdate, Discv4, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; @@ -17,7 +18,7 @@ use reth_eth_wire::{ }; use reth_network::config::rng_secret_key; use reth_network_peers::{mainnet_nodes, pk2id, NodeRecord}; -use reth_primitives::{EthereumHardfork, Head, MAINNET_GENESIS_HASH}; +use reth_primitives::{EthereumHardfork, Head}; use secp256k1::{SecretKey, SECP256K1}; use std::sync::LazyLock; use tokio::net::TcpStream; @@ -105,7 +106,8 @@ async fn handshake_eth(p2p_stream: AuthedP2PStream) -> eyre::Result<(AuthedEthSt .forkid(MAINNET.hardfork_fork_id(EthereumHardfork::Shanghai).unwrap()) .build(); - let status = Status { version: p2p_stream.shared_capabilities().eth()?.version(), ..status }; + let status = + Status { version: p2p_stream.shared_capabilities().eth()?.version().try_into()?, ..status }; let eth_unauthed = UnauthedEthStream::new(p2p_stream); Ok(eth_unauthed.handshake(status, fork_filter).await?)
} diff --git a/examples/polygon-p2p/Cargo.toml b/examples/polygon-p2p/Cargo.toml index bdf9a27ce5..e18f32a647 100644 --- a/examples/polygon-p2p/Cargo.toml +++ b/examples/polygon-p2p/Cargo.toml @@ -20,6 +20,5 @@ reth-primitives.workspace = true serde_json.workspace = true reth-tracing.workspace = true tokio-stream.workspace = true -reth-provider = { workspace = true, features = ["test-utils"] } reth-discv4 = { workspace = true, features = ["test-utils"] } alloy-primitives.workspace = true diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index 1b2899a648..92ae86f00b 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -16,6 +16,7 @@ use std::{path::Path, sync::Arc}; use reth::{ api::NodeTypesWithDBAdapter, + beacon_consensus::EthBeaconConsensus, providers::{ providers::{BlockchainProvider, StaticFileProvider}, ProviderFactory, @@ -66,9 +67,10 @@ async fn main() -> eyre::Result<()> { .with_noop_pool() .with_noop_network() .with_executor(TokioTaskExecutor::default()) - .with_evm_config(EthEvmConfig::new(spec)) + .with_evm_config(EthEvmConfig::new(spec.clone())) .with_events(TestCanonStateSubscriptions::default()) - .with_block_executor(EthExecutorProvider::ethereum(provider.chain_spec())); + .with_block_executor(EthExecutorProvider::ethereum(provider.chain_spec())) + .with_consensus(EthBeaconConsensus::new(spec)); // Pick which namespaces to expose. let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]); diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index 26ebdfe412..371fbf4f78 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -20,14 +20,17 @@ use reth::{ use reth_chainspec::{Chain, ChainSpec}; use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes, NodeTypes}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{node::EthereumAddOns, EthEvmConfig, EthExecutorProvider, EthereumNode}; +use reth_node_ethereum::{ + node::EthereumAddOns, BasicBlockExecutorProvider, EthEvmConfig, EthExecutionStrategyFactory, + EthereumNode, +}; use reth_primitives::{ revm_primitives::{SpecId, StatefulPrecompileMut}, Header, TransactionSigned, }; use reth_tracing::{RethTracer, Tracer}; use schnellru::{ByLength, LruMap}; -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, convert::Infallible, sync::Arc}; /// Type alias for the LRU cache used within the [`PrecompileCache`]. 
type PrecompileLRUCache = LruMap<(Bytes, u64), PrecompileResult>; @@ -144,6 +147,7 @@ impl StatefulPrecompileMut for WrappedPrecompile { impl ConfigureEvmEnv for MyEvmConfig { type Header = Header; + type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { self.inner.fill_tx_env(tx_env, transaction, sender) @@ -172,7 +176,7 @@ impl ConfigureEvmEnv for MyEvmConfig { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { self.inner.next_cfg_and_block_env(parent, attributes) } } @@ -224,7 +228,7 @@ where Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec>>, { type EVM = MyEvmConfig; - type Executor = EthExecutorProvider<Self::EVM>; + type Executor = BasicBlockExecutorProvider<EthExecutionStrategyFactory<Self::EVM>>; async fn build_evm( self, @@ -234,7 +238,13 @@ where inner: EthEvmConfig::new(ctx.chain_spec()), precompile_cache: self.precompile_cache.clone(), }; - Ok((evm_config.clone(), EthExecutorProvider::new(ctx.chain_spec(), evm_config))) + Ok(( + evm_config.clone(), + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::new( + ctx.chain_spec(), + evm_config, + )), + )) } } diff --git a/op.Dockerfile b/op.Dockerfile index b7c7e7f3f2..b3f32086ad 100644 --- a/op.Dockerfile +++ b/op.Dockerfile @@ -1,4 +1,4 @@ -FROM lukemathwalker/cargo-chef:latest-rust-1.81 AS chef +FROM lukemathwalker/cargo-chef:latest-rust-1.82 AS chef WORKDIR /app LABEL org.opencontainers.image.source=https://github.com/bnb-chain/reth diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index ca23ffcce3..a56c44ec3d 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -13,7 +13,11 @@ workspace = true [features] ef-tests = [] -asm-keccak = ["reth-primitives/asm-keccak"] +asm-keccak = [ + "reth-primitives/asm-keccak", + "alloy-primitives/asm-keccak", + "revm/asm-keccak" +] [dependencies] reth-chainspec.workspace = true @@ -23,6 +27,9 @@ reth-db-api.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-stages.workspace = true reth-evm-ethereum.workspace = true +reth-revm = { workspace = true, features = ["std"] } + +revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } alloy-rlp.workspace = true alloy-primitives.workspace = true diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index d29aafa821..7d80ec6c47 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -6,6 +6,7 @@ use crate::{ }; use alloy_rlp::Decodable; use rayon::iter::{ParallelBridge, ParallelIterator}; +use reth_chainspec::ChainSpec; use reth_primitives::{BlockBody, SealedBlock, StaticFileSegment}; use reth_provider::{ providers::StaticFileWriter, test_utils::create_test_provider_factory_with_chain_spec, @@ -83,11 +84,10 @@ impl Case for BlockchainTestCase { .par_bridge() .try_for_each(|case| { // Create a new test database and initialize a provider for the test case. - let provider = create_test_provider_factory_with_chain_spec(Arc::new( - case.network.clone().into(), - )) - .database_provider_rw() - .unwrap(); + let chain_spec: Arc<ChainSpec> = Arc::new(case.network.into()); + let provider = create_test_provider_factory_with_chain_spec(chain_spec.clone()) + .database_provider_rw() + .unwrap(); // Insert initial test state into the provider.
provider.insert_historical_block( @@ -127,9 +127,7 @@ // Execute the execution stage using the EVM processor factory for the test case // network. let _ = ExecutionStage::new_with_executor( - reth_evm_ethereum::execute::EthExecutorProvider::ethereum(Arc::new( - case.network.clone().into(), - )), + reth_evm_ethereum::execute::EthExecutorProvider::ethereum(chain_spec), ) .execute( &provider, diff --git a/testing/ef-tests/src/lib.rs b/testing/ef-tests/src/lib.rs index 45f296d1f5..ca5e47d2d3 100644 --- a/testing/ef-tests/src/lib.rs +++ b/testing/ef-tests/src/lib.rs @@ -7,6 +7,9 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use reth_revm as _; +use revm as _; + pub mod case; pub mod result; pub mod suite; diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 3f3df15363..b5dc073c1d 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -87,7 +87,7 @@ pub struct Header { /// Parent beacon block root. pub parent_beacon_block_root: Option<B256>, /// Requests root. - pub requests_root: Option<B256>, + pub requests_hash: Option<B256>, } impl From<Header>
for SealedHeader { @@ -113,7 +113,7 @@ impl From<Header>
for SealedHeader { blob_gas_used: value.blob_gas_used.map(|v| v.to::<u64>()), excess_blob_gas: value.excess_blob_gas.map(|v| v.to::<u64>()), parent_beacon_block_root: value.parent_beacon_block_root, - requests_root: value.requests_root, + requests_hash: value.requests_hash, }; Self::new(header, value.hash) } } @@ -257,7 +257,7 @@ impl Account { } /// Fork specification. -#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Deserialize)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Copy, Deserialize)] pub enum ForkSpec { /// Frontier Frontier, diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml index 49a59ecf6a..3e0f58a7bd 100644 --- a/testing/testing-utils/Cargo.toml +++ b/testing/testing-utils/Cargo.toml @@ -14,10 +14,13 @@ workspace = true [dependencies] reth-primitives = { workspace = true, features = ["secp256k1"] } -alloy-eips.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-eips.workspace = true rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } + +[dev-dependencies] +alloy-eips.workspace = true diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index bf64cdc3ee..ebcf4b4e01 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,17 +1,15 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. -use alloy_consensus::TxLegacy; -use alloy_eips::{ - eip6110::DepositRequest, eip7002::WithdrawalRequest, eip7251::ConsolidationRequest, -}; +use alloy_consensus::{Transaction as _, TxLegacy}; +use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{Address, BlockNumber, Bytes, Parity, Sealable, TxKind, B256, U256}; pub use rand::Rng; use rand::{ distributions::uniform::SampleRange, rngs::StdRng, seq::SliceRandom, thread_rng, SeedableRng, }; use reth_primitives::{ - proofs, sign_message, Account, BlockBody, Header, Log, Receipt, Request, Requests, SealedBlock, - SealedHeader, StorageEntry, Transaction, TransactionSigned, Withdrawal, Withdrawals, + proofs, sign_message, Account, BlockBody, Header, Log, Receipt, SealedBlock, SealedHeader, + StorageEntry, Transaction, TransactionSigned, Withdrawals, }; use secp256k1::{Keypair, Secp256k1}; use std::{ @@ -201,11 +199,6 @@ pub fn random_block<R: Rng>(rng: &mut R, number: u64, block_params: BlockParams) let transactions_root = proofs::calculate_transaction_root(&transactions); let ommers_hash = proofs::calculate_ommers_root(&ommers); - let requests = block_params - .requests_count - .map(|count| (0..count).map(|_| random_request(rng)).collect::<Vec<_>>()); - let requests_root = requests.as_ref().map(|requests| proofs::calculate_requests_root(requests)); - let withdrawals = block_params.withdrawals_count.map(|count| { (0..count) .map(|i| Withdrawal { @@ -226,7 +219,8 @@ pub fn random_block<R: Rng>(rng: &mut R, number: u64, block_params: BlockParams) transactions_root, ommers_hash, base_fee_per_gas: Some(rng.gen()), - requests_root, + // TODO(onbjerg): Proper EIP-7685 request support + requests_hash: None, withdrawals_root, ..Default::default() } @@ -241,7 +235,6 @@ pub fn random_block<R: Rng>(rng: &mut R, number: u64, block_params: BlockParams) ommers, withdrawals: withdrawals.map(Withdrawals::new), sidecars: Some(Default::default()), - requests: requests.map(Requests), }, } } @@ -471,38 +464,13 @@ pub fn random_log<R: Rng>(rng: &mut R, address: Option<Address>
, topics_count: O ) } -/// Generate random request -pub fn random_request<R: Rng>(rng: &mut R) -> Request { - let request_type = rng.gen_range(0..3); - match request_type { - 0 => Request::DepositRequest(DepositRequest { - pubkey: rng.gen(), - withdrawal_credentials: rng.gen(), - amount: rng.gen(), - signature: rng.gen(), - index: rng.gen(), - }), - 1 => Request::WithdrawalRequest(WithdrawalRequest { - source_address: rng.gen(), - validator_pubkey: rng.gen(), - amount: rng.gen(), - }), - 2 => Request::ConsolidationRequest(ConsolidationRequest { - source_address: rng.gen(), - source_pubkey: rng.gen(), - target_pubkey: rng.gen(), - }), - _ => panic!("invalid request type"), - } -} - #[cfg(test)] mod tests { use super::*; use alloy_consensus::TxEip1559; use alloy_eips::eip2930::AccessList; - use alloy_primitives::{hex, Parity}; - use reth_primitives::{public_key_to_address, Signature}; + use alloy_primitives::{hex, Parity, Signature}; + use reth_primitives::public_key_to_address; use std::str::FromStr; #[test]