From 5329228947d72f297977c77a7d76916b879ae851 Mon Sep 17 00:00:00 2001 From: Alessandro Tagliapietra <0xalex88@proton.me> Date: Sat, 20 Jan 2024 10:04:35 -0800 Subject: [PATCH] Implement frax DA changes --- .github/CODEOWNERS | 43 ------ .github/actions/setup/action.yml | 7 - .github/dependabot.yml | 54 -------- .github/workflows/close-stale.yml | 18 --- .github/workflows/release.yml | 50 +++++++ .github/workflows/slither.yml | 32 ----- .github/workflows/tag-service.yml | 66 --------- Makefile | 6 +- docker-bake.hcl | 43 ++++-- frax-da/cli.go | 73 ++++++++++ frax-da/client.go | 126 ++++++++++++++++++ frax-da/da.go | 7 + frax-da/types.go | 5 + go.mod | 2 +- op-batcher/batcher/config.go | 6 + op-batcher/batcher/driver.go | 16 +++ op-batcher/batcher/service.go | 15 +++ op-batcher/flags/flags.go | 2 + op-bootnode/Dockerfile | 9 ++ op-node/flags/flags.go | 2 + op-node/node/config.go | 7 + op-node/node/node.go | 7 + op-node/rollup/derive/blob_data_source.go | 17 ++- .../rollup/derive/blob_data_source_test.go | 15 ++- op-node/rollup/derive/calldata_source.go | 68 +++++++++- op-node/rollup/derive/calldata_source_test.go | 3 +- op-node/rollup/driver/da.go | 14 ++ op-node/service.go | 4 +- ops-bedrock/docker-compose.yml | 15 ++- ops/docker/op-stack-go/Dockerfile | 9 ++ .../op-stack-go/Dockerfile.dockerignore | 1 + 31 files changed, 480 insertions(+), 262 deletions(-) delete mode 100644 .github/CODEOWNERS delete mode 100644 .github/actions/setup/action.yml delete mode 100644 .github/dependabot.yml delete mode 100644 .github/workflows/close-stale.yml create mode 100644 .github/workflows/release.yml delete mode 100644 .github/workflows/slither.yml delete mode 100644 .github/workflows/tag-service.yml create mode 100644 frax-da/cli.go create mode 100644 frax-da/client.go create mode 100644 frax-da/da.go create mode 100644 frax-da/types.go create mode 100644 op-bootnode/Dockerfile create mode 100644 op-node/rollup/driver/da.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 23ac0fa26c..0000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,43 +0,0 @@ -# Packages -/packages/contracts-bedrock @ethereum-optimism/contract-reviewers -/packages/sdk @ethereum-optimism/devxpod - -# Bedrock codebases -/bedrock-devnet @ethereum-optimism/go-reviewers -/cannon @ethereum-optimism/go-reviewers -/op-batcher @ethereum-optimism/go-reviewers -/op-bootnode @ethereum-optimism/go-reviewers -/op-chain-ops @ethereum-optimism/go-reviewers -/op-challenger @ethereum-optimism/go-reviewers -/op-dispute-mon @ethereum-optimism/go-reviewers -/op-e2e @ethereum-optimism/go-reviewers -/op-node @ethereum-optimism/go-reviewers -/op-node/rollup @protolambda @ajsutton -/op-alt-da @ethereum-optimism/go-reviewers -/op-preimage @ethereum-optimism/go-reviewers -/op-program @ethereum-optimism/go-reviewers -/op-proposer @ethereum-optimism/go-reviewers -/op-service @ethereum-optimism/go-reviewers -/op-supervisor @ethereum-optimism/go-reviewers -/op-wheel @ethereum-optimism/go-reviewers -/ops-bedrock @ethereum-optimism/go-reviewers -/op-conductor @0x00101010 @zhwrd @mslipper - -# Ops -/.circleci @ethereum-optimism/monorepo-ops-reviewers -/.github @ethereum-optimism/monorepo-ops-reviewers -/ops @ethereum-optimism/monorepo-ops-reviewers -/docker-bake.hcl @ethereum-optimism/monorepo-ops-reviewers - -# Misc -/proxyd @ethereum-optimism/infra-reviewers -/infra @ethereum-optimism/infra-reviewers -/specs @ethereum-optimism/contract-reviewers @ethereum-optimism/go-reviewers - -# Don't add owners if only package.json is 
updated -/packages/*/package.json -/*/package.json - -# JavaScript Releases -/packages/*/CHANGELOG.md @ethereum-optimism/release-managers -/*/CHANGELOG.md @ethereum-optimism/release-managers diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml deleted file mode 100644 index 4c89185664..0000000000 --- a/.github/actions/setup/action.yml +++ /dev/null @@ -1,7 +0,0 @@ -name: Setup -description: Common setup steps used by our workflows -runs: - using: composite - steps: - - name: Setup foundry - uses: foundry-rs/foundry-toolchain@v1 diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index e2c739a570..0000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,54 +0,0 @@ -version: 2 -updates: - - package-ecosystem: "docker" - directory: "/ops-bedrock" - schedule: - interval: "daily" - day: "tuesday" - time: "14:30" - timezone: "America/New_York" - open-pull-requests-limit: 10 - commit-message: - prefix: "dependabot(docker): " - labels: - - "M-dependabot" - - - package-ecosystem: "github-actions" - directory: "/" - schedule: - interval: "weekly" - day: "tuesday" - time: "14:30" - timezone: "America/New_York" - open-pull-requests-limit: 10 - commit-message: - prefix: "dependabot(actions): " - labels: - - "M-dependabot" - - - package-ecosystem: "npm" - directory: "/" - schedule: - interval: "weekly" - day: "tuesday" - time: "14:30" - timezone: "America/New_York" - open-pull-requests-limit: 10 - versioning-strategy: "auto" - commit-message: - prefix: "dependabot(npm): " - labels: - - "M-dependabot" - - - package-ecosystem: "gomod" - directory: "/" - schedule: - interval: "daily" - day: "tuesday" - time: "14:30" - timezone: "America/New_York" - open-pull-requests-limit: 10 - commit-message: - prefix: "dependabot(gomod): " - labels: - - "M-dependabot" diff --git a/.github/workflows/close-stale.yml b/.github/workflows/close-stale.yml deleted file mode 100644 index 68e8b4ec82..0000000000 --- a/.github/workflows/close-stale.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: 'Close stale issues and PRs' -on: - schedule: - - cron: '30 1 * * *' - -jobs: - stale: - runs-on: ubuntu-latest - steps: - - uses: actions/stale@v9 - with: - stale-pr-message: 'This PR is stale because it has been open 14 days with no activity. Remove stale label or comment or this will be closed in 5 days.' 
- stale-issue-label: 'S-stale' - exempt-pr-labels: 'S-exempt-stale' - days-before-issue-stale: 999 - days-before-pr-stale: 14 - days-before-close: 5 - repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000..c28316fe34 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,50 @@ +name: Release docker image + +on: + workflow_dispatch: + inputs: + service: + description: Service to release + required: true + type: choice + options: + - op-node + - op-batcher + - op-proposer + - op-bootnode + version: + description: Service version to publish (will be the docker tag) + required: true + +jobs: + build-and-push-image: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Log in to the Container registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push + uses: docker/bake-action@v4 + env: + REGISTRY: ghcr.io + REPOSITORY: fraxfinance/fraxtal + PLATFORMS: linux/amd64,linux/arm64 + GIT_COMMIT: ${{ github.sha }} + GIT_VERSION: ${{ inputs.version }} + IMAGE_TAGS: ${{ inputs.version }} + with: + push: true + set: | + *.cache-from=type=gha + *.cache-to=type=gha,mode=max + targets: ${{ inputs.service }} diff --git a/.github/workflows/slither.yml b/.github/workflows/slither.yml deleted file mode 100644 index 069c6eb2f0..0000000000 --- a/.github/workflows/slither.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: 'Slither Analysis' - -on: - workflow_dispatch: - pull_request: - push: - branches: - - develop - -jobs: - slither-analyze: - runs-on: ubuntu-latest - container: - image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:v0.49.0 - steps: - - uses: actions/checkout@v4 - - - name: Run Slither - uses: crytic/slither-action@v0.4.0 - id: slither - with: - target: packages/contracts-bedrock - slither-config: packages/contracts-bedrock/slither.config.json - fail-on: config - sarif: results.sarif - slither-args: --triage-database packages/contracts-bedrock/slither.db.json - - - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@v3 - if: always() - with: - sarif_file: ${{ steps.slither.outputs.sarif }} diff --git a/.github/workflows/tag-service.yml b/.github/workflows/tag-service.yml deleted file mode 100644 index 42e06c9deb..0000000000 --- a/.github/workflows/tag-service.yml +++ /dev/null @@ -1,66 +0,0 @@ -name: Tag Service - -on: - workflow_dispatch: - inputs: - bump: - description: 'How much to bump the version by' - required: true - type: choice - options: - - major - - minor - - patch - - prerelease - - finalize-prerelease - service: - description: 'Which service to release' - required: true - type: choice - options: - - ci-builder - - ci-builder-rust - - op-node - - op-batcher - - op-proposer - - op-challenger - - op-program - - op-dispute-mon - - op-ufm - - da-server - - op-contracts - - op-conductor - prerelease: - description: Increment major/minor/patch as prerelease? 
- required: false - type: boolean - default: false - -jobs: - release: - runs-on: ubuntu-latest - environment: op-stack-production - - steps: - - uses: actions/checkout@v4 - - name: Fetch tags - run: git fetch --tags origin --force - - name: Setup Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - name: Install deps - run: pip install -r requirements.txt - working-directory: ops/tag-service - - run: ops/tag-service/tag-service.py --bump="$BUMP" --service="$SERVICE" - env: - INPUT_GITHUB_TOKEN: ${{ github.token }} - BUMP: ${{ github.event.inputs.bump }} - SERVICE: ${{ github.event.inputs.service }} - if: ${{ github.event.inputs.prerelease == 'false' }} - - run: ops/tag-service/tag-service.py --bump="$BUMP" --service="$SERVICE" --pre-release - env: - INPUT_GITHUB_TOKEN: ${{ github.token }} - BUMP: ${{ github.event.inputs.bump }} - SERVICE: ${{ github.event.inputs.service }} - if: ${{ github.event.inputs.prerelease == 'true' }} diff --git a/Makefile b/Makefile index 5431c5ff38..c40e165049 100644 --- a/Makefile +++ b/Makefile @@ -188,14 +188,14 @@ devnet-test: pre-devnet ## Runs tests on the local devnet make -C op-e2e test-devnet .PHONY: devnet-test -devnet-down: ## Stops the local devnet - @(cd ./ops-bedrock && GENESIS_TIMESTAMP=$(shell date +%s) docker compose stop) +devnet-down: + @(cd ./ops-bedrock && GENESIS_TIMESTAMP=$(shell date +%s) docker compose rm -sf) .PHONY: devnet-down devnet-clean: ## Cleans up local devnet environment rm -rf ./packages/contracts-bedrock/deployments/devnetL1 rm -rf ./.devnet - cd ./ops-bedrock && docker compose down + cd ./ops-bedrock && docker compose rm -sf docker image ls 'ops-bedrock*' --format='{{.Repository}}' | xargs -r docker rmi docker volume ls --filter name=ops-bedrock --format='{{.Name}}' | xargs -r docker volume rm .PHONY: devnet-clean diff --git a/docker-bake.hcl b/docker-bake.hcl index d57ab76e7e..bc39eff6fd 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -69,6 +69,10 @@ variable "OP_CONDUCTOR_VERSION" { default = "${GIT_VERSION}" } +variable "OP_BOOTNODE_VERSION" { + default = "${GIT_VERSION}" +} + target "op-node" { dockerfile = "ops/docker/op-stack-go/Dockerfile" @@ -80,7 +84,7 @@ target "op-node" { } target = "op-node-target" platforms = split(",", PLATFORMS) - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-node:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-op-node:${tag}"] } target "op-batcher" { @@ -93,7 +97,7 @@ target "op-batcher" { } target = "op-batcher-target" platforms = split(",", PLATFORMS) - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-batcher:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-op-batcher:${tag}"] } target "op-proposer" { @@ -106,7 +110,7 @@ target "op-proposer" { } target = "op-proposer-target" platforms = split(",", PLATFORMS) - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-proposer:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-op-proposer:${tag}"] } target "op-challenger" { @@ -119,7 +123,7 @@ target "op-challenger" { } target = "op-challenger-target" platforms = split(",", PLATFORMS) - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-challenger:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-op-challenger:${tag}"] } target "op-dispute-mon" { @@ -132,7 +136,7 @@ target "op-dispute-mon" { } target = "op-dispute-mon-target" platforms = 
split(",", PLATFORMS) - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-dispute-mon:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-op-dispute-mon:${tag}"] } target "op-conductor" { @@ -145,7 +149,7 @@ target "op-conductor" { } target = "op-conductor-target" platforms = split(",", PLATFORMS) - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-conductor:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-op-conductor:${tag}"] } target "da-server" { @@ -157,7 +161,7 @@ target "da-server" { } target = "da-server-target" platforms = split(",", PLATFORMS) - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/da-server:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-da-server:${tag}"] } target "op-program" { @@ -170,7 +174,7 @@ target "op-program" { } target = "op-program-target" platforms = split(",", PLATFORMS) - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-program:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-op-program:${tag}"] } target "op-supervisor" { @@ -183,7 +187,7 @@ target "op-supervisor" { } target = "op-supervisor-target" platforms = split(",", PLATFORMS) - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-supervisor:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-op-supervisor:${tag}"] } target "cannon" { @@ -196,7 +200,7 @@ target "cannon" { } target = "cannon-target" platforms = split(",", PLATFORMS) - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/cannon:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-cannon:${tag}"] } target "ci-builder" { @@ -204,7 +208,7 @@ target "ci-builder" { context = "." platforms = split(",", PLATFORMS) target="base-builder" - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ci-builder:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-ci-builder:${tag}"] } target "ci-builder-rust" { @@ -212,7 +216,7 @@ target "ci-builder-rust" { context = "." platforms = split(",", PLATFORMS) target="rust-builder" - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ci-builder-rust:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-ci-builder-rust:${tag}"] } target "contracts-bedrock" { @@ -220,5 +224,18 @@ target "contracts-bedrock" { context = "." target = "contracts-bedrock" platforms = split(",", PLATFORMS) - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/contracts-bedrock:${tag}"] + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-contracts-bedrock:${tag}"] +} + +target "op-bootnode" { + dockerfile = "ops/docker/op-stack-go/Dockerfile" + context = "." 
+ args = { + GIT_COMMIT = "${GIT_COMMIT}" + GIT_DATE = "${GIT_DATE}" + OP_BOOTNODE_VERSION = "${OP_BOOTNODE_VERSION}" + } + target = "op-bootnode-target" + platforms = split(",", PLATFORMS) + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}-op-bootnode:${tag}"] } diff --git a/frax-da/cli.go b/frax-da/cli.go new file mode 100644 index 0000000000..9f73dbcce4 --- /dev/null +++ b/frax-da/cli.go @@ -0,0 +1,73 @@ +package fraxda + +import ( + "fmt" + "net/url" + + "github.com/urfave/cli/v2" + + opservice "github.com/ethereum-optimism/optimism/op-service" +) + +const ( + DaRpcFlagName = "da.rpc" +) + +var ( + defaultDaRpc = "https://da-rpc.mainnet.frax.com" +) + +func CLIFlags(envPrefix string) []cli.Flag { + return []cli.Flag{ + &cli.StringFlag{ + Name: DaRpcFlagName, + Usage: "DA endpoint", + Value: defaultDaRpc, + EnvVars: opservice.PrefixEnvVar(envPrefix, "DA_RPC"), + }, + } +} + +type Config struct { + DaRpc string +} + +func (c Config) Check() error { + if c.DaRpc == "" { + c.DaRpc = defaultDaRpc + } + + if _, err := url.Parse(c.DaRpc); err != nil { + return fmt.Errorf("invalid da rpc url: %w", err) + } + + return nil +} + +type CLIConfig struct { + DaRpc string +} + +func (c CLIConfig) Check() error { + if c.DaRpc == "" { + c.DaRpc = defaultDaRpc + } + + if _, err := url.Parse(c.DaRpc); err != nil { + return fmt.Errorf("invalid da rpc url: %w", err) + } + + return nil +} + +func NewCLIConfig() CLIConfig { + return CLIConfig{ + DaRpc: defaultDaRpc, + } +} + +func ReadCLIConfig(ctx *cli.Context) CLIConfig { + return CLIConfig{ + DaRpc: ctx.String(DaRpcFlagName), + } +} diff --git a/frax-da/client.go b/frax-da/client.go new file mode 100644 index 0000000000..9532d739ff --- /dev/null +++ b/frax-da/client.go @@ -0,0 +1,126 @@ +package fraxda + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "time" + + "github.com/multiformats/go-multibase" +) + +type DAClient struct { + baseUrl *url.URL + httpClient *http.Client +} + +func NewDAClient(rpc string) (*DAClient, error) { + baseUrl, err := url.Parse(rpc) + if err != nil { + return nil, fmt.Errorf("unable to parse DA endpoint: %s", err) + } + + httpClient := &http.Client{ + Timeout: 30 * time.Second, + } + + return &DAClient{ + baseUrl: baseUrl, + httpClient: httpClient, + }, nil +} + +func (c DAClient) Read(ctx context.Context, id []byte) ([]byte, error) { + ipfsCID, err := multibase.Encode(multibase.Base32, id) + if err != nil { + return nil, fmt.Errorf("unable to decode CID: %w", err) + } + + fetchUrl := c.baseUrl.ResolveReference(&url.URL{Path: fmt.Sprintf("/v1/blobs/%s", ipfsCID)}) + request, err := http.NewRequestWithContext(ctx, "GET", fetchUrl.String(), nil) + if err != nil { + return nil, fmt.Errorf("unable to create request to fetch data from DA: %w", err) + } + resp, err := c.httpClient.Do(request) + + if err != nil { + return nil, fmt.Errorf("unable to fetch DA data: %w", err) + } + if resp.StatusCode != 200 { + return nil, fmt.Errorf("unable to fetch DA data, got status code %d", resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read DA data fetch response: %w", err) + } + + return body, nil +} + +func (c DAClient) ReadCelestia(ctx context.Context, hexString string) ([]byte, error) { + fetchUrl := c.baseUrl.ResolveReference(&url.URL{Path: fmt.Sprintf("/v1/blobs/celestia-%s", hexString)}) + request, err := http.NewRequestWithContext(ctx, "GET", fetchUrl.String(), nil) + if err != nil { + return nil, 
fmt.Errorf("unable to create request to fetch celestia data from DA: %w", err) + } + resp, err := c.httpClient.Do(request) + + if err != nil { + return nil, fmt.Errorf("unable to fetch DA celestia data: %w", err) + } + if resp.StatusCode != 200 { + return nil, fmt.Errorf("unable to fetch DA celestia data, got status code %d", resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read DA celestia data fetch response: %w", err) + } + + return body, nil +} + +func (c DAClient) Write(ctx context.Context, data []byte) ([]byte, error) { + submitUrl := c.baseUrl.ResolveReference(&url.URL{Path: "/v1/blobs"}) + + request, err := http.NewRequestWithContext(ctx, "POST", submitUrl.String(), bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("unable to create request to submit data to DA: %w", err) + } + + resp, err := c.httpClient.Do(request) + + if err != nil { + return nil, fmt.Errorf("unable to submit data to DA: %w", err) + } + if resp.StatusCode > 299 { + return nil, fmt.Errorf("unable to submit data to DA, got status code %d", resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read DA data submit response: %w", err) + } + + var respDto daSubmitResponse + err = json.Unmarshal(body, &respDto) + if err != nil { + return nil, fmt.Errorf("unable to parse DA data submit response json: %w", err) + } + + if respDto.ID == "" { + return nil, fmt.Errorf("DA data submit response returned empty ID") + } + + _, ipfsCID, err := multibase.Decode(respDto.ID) + if err != nil { + return nil, fmt.Errorf("DA data submit response returned invalid multibase encoded value: %w", err) + } + + return ipfsCID, nil +} diff --git a/frax-da/da.go b/frax-da/da.go new file mode 100644 index 0000000000..e369f4adb3 --- /dev/null +++ b/frax-da/da.go @@ -0,0 +1,7 @@ +package fraxda + +// DerivationVersionFraxDa is a byte prefix of frax DA based references +const DerivationVersionFraxDa = 0xfc + +// DerivationVersionCelestia is used for retrocompatibility with the old testnet data +const DerivationVersionCelestia = 0xce diff --git a/frax-da/types.go b/frax-da/types.go new file mode 100644 index 0000000000..e9f479436f --- /dev/null +++ b/frax-da/types.go @@ -0,0 +1,5 @@ +package fraxda + +type daSubmitResponse struct { + ID string `json:"id"` +} diff --git a/go.mod b/go.mod index e8a150cb6a..2e04d32136 100644 --- a/go.mod +++ b/go.mod @@ -35,6 +35,7 @@ require ( github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.13.0 github.com/multiformats/go-multiaddr-dns v0.3.1 + github.com/multiformats/go-multibase v0.2.0 github.com/olekukonko/tablewriter v0.0.5 github.com/onsi/gomega v1.34.1 github.com/pkg/errors v0.9.1 @@ -158,7 +159,6 @@ require ( github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.5.0 // indirect diff --git a/op-batcher/batcher/config.go b/op-batcher/batcher/config.go index 250d893e2a..c00202565b 100644 --- a/op-batcher/batcher/config.go +++ b/op-batcher/batcher/config.go @@ -8,6 +8,7 @@ import ( "github.com/urfave/cli/v2" + fraxda "github.com/ethereum-optimism/optimism/frax-da" altda "github.com/ethereum-optimism/optimism/op-alt-da" 
"github.com/ethereum-optimism/optimism/op-batcher/compressor" "github.com/ethereum-optimism/optimism/op-batcher/flags" @@ -105,6 +106,7 @@ type CLIConfig struct { PprofConfig oppprof.CLIConfig RPC oprpc.CLIConfig AltDA altda.CLIConfig + DaConfig fraxda.CLIConfig } func (c *CLIConfig) Check() error { @@ -159,6 +161,9 @@ func (c *CLIConfig) Check() error { if err := c.RPC.Check(); err != nil { return err } + if err := c.DaConfig.Check(); err != nil { + return err + } return nil } @@ -193,5 +198,6 @@ func NewConfig(ctx *cli.Context) *CLIConfig { PprofConfig: oppprof.ReadCLIConfig(ctx), RPC: oprpc.ReadCLIConfig(ctx), AltDA: altda.ReadCLIConfig(ctx), + DaConfig: fraxda.ReadCLIConfig(ctx), } } diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index 39ebf2f25b..3560dd7624 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -2,6 +2,7 @@ package batcher import ( "context" + "encoding/hex" "errors" "fmt" "io" @@ -11,6 +12,7 @@ import ( "sync/atomic" "time" + fraxda "github.com/ethereum-optimism/optimism/frax-da" altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-batcher/metrics" "github.com/ethereum-optimism/optimism/op-node/rollup" @@ -66,6 +68,7 @@ type DriverSetup struct { EndpointProvider dial.L2EndpointProvider ChannelConfig ChannelConfigProvider AltDA *altda.DAClient + DAClient *fraxda.DAClient } // BatchSubmitter encapsulates a service responsible for submitting L2 tx @@ -581,6 +584,19 @@ func (l *BatchSubmitter) sendTransaction(ctx context.Context, txdata txData, que l.Log.Info("Set AltDA input", "commitment", comm, "tx", txdata.ID()) // signal AltDA commitment tx with TxDataVersion1 data = comm.TxData() + } else { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + l.Log.Info("fraxda: submitting data", "bytes", len(data)) + id, err := l.DAClient.Write(ctx, data) + cancel() + if err == nil { + l.Log.Info("fraxda: data successfully submitted", "id", hex.EncodeToString(id)) + data = append([]byte{fraxda.DerivationVersionFraxDa}, id...) + } else { + l.Log.Error("fraxda: data submission failed", "err", err) + l.recordFailedTx(txdata.ID(), err) + return nil + } } candidate = l.calldataTxCandidate(data) } diff --git a/op-batcher/batcher/service.go b/op-batcher/batcher/service.go index 00d3d32071..2bd96a9ba7 100644 --- a/op-batcher/batcher/service.go +++ b/op-batcher/batcher/service.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" + fraxda "github.com/ethereum-optimism/optimism/frax-da" altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum-optimism/optimism/op-batcher/metrics" @@ -71,6 +72,7 @@ type BatcherService struct { stopped atomic.Bool NotSubmittingOnStart bool + DAClient *fraxda.DAClient } // BatcherServiceFromCLIConfig creates a new BatcherService from a CLIConfig. 
@@ -115,6 +117,9 @@ func (bs *BatcherService) initFromCLIConfig(ctx context.Context, version string, if err := bs.initPProf(cfg); err != nil { return fmt.Errorf("failed to init profiling: %w", err) } + if err := bs.initDA(cfg); err != nil { + return fmt.Errorf("failed to start da client: %w", err) + } // init before driver if err := bs.initAltDA(cfg); err != nil { return fmt.Errorf("failed to init AltDA: %w", err) @@ -323,6 +328,7 @@ func (bs *BatcherService) initDriver() { EndpointProvider: bs.EndpointProvider, ChannelConfig: bs.ChannelConfig, AltDA: bs.AltDA, + DAClient: bs.DAClient, }) } @@ -357,6 +363,15 @@ func (bs *BatcherService) initAltDA(cfg *CLIConfig) error { return nil } +func (bs *BatcherService) initDA(cfg *CLIConfig) error { + client, err := fraxda.NewDAClient(cfg.DaConfig.DaRpc) + if err != nil { + return err + } + bs.DAClient = client + return nil +} + // Start runs once upon start of the batcher lifecycle, // and starts batch-submission work if the batcher is configured to start submit data on startup. func (bs *BatcherService) Start(_ context.Context) error { diff --git a/op-batcher/flags/flags.go b/op-batcher/flags/flags.go index 3fe66f3398..5ab0313b8a 100644 --- a/op-batcher/flags/flags.go +++ b/op-batcher/flags/flags.go @@ -8,6 +8,7 @@ import ( "github.com/urfave/cli/v2" + fraxda "github.com/ethereum-optimism/optimism/frax-da" altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-batcher/compressor" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" @@ -193,6 +194,7 @@ func init() { optionalFlags = append(optionalFlags, oppprof.CLIFlags(EnvVarPrefix)...) optionalFlags = append(optionalFlags, txmgr.CLIFlags(EnvVarPrefix)...) optionalFlags = append(optionalFlags, altda.CLIFlags(EnvVarPrefix, "")...) + optionalFlags = append(optionalFlags, fraxda.CLIFlags(EnvVarPrefix)...) Flags = append(requiredFlags, optionalFlags...) } diff --git a/op-bootnode/Dockerfile b/op-bootnode/Dockerfile new file mode 100644 index 0000000000..3b4b8242c9 --- /dev/null +++ b/op-bootnode/Dockerfile @@ -0,0 +1,9 @@ +ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest +FROM $OP_STACK_GO_BUILDER as builder +# See "make golang-docker" and /ops/docker/op-stack-go + +FROM alpine:3.18 + +COPY --from=builder /usr/local/bin/op-bootnode /usr/local/bin/op-bootnode + +CMD ["op-bootnode"] diff --git a/op-node/flags/flags.go b/op-node/flags/flags.go index 6ea12000cc..5224f87b58 100644 --- a/op-node/flags/flags.go +++ b/op-node/flags/flags.go @@ -6,6 +6,7 @@ import ( "github.com/urfave/cli/v2" + fraxda "github.com/ethereum-optimism/optimism/frax-da" altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-node/rollup/engine" "github.com/ethereum-optimism/optimism/op-node/rollup/sync" @@ -441,6 +442,7 @@ func init() { optionalFlags = append(optionalFlags, DeprecatedFlags...) optionalFlags = append(optionalFlags, opflags.CLIFlags(EnvVarPrefix, RollupCategory)...) optionalFlags = append(optionalFlags, altda.CLIFlags(EnvVarPrefix, AltDACategory)...) + optionalFlags = append(optionalFlags, fraxda.CLIFlags(EnvVarPrefix)...) Flags = append(requiredFlags, optionalFlags...) 
} diff --git a/op-node/node/config.go b/op-node/node/config.go index 5ca724d905..9754d24c5c 100644 --- a/op-node/node/config.go +++ b/op-node/node/config.go @@ -7,6 +7,7 @@ import ( "math" "time" + fraxda "github.com/ethereum-optimism/optimism/frax-da" altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-node/flags" "github.com/ethereum-optimism/optimism/op-node/p2p" @@ -75,6 +76,8 @@ type Config struct { // AltDA config AltDA altda.CLIConfig + + DaConfig fraxda.Config } type RPCConfig struct { @@ -170,5 +173,9 @@ func (cfg *Config) Check() error { if cfg.AltDA.Enabled { log.Warn("Alt-DA Mode is a Beta feature of the MIT licensed OP Stack. While it has received initial review from core contributors, it is still undergoing testing, and may have bugs or other issues.") } + if err := cfg.DaConfig.Check(); err != nil { + return fmt.Errorf("da config error: %w", err) + } + return nil } diff --git a/op-node/node/node.go b/op-node/node/node.go index a4fd3e8db0..9d14aded17 100644 --- a/op-node/node/node.go +++ b/op-node/node/node.go @@ -149,9 +149,16 @@ func (n *OpNode) init(ctx context.Context, cfg *Config) error { if err := n.initPProf(cfg); err != nil { return fmt.Errorf("failed to init profiling: %w", err) } + if err := n.initDA(ctx, cfg); err != nil { + return fmt.Errorf("failed to init da: %w", err) + } return nil } +func (n *OpNode) initDA(ctx context.Context, cfg *Config) error { + return driver.SetDAClient(cfg.DaConfig) +} + func (n *OpNode) initTracer(ctx context.Context, cfg *Config) error { if cfg.Tracer != nil { n.tracer = cfg.Tracer diff --git a/op-node/rollup/derive/blob_data_source.go b/op-node/rollup/derive/blob_data_source.go index 7780b60650..3d0fd94651 100644 --- a/op-node/rollup/derive/blob_data_source.go +++ b/op-node/rollup/derive/blob_data_source.go @@ -86,7 +86,10 @@ func (ds *BlobDataSource) open(ctx context.Context) ([]blobOrCalldata, error) { return nil, NewTemporaryError(fmt.Errorf("failed to open blob data source: %w", err)) } - data, hashes := dataAndHashesFromTxs(txs, &ds.dsCfg, ds.batcherAddr) + data, hashes, err := dataAndHashesFromTxs(txs, &ds.dsCfg, ds.batcherAddr) + if err != nil { + return nil, err + } if len(hashes) == 0 { // there are no blobs to fetch so we can return immediately @@ -115,11 +118,12 @@ func (ds *BlobDataSource) open(ctx context.Context) ([]blobOrCalldata, error) { // dataAndHashesFromTxs extracts calldata and datahashes from the input transactions and returns them. It // creates a placeholder blobOrCalldata element for each returned blob hash that must be populated // by fillBlobPointers after blob bodies are retrieved. 
-func dataAndHashesFromTxs(txs types.Transactions, config *DataSourceConfig, batcherAddr common.Address) ([]blobOrCalldata, []eth.IndexedBlobHash) { +func dataAndHashesFromTxs(txs types.Transactions, config *DataSourceConfig, batcherAddr common.Address) ([]blobOrCalldata, []eth.IndexedBlobHash, error) { data := []blobOrCalldata{} var hashes []eth.IndexedBlobHash blobIndex := 0 // index of each blob in the block's blob sidecar for _, tx := range txs { + logger := log.New("tx", tx.Hash()) // skip any non-batcher transactions if !isValidBatchTx(tx, config.l1Signer, config.batchInboxAddress, batcherAddr) { blobIndex += len(tx.BlobHashes()) @@ -127,8 +131,11 @@ func dataAndHashesFromTxs(txs types.Transactions, config *DataSourceConfig, batc } // handle non-blob batcher transactions by extracting their calldata if tx.Type() != types.BlobTxType { - calldata := eth.Data(tx.Data()) - data = append(data, blobOrCalldata{nil, &calldata}) + calldata, err := DataFromEVMTransactions(*config, batcherAddr, types.Transactions{tx}, logger) + if err != nil { + return nil, nil, err + } + data = append(data, blobOrCalldata{nil, &calldata[0]}) continue } // handle blob batcher transactions by extracting their blob hashes, ignoring any calldata. @@ -145,7 +152,7 @@ func dataAndHashesFromTxs(txs types.Transactions, config *DataSourceConfig, batc blobIndex += 1 } } - return data, hashes + return data, hashes, nil } // fillBlobPointers goes back through the data array and fills in the pointers to the fetched blob diff --git a/op-node/rollup/derive/blob_data_source_test.go b/op-node/rollup/derive/blob_data_source_test.go index aa9ef82cb9..0fea0cd562 100644 --- a/op-node/rollup/derive/blob_data_source_test.go +++ b/op-node/rollup/derive/blob_data_source_test.go @@ -42,7 +42,8 @@ func TestDataAndHashesFromTxs(t *testing.T) { } calldataTx, _ := types.SignNewTx(privateKey, signer, txData) txs := types.Transactions{calldataTx} - data, blobHashes := dataAndHashesFromTxs(txs, &config, batcherAddr) + data, blobHashes, err := dataAndHashesFromTxs(txs, &config, batcherAddr) + require.NoError(t, err) require.Equal(t, 1, len(data)) require.Equal(t, 0, len(blobHashes)) @@ -57,14 +58,16 @@ func TestDataAndHashesFromTxs(t *testing.T) { } blobTx, _ := types.SignNewTx(privateKey, signer, blobTxData) txs = types.Transactions{blobTx} - data, blobHashes = dataAndHashesFromTxs(txs, &config, batcherAddr) + data, blobHashes, err = dataAndHashesFromTxs(txs, &config, batcherAddr) + require.NoError(t, err) require.Equal(t, 1, len(data)) require.Equal(t, 1, len(blobHashes)) require.Nil(t, data[0].calldata) // try again with both the blob & calldata transactions and make sure both are picked up txs = types.Transactions{blobTx, calldataTx} - data, blobHashes = dataAndHashesFromTxs(txs, &config, batcherAddr) + data, blobHashes, err = dataAndHashesFromTxs(txs, &config, batcherAddr) + require.NoError(t, err) require.Equal(t, 2, len(data)) require.Equal(t, 1, len(blobHashes)) require.NotNil(t, data[1].calldata) @@ -72,7 +75,8 @@ func TestDataAndHashesFromTxs(t *testing.T) { // make sure blob tx to the batch inbox is ignored if not signed by the batcher blobTx, _ = types.SignNewTx(testutils.RandomKey(), signer, blobTxData) txs = types.Transactions{blobTx} - data, blobHashes = dataAndHashesFromTxs(txs, &config, batcherAddr) + data, blobHashes, err = dataAndHashesFromTxs(txs, &config, batcherAddr) + require.NoError(t, err) require.Equal(t, 0, len(data)) require.Equal(t, 0, len(blobHashes)) @@ -81,7 +85,8 @@ func TestDataAndHashesFromTxs(t *testing.T) { 
blobTxData.To = testutils.RandomAddress(rng) blobTx, _ = types.SignNewTx(privateKey, signer, blobTxData) txs = types.Transactions{blobTx} - data, blobHashes = dataAndHashesFromTxs(txs, &config, batcherAddr) + data, blobHashes, err = dataAndHashesFromTxs(txs, &config, batcherAddr) + require.NoError(t, err) require.Equal(t, 0, len(data)) require.Equal(t, 0, len(blobHashes)) } diff --git a/op-node/rollup/derive/calldata_source.go b/op-node/rollup/derive/calldata_source.go index 0a5d791577..e51875c0b2 100644 --- a/op-node/rollup/derive/calldata_source.go +++ b/op-node/rollup/derive/calldata_source.go @@ -2,19 +2,31 @@ package derive import ( "context" + "encoding/hex" "errors" "fmt" "io" + "time" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + fraxda "github.com/ethereum-optimism/optimism/frax-da" "github.com/ethereum-optimism/optimism/op-service/eth" ) -// CalldataSource is a fault tolerant approach to fetching data. +var daClient *fraxda.DAClient + +func SetDAClient(c *fraxda.DAClient) error { + if daClient != nil { + return errors.New("da client already configured") + } + daClient = c + return nil +} + // The constructor will never fail & it will instead re-attempt the fetcher // at a later point. type CalldataSource struct { @@ -44,9 +56,21 @@ func NewCalldataSource(ctx context.Context, log log.Logger, dsCfg DataSourceConf batcherAddr: batcherAddr, } } + + data, err := DataFromEVMTransactions(dsCfg, batcherAddr, txs, log.New("origin", ref)) + if err != nil { + return &CalldataSource{ + open: false, + ref: ref, + dsCfg: dsCfg, + fetcher: fetcher, + log: log, + batcherAddr: batcherAddr, + } + } return &CalldataSource{ open: true, - data: DataFromEVMTransactions(dsCfg, batcherAddr, txs, log.New("origin", ref)), + data: data, } } @@ -57,7 +81,11 @@ func (ds *CalldataSource) Next(ctx context.Context) (eth.Data, error) { if !ds.open { if _, txs, err := ds.fetcher.InfoAndTxsByHash(ctx, ds.ref.Hash); err == nil { ds.open = true - ds.data = DataFromEVMTransactions(ds.dsCfg, ds.batcherAddr, txs, ds.log) + ds.data, err = DataFromEVMTransactions(ds.dsCfg, ds.batcherAddr, txs, ds.log) + if err != nil { + // already wrapped + return nil, err + } } else if errors.Is(err, ethereum.NotFound) { return nil, NewResetError(fmt.Errorf("failed to open calldata source: %w", err)) } else { @@ -76,12 +104,40 @@ func (ds *CalldataSource) Next(ctx context.Context) (eth.Data, error) { // DataFromEVMTransactions filters all of the transactions and returns the calldata from transactions // that are sent to the batch inbox address from the batch sender address. // This will return an empty array if no valid transactions are found. 
-func DataFromEVMTransactions(dsCfg DataSourceConfig, batcherAddr common.Address, txs types.Transactions, log log.Logger) []eth.Data { +func DataFromEVMTransactions(dsCfg DataSourceConfig, batcherAddr common.Address, txs types.Transactions, log log.Logger) ([]eth.Data, error) { out := []eth.Data{} for _, tx := range txs { if isValidBatchTx(tx, dsCfg.l1Signer, dsCfg.batchInboxAddress, batcherAddr) { - out = append(out, tx.Data()) + data := tx.Data() + switch len(data) { + case 0: + out = append(out, data) + default: + switch data[0] { + case fraxda.DerivationVersionFraxDa: + log.Info("fraxda: requesting data", "id", hex.EncodeToString(data)) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + data, err := daClient.Read(ctx, data[1:]) + cancel() + if err != nil { + return nil, NewResetError(fmt.Errorf("fraxda: failed to fetch data for id %s: %w", hex.EncodeToString(data), err)) + } + out = append(out, data) + case fraxda.DerivationVersionCelestia: + log.Info("fraxda: requesting old celestia data", "id", hex.EncodeToString(data)) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + data, err := daClient.ReadCelestia(ctx, hex.EncodeToString(data[1:])) + cancel() + if err != nil { + return nil, NewResetError(fmt.Errorf("fraxda: failed to fetch celestia data for id %s: %w", hex.EncodeToString(data), err)) + } + out = append(out, data) + default: + out = append(out, data) + log.Info("fraxda: using eth fallback") + } + } } } - return out + return out, nil } diff --git a/op-node/rollup/derive/calldata_source_test.go b/op-node/rollup/derive/calldata_source_test.go index 01b2616cca..7db68a8b3f 100644 --- a/op-node/rollup/derive/calldata_source_test.go +++ b/op-node/rollup/derive/calldata_source_test.go @@ -121,7 +121,8 @@ func TestDataFromEVMTransactions(t *testing.T) { } } - out := DataFromEVMTransactions(DataSourceConfig{cfg.L1Signer(), cfg.BatchInboxAddress, false}, batcherAddr, txs, testlog.Logger(t, log.LevelCrit)) + out, err := DataFromEVMTransactions(DataSourceConfig{cfg.L1Signer(), cfg.BatchInboxAddress, false}, batcherAddr, txs, testlog.Logger(t, log.LevelCrit)) + require.NoError(t, err) require.ElementsMatch(t, expectedData, out) } diff --git a/op-node/rollup/driver/da.go b/op-node/rollup/driver/da.go new file mode 100644 index 0000000000..0644f90f52 --- /dev/null +++ b/op-node/rollup/driver/da.go @@ -0,0 +1,14 @@ +package driver + +import ( + fraxda "github.com/ethereum-optimism/optimism/frax-da" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" +) + +func SetDAClient(cfg fraxda.Config) error { + client, err := fraxda.NewDAClient(cfg.DaRpc) + if err != nil { + return err + } + return derive.SetDAClient(client) +} diff --git a/op-node/service.go b/op-node/service.go index cae25afd38..60a3916069 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -18,6 +18,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/urfave/cli/v2" + fraxda "github.com/ethereum-optimism/optimism/frax-da" "github.com/ethereum-optimism/optimism/op-node/flags" "github.com/ethereum-optimism/optimism/op-node/node" p2pcli "github.com/ethereum-optimism/optimism/op-node/p2p/cli" @@ -112,7 +113,8 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { ConductorRpc: ctx.String(flags.ConductorRpcFlag.Name), ConductorRpcTimeout: ctx.Duration(flags.ConductorRpcTimeoutFlag.Name), - AltDA: altda.ReadCLIConfig(ctx), + AltDA: altda.ReadCLIConfig(ctx), + DaConfig: fraxda.Config(fraxda.ReadCLIConfig(ctx)), } if err := 
cfg.LoadPersisted(log); err != nil { diff --git a/ops-bedrock/docker-compose.yml b/ops-bedrock/docker-compose.yml index 1cc5626876..44d414e8dd 100644 --- a/ops-bedrock/docker-compose.yml +++ b/ops-bedrock/docker-compose.yml @@ -11,10 +11,9 @@ volumes: challenger_data: da_data: op_log: - + da: services: - l1: build: context: . @@ -34,8 +33,8 @@ services: depends_on: - l1 build: - context: . - dockerfile: l1-lighthouse.Dockerfile + context: . + dockerfile: l1-lighthouse.Dockerfile ports: - "9000:9000" - "5052:5052" @@ -56,8 +55,8 @@ services: - l1 - l1-bn build: - context: . - dockerfile: l1-lighthouse.Dockerfile + context: . + dockerfile: l1-lighthouse.Dockerfile volumes: - "l1_vc_data:/db" - "${PWD}/beacon-data/data/keys:/validator_setup/validators" @@ -82,7 +81,7 @@ services: - "l2_data:/db" - "${PWD}/../.devnet/genesis-l2.json:/genesis.json" - "${PWD}/test-jwt-secret.txt:/config/jwt-secret.txt" - entrypoint: # pass the L2 specific flags by overriding the entry-point and adding extra arguments + entrypoint: # pass the L2 specific flags by overriding the entry-point and adding extra arguments - "/bin/sh" - "/entrypoint.sh" environment: @@ -129,6 +128,7 @@ services: --altda.enabled=${ALTDA_ENABLED} --altda.da-service=${ALTDA_SERVICE} --altda.da-server=http://da-server:3100 + --da.rpc=http://host.docker.internal:8080 ports: - "7545:8545" - "9003:9003" @@ -204,6 +204,7 @@ services: OP_BATCHER_ALTDA_DA_SERVICE: "${ALTDA_SERVICE}" OP_BATCHER_ALTDA_DA_SERVER: "http://da-server:3100" OP_BATCHER_DATA_AVAILABILITY_TYPE: "${DA_TYPE}" + OP_BATCHER_DA_RPC: http://host.docker.internal:8080 op-challenger: depends_on: diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index 18163a86c3..1f71528775 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -101,6 +101,11 @@ ARG OP_SUPERVISOR_VERSION=v0.0.0 RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build cd op-supervisor && make op-supervisor \ GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_SUPERVISOR_VERSION" +FROM --platform=$BUILDPLATFORM builder AS op-bootnode-builder +ARG OP_BOOTNODE_VERSION=v0.0.0 +RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build cd op-bootnode && make op-bootnode \ + GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_BOOTNODE_VERSION" + FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE AS cannon-target COPY --from=cannon-builder /app/cannon/bin/cannon /usr/local/bin/ CMD ["cannon"] @@ -150,3 +155,7 @@ CMD ["da-server"] FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE AS op-supervisor-target COPY --from=op-supervisor-builder /app/op-supervisor/bin/op-supervisor /usr/local/bin/ CMD ["op-supervisor"] + +FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE AS op-bootnode-target +COPY --from=op-bootnode-builder /app/op-bootnode/bin/op-bootnode /usr/local/bin/ +CMD ["op-bootnode"] diff --git a/ops/docker/op-stack-go/Dockerfile.dockerignore b/ops/docker/op-stack-go/Dockerfile.dockerignore index 1c0841df1f..f2f2d34777 100644 --- a/ops/docker/op-stack-go/Dockerfile.dockerignore +++ b/ops/docker/op-stack-go/Dockerfile.dockerignore @@ -3,6 +3,7 @@ * !/cannon +!/frax-da !/op-batcher !/op-bootnode !/op-chain-ops
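
Usage sketch (illustrative only, not part of the patch): the DAClient introduced in frax-da/client.go is what op-batcher uses to post batch data and what op-node's derivation uses to fetch it back. The sketch below exercises that flow directly, assuming the default endpoint from frax-da/cli.go; the payload is a placeholder and error handling is minimal.

package main

import (
	"context"
	"encoding/hex"
	"fmt"
	"log"
	"time"

	fraxda "github.com/ethereum-optimism/optimism/frax-da"
)

func main() {
	// Endpoint is the default from frax-da/cli.go; override with --da.rpc in practice.
	client, err := fraxda.NewDAClient("https://da-rpc.mainnet.frax.com")
	if err != nil {
		log.Fatalf("creating DA client: %v", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Write returns the multibase-decoded CID bytes; the batcher prefixes them with
	// DerivationVersionFraxDa (0xfc) before placing them in the batcher-inbox calldata.
	id, err := client.Write(ctx, []byte("example batch payload"))
	if err != nil {
		log.Fatalf("submitting data: %v", err)
	}
	calldata := append([]byte{fraxda.DerivationVersionFraxDa}, id...)
	fmt.Println("calldata commitment:", hex.EncodeToString(calldata))

	// On derivation, op-node strips the version byte and calls Read with the raw id
	// bytes to recover the original payload (see DataFromEVMTransactions above).
	payload, err := client.Read(ctx, calldata[1:])
	if err != nil {
		log.Fatalf("reading data: %v", err)
	}
	fmt.Println("recovered payload:", string(payload))
}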