diff --git a/.github/helm-ci-values/values-aztec-node.yaml b/.github/helm-ci-values/values-aztec-node.yaml new file mode 100644 index 0000000..7696035 --- /dev/null +++ b/.github/helm-ci-values/values-aztec-node.yaml @@ -0,0 +1,51 @@ +# CI test values for aztec-node chart +# Minimal configuration for fullnode role + +# Deployment role +role: fullnode + +# Network to connect to +network: testnet + +# Container image configuration +image: + repository: aztecprotocol/aztec + tag: latest + pullPolicy: IfNotPresent + +# Node configuration +node: + replicas: 1 + logLevel: "info" + + # L1 Ethereum configuration (mock URLs for CI) + l1ExecutionUrls: + - "http://mock-l1-execution:8545" + l1ConsensusUrls: + - "http://mock-l1-consensus:5052" + + # Resource allocation (minimal for CI) + resources: + requests: + cpu: "100m" + memory: "256Mi" + limits: + cpu: "200m" + memory: "512Mi" + +# Persistence disabled for CI +persistence: + enabled: false + +# Service configuration +service: + httpPort: 8080 + + p2p: + enabled: true + nodePortEnabled: false + port: 40400 + + admin: + enabled: true + port: 8081 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1aff534..1d18c53 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,12 +2,25 @@ repos: - repo: local hooks: - id: helm-docs - name: Helm Docs + name: Helm Docs (auto-regenerates README files) args: [] description: > Uses 'helm-docs' to create documentation from the Helm chart's 'values.yaml' file, and inserts the result into a corresponding 'README.md' file. + If this hook modifies files, stage the changes (git add) and commit again. entry: jnorwood/helm-docs:v1.11.0 files: (README\.md\.gotmpl|(Chart|requirements|values)\.yaml)$ language: docker_image require_serial: true + + - id: helm-lint + name: Helm Lint + description: > + Lint Helm charts using the same logic as CI workflow. + Skips charts listed in .github/helm-ci-values/skip-charts.txt. + Uses CI-specific values files when available. + entry: bash -c 'scripts/helm-lint-pre-commit.sh' + language: system + files: ^charts/.+\.(yaml|yml|tpl)$ + pass_filenames: false + require_serial: true diff --git a/README.md b/README.md index d16ef05..9bddd68 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,8 @@ This repo contains Helm Charts for deploying Obol Distributed Validator [middlew - [`charon-cluster`](charts/charon-cluster) - A chart for running a number of charon instances. - [`charon-relay`](charts/charon-relay) - A chart for running a charon [relay](https://docs.obol.org/learn/charon/charon-cli-reference#host-a-relay). - [`dv-pod`](charts/dv-pod) - A chart for running a Charon client + a Validator client, with automatic DKG completion as a feature. -- [`obol-app`](charts/obol-app) - A chart for running arbitrary docker images in the [Obol Stack](https://obol.org/stack). +- [`obol-app`](charts/obol-app) - A chart for running arbitrary docker images in the [Obol Stack](https://obol.org/stack). 
+- [`aztec-node`](charts/aztec-node) - Aztec network node deployment (Full Node, Sequencer, or Prover) ## Before you begin diff --git a/charts/aztec-node/Chart.yaml b/charts/aztec-node/Chart.yaml new file mode 100644 index 0000000..0158cd5 --- /dev/null +++ b/charts/aztec-node/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: aztec-node +description: A Helm chart for deploying an Aztec node +type: application +version: 0.1.0 +appVersion: "2.0.2" diff --git a/charts/aztec-node/README.md b/charts/aztec-node/README.md new file mode 100644 index 0000000..0bc2bb7 --- /dev/null +++ b/charts/aztec-node/README.md @@ -0,0 +1,495 @@ + +Aztec Node +=========== + +![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.0.2](https://img.shields.io/badge/AppVersion-2.0.2-informational?style=flat-square) + +A Helm chart for deploying an Aztec node + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| certificate.domains | list | `[]` | | +| certificate.enabled | bool | `false` | | +| customNetwork | object | `{"feeAssetHandlerContractAddress":null,"l1ChainId":null,"registryContractAddress":null,"slashFactoryAddress":null}` | Custom network - (not recommended) - Only for custom testnet usecases Must have deployed your own protocol contracts first | +| fullnameOverride | string | `""` | Overrides the chart computed fullname | +| hostNetwork | bool | `true` | Use host network - provides best P2P performance by binding directly to node's network This is the recommended configuration for Aztec nodes | +| image | object | `{"pullPolicy":"IfNotPresent","repository":"aztecprotocol/aztec","tag":"2.1.0-rc.24"}` | Image to use for the container | +| image.pullPolicy | string | `"IfNotPresent"` | Container pull policy | +| image.repository | string | `"aztecprotocol/aztec"` | Image repository | +| image.tag | string | `"2.1.0-rc.24"` | Image tag | +| initContainers | list | `[]` | Additional init containers | +| nameOverride | string | `""` | Overrides the chart name | +| network | string | `nil` | Network name - this is a predefined network - testnet, devnet | +| networkName | string | `"staging-public"` | Network identifier used in resource naming (l2-{role}-node-{networkName}-{component}) This appears in service/statefulset names for easy identification | +| node | object | `{"coinbase":null,"l1ConsensusHostApiKeyHeaders":[],"l1ConsensusHostApiKeys":[],"l1ConsensusUrls":["http://l1-full-node-sepolia-beacon.l1.svc.cluster.local:5052"],"l1ExecutionUrls":["http://l1-full-node-sepolia-execution.l1.svc.cluster.local:8545"],"logLevel":"info","metrics":{"otelCollectorEndpoint":"","otelExcludeMetrics":"","useGcloudLogging":false},"nodeJsOptions":["--no-warnings","--max-old-space-size=4096"],"preStartScript":"","remoteUrl":{"archiver":null,"blobSink":null,"proverBroker":null,"proverCoordinationNodes":[]},"replicas":1,"resources":{},"sentinel":{"enabled":false},"startCmd":[],"startupProbe":{"failureThreshold":20,"periodSeconds":30},"storage":{"archiveStorageMapSize":null,"dataDirectory":"/data","dataStoreMapSize":"134217728","p2pStorageMapSize":null,"worldStateMapSize":"134217728"}}` | Aztec node configuration | +| node.coinbase | string | `nil` | Address that will receive block or proof rewards For prover roles, this is the PROVER_ID | +| node.l1ExecutionUrls | list | 
`["http://l1-full-node-sepolia-execution.l1.svc.cluster.local:8545"]` | L1 Ethereum configuration Ethereum execution layer RPC endpoint(s) - comma separated list | +| node.logLevel | string | `"info"` | Log level - info, verbose, debug, trace | +| node.metrics | object | `{"otelCollectorEndpoint":"","otelExcludeMetrics":"","useGcloudLogging":false}` | Metrics configuration | +| node.nodeJsOptions | list | `["--no-warnings","--max-old-space-size=4096"]` | Node.js options | +| node.preStartScript | string | `""` | Pre-start script (runs before node starts) | +| node.remoteUrl | object | `{"archiver":null,"blobSink":null,"proverBroker":null,"proverCoordinationNodes":[]}` | Remote service URLs | +| node.replicas | int | `1` | Number of replicas | +| node.resources | object | `{}` | Resource requests and limits | +| node.sentinel | object | `{"enabled":false}` | Sentinel configuration - gathers slashing information | +| node.startCmd | list | `[]` | Start command flags Auto-generated based on role, but can be overridden for custom configurations Leave empty to use role-based defaults | +| node.startupProbe | object | `{"failureThreshold":20,"periodSeconds":30}` | Startup probe configuration | +| node.storage | object | `{"archiveStorageMapSize":null,"dataDirectory":"/data","dataStoreMapSize":"134217728","p2pStorageMapSize":null,"worldStateMapSize":"134217728"}` | Storage configuration | +| persistence.accessModes | list | `["ReadWriteOnce"]` | AccessModes | +| persistence.annotations | object | `{}` | Annotations for volume claim template | +| persistence.enabled | bool | `false` | Uses an emptyDir when not enabled | +| persistence.existingClaim | string | `nil` | Use an existing PVC | +| persistence.selector | object | `{}` | Selector for volume claim template | +| persistence.size | string | `"100Gi"` | Requested size | +| persistence.storageClassName | string | `nil` | Use a specific storage class | +| podAnnotations | object | `{}` | Pod annotations (e.g., for Keel auto-updates) | +| podManagementPolicy | string | `"Parallel"` | Pod management policy | +| prover.agent.count | int | `1` | Number of prover agents to run per pod | +| prover.agent.persistence | object | `{"size":"10Gi"}` | Persistence configuration for prover-agent Agent needs 10GB SSD per agent for CRS and temporary files | +| prover.agent.pollIntervalMs | int | `1000` | Agent polling interval in milliseconds | +| prover.agent.replicas | int | `1` | Number of prover agent replicas | +| prover.agent.resources | object | `{}` | Resource requests/limits for prover-agent pods Recommended: 32 cores, 128GB RAM per agent | +| prover.broker.persistence | object | `{"size":"10Gi"}` | Persistence configuration for prover-broker Broker needs 10GB SSD for job queue | +| prover.broker.resources | object | `{}` | Resource requests/limits for prover-broker pod | +| prover.id | string | `""` | Prover ID - address for receiving proof rewards Used by prover-node (usually matches publisherPrivateKey address) | +| prover.node.persistence | object | `{"size":"1000Gi"}` | Persistence configuration for prover-node Prover-node needs 1TB NVMe SSD for archiver data | +| prover.node.publisherPrivateKey | string | `""` | Ethereum private key for publishing proofs to L1 REQUIRED when role is 'prover' | +| prover.node.resources | object | `{}` | Resource requests/limits for prover-node pod | +| rbac.clusterRules | list | See `values.yaml` | Required ClusterRole rules | +| rbac.create | bool | `true` | Specifies whether RBAC resources are to be created | +| 
rbac.rules | list | See `values.yaml` | Required ClusterRole rules | +| role | string | `"sequencer"` | Role determines the type of Aztec node deployment Valid roles: fullnode, sequencer, prover | +| rollupVersion | string | `"canonical"` | Which rollup contract we want to follow from the registry | +| sequencer.attesterPrivateKey | string | `""` | Ethereum private key for attester (signs blocks and attestations) REQUIRED when role is 'sequencer' | +| service.admin.enabled | bool | `true` | | +| service.admin.port | int | `8081` | | +| service.headless.enabled | bool | `true` | | +| service.httpPort | int | `8080` | | +| service.ingress.annotations | object | `{}` | | +| service.ingress.enabled | bool | `false` | | +| service.ingress.hosts | list | `[]` | | +| service.p2p.announcePort | int | `40400` | | +| service.p2p.enabled | bool | `true` | | +| service.p2p.nodePort | int | `30400` | | +| service.p2p.nodePortEnabled | bool | `false` | | +| service.p2p.port | int | `40400` | | +| serviceAccount.annotations | object | `{}` | Annotations for the service account | +| serviceAccount.create | bool | `true` | Create a service account | +| serviceAccount.name | string | `""` | Name of the service account - if not set, the fullname will be used | +| updateStrategy | object | `{"type":"RollingUpdate"}` | Update strategy for the statefulset | + +# Aztec Node Helm Chart + +A Kubernetes Helm chart for deploying Aztec network nodes in different roles: Full Node, Sequencer, or Prover. + +## Overview + +This chart deploys Aztec nodes using a role-based architecture. A single chart handles three distinct deployment types: + +- **Full Node** - Participates in the network by syncing and validating blocks +- **Sequencer** - Produces blocks and participates in consensus +- **Prover** - Generates zero-knowledge proofs for the network + +## Quick Start + +### Prerequisites + +- Kubernetes cluster (v1.19+) +- Helm 3.0+ +- L1 Ethereum RPC access (execution + consensus layers) + +### Install a Full Node + +```bash +helm install aztec-fullnode ./charts/aztec-node \ + -f charts/aztec-node/values-examples/fullnode.yaml \ + -n aztec-testnet --create-namespace +``` + +### Install a Sequencer + +```bash +helm install aztec-sequencer ./charts/aztec-node \ + -f charts/aztec-node/values-examples/sequencer.yaml \ + --set sequencer.attesterPrivateKey="0xYOUR_PRIVATE_KEY" \ + -n aztec-testnet --create-namespace +``` + +**Requirements:** +- Ethereum private key with minimum 0.1 ETH on L1 (Sepolia for testnet) + +### Install a Prover + +```bash +helm install aztec-prover ./charts/aztec-node \ + -f charts/aztec-node/values-examples/prover.yaml \ + --set prover.node.publisherPrivateKey="0xYOUR_PRIVATE_KEY" \ + -n aztec-testnet --create-namespace +``` + +**Note:** The prover role creates 3 StatefulSets: +- `l2-prover-node-{networkName}-broker` - Manages the job queue +- `l2-prover-node-{networkName}-node` - Creates jobs and publishes proofs to L1 +- `l2-prover-node-{networkName}-agent` - Executes proof generation (can scale replicas) + +## Architecture + +### Role-Based Deployment + +The chart uses a `role` field to determine the deployment type: + +```yaml +role: fullnode # Options: fullnode | sequencer | prover +``` + +Each role automatically configures the appropriate: +- Container command and flags +- Resource requirements +- Storage configuration +- Service endpoints +- Required secrets + +### Storage Requirements + +All storage sizes match [Aztec's official specifications](https://docs.aztec.network/the_aztec_network): + 
+| Role/Component | Storage | Type | +|----------------|---------|------| +| Full Node | 1TB | NVMe SSD | +| Sequencer | 1TB | NVMe SSD | +| Prover Node | 1TB | NVMe SSD | +| Prover Broker | 10GB | SSD | +| Prover Agent | 10GB | SSD | + +## Configuration + +### Networks + +Connect to predefined Aztec networks: + +```yaml +network: testnet # Options: testnet | devnet +``` + +Or configure a custom network: + +```yaml +network: null # Disable predefined network +customNetwork: + l1ChainId: "11155111" + registryContractAddress: "0x..." + slashFactoryAddress: "0x..." + feeAssetHandlerContractAddress: "0x..." +``` + +### L1 Ethereum Configuration + +All roles require L1 Ethereum RPC access: + +```yaml +node: + l1ExecutionUrls: + - "http://l1-full-node-sepolia-execution.l1.svc.cluster.local:8545" + l1ConsensusUrls: + - "http://l1-full-node-sepolia-beacon.l1.svc.cluster.local:5052" +``` + +### Persistence + +Persistence is enabled by default and uses the cluster's default storage class: + +```yaml +persistence: + enabled: true + size: 1000Gi # Automatically set per role + # storageClassName: local-path # Optional: specify storage class + accessModes: + - ReadWriteOnce +``` + +**Component-Specific Sizes (Prover Role):** + +The prover role allows per-component storage configuration: + +```yaml +prover: + node: + persistence: + size: 1000Gi # Prover node (archiver data) + broker: + persistence: + size: 10Gi # Broker (job queue) + agent: + persistence: + size: 10Gi # Agent (CRS files) +``` + +### Networking + +**P2P Configuration:** + +```yaml +service: + p2p: + enabled: true + nodePortEnabled: false # Set true for external P2P + port: 40400 +``` + +**Host Networking (Optional):** + +```yaml +hostNetwork: true # Use host network for better P2P performance +``` + +**Note:** When using `hostNetwork: true`, ensure pod affinity is set to distribute pods across different nodes if running multiple replicas. + +### Resource Allocation + +Example resource configuration: + +```yaml +node: + resources: + requests: + cpu: "4" + memory: "16Gi" + limits: + cpu: "8" + memory: "32Gi" +``` + +**Prover-Specific Resources:** + +```yaml +prover: + broker: + resources: + requests: + cpu: "1" + memory: "4Gi" + node: + resources: + requests: + cpu: "2" + memory: "8Gi" + agent: + replicas: 2 # Scale prover agents + resources: + requests: + cpu: "16" # High CPU for proof generation + memory: "64Gi" +``` + +## Examples + +See detailed configuration examples in [`values-examples/`](./values-examples/): + +- [`fullnode.yaml`](./values-examples/fullnode.yaml) - Full node configuration +- [`sequencer.yaml`](./values-examples/sequencer.yaml) - Sequencer configuration +- [`prover.yaml`](./values-examples/prover.yaml) - Prover configuration +- [`README.md`](./values-examples/README.md) - Detailed deployment guide + +## Monitoring + +Access node endpoints (replace `sepolia` with your `networkName` value): + +```bash +# HTTP RPC endpoint +# For fullnode +kubectl port-forward -n aztec-testnet svc/l2-full-node-sepolia-node 8080:8080 +# For sequencer +kubectl port-forward -n aztec-testnet svc/l2-sequencer-node-sepolia-node 8080:8080 +# For prover (prover-node component) +kubectl port-forward -n aztec-testnet svc/l2-prover-node-sepolia-node 8080:8080 + +# Admin endpoint (example for sequencer) +kubectl port-forward -n aztec-testnet svc/l2-sequencer-node-sepolia-node 8081:8081 +``` + +### Verify Node is Running Properly + +**1. 
Check node sync status:** + +```bash +curl -X POST http://localhost:8080 --data '{"method": "node_getL2Tips"}' +``` + +You should see JSON response with the latest block number. If the block number is increasing, your node is syncing correctly. + +**2. Check P2P connectivity (TCP):** + +```bash +# Get the external IP or node port +kubectl get svc -n aztec-testnet + +# Test TCP connectivity (replace with your IP/port) +nc -vz 40400 +``` + +Expected: "Connection succeeded" + +**3. Check P2P connectivity (UDP):** + +```bash +nc -vu 40400 +``` + +Expected: "Connection succeeded" + +**4. View logs:** + +```bash +kubectl logs -n aztec-testnet -l app.kubernetes.io/name=aztec-node --tail=100 -f +``` + +Look for messages indicating: +- Block synchronization progress +- P2P peer connections +- No error messages + +## Upgrading + +### Upgrade a Release + +```bash +helm upgrade aztec-node ./charts/aztec-node \ + -f your-values.yaml \ + -n aztec-testnet +``` + +### Auto-Updates + +Enable automatic image updates by setting the image pull policy: + +```yaml +image: + repository: aztecprotocol/aztec + tag: latest + pullPolicy: Always # Pull latest image on every pod restart +``` + +**Important Notes:** +- Using `pullPolicy: Always` with `tag: latest` ensures you get the newest version when pods restart +- This is recommended for testnet/devnet deployments to stay up-to-date +- For production, pin to specific versions (e.g., `tag: "2.0.2"`) and use `pullPolicy: IfNotPresent` +- Sequencers should use `pullPolicy: Always` to maintain network compatibility + +**Trigger an update manually:** + +```bash +# Force pod restart to pull latest image (replace 'sepolia' with your networkName) +# For fullnode +kubectl rollout restart statefulset/l2-full-node-sepolia-node -n aztec-testnet +# For sequencer +kubectl rollout restart statefulset/l2-sequencer-node-sepolia-node -n aztec-testnet + +# For prover components +kubectl rollout restart statefulset/l2-prover-node-sepolia-broker -n aztec-testnet +kubectl rollout restart statefulset/l2-prover-node-sepolia-node -n aztec-testnet +kubectl rollout restart statefulset/l2-prover-node-sepolia-agent -n aztec-testnet +``` + +### Scale Prover Agents + +```bash +helm upgrade aztec-prover ./charts/aztec-node \ + -f charts/aztec-node/values-examples/prover.yaml \ + --set prover.agent.replicas=4 \ + -n aztec-testnet +``` + +## Uninstalling + +```bash +helm uninstall aztec-node -n aztec-testnet +``` + +**Note:** PersistentVolumeClaims are not automatically deleted. Remove manually if needed: + +```bash +kubectl delete pvc -n aztec-testnet -l app.kubernetes.io/name=aztec-node +``` + +## Security + +**⚠️ Important Security Notes:** + +1. **Private Keys:** Never commit private keys to version control +2. **Sequencer:** Use `--set sequencer.attesterPrivateKey="0x..."` when deploying +3. **Prover:** Use `--set prover.node.publisherPrivateKey="0x..."` when deploying +4. **Secrets Management:** Consider using external secret managers (Vault, Sealed Secrets, etc.) 
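+
+As a minimal sketch of keeping the raw key out of tracked files (assuming the key is already available to the deploy shell or CI job as the environment variable `ATTESTER_PRIVATE_KEY`, which is not something this chart defines):
+
+```bash
+# pass the key from the environment instead of hard-coding it in a values file
+helm upgrade --install aztec-sequencer ./charts/aztec-node \
+  -f charts/aztec-node/values-examples/sequencer.yaml \
+  --set sequencer.attesterPrivateKey="${ATTESTER_PRIVATE_KEY}" \
+  -n aztec-testnet
+```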
+ +## Troubleshooting + +### Pod Not Starting + +Check startup probe timeout (sequencers may need longer): + +```yaml +node: + startupProbe: + periodSeconds: 60 + failureThreshold: 30 # 30 minutes max +``` + +### Storage Issues + +Verify PVC creation: + +```bash +kubectl get pvc -n aztec-testnet +``` + +Check storage class availability: + +```bash +kubectl get storageclass +``` + +### P2P Connectivity + +For external P2P access, enable NodePort: + +```yaml +service: + p2p: + nodePortEnabled: true +``` + +Or use host networking: + +```yaml +hostNetwork: true +``` + +### Prover Components Not Communicating + +Verify all 3 StatefulSets are running: + +```bash +kubectl get statefulsets -n aztec-testnet +``` + +Check service DNS resolution: + +```bash +kubectl get svc -n aztec-testnet | grep prover +``` + +## Resources + +- [Aztec Documentation](https://docs.aztec.network) +- [Aztec Network Guide](https://docs.aztec.network/the_aztec_network) +- [Running an Aztec Node](https://docs.aztec.network/the_aztec_network/guides/run_nodes) +- [Chart Values Reference](./values.yaml) + +## License + +Apache 2.0 + +## Contributing + +Contributions welcome! Please submit issues and pull requests to the repository. diff --git a/charts/aztec-node/README.md.gotmpl b/charts/aztec-node/README.md.gotmpl new file mode 100644 index 0000000..346c0d0 --- /dev/null +++ b/charts/aztec-node/README.md.gotmpl @@ -0,0 +1,430 @@ + +Aztec Node +=========== +{{ template "chart.deprecationWarning" . }} + +{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }} + +{{ template "chart.description" . }} + +{{ template "chart.homepageLine" . }} + +{{ template "chart.sourcesSection" . }} + +{{ template "chart.requirementsSection" . }} + +{{ template "chart.valuesSection" . }} + +# Aztec Node Helm Chart + +A Kubernetes Helm chart for deploying Aztec network nodes in different roles: Full Node, Sequencer, or Prover. + +## Overview + +This chart deploys Aztec nodes using a role-based architecture. 
A single chart handles three distinct deployment types: + +- **Full Node** - Participates in the network by syncing and validating blocks +- **Sequencer** - Produces blocks and participates in consensus +- **Prover** - Generates zero-knowledge proofs for the network + +## Quick Start + +### Prerequisites + +- Kubernetes cluster (v1.19+) +- Helm 3.0+ +- L1 Ethereum RPC access (execution + consensus layers) + +### Install a Full Node + +```bash +helm install aztec-fullnode ./charts/aztec-node \ + -f charts/aztec-node/values-examples/fullnode.yaml \ + -n aztec-testnet --create-namespace +``` + +### Install a Sequencer + +```bash +helm install aztec-sequencer ./charts/aztec-node \ + -f charts/aztec-node/values-examples/sequencer.yaml \ + --set sequencer.attesterPrivateKey="0xYOUR_PRIVATE_KEY" \ + -n aztec-testnet --create-namespace +``` + +**Requirements:** +- Ethereum private key with minimum 0.1 ETH on L1 (Sepolia for testnet) + +### Install a Prover + +```bash +helm install aztec-prover ./charts/aztec-node \ + -f charts/aztec-node/values-examples/prover.yaml \ + --set prover.node.publisherPrivateKey="0xYOUR_PRIVATE_KEY" \ + -n aztec-testnet --create-namespace +``` + +**Note:** The prover role creates 3 StatefulSets: +- `l2-prover-node-{networkName}-broker` - Manages the job queue +- `l2-prover-node-{networkName}-node` - Creates jobs and publishes proofs to L1 +- `l2-prover-node-{networkName}-agent` - Executes proof generation (can scale replicas) + +## Architecture + +### Role-Based Deployment + +The chart uses a `role` field to determine the deployment type: + +```yaml +role: fullnode # Options: fullnode | sequencer | prover +``` + +Each role automatically configures the appropriate: +- Container command and flags +- Resource requirements +- Storage configuration +- Service endpoints +- Required secrets + +### Storage Requirements + +All storage sizes match [Aztec's official specifications](https://docs.aztec.network/the_aztec_network): + +| Role/Component | Storage | Type | +|----------------|---------|------| +| Full Node | 1TB | NVMe SSD | +| Sequencer | 1TB | NVMe SSD | +| Prover Node | 1TB | NVMe SSD | +| Prover Broker | 10GB | SSD | +| Prover Agent | 10GB | SSD | + +## Configuration + +### Networks + +Connect to predefined Aztec networks: + +```yaml +network: testnet # Options: testnet | devnet +``` + +Or configure a custom network: + +```yaml +network: null # Disable predefined network +customNetwork: + l1ChainId: "11155111" + registryContractAddress: "0x..." + slashFactoryAddress: "0x..." + feeAssetHandlerContractAddress: "0x..." 
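+  # these addresses must come from your own deployment of the protocol contracts on the target L1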
+``` + +### L1 Ethereum Configuration + +All roles require L1 Ethereum RPC access: + +```yaml +node: + l1ExecutionUrls: + - "http://l1-full-node-sepolia-execution.l1.svc.cluster.local:8545" + l1ConsensusUrls: + - "http://l1-full-node-sepolia-beacon.l1.svc.cluster.local:5052" +``` + +### Persistence + +Persistence is enabled by default and uses the cluster's default storage class: + +```yaml +persistence: + enabled: true + size: 1000Gi # Automatically set per role + # storageClassName: local-path # Optional: specify storage class + accessModes: + - ReadWriteOnce +``` + +**Component-Specific Sizes (Prover Role):** + +The prover role allows per-component storage configuration: + +```yaml +prover: + node: + persistence: + size: 1000Gi # Prover node (archiver data) + broker: + persistence: + size: 10Gi # Broker (job queue) + agent: + persistence: + size: 10Gi # Agent (CRS files) +``` + +### Networking + +**P2P Configuration:** + +```yaml +service: + p2p: + enabled: true + nodePortEnabled: false # Set true for external P2P + port: 40400 +``` + +**Host Networking (Optional):** + +```yaml +hostNetwork: true # Use host network for better P2P performance +``` + +**Note:** When using `hostNetwork: true`, ensure pod affinity is set to distribute pods across different nodes if running multiple replicas. + +### Resource Allocation + +Example resource configuration: + +```yaml +node: + resources: + requests: + cpu: "4" + memory: "16Gi" + limits: + cpu: "8" + memory: "32Gi" +``` + +**Prover-Specific Resources:** + +```yaml +prover: + broker: + resources: + requests: + cpu: "1" + memory: "4Gi" + node: + resources: + requests: + cpu: "2" + memory: "8Gi" + agent: + replicas: 2 # Scale prover agents + resources: + requests: + cpu: "16" # High CPU for proof generation + memory: "64Gi" +``` + +## Examples + +See detailed configuration examples in [`values-examples/`](./values-examples/): + +- [`fullnode.yaml`](./values-examples/fullnode.yaml) - Full node configuration +- [`sequencer.yaml`](./values-examples/sequencer.yaml) - Sequencer configuration +- [`prover.yaml`](./values-examples/prover.yaml) - Prover configuration +- [`README.md`](./values-examples/README.md) - Detailed deployment guide + +## Monitoring + +Access node endpoints (replace `sepolia` with your `networkName` value): + +```bash +# HTTP RPC endpoint +# For fullnode +kubectl port-forward -n aztec-testnet svc/l2-full-node-sepolia-node 8080:8080 +# For sequencer +kubectl port-forward -n aztec-testnet svc/l2-sequencer-node-sepolia-node 8080:8080 +# For prover (prover-node component) +kubectl port-forward -n aztec-testnet svc/l2-prover-node-sepolia-node 8080:8080 + +# Admin endpoint (example for sequencer) +kubectl port-forward -n aztec-testnet svc/l2-sequencer-node-sepolia-node 8081:8081 +``` + +### Verify Node is Running Properly + +**1. Check node sync status:** + +```bash +curl -X POST http://localhost:8080 --data '{"method": "node_getL2Tips"}' +``` + +You should see JSON response with the latest block number. If the block number is increasing, your node is syncing correctly. + +**2. Check P2P connectivity (TCP):** + +```bash +# Get the external IP or node port +kubectl get svc -n aztec-testnet + +# Test TCP connectivity (replace with your IP/port) +nc -vz 40400 +``` + +Expected: "Connection succeeded" + +**3. Check P2P connectivity (UDP):** + +```bash +nc -vu 40400 +``` + +Expected: "Connection succeeded" + +**4. 
View logs:** + +```bash +kubectl logs -n aztec-testnet -l app.kubernetes.io/name=aztec-node --tail=100 -f +``` + +Look for messages indicating: +- Block synchronization progress +- P2P peer connections +- No error messages + +## Upgrading + +### Upgrade a Release + +```bash +helm upgrade aztec-node ./charts/aztec-node \ + -f your-values.yaml \ + -n aztec-testnet +``` + +### Auto-Updates + +Enable automatic image updates by setting the image pull policy: + +```yaml +image: + repository: aztecprotocol/aztec + tag: latest + pullPolicy: Always # Pull latest image on every pod restart +``` + +**Important Notes:** +- Using `pullPolicy: Always` with `tag: latest` ensures you get the newest version when pods restart +- This is recommended for testnet/devnet deployments to stay up-to-date +- For production, pin to specific versions (e.g., `tag: "2.0.2"`) and use `pullPolicy: IfNotPresent` +- Sequencers should use `pullPolicy: Always` to maintain network compatibility + +**Trigger an update manually:** + +```bash +# Force pod restart to pull latest image (replace 'sepolia' with your networkName) +# For fullnode +kubectl rollout restart statefulset/l2-full-node-sepolia-node -n aztec-testnet +# For sequencer +kubectl rollout restart statefulset/l2-sequencer-node-sepolia-node -n aztec-testnet + +# For prover components +kubectl rollout restart statefulset/l2-prover-node-sepolia-broker -n aztec-testnet +kubectl rollout restart statefulset/l2-prover-node-sepolia-node -n aztec-testnet +kubectl rollout restart statefulset/l2-prover-node-sepolia-agent -n aztec-testnet +``` + +### Scale Prover Agents + +```bash +helm upgrade aztec-prover ./charts/aztec-node \ + -f charts/aztec-node/values-examples/prover.yaml \ + --set prover.agent.replicas=4 \ + -n aztec-testnet +``` + +## Uninstalling + +```bash +helm uninstall aztec-node -n aztec-testnet +``` + +**Note:** PersistentVolumeClaims are not automatically deleted. Remove manually if needed: + +```bash +kubectl delete pvc -n aztec-testnet -l app.kubernetes.io/name=aztec-node +``` + +## Security + +**⚠️ Important Security Notes:** + +1. **Private Keys:** Never commit private keys to version control +2. **Sequencer:** Use `--set sequencer.attesterPrivateKey="0x..."` when deploying +3. **Prover:** Use `--set prover.node.publisherPrivateKey="0x..."` when deploying +4. **Secrets Management:** Consider using external secret managers (Vault, Sealed Secrets, etc.) 
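+
+A minimal sketch of keeping the raw key in a local, git-ignored values file rather than on the command line (the `secrets.local.yaml` name is only an example, not part of the chart):
+
+```bash
+# hypothetical local file kept out of version control
+cat > secrets.local.yaml <<'EOF'
+sequencer:
+  attesterPrivateKey: "0x..."
+EOF
+
+helm upgrade --install aztec-sequencer ./charts/aztec-node \
+  -f charts/aztec-node/values-examples/sequencer.yaml \
+  -f secrets.local.yaml \
+  -n aztec-testnet
+```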
+ +## Troubleshooting + +### Pod Not Starting + +Check startup probe timeout (sequencers may need longer): + +```yaml +node: + startupProbe: + periodSeconds: 60 + failureThreshold: 30 # 30 minutes max +``` + +### Storage Issues + +Verify PVC creation: + +```bash +kubectl get pvc -n aztec-testnet +``` + +Check storage class availability: + +```bash +kubectl get storageclass +``` + +### P2P Connectivity + +For external P2P access, enable NodePort: + +```yaml +service: + p2p: + nodePortEnabled: true +``` + +Or use host networking: + +```yaml +hostNetwork: true +``` + +### Prover Components Not Communicating + +Verify all 3 StatefulSets are running: + +```bash +kubectl get statefulsets -n aztec-testnet +``` + +Check service DNS resolution: + +```bash +kubectl get svc -n aztec-testnet | grep prover +``` + +## Resources + +- [Aztec Documentation](https://docs.aztec.network) +- [Aztec Network Guide](https://docs.aztec.network/the_aztec_network) +- [Running an Aztec Node](https://docs.aztec.network/the_aztec_network/guides/run_nodes) +- [Chart Values Reference](./values.yaml) + +## License + +Apache 2.0 + +## Contributing + +Contributions welcome! Please submit issues and pull requests to the repository. diff --git a/charts/aztec-node/aztec-node.yaml b/charts/aztec-node/aztec-node.yaml new file mode 100644 index 0000000..f2500a9 --- /dev/null +++ b/charts/aztec-node/aztec-node.yaml @@ -0,0 +1,74 @@ +# Sequencer deployment for Sepolia testnet +role: sequencer + +# Sequencer configuration +sequencer: + # Ethereum private key for attester (signs blocks and attestations) + # Ensure this account has at least 0.1 ETH on Sepolia to avoid slashing + attesterPrivateKey: "" + # Aztec address to receive unburnt transaction fees + feeRecipient: "0x0000000000000000000000000000000000000000000000000000000000000000" + +# -- Image to use for the container +image: + # -- Image repository + repository: aztecprotocol/aztec + # -- Image tag + tag: latest # Auto-updates to latest stable (currently 2.0.3) + # -- Container pull policy + pullPolicy: Always # Required for Keel auto-updates + +network: testnet + +# Use host network for P2P connectivity (port 40400) +hostNetwork: true + +# Keel auto-update configuration +podAnnotations: + keel.sh/policy: force # Always update to latest tag + keel.sh/trigger: poll # Poll for updates + keel.sh/pollSchedule: "@every 10m" # Check every 10 minutes + +node: + replicas: 1 + logLevel: "debug; info: aztec:simulator, json-rpc" + + l1ExecutionUrls: ["http://l1-full-node-sepolia-execution.l1.svc.cluster.local:8545"] + l1ConsensusUrls: ["http://l1-full-node-sepolia-beacon.l1.svc.cluster.local:5052"] + + startCmd: + - --node + - --archiver + - --sequencer + - --network + - testnet + + + startupProbe: + # -- Period seconds + periodSeconds: 60 + # -- Failure threshold + failureThreshold: 60 + +persistence: + enabled: true + size: 512Gi + storageClassName: local-path + accessModes: + - ReadWriteOnce + selector: {} + +# Pin to specific node for local-path storage +nodeSelector: + kubernetes.io/hostname: silvernuc2 + +service: + p2p: + enabled: true + nodePortEnabled: false + port: 40400 + announcePort: 40400 + admin: + enabled: true + port: 8082 + httpPort: 8080 \ No newline at end of file diff --git a/charts/aztec-node/templates/_helpers.tpl b/charts/aztec-node/templates/_helpers.tpl new file mode 100644 index 0000000..a466f50 --- /dev/null +++ b/charts/aztec-node/templates/_helpers.tpl @@ -0,0 +1,167 @@ +{{/* +Expand the name of the chart. 
+*/}} +{{- define "chart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "chart.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "chart.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "chart.labels" -}} +helm.sh/chart: {{ include "chart.chart" . }} +{{ include "chart.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "chart.selectorLabels" -}} +app.kubernetes.io/name: {{ include "chart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "chart.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "chart.resourceName" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create the name of the cluster role. +It needs to be namespace prefixed to avoid naming conflicts when using the same deployment name across namespaces. +*/}} +{{- define "chart.clusterRoleName" -}} +{{ .Release.Namespace }}-{{ include "chart.resourceName" . }} +{{- end }} + +{{/* +Generate standardized resource names following pattern: l2-{role}-node-{network}-{component} +Usage: + {{ include "chart.resourceName" . }} - For fullnode/sequencer + {{ include "chart.resourceName" (dict "context" . "component" "broker") }} - For prover components +*/}} +{{- define "chart.resourceName" -}} +{{- $context := . -}} +{{- $component := "" -}} +{{- if hasKey . "context" -}} + {{- $context = .context -}} + {{- $component = .component | default "" -}} +{{- end -}} +{{- $role := $context.Values.role -}} +{{- if eq $role "fullnode" -}} + {{- $role = "full" -}} +{{- end -}} +{{- $network := $context.Values.networkName | default "sepolia" -}} +{{- if $component -}} + {{- printf "l2-%s-node-%s-%s" $role $network $component -}} +{{- else -}} + {{- printf "l2-%s-node-%s-node" $role $network -}} +{{- end -}} +{{- end -}} + +{{/* +Validate that the role is one of the allowed values +*/}} +{{- define "chart.validateRole" -}} +{{- $validRoles := list "fullnode" "sequencer" "prover" -}} +{{- if not (has .Values.role $validRoles) -}} +{{- fail (printf "Invalid role '%s'. 
Must be one of: %s" .Values.role (join ", " $validRoles)) -}} +{{- end -}} +{{- end -}} + +{{/* +Validate sequencer configuration +*/}} +{{- define "chart.validateSequencer" -}} +{{- if eq .Values.role "sequencer" -}} +{{- if not .Values.sequencer.attesterPrivateKey -}} +{{- fail "sequencer.attesterPrivateKey is REQUIRED when role is 'sequencer'" -}} +{{- end -}} +{{- if not (hasPrefix "0x" .Values.sequencer.attesterPrivateKey) -}} +{{- fail "sequencer.attesterPrivateKey must start with '0x'" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate prover configuration +*/}} +{{- define "chart.validateProver" -}} +{{- if eq .Values.role "prover" -}} +{{- if not .Values.prover.node.publisherPrivateKey -}} +{{- fail "prover.node.publisherPrivateKey is REQUIRED when role is 'prover'" -}} +{{- end -}} +{{- if not (hasPrefix "0x" .Values.prover.node.publisherPrivateKey) -}} +{{- fail "prover.node.publisherPrivateKey must start with '0x'" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Generate startCmd based on role (if not overridden) +Only used for fullnode and sequencer roles +Prover role uses separate component-specific commands +*/}} +{{- define "chart.startCmd" -}} +{{- if .Values.node.startCmd -}} +{{- .Values.node.startCmd | toYaml -}} +{{- else -}} +{{- if eq .Values.role "fullnode" }} +- --node +- --archiver +{{- else if eq .Values.role "sequencer" }} +- --node +- --archiver +- --sequencer +{{- end }} +{{- if .Values.network }} +- --network +- {{ .Values.network }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Run all validations +*/}} +{{- define "chart.validate" -}} +{{- include "chart.validateRole" . -}} +{{- include "chart.validateSequencer" . -}} +{{- include "chart.validateProver" . -}} +{{- end -}} diff --git a/charts/aztec-node/templates/clusterrole.yaml b/charts/aztec-node/templates/clusterrole.yaml new file mode 100644 index 0000000..fc07c5e --- /dev/null +++ b/charts/aztec-node/templates/clusterrole.yaml @@ -0,0 +1,10 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "chart.clusterRoleName" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} +rules: +{{- toYaml .Values.rbac.clusterRules | nindent 0 }} +{{- end }} diff --git a/charts/aztec-node/templates/clusterrolebinding.yaml b/charts/aztec-node/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..7b0bfd3 --- /dev/null +++ b/charts/aztec-node/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "chart.clusterRoleName" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "chart.clusterRoleName" . }} +subjects: + - kind: ServiceAccount + name: {{ include "chart.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/aztec-node/templates/role.yaml b/charts/aztec-node/templates/role.yaml new file mode 100644 index 0000000..10d5acb --- /dev/null +++ b/charts/aztec-node/templates/role.yaml @@ -0,0 +1,10 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "chart.serviceAccountName" . }} + labels: + {{- include "chart.labels" . 
| nindent 4 }} +rules: +{{- toYaml .Values.rbac.rules | nindent 0 }} +{{- end }} diff --git a/charts/aztec-node/templates/rolebinding.yaml b/charts/aztec-node/templates/rolebinding.yaml new file mode 100644 index 0000000..32d7a79 --- /dev/null +++ b/charts/aztec-node/templates/rolebinding.yaml @@ -0,0 +1,15 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "chart.serviceAccountName" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "chart.serviceAccountName" . }} +subjects: + - kind: ServiceAccount + name: {{ include "chart.serviceAccountName" . }} +{{- end }} diff --git a/charts/aztec-node/templates/secret.yaml b/charts/aztec-node/templates/secret.yaml new file mode 100644 index 0000000..d51ff5d --- /dev/null +++ b/charts/aztec-node/templates/secret.yaml @@ -0,0 +1,19 @@ +{{- if eq .Values.role "sequencer" }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "chart.resourceName" . }}-keystore + labels: + {{- include "chart.labels" . | nindent 4 }} +stringData: + keystore.json: | + { + "schemaVersion": 1, + "validators": [ + { + "attester": ["{{ .Values.sequencer.attesterPrivateKey }}"], + "feeRecipient": "{{ .Values.sequencer.feeRecipient }}" + } + ] + } +{{- end }} diff --git a/charts/aztec-node/templates/service-prover-agent.yaml b/charts/aztec-node/templates/service-prover-agent.yaml new file mode 100644 index 0000000..15d0b00 --- /dev/null +++ b/charts/aztec-node/templates/service-prover-agent.yaml @@ -0,0 +1,26 @@ +{{- if eq .Values.role "prover" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "chart.resourceName" (dict "context" . "component" "agent") }} + labels: + {{- include "chart.labels" . | nindent 4 }} + aztec.network/role: prover + aztec.network/component: prover-agent +spec: + clusterIP: None + selector: + {{- include "chart.selectorLabels" . | nindent 4 }} + app: prover-agent + ports: + - port: {{ .Values.service.httpPort }} + name: http-rpc + protocol: TCP + targetPort: http-rpc + {{- if .Values.service.admin.enabled }} + - port: {{ .Values.service.admin.port }} + name: admin + protocol: TCP + targetPort: admin + {{- end }} +{{- end }} diff --git a/charts/aztec-node/templates/service-prover-broker.yaml b/charts/aztec-node/templates/service-prover-broker.yaml new file mode 100644 index 0000000..b301f65 --- /dev/null +++ b/charts/aztec-node/templates/service-prover-broker.yaml @@ -0,0 +1,26 @@ +{{- if eq .Values.role "prover" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "chart.resourceName" (dict "context" . "component" "broker") }} + labels: + {{- include "chart.labels" . | nindent 4 }} + aztec.network/role: prover + aztec.network/component: prover-broker +spec: + clusterIP: None + selector: + {{- include "chart.selectorLabels" . 
| nindent 4 }} + app: prover-broker + ports: + - port: {{ .Values.service.httpPort }} + name: http-rpc + protocol: TCP + targetPort: http-rpc + {{- if .Values.service.admin.enabled }} + - port: {{ .Values.service.admin.port }} + name: admin + protocol: TCP + targetPort: admin + {{- end }} +{{- end }} diff --git a/charts/aztec-node/templates/service-prover-node.yaml b/charts/aztec-node/templates/service-prover-node.yaml new file mode 100644 index 0000000..006b7e9 --- /dev/null +++ b/charts/aztec-node/templates/service-prover-node.yaml @@ -0,0 +1,36 @@ +{{- if eq .Values.role "prover" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "chart.resourceName" (dict "context" . "component" "node") }} + labels: + {{- include "chart.labels" . | nindent 4 }} + aztec.network/role: prover + aztec.network/component: prover-node +spec: + clusterIP: None + selector: + {{- include "chart.selectorLabels" . | nindent 4 }} + app: prover-node + ports: + - port: {{ .Values.service.httpPort }} + name: http-rpc + protocol: TCP + targetPort: http-rpc + {{- if .Values.service.admin.enabled }} + - port: {{ .Values.service.admin.port }} + name: admin + protocol: TCP + targetPort: admin + {{- end }} + {{- if .Values.service.p2p.enabled }} + - port: {{ .Values.service.p2p.port }} + name: p2p-tcp + protocol: TCP + targetPort: p2p-tcp + - port: {{ .Values.service.p2p.port }} + name: p2p-udp + protocol: UDP + targetPort: p2p-udp + {{- end }} +{{- end }} diff --git a/charts/aztec-node/templates/serviceaccount.yaml b/charts/aztec-node/templates/serviceaccount.yaml new file mode 100644 index 0000000..068ee50 --- /dev/null +++ b/charts/aztec-node/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "chart.serviceAccountName" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/aztec-node/templates/statefulset-prover-agent.yaml b/charts/aztec-node/templates/statefulset-prover-agent.yaml new file mode 100644 index 0000000..9239b2b --- /dev/null +++ b/charts/aztec-node/templates/statefulset-prover-agent.yaml @@ -0,0 +1,139 @@ +{{- if eq .Values.role "prover" }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "chart.resourceName" (dict "context" . "component" "agent") }} + labels: + {{- include "chart.labels" . | nindent 4 }} + aztec.network/role: prover + aztec.network/component: prover-agent +spec: + serviceName: {{ include "chart.resourceName" (dict "context" . "component" "agent") }} + replicas: {{ .Values.prover.agent.replicas }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + {{- toYaml .Values.node.updateStrategy | nindent 4 }} + selector: + matchLabels: + {{- include "chart.selectorLabels" . | nindent 6 }} + app: prover-agent + template: + metadata: + labels: + {{- include "chart.selectorLabels" . | nindent 8 }} + app: prover-agent + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "chart.serviceAccountName" . }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + initContainers: + {{- tpl (toYaml .Values.initContainers | nindent 8) $ }} + {{- end }} + containers: + - name: prover-agent + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/bash + - -c + - | + start_cmd=("node" "/usr/src/yarn-project/aztec/dest/bin/index.js" "start" "--prover-agent") + {{- if .Values.network }} + start_cmd+=("--network" "{{ .Values.network }}") + {{- end }} + "${start_cmd[@]}" + volumeMounts: + - name: storage + mountPath: {{ .Values.node.storage.dataDirectory }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: K8S_POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + - name: K8S_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OTEL_SERVICE_NAME + value: prover-agent + - name: K8S_NAMESPACE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.network }} + - name: NETWORK + value: "{{ .Values.network }}" + {{- else }} + - name: REGISTRY_CONTRACT_ADDRESS + value: "{{ .Values.customNetwork.registryContractAddress }}" + - name: L1_CHAIN_ID + value: "{{ .Values.customNetwork.l1ChainId }}" + - name: SLASH_FACTORY_ADDRESS + value: "{{ .Values.customNetwork.slashFactoryAddress }}" + - name: FEE_ASSET_HANDLER_CONTRACT_ADDRESS + value: "{{ .Values.customNetwork.feeAssetHandlerContractAddress }}" + {{- end }} + - name: NODE_OPTIONS + value: {{ join " " .Values.node.nodeJsOptions | quote }} + - name: LOG_LEVEL + value: "{{ .Values.node.logLevel }}" + - name: LOG_JSON + value: "1" + - name: PROVER_AGENT_COUNT + value: {{ .Values.prover.agent.count | quote }} + - name: PROVER_AGENT_POLL_INTERVAL_MS + value: {{ .Values.prover.agent.pollIntervalMs | quote }} + # Prover broker is running in the same deployment + - name: PROVER_BROKER_HOST + value: "http://{{ include "chart.resourceName" (dict "context" . 
"component" "broker") }}:{{ .Values.service.httpPort }}" + {{- if .Values.prover.id }} + - name: PROVER_ID + value: {{ .Values.prover.id | quote }} + {{- end }} + - name: DATA_DIRECTORY + value: {{ .Values.node.storage.dataDirectory | quote }} + resources: + {{- if .Values.prover.agent.resources }} + {{- toYaml .Values.prover.agent.resources | nindent 12 }} + {{- else }} + {{- toYaml .Values.node.resources | nindent 12 }} + {{- end }} + {{- if not .Values.persistence.enabled }} + volumes: + - name: storage + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + volumes: + - name: storage + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: storage + annotations: + {{- toYaml .Values.persistence.annotations | nindent 10 }} + spec: + accessModes: + {{- toYaml .Values.persistence.accessModes | nindent 12 }} + resources: + requests: + storage: {{ .Values.prover.agent.persistence.size | default .Values.persistence.size | quote}} + storageClassName: {{ .Values.persistence.storageClassName }} + {{- if .Values.persistence.selector }} + selector: + {{- toYaml .Values.persistence.selector | nindent 12 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/aztec-node/templates/statefulset-prover-broker.yaml b/charts/aztec-node/templates/statefulset-prover-broker.yaml new file mode 100644 index 0000000..d82770b --- /dev/null +++ b/charts/aztec-node/templates/statefulset-prover-broker.yaml @@ -0,0 +1,155 @@ +{{- if eq .Values.role "prover" }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "chart.resourceName" (dict "context" . "component" "broker") }} + labels: + {{- include "chart.labels" . | nindent 4 }} + aztec.network/role: prover + aztec.network/component: prover-broker +spec: + serviceName: {{ include "chart.resourceName" (dict "context" . "component" "broker") }} + replicas: 1 + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + {{- toYaml .Values.node.updateStrategy | nindent 4 }} + selector: + matchLabels: + {{- include "chart.selectorLabels" . | nindent 6 }} + app: prover-broker + template: + metadata: + labels: + {{- include "chart.selectorLabels" . | nindent 8 }} + app: prover-broker + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "chart.serviceAccountName" . }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + initContainers: + {{- tpl (toYaml .Values.initContainers | nindent 8) $ }} + {{- end }} + containers: + - name: prover-broker + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/bash + - -c + - | + start_cmd=("node" "/usr/src/yarn-project/aztec/dest/bin/index.js" "start" "--prover-broker") + {{- if .Values.network }} + start_cmd+=("--network" "{{ .Values.network }}") + {{- end }} + "${start_cmd[@]}" + startupProbe: + httpGet: + path: /status + port: {{ .Values.service.httpPort }} + periodSeconds: {{ .Values.node.startupProbe.periodSeconds }} + failureThreshold: {{ .Values.node.startupProbe.failureThreshold }} + livenessProbe: + httpGet: + path: /status + port: {{ .Values.service.httpPort }} + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 30 + failureThreshold: 3 + volumeMounts: + - name: storage + mountPath: {{ .Values.node.storage.dataDirectory }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: K8S_POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + - name: K8S_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OTEL_SERVICE_NAME + value: prover-broker + - name: K8S_NAMESPACE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.network }} + - name: NETWORK + value: "{{ .Values.network }}" + {{- else }} + - name: REGISTRY_CONTRACT_ADDRESS + value: "{{ .Values.customNetwork.registryContractAddress }}" + - name: L1_CHAIN_ID + value: "{{ .Values.customNetwork.l1ChainId }}" + - name: SLASH_FACTORY_ADDRESS + value: "{{ .Values.customNetwork.slashFactoryAddress }}" + - name: FEE_ASSET_HANDLER_CONTRACT_ADDRESS + value: "{{ .Values.customNetwork.feeAssetHandlerContractAddress }}" + {{- end }} + - name: NODE_OPTIONS + value: {{ join " " .Values.node.nodeJsOptions | quote }} + - name: AZTEC_PORT + value: "{{ .Values.service.httpPort }}" + - name: LOG_LEVEL + value: "{{ .Values.node.logLevel }}" + - name: LOG_JSON + value: "1" + {{- if gt (len .Values.node.l1ExecutionUrls) 0 }} + - name: ETHEREUM_HOSTS + value: {{ join "," .Values.node.l1ExecutionUrls | quote }} + {{- end }} + - name: DATA_DIRECTORY + value: {{ .Values.node.storage.dataDirectory | quote }} + ports: + - containerPort: {{ .Values.service.httpPort }} + name: http-rpc + {{- if .Values.service.admin.enabled }} + - containerPort: {{ .Values.service.admin.port }} + name: admin + {{- end }} + resources: + {{- if .Values.prover.broker.resources }} + {{- toYaml .Values.prover.broker.resources | nindent 12 }} + {{- else }} + {{- toYaml .Values.node.resources | nindent 12 }} + {{- end }} + {{- if not .Values.persistence.enabled }} + volumes: + - name: storage + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + volumes: + - name: storage + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: storage + annotations: + {{- toYaml .Values.persistence.annotations | nindent 10 }} + spec: + accessModes: + {{- toYaml .Values.persistence.accessModes | nindent 12 }} + resources: + requests: + storage: {{ .Values.prover.broker.persistence.size | default .Values.persistence.size | quote}} + storageClassName: {{ .Values.persistence.storageClassName }} + {{- if .Values.persistence.selector }} + selector: + {{- toYaml .Values.persistence.selector | nindent 12 }} + {{- end }} + {{- end 
}} +{{- end }} diff --git a/charts/aztec-node/templates/statefulset-prover-node.yaml b/charts/aztec-node/templates/statefulset-prover-node.yaml new file mode 100644 index 0000000..736903f --- /dev/null +++ b/charts/aztec-node/templates/statefulset-prover-node.yaml @@ -0,0 +1,192 @@ +{{- if eq .Values.role "prover" }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "chart.resourceName" (dict "context" . "component" "node") }} + labels: + {{- include "chart.labels" . | nindent 4 }} + aztec.network/role: prover + aztec.network/component: prover-node +spec: + serviceName: {{ include "chart.resourceName" (dict "context" . "component" "node") }} + replicas: 1 + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + {{- toYaml .Values.node.updateStrategy | nindent 4 }} + selector: + matchLabels: + {{- include "chart.selectorLabels" . | nindent 6 }} + app: prover-node + template: + metadata: + labels: + {{- include "chart.selectorLabels" . | nindent 8 }} + app: prover-node + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "chart.serviceAccountName" . }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + initContainers: + {{- tpl (toYaml .Values.initContainers | nindent 8) $ }} + {{- end }} + containers: + - name: prover-node + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/bash + - -c + - | + export P2P_IP=$(hostname -i) + start_cmd=("node" "/usr/src/yarn-project/aztec/dest/bin/index.js" "start" "--prover-node" "--archiver") + {{- if .Values.network }} + start_cmd+=("--network" "{{ .Values.network }}") + {{- end }} + "${start_cmd[@]}" + startupProbe: + httpGet: + path: /status + port: {{ .Values.service.httpPort }} + periodSeconds: {{ .Values.node.startupProbe.periodSeconds }} + failureThreshold: {{ .Values.node.startupProbe.failureThreshold }} + livenessProbe: + httpGet: + path: /status + port: {{ .Values.service.httpPort }} + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 30 + failureThreshold: 3 + volumeMounts: + - name: storage + mountPath: {{ .Values.node.storage.dataDirectory }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: K8S_POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + - name: K8S_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OTEL_SERVICE_NAME + value: prover-node + - name: K8S_NAMESPACE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.network }} + - name: NETWORK + value: "{{ .Values.network }}" + {{- else }} + - name: REGISTRY_CONTRACT_ADDRESS + value: "{{ .Values.customNetwork.registryContractAddress }}" + - name: L1_CHAIN_ID + value: "{{ .Values.customNetwork.l1ChainId }}" + - name: SLASH_FACTORY_ADDRESS + value: "{{ .Values.customNetwork.slashFactoryAddress }}" + - name: FEE_ASSET_HANDLER_CONTRACT_ADDRESS + value: "{{ .Values.customNetwork.feeAssetHandlerContractAddress }}" + {{- end }} + - name: NODE_OPTIONS + value: {{ join " " .Values.node.nodeJsOptions | quote }} + - name: AZTEC_PORT + value: "{{ .Values.service.httpPort }}" + - name: LOG_LEVEL + value: "{{ .Values.node.logLevel }}" + - name: LOG_JSON + value: "1" + - name: P2P_ENABLED + value: "{{ .Values.service.p2p.enabled }}" + - name: P2P_PORT + value: "{{ .Values.service.p2p.port }}" + 
- name: P2P_QUERY_FOR_IP + value: "true" + - name: PROVER_PUBLISHER_PRIVATE_KEY + value: {{ .Values.prover.node.publisherPrivateKey | quote }} + # Prover broker is running in the same deployment + - name: PROVER_BROKER_HOST + value: "http://{{ include "chart.resourceName" (dict "context" . "component" "broker") }}:{{ .Values.service.httpPort }}" + {{- if .Values.prover.id }} + - name: PROVER_ID + value: {{ .Values.prover.id | quote }} + {{- end }} + {{- if gt (len .Values.node.l1ExecutionUrls) 0 }} + - name: ETHEREUM_HOSTS + value: {{ join "," .Values.node.l1ExecutionUrls | quote }} + {{- end }} + {{- if gt (len .Values.node.l1ConsensusUrls) 0 }} + - name: L1_CONSENSUS_HOST_URLS + value: {{ join "," .Values.node.l1ConsensusUrls | quote }} + - name: L1_CONSENSUS_HOST_API_KEYS + value: {{ join "," .Values.node.l1ConsensusHostApiKeys | quote }} + - name: L1_CONSENSUS_HOST_API_KEY_HEADERS + value: {{ join "," .Values.node.l1ConsensusHostApiKeyHeaders | quote }} + {{- end }} + - name: ARCHIVER_POLLING_INTERVAL_MS + value: "10000" + - name: DATA_DIRECTORY + value: {{ .Values.node.storage.dataDirectory | quote }} + - name: DATA_STORE_MAP_SIZE_KB + value: {{ .Values.node.storage.dataStoreMapSize | quote }} + - name: WS_DB_MAP_SIZE_KB + value: {{ .Values.node.storage.worldStateMapSize | quote }} + ports: + - containerPort: {{ .Values.service.httpPort }} + name: http-rpc + {{- if .Values.service.admin.enabled }} + - containerPort: {{ .Values.service.admin.port }} + name: admin + {{- end }} + {{- if .Values.service.p2p.enabled }} + - containerPort: {{ .Values.service.p2p.port }} + name: p2p-tcp + - containerPort: {{ .Values.service.p2p.port }} + protocol: UDP + name: p2p-udp + {{- end }} + resources: + {{- if .Values.prover.node.resources }} + {{- toYaml .Values.prover.node.resources | nindent 12 }} + {{- else }} + {{- toYaml .Values.node.resources | nindent 12 }} + {{- end }} + {{- if not .Values.persistence.enabled }} + volumes: + - name: storage + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + volumes: + - name: storage + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: storage + annotations: + {{- toYaml .Values.persistence.annotations | nindent 10 }} + spec: + accessModes: + {{- toYaml .Values.persistence.accessModes | nindent 12 }} + resources: + requests: + storage: {{ .Values.prover.node.persistence.size | default .Values.persistence.size | quote}} + storageClassName: {{ .Values.persistence.storageClassName }} + {{- if .Values.persistence.selector }} + selector: + {{- toYaml .Values.persistence.selector | nindent 12 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/aztec-node/templates/statefulset.yaml b/charts/aztec-node/templates/statefulset.yaml new file mode 100644 index 0000000..8a7aa5b --- /dev/null +++ b/charts/aztec-node/templates/statefulset.yaml @@ -0,0 +1,255 @@ +{{- include "chart.validate" . -}} +{{- if or (eq .Values.role "fullnode") (eq .Values.role "sequencer") }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "chart.resourceName" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} + aztec.network/role: {{ .Values.role }} +spec: + serviceName: {{ include "chart.resourceName" . }}-headless + replicas: {{ .Values.node.replicas }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + {{- toYaml .Values.node.updateStrategy | nindent 4 }} + selector: + matchLabels: + {{- include "chart.selectorLabels" . 
| nindent 6 }} + app: node + template: + metadata: + labels: + {{- include "chart.selectorLabels" . | nindent 8 }} + app: node + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "chart.serviceAccountName" . }} + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: {{ .Values.hostNetwork }} + {{- if or .Values.service.p2p.nodePortEnabled .Values.hostNetwork }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - node + topologyKey: kubernetes.io/hostname + namespaceSelector: {} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + initContainers: + {{- tpl (toYaml .Values.initContainers | nindent 8) $ }} + {{- end }} + containers: + - name: aztec + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/bash + - -c + - | + export P2P_IP=$(hostname -i) + + start_cmd=("node" "/usr/src/yarn-project/aztec/dest/bin/index.js" "start") + + {{- $startCmd := include "chart.startCmd" . | fromYamlArray }} + {{- range $startCmd }} + start_cmd+=("{{ . }}") + {{- end }} + + {{- if .Values.node.preStartScript }} + {{ .Values.node.preStartScript | nindent 14 }} + + {{- end }} + "${start_cmd[@]}" + startupProbe: + httpGet: + path: /status + port: {{ .Values.service.httpPort }} + periodSeconds: {{ .Values.node.startupProbe.periodSeconds }} + failureThreshold: {{ .Values.node.startupProbe.failureThreshold }} + livenessProbe: + httpGet: + path: /status + port: {{ .Values.service.httpPort }} + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 30 + failureThreshold: 3 + volumeMounts: + - name: storage + mountPath: {{ .Values.node.storage.dataDirectory }} + {{- if eq .Values.role "sequencer" }} + - name: keystore + mountPath: /var/lib/keystore + readOnly: true + {{- end }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: K8S_POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + - name: K8S_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OTEL_SERVICE_NAME + value: node + - name: K8S_NAMESPACE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.network }} + - name: NETWORK + value: "{{ .Values.network }}" + {{- else }} + - name: REGISTRY_CONTRACT_ADDRESS + value: "{{ .Values.customNetwork.registryContractAddress }}" + - name: L1_CHAIN_ID + value: "{{ .Values.customNetwork.l1ChainId }}" + - name: SLASH_FACTORY_ADDRESS + value: "{{ .Values.customNetwork.slashFactoryAddress }}" + - name: FEE_ASSET_HANDLER_CONTRACT_ADDRESS + value: "{{ .Values.customNetwork.feeAssetHandlerContractAddress }}" + {{- end }} + - name: NODE_OPTIONS + value: {{ join " " .Values.node.nodeJsOptions | quote }} + - name: AZTEC_PORT + value: "{{ .Values.service.httpPort }}" + - name: AZTEC_ADMIN_PORT + value: "{{ .Values.service.admin.port }}" + - name: LOG_LEVEL + value: "{{ .Values.node.logLevel }}" + - name: LOG_JSON + value: "1" + - name: P2P_ENABLED + value: "{{ .Values.service.p2p.enabled }}" + - name: P2P_PORT + value: "{{ .Values.service.p2p.port }}" + - name: P2P_QUERY_FOR_IP + value: "true" + {{- if .Values.node.remoteUrl.archiver }} + - name: ARCHIVER_URL + value: {{ .Values.node.remoteUrl.archiver | quote }} + {{- end }} + {{- if 
.Values.node.remoteUrl.proverBroker }} + - name: PROVER_BROKER_HOST + value: {{ .Values.node.remoteUrl.proverBroker | quote }} + {{- end }} + {{- if .Values.node.remoteUrl.blobSink }} + - name: BLOB_SINK_URL + value: {{ .Values.node.remoteUrl.blobSink | quote }} + {{- end }} + {{- if gt (len .Values.node.remoteUrl.proverCoordinationNodes) 0 }} + - name: PROVER_COORDINATION_NODE_URLS + value: {{ join "," .Values.node.remoteUrl.proverCoordinationNodes | quote }} + {{- end }} + {{- if gt (len .Values.node.l1ExecutionUrls) 0 }} + - name: ETHEREUM_HOSTS + value: {{ join "," .Values.node.l1ExecutionUrls | quote }} + {{- end }} + {{- if gt (len .Values.node.l1ConsensusUrls) 0 }} + - name: L1_CONSENSUS_HOST_URLS + value: {{ join "," .Values.node.l1ConsensusUrls | quote }} + - name: L1_CONSENSUS_HOST_API_KEYS + value: {{ join "," .Values.node.l1ConsensusHostApiKeys | quote }} + - name: L1_CONSENSUS_HOST_API_KEY_HEADERS + value: {{ join "," .Values.node.l1ConsensusHostApiKeyHeaders | quote }} + {{- end }} + - name: ARCHIVER_POLLING_INTERVAL_MS + value: "10000" + - name: DATA_DIRECTORY + value: {{ .Values.node.storage.dataDirectory | quote }} + - name: DATA_STORE_MAP_SIZE_KB + value: {{ .Values.node.storage.dataStoreMapSize | quote }} + - name: WS_DB_MAP_SIZE_KB + value: {{ .Values.node.storage.worldStateMapSize | quote }} + - name: USE_GCLOUD_LOGGING + value: {{ .Values.node.metrics.useGcloudLogging | quote }} + {{- if .Values.node.metrics.otelCollectorEndpoint }} + - name: OTEL_EXCLUDE_METRICS + value: {{ .Values.node.metrics.otelExcludeMetrics | quote }} + - name: OTEL_EXPORTER_OTLP_METRICS_ENDPOINT + value: "{{ .Values.node.metrics.otelCollectorEndpoint }}/v1/metrics" + - name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT + value: "{{ .Values.node.metrics.otelCollectorEndpoint }}/v1/traces" + - name: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT + value: "{{ .Values.node.metrics.otelCollectorEndpoint }}/v1/logs" + {{- end }} + {{- if .Values.node.coinbase }} + - name: COINBASE + value: {{ .Values.node.coinbase | quote }} + - name: PROVER_ID + value: {{ .Values.node.coinbase | quote }} + {{- end }} + {{- if eq .Values.role "sequencer" }} + - name: KEY_STORE_DIRECTORY + value: /var/lib/keystore + - name: FEE_RECIPIENT + value: {{ .Values.sequencer.feeRecipient | quote }} + {{- end }} + - name: SENTINEL_ENABLED + value: {{ .Values.node.sentinel.enabled | quote }} + ports: + - containerPort: {{ .Values.service.httpPort }} + name: http-rpc + {{- if .Values.service.admin.enabled }} + - containerPort: {{ .Values.service.admin.port }} + name: admin + {{- end }} + {{- if .Values.service.p2p.enabled }} + - containerPort: {{ .Values.service.p2p.port }} + name: p2p-tcp + - containerPort: {{ .Values.service.p2p.port }} + protocol: UDP + name: p2p-udp + {{- end }} + resources: + {{- toYaml .Values.node.resources | nindent 12 }} + volumes: + {{- if eq .Values.role "sequencer" }} + - name: keystore + secret: + secretName: {{ include "chart.resourceName" . 
}}-keystore + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: storage + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: storage + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: storage + annotations: + {{- toYaml .Values.persistence.annotations | nindent 10 }} + spec: + accessModes: + {{- toYaml .Values.persistence.accessModes | nindent 12 }} + resources: + requests: + storage: {{ .Values.persistence.size | quote}} + storageClassName: {{ .Values.persistence.storageClassName }} + {{- if .Values.persistence.selector }} + selector: + {{- toYaml .Values.persistence.selector | nindent 12 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/aztec-node/templates/svc.headless.yaml b/charts/aztec-node/templates/svc.headless.yaml new file mode 100644 index 0000000..7701057 --- /dev/null +++ b/charts/aztec-node/templates/svc.headless.yaml @@ -0,0 +1,33 @@ +{{- if .Values.service.headless.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "chart.resourceName" . }}-headless + labels: + {{- include "chart.labels" . | nindent 4 }} +spec: + clusterIP: None + ports: + {{- if .Values.service.p2p.enabled }} + - port: {{ .Values.service.p2p.port }} + targetPort: p2p-tcp + protocol: TCP + name: p2p-tcp + - port: {{ .Values.service.p2p.port }} + targetPort: p2p-udp + protocol: UDP + name: p2p-udp + {{- end }} + - port: {{ .Values.service.httpPort }} + targetPort: http-rpc + protocol: TCP + name: http-rpc + {{- if .Values.service.admin.enabled }} + - port: {{ .Values.service.admin.port }} + targetPort: admin + protocol: TCP + name: admin + {{- end }} + selector: + {{- include "chart.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/charts/aztec-node/templates/svc.nodeport.yaml b/charts/aztec-node/templates/svc.nodeport.yaml new file mode 100644 index 0000000..ce3a119 --- /dev/null +++ b/charts/aztec-node/templates/svc.nodeport.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.service.p2p.enabled .Values.service.p2p.nodePortEnabled (not .Values.hostNetwork) -}} +{{- range $i, $e := until (.Values.node.replicas | int) }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "chart.resourceName" $ }}-p2p-node-port-{{ $i }} + labels: + {{- include "chart.labels" $ | nindent 4 }} + app: node + type: p2p + pod-index: "{{ $i }}" +spec: + type: NodePort + externalTrafficPolicy: Local + ports: + - name: p2p-tcp + port: {{ $.Values.service.p2p.port }} + protocol: TCP + targetPort: p2p-tcp + nodePort: {{ add $.Values.service.p2p.nodePort $i }} + - name: p2p-udp + port: {{ $.Values.service.p2p.port }} + protocol: UDP + targetPort: p2p-udp + nodePort: {{ add $.Values.service.p2p.nodePort $i }} + selector: + {{- include "chart.selectorLabels" $ | nindent 4 }} + statefulset.kubernetes.io/pod-name: "{{ include "chart.resourceName" $ }}-{{ $i }}" +{{- end }} +{{- end }} diff --git a/charts/aztec-node/templates/svc.yaml b/charts/aztec-node/templates/svc.yaml new file mode 100644 index 0000000..4b538fa --- /dev/null +++ b/charts/aztec-node/templates/svc.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "chart.resourceName" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} + {{- with .Values.service.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +spec: + type: ClusterIP + ports: + {{- if .Values.service.p2p.enabled }} + - port: {{ .Values.service.p2p.port }} + targetPort: p2p-tcp + protocol: TCP + name: p2p-tcp + - port: {{ .Values.service.p2p.port }} + targetPort: p2p-udp + protocol: UDP + name: p2p-udp + {{- end }} + - port: {{ .Values.service.httpPort }} + targetPort: http-rpc + protocol: TCP + name: http-rpc + {{- if .Values.service.admin.enabled }} + - port: {{ .Values.service.admin.port }} + targetPort: admin + protocol: TCP + name: admin + {{- end }} + selector: + {{- include "chart.selectorLabels" . | nindent 4 }} diff --git a/charts/aztec-node/values-examples/README.md b/charts/aztec-node/values-examples/README.md new file mode 100644 index 0000000..fa9b10c --- /dev/null +++ b/charts/aztec-node/values-examples/README.md @@ -0,0 +1,233 @@ +# Aztec Node Deployment Examples + +This directory contains example values files for deploying Aztec nodes in different roles. + +## L1 Infrastructure for Sequencers + +Before deploying a sequencer, you need L1 infrastructure (Ethereum execution + consensus clients). The examples directory includes production-tested configurations for Sepolia testnet. + +**Note**: L1 node requirements (Geth + Prysm) are separate from Aztec sequencer requirements. You can run them on the same machine or separate machines depending on your setup. + +### Quick Start - Deploy L1 Stack + +```bash +# 1. Add ethpandaops Helm repository +helm repo add ethpandaops https://ethpandaops.github.io/ethereum-helm-charts +helm repo update + +# 2. Deploy Geth (execution client) +helm install geth-sepolia ethpandaops/geth \ + -f charts/aztec-node/values-examples/geth-sepolia.yaml \ + -n l1 --create-namespace + +# 3. Deploy Prysm (consensus client) +helm install prysm-sepolia ethpandaops/prysm \ + -f charts/aztec-node/values-examples/prysm-sepolia.yaml \ + -n l1 + +# 4. Wait for sync (check logs) +kubectl logs -f geth-sepolia-0 -n l1 +kubectl logs -f prysm-sepolia-0 -n l1 + +# 5. Deploy Aztec sequencer (once L1 is synced) +helm install aztec-sequencer ./charts/aztec-node \ + -f charts/aztec-node/values-examples/sequencer.yaml \ + --set sequencer.attesterPrivateKey="0xYOUR_PRIVATE_KEY" \ + -n aztec-testnet --create-namespace +``` + +### L1 Configuration Files + +- **`geth-sepolia.yaml`** - Geth v1.16.7 execution client + - Snap sync mode for fast initial sync + - **Resources**: 2-4 cores, 8-16GB RAM + - **Storage**: 500Gi NVMe SSD (Sepolia currently ~300-400GB) + - Engine API on port 8551 (for Prysm) + - JSON-RPC on port 8545 (for Aztec) + +- **`prysm-sepolia.yaml`** - Prysm v6.1.4 consensus client + - **Critical flags**: `--subscribe-all-data-subnets`, `--blob-storage-layout=by-epoch` + - Checkpoint sync enabled (5-10 minute sync vs hours) + - **Resources**: 2-4 cores, 16GB RAM + - **Storage**: 800Gi NVMe SSD (part of ~637GB total Geth+Prysm stack) + - gRPC gateway on port 3500 (for Aztec) + - **Important**: 10-minute liveness probe delay for data column warm-up + +### JWT Configuration + +Both Geth and Prysm use the same JWT secret for Engine API authentication. The example files include a production JWT, but you should generate your own: + +```bash +# Generate new JWT secret +openssl rand -hex 32 +``` + +Then update both `geth-sepolia.yaml` and `prysm-sepolia.yaml` with the new JWT value. + +## Available Roles + +### 1. Full Node (`fullnode.yaml`) +A full node participates in the Aztec network by running archiver and node services. 
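+
+For context, the chart's statefulset template assembles the container entrypoint from the role's start flags (listed under **Command:** below). The process started inside the pod is roughly the following sketch, assuming `network: testnet` and no custom `node.startCmd`:
+
+```bash
+# Rough shape of the rendered entrypoint for the fullnode role (sketch only)
+node /usr/src/yarn-project/aztec/dest/bin/index.js start --node --archiver --network testnet
+```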
+ +**Command:** `--node --archiver --network testnet` + +**Use case:** Network participation, development, testing + +**Requirements:** +- L1 Ethereum RPC access (execution + consensus) +- 1TB NVMe SSD +- 2-4 CPU cores, 8-16GB RAM + +**Deploy:** +```bash +helm install aztec-fullnode ./charts/aztec-node \ + -f charts/aztec-node/values-examples/fullnode.yaml \ + -n aztec-testnet --create-namespace +``` + +### 2. Sequencer (`sequencer.yaml`) +A sequencer produces blocks and attestations for the network. + +**Command:** `--node --archiver --sequencer --network staging-public` + +**Use case:** Block production, network consensus participation + +**Requirements:** +- **L1 Infrastructure**: Deploy Geth + Prysm first (see [L1 Infrastructure](#l1-infrastructure-for-sequencers)) +- Ethereum private key with minimum 0.1 ETH on L1 +- **Aztec Sequencer Hardware** (separate from L1 nodes): + - **CPU**: 2-4 cores minimum (released in 2015 or later) + - **RAM**: 16GB minimum + - **Storage**: 1TB NVMe SSD for archiver data + - **Network**: 25 Mbps or higher +- Host networking enabled for P2P performance + +**Critical Configuration:** +- `sequencer.feeRecipient` - **REQUIRED** (node will crash if not set) +- `sequencer.attesterPrivateKey` - **REQUIRED** (must have ETH on L1) +- `node.startCmd` - Must include `--node --archiver --sequencer --network` +- `node.startupProbe` - Set to 60s period, 60 failureThreshold (1hr max startup) + +**Deploy:** +```bash +# IMPORTANT: Deploy L1 infrastructure first! See "L1 Infrastructure for Sequencers" section above + +# Then deploy sequencer with your private key +helm install aztec-sequencer ./charts/aztec-node \ + -f charts/aztec-node/values-examples/sequencer.yaml \ + --set sequencer.attesterPrivateKey="0xYOUR_PRIVATE_KEY" \ + -n aztec-testnet --create-namespace +``` + +### 3. Prover (`prover.yaml`) +The prover role deploys a complete distributed proving system with 3 components: +- **Prover Broker**: Manages job queue (`--prover-broker --network testnet`) +- **Prover Node**: Creates jobs and publishes proofs to L1 (`--prover-node --archiver --network testnet`) +- **Prover Agent(s)**: Execute proof computation (`--prover-agent --network testnet`) + +**Use case:** Distributed proof generation and L1 publication + +**Requirements:** +- L1 Ethereum RPC access (for prover-node) +- Ethereum private key with ETH for L1 proof submissions +- **Prover Broker**: 1-2 CPU cores, 4-8GB RAM, 10GB SSD +- **Prover Node**: 2-4 CPU cores, 8-16GB RAM, 1TB NVMe SSD +- **Prover Agent(s)**: **16-32 CPU cores, 64-128GB RAM per agent**, 10GB SSD per agent (high-performance compute) + +**Deploy:** +```bash +# IMPORTANT: Set your publisher private key first! 
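+#
+# A minimal sketch of one way to avoid typing the key inline (the variable name
+# AZTEC_PROVER_PUBLISHER_KEY is only an illustrative example):
+#   export AZTEC_PROVER_PUBLISHER_KEY="0x..."
+# and then pass --set prover.node.publisherPrivateKey="$AZTEC_PROVER_PUBLISHER_KEY" below.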
+helm install aztec-prover ./charts/aztec-node \
+  -f charts/aztec-node/values-examples/prover.yaml \
+  --set prover.node.publisherPrivateKey="0xYOUR_PRIVATE_KEY" \
+  -n aztec-testnet --create-namespace
+```
+
+**Scale prover agents:**
+```bash
+# Increase number of prover agent pods
+helm upgrade aztec-prover ./charts/aztec-node \
+  -f charts/aztec-node/values-examples/prover.yaml \
+  --set prover.agent.replicas=4 \
+  --set prover.node.publisherPrivateKey="0xYOUR_PRIVATE_KEY" \
+  -n aztec-testnet
+```
+
+**Persistence Configuration:**
+
+Each prover component has independent storage configuration matching Aztec's minimum requirements:
+
+```yaml
+prover:
+  node:
+    persistence:
+      size: 1000Gi # Prover-node needs 1TB NVMe SSD for archiver data
+  broker:
+    persistence:
+      size: 10Gi # Broker needs 10GB SSD for job queue
+  agent:
+    persistence:
+      size: 10Gi # Agent needs 10GB SSD per agent for CRS
+```
+
+The global `persistence.enabled` flag controls persistence for all components.
+
+## Role-Based Architecture
+
+The chart uses a `role` field to determine the deployment type:
+
+```yaml
+role: fullnode # fullnode | sequencer | prover
+```
+
+### Role Behavior
+
+| Role | StatefulSets Created | Services Started |
+|------|---------------------|------------------|
+| **fullnode** | 1 | Aztec node with archiver |
+| **sequencer** | 1 | Aztec node with archiver + sequencer |
+| **prover** | 3 | Prover broker, prover node, prover agent(s) |
+
+The prover role automatically handles internal communication between components:
+- The prover node's `PROVER_BROKER_HOST` points at the chart's broker service on `service.httpPort` (8080 by default)
+- Prover agents connect to the same broker service
+
+## Required Fields by Role
+
+### Sequencer
+- `sequencer.attesterPrivateKey` (required) - Must start with `0x`
+- `sequencer.feeRecipient` (required) - 32-byte Aztec address to receive unburnt fees; the example uses the zero address (the node crashes if it is left unset)
+
+### Prover
+- `prover.node.publisherPrivateKey` (required) - Must start with `0x`
+- `prover.id` (optional) - Address for receiving proof rewards
+- `prover.agent.replicas` (optional) - Number of prover agent pods (default: 1)
+- `prover.agent.count` (optional) - Number of prover threads per pod (default: 1)
+
+## Network Selection
+
+Use the `network` field to connect to predefined networks:
+
+```yaml
+network: testnet # or devnet
+```
+
+For custom networks, omit `network` and provide:
+
+```yaml
+customNetwork:
+  l1ChainId: "11155111"
+  registryContractAddress: "0x..."
+  slashFactoryAddress: "0x..."
+  feeAssetHandlerContractAddress: "0x..."
+```
+
+## Advanced Configuration
+
+See the main [values.yaml](../values.yaml) for all available configuration options including:
+- Component-specific resource limits
+- Metrics and observability
+- Storage configuration
+- Network policies
+- Service configuration
+- And more...
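+
+To sanity-check which workloads a given role renders before installing, you can render the chart locally. A minimal sketch using the full node example values (the release name `preview` is arbitrary, and this assumes the chart's built-in validation passes with those example values):
+
+```bash
+# Render the chart and count the kinds of objects it creates
+helm template preview ./charts/aztec-node \
+  -f charts/aztec-node/values-examples/fullnode.yaml \
+  | grep '^kind:' | sort | uniq -c
+```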
diff --git a/charts/aztec-node/values-examples/fullnode.yaml b/charts/aztec-node/values-examples/fullnode.yaml new file mode 100644 index 0000000..ba9d759 --- /dev/null +++ b/charts/aztec-node/values-examples/fullnode.yaml @@ -0,0 +1,65 @@ +# Example values for deploying an Aztec full node +# A full node participates in the network by running archiver and node services + +# Deployment role +role: fullnode + +# Network to connect to (testnet, devnet, or custom) +network: testnet + +# Use host network for best P2P performance +hostNetwork: true + +# Container image configuration +image: + repository: aztecprotocol/aztec + tag: latest + pullPolicy: IfNotPresent # Use 'Always' for auto-updates, 'IfNotPresent' for stability + +# Node configuration +node: + replicas: 1 + logLevel: "info" + + # L1 Ethereum configuration + l1ExecutionUrls: + - "http://l1-full-node-sepolia-execution.l1.svc.cluster.local:8545" + l1ConsensusUrls: + - "http://l1-full-node-sepolia-beacon.l1.svc.cluster.local:5052" + + # Resource allocation + resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "4" + memory: "16Gi" + + # Storage configuration + storage: + dataDirectory: /data + dataStoreMapSize: "134217728" # 128 GB + worldStateMapSize: "134217728" # 128 GB + +# Persistence +# Full node requires 1TB NVMe SSD for archiver data +persistence: + enabled: true + size: 1000Gi + # storageClassName: standard # Uncomment to use a specific storage class + accessModes: + - ReadWriteOnce + +# Service configuration +service: + httpPort: 8080 + + p2p: + enabled: true + nodePortEnabled: false # Not needed with hostNetwork + port: 40400 + + admin: + enabled: true + port: 8081 diff --git a/charts/aztec-node/values-examples/geth-sepolia.yaml b/charts/aztec-node/values-examples/geth-sepolia.yaml new file mode 100644 index 0000000..757a0bc --- /dev/null +++ b/charts/aztec-node/values-examples/geth-sepolia.yaml @@ -0,0 +1,103 @@ +# Example values for deploying Geth execution client on Sepolia +# This is designed for use with the ethpandaops/geth Helm chart +# +# Installation: +# helm repo add ethpandaops https://ethpandaops.github.io/ethereum-helm-charts +# helm install geth-sepolia ethpandaops/geth -n l1 -f geth-sepolia.yaml +# +# This Geth instance will serve as the L1 execution client for your Aztec sequencer + +# Override the full name to ensure consistent service naming +# This ensures the service is accessible at geth-sepolia.l1.svc.cluster.local +fullnameOverride: geth-sepolia + +# Number of Geth replicas (typically 1 for a single sequencer) +replicas: 1 + +# Container image +image: + repository: ethereum/client-go + tag: v1.16.7 + pullPolicy: IfNotPresent + +# Port configuration +p2pPort: 30303 # P2P networking port (TCP and UDP) +httpPort: 8545 # JSON-RPC HTTP endpoint +wsPort: 8546 # WebSocket endpoint +authPort: 8551 # Engine API (authenticated RPC for consensus client) +metricsPort: 6060 # Prometheus metrics + +# JWT secret for Engine API authentication +# This MUST match the JWT used by your consensus client (Prysm) +# The same JWT is used in prysm-sepolia.yaml +jwt: ecb22bc24e7d4061f7ed690ccd5846d7d73f5d2b9733267e12f56790398d908a + +# Additional Geth command-line arguments +# These are flags not covered by the chart's default configuration +extraArgs: + # CRITICAL: Network flag - must be first + - --sepolia + + # Sync mode - "snap" is fastest for initial sync + - --syncmode=snap + + # Maximum number of network peers + - --maxpeers=50 + + # HTTP API configuration + - --http.api=eth,net,web3,engine,admin + + # 
WebSocket API configuration + - --ws.api=eth,net,web3 + +# Persistence configuration +# Minimum requirement: 1TB NVMe SSD (500Gi is sufficient for Sepolia testnet) +# For mainnet, use 2TB+ to accommodate growth +persistence: + enabled: true + size: 500Gi # Sepolia testnet size - increase for mainnet + storageClassName: local-path # Use NVMe-backed storage class for best performance + accessModes: + - ReadWriteOnce + +# Pin to specific node for local storage persistence +# IMPORTANT: This should match your Aztec sequencer's node for low latency +nodeSelector: + kubernetes.io/hostname: silverfib + +# Resource allocation +# Sepolia testnet minimum: 4 cores, 8-16 GB RAM +# Recommended for production: 4 cores, 16GB RAM +resources: + requests: + cpu: 2000m # 2 cores minimum + memory: 8Gi # 8GB minimum, 16GB recommended + limits: + cpu: 4000m # 4 cores + memory: 16Gi # 16GB for optimal performance + +# Probes +livenessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 120 + +readinessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + +# Service configuration +service: + type: ClusterIP + +# RBAC and ServiceAccount +serviceAccount: + create: true + +rbac: + create: true + +# Pod management +podManagementPolicy: OrderedReady +terminationGracePeriodSeconds: 300 diff --git a/charts/aztec-node/values-examples/prover.yaml b/charts/aztec-node/values-examples/prover.yaml new file mode 100644 index 0000000..d446ddd --- /dev/null +++ b/charts/aztec-node/values-examples/prover.yaml @@ -0,0 +1,119 @@ +# Example configuration for Prover deployment +# This creates 3 StatefulSets: prover-broker, prover-node, and prover-agent + +role: prover + +# Network to connect to +network: testnet + +# Prover configuration +prover: + # Prover node configuration + node: + # REQUIRED: Ethereum private key for publishing proofs to L1 + # This key will be used to sign and submit rollup proofs + # WARNING: This is a TEST KEY - DO NOT USE IN PRODUCTION + # Replace with your actual private key for production use + publisherPrivateKey: "0xYOUR_PRIVATE_KEY_HERE" + + resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "4" + memory: "16Gi" + + # Prover-node needs 1TB NVMe SSD for archiver data + persistence: + size: 1000Gi + + # Prover broker configuration + broker: + resources: + requests: + cpu: "1" + memory: "4Gi" + limits: + cpu: "2" + memory: "8Gi" + + # Broker needs 10GB SSD for job queue + persistence: + size: 10Gi + + # Prover agent configuration + agent: + # Number of prover agent pods + replicas: 2 + + # Number of prover threads per pod + count: 4 + + # Polling interval in milliseconds + pollIntervalMs: 1000 + + # Recommended: 32 cores and 128GB RAM per agent + resources: + requests: + cpu: "16" + memory: "64Gi" + limits: + cpu: "32" + memory: "128Gi" + + # Agent needs 10GB SSD per agent for CRS and temporary files + persistence: + size: 10Gi + + # Prover ID - address for receiving proof rewards + # Should match the address derived from publisherPrivateKey + # The address below corresponds to the test key above + # REPLACE WITH YOUR ACTUAL ADDRESS when using a real private key + id: "" + +# Image configuration +image: + repository: aztecprotocol/aztec + tag: latest + pullPolicy: IfNotPresent # Use 'Always' for auto-updates, 'IfNotPresent' for stability + +# Use host network for best P2P performance (prover-node needs P2P) +hostNetwork: true + +# Node configuration +node: + logLevel: "info" + + # L1 Ethereum connections + l1ExecutionUrls: + - 
"http://l1-full-node-sepolia-execution.l1.svc.cluster.local:8545" + l1ConsensusUrls: + - "http://l1-full-node-sepolia-beacon.l1.svc.cluster.local:5052" + + resources: {} + +# Persistence for prover components +# Each component has its own size configured under prover.{component}.persistence.size +# This enabled flag applies to all prover components +persistence: + enabled: true + # storageClassName: local-path # Uncomment to use a specific storage class + +# Service configuration +service: + httpPort: 8080 + + # P2P is required for prover-node + p2p: + enabled: true + nodePortEnabled: false # Not needed with hostNetwork + port: 40400 + + admin: + enabled: true + port: 8081 + +# Optional: Pin to specific node +# nodeSelector: +# kubernetes.io/hostname: high-performance-node diff --git a/charts/aztec-node/values-examples/prysm-sepolia.yaml b/charts/aztec-node/values-examples/prysm-sepolia.yaml new file mode 100644 index 0000000..c172d90 --- /dev/null +++ b/charts/aztec-node/values-examples/prysm-sepolia.yaml @@ -0,0 +1,129 @@ +# Example values for deploying Prysm consensus client on Sepolia +# This is designed for use with the ethpandaops/prysm Helm chart +# +# Installation: +# helm repo add ethpandaops https://ethpandaops.github.io/ethereum-helm-charts +# helm install prysm-sepolia ethpandaops/prysm -n l1 -f prysm-sepolia.yaml +# +# This Prysm instance will serve as the L1 consensus client for your Aztec sequencer + +# Override the full name to ensure consistent service naming +# This ensures the service is accessible at prysm-sepolia.l1.svc.cluster.local +fullnameOverride: prysm-sepolia + +# Number of Prysm replicas (typically 1 for a single sequencer) +replicas: 1 + +# Container image +image: + repository: gcr.io/prysmaticlabs/prysm/beacon-chain + tag: v6.1.4 + pullPolicy: IfNotPresent + +# Port configuration +p2pPort: 13001 # P2P networking (TCP and UDP) +httpPort: 3500 # HTTP API (gRPC gateway) - used by Aztec +rpcPort: 4000 # gRPC RPC port +metricsPort: 8080 # Prometheus metrics + +# JWT secret for Engine API authentication +# This MUST match the JWT used by your execution client (Geth) +# The same JWT is used in geth-sepolia.yaml +jwt: ecb22bc24e7d4061f7ed690ccd5846d7d73f5d2b9733267e12f56790398d908a + +# Checkpoint sync configuration +# Using checkpoint sync significantly speeds up initial sync (minutes vs hours) +checkpointSync: + enabled: true + url: https://checkpoint-sync.sepolia.ethpandaops.io + +# CRITICAL: Additional Prysm flags for Aztec blob support +# These flags are essential for proper blob/data column handling +extraArgs: + # CRITICAL: Network flag - must be first + - --sepolia + + # CRITICAL: Execution endpoint - points to Geth service + - --execution-endpoint=http://geth-sepolia:8551 + + # Subscribe to all attestation subnets for complete network visibility + # Required for Aztec to fetch all blob sidecars + - --subscribe-all-data-subnets + + # Store blobs organized by epoch for efficient retrieval + # Required for Aztec blob API queries + - --blob-storage-layout=by-epoch + +# Persistence configuration +persistence: + enabled: true + size: 800Gi + storageClassName: local-path + accessModes: + - ReadWriteOnce + +# Pin to specific node for local storage persistence +# IMPORTANT: Should be on same node as Geth for best performance +nodeSelector: + kubernetes.io/hostname: silverfib + +# Resource allocation +# Sepolia testnet minimum: 4-8 cores, 16 GB RAM +# Total Sepolia stack (Geth + Prysm) uses ~637GB as of 2025 +resources: + requests: + cpu: 2000m # 2 cores minimum + 
memory: 16Gi # 16GB required for beacon chain + blobs + limits: + cpu: 4000m # 4 cores for optimal performance + memory: 16Gi + +# Probes +# CRITICAL: Prysm needs extended liveness probe delay +# Data column filesystem cache warm-up takes 5-6 minutes on startup +# Setting initialDelaySeconds to 600s prevents premature pod restarts +livenessProbe: + enabled: true + initialDelaySeconds: 600 # 10 minutes to allow data column warm-up + periodSeconds: 120 + failureThreshold: 3 + +readinessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + +# Service configuration +service: + type: ClusterIP + +# RBAC and ServiceAccount +serviceAccount: + create: true + +rbac: + create: true + +# Pod management +podManagementPolicy: OrderedReady +terminationGracePeriodSeconds: 300 + +# Security context +securityContext: + fsGroup: 10001 + runAsUser: 10001 + runAsGroup: 10001 + runAsNonRoot: true + +# Init container to fix permissions +initContainers: + - name: init-chown-data + image: busybox:1.34.0 + command: ["chown", "-R", "10001:10001", "/data"] + securityContext: + runAsUser: 0 + runAsNonRoot: false + volumeMounts: + - name: storage + mountPath: /data diff --git a/charts/aztec-node/values-examples/sequencer.yaml b/charts/aztec-node/values-examples/sequencer.yaml new file mode 100644 index 0000000..81bfa20 --- /dev/null +++ b/charts/aztec-node/values-examples/sequencer.yaml @@ -0,0 +1,125 @@ +# Example values for deploying an Aztec sequencer +# A sequencer produces blocks and attestations for the network +# +# IMPORTANT: This example assumes you have deployed: +# 1. Geth execution client (see geth-sepolia.yaml) +# 2. Prysm consensus client (see prysm-sepolia.yaml) +# +# Deploy those L1 clients first, then deploy this sequencer + +# Deployment role +role: sequencer + +# Sequencer-specific configuration +sequencer: + # REQUIRED: Ethereum private key for attester (signs blocks and attestations) + # Must start with '0x' and have sufficient ETH on L1 (minimum 0.1 ETH recommended) + # SECURITY: Never commit real private keys to version control + # Use --set sequencer.attesterPrivateKey="0xYOUR_KEY" when deploying + attesterPrivateKey: "0xYOUR_PRIVATE_KEY_HERE" + + # REQUIRED: Aztec address (32 bytes) to receive unburnt transaction fees + # WARNING: If not set, the node will crash with "Invalid AztecAddress length 0" + feeRecipient: "0x0000000000000000000000000000000000000000000000000000000000000000" + +# Network to connect to +network: staging-public +networkName: staging-public + +# Container image configuration +image: + repository: aztecprotocol/aztec + tag: latest + pullPolicy: Always # Use Always for auto-updates with keel.sh + +# Node configuration +node: + replicas: 1 + + # Log level configuration + # Format: "default_level; override: component, other_component" + logLevel: "debug; info: aztec:simulator, json-rpc" + + # Startup command - REQUIRED for sequencer + # These flags tell Aztec to run as a full sequencer with archiver + startCmd: + - --node + - --archiver + - --sequencer + - --network + - staging-public # WARNING: Must match network field above + + # L1 Ethereum configuration + # Point to your locally deployed Geth + Prysm stack + # These should match the service names from geth-sepolia.yaml and prysm-sepolia.yaml + l1ExecutionUrls: + - "http://geth-sepolia.l1.svc.cluster.local:8545" + l1ConsensusUrls: + - "http://prysm-sepolia.l1.svc.cluster.local:3500" + + # Resource allocation + # Aztec minimum requirements: 2-4 cores, 16GB RAM, 1TB NVMe SSD, 25 Mbps 
network + # These values provide headroom for production workloads + resources: + requests: + cpu: "4" # 4 cores (minimum: 2-4 cores) + memory: "16Gi" # 16GB (minimum: 16GB) + limits: + cpu: "8" # 8 cores for burst capacity + memory: "32Gi" # 32GB for optimal performance + + # Storage configuration + storage: + dataDirectory: /data + dataStoreMapSize: "134217728" # 128 GB + worldStateMapSize: "134217728" # 128 GB + + # Startup probe - sequencers need extended startup time + # 60s * 60 = 1 hour max startup time + startupProbe: + periodSeconds: 60 + failureThreshold: 60 + +# Persistence +# Aztec minimum requirement: 1TB NVMe SSD +# 512Gi is currently sufficient for staging-public network +# Increase size as network grows +persistence: + enabled: true + size: 512Gi # Minimum 1TB recommended for production + storageClassName: local-path # Use NVMe-backed storage class + accessModes: + - ReadWriteOnce + selector: {} + +# Use host networking for best P2P performance +# This allows direct binding to node's network interface +hostNetwork: true + +# Service configuration +service: + httpPort: 8080 + + # P2P networking + p2p: + enabled: true + nodePortEnabled: false # Not needed with hostNetwork + port: 40400 + announcePort: 40400 + + # Admin API for debugging and metrics + admin: + enabled: true + port: 8082 + +# Auto-update configuration with keel.sh +# Polls for new images and force updates every 10 minutes +podAnnotations: + keel.sh/policy: force + keel.sh/pollSchedule: "@every 10m" + keel.sh/trigger: poll + +# RECOMMENDED: Pin to specific node for persistent storage +# This ensures the pod always runs on the same node with local storage +nodeSelector: + kubernetes.io/hostname: your-node-name diff --git a/charts/aztec-node/values.yaml b/charts/aztec-node/values.yaml new file mode 100644 index 0000000..4127c32 --- /dev/null +++ b/charts/aztec-node/values.yaml @@ -0,0 +1,321 @@ +# -- Overrides the chart name +nameOverride: "" +# -- Overrides the chart computed fullname +fullnameOverride: "" + +# ===================================================================== +# DEPLOYMENT ROLE +# ===================================================================== + +# -- Role determines the type of Aztec node deployment +# Valid roles: fullnode, sequencer, prover +role: sequencer + +# ===================================================================== +# SEQUENCER CONFIGURATION (only used when role: sequencer) +# ===================================================================== + +sequencer: + # -- Ethereum private key for attester (signs blocks and attestations) + # REQUIRED when role is 'sequencer' + attesterPrivateKey: "" + # -- Aztec address (32 bytes) to receive unburnt transaction fees + # feeRecipient: "0x0000000000000000000000000000000000000000000000000000000000000000" + +# ===================================================================== +# PROVER CONFIGURATION (only used when role: prover) +# ===================================================================== +# When role is 'prover', creates 3 StatefulSets: +# - prover-broker: Manages job queue +# - prover-node: Creates jobs and submits proofs to L1 +# - prover-agent: Executes proof generation + +prover: + # Prover node configuration + node: + # -- Ethereum private key for publishing proofs to L1 + # REQUIRED when role is 'prover' + publisherPrivateKey: "" + # -- Resource requests/limits for prover-node pod + resources: {} + # -- Persistence configuration for prover-node + # Prover-node needs 1TB NVMe SSD for archiver data + 
persistence: + size: 1000Gi + + # Prover broker configuration + broker: + # -- Resource requests/limits for prover-broker pod + resources: {} + # -- Persistence configuration for prover-broker + # Broker needs 10GB SSD for job queue + persistence: + size: 10Gi + + # Prover agent configuration + agent: + # -- Number of prover agent replicas + replicas: 1 + # -- Number of prover agents to run per pod + count: 1 + # -- Agent polling interval in milliseconds + pollIntervalMs: 1000 + # -- Resource requests/limits for prover-agent pods + # Recommended: 32 cores, 128GB RAM per agent + resources: {} + # -- Persistence configuration for prover-agent + # Agent needs 10GB SSD per agent for CRS and temporary files + persistence: + size: 10Gi + + # -- Prover ID - address for receiving proof rewards + # Used by prover-node (usually matches publisherPrivateKey address) + id: "" + +# ===================================================================== +# IMAGE CONFIGURATION +# ===================================================================== + +# -- Image to use for the container +image: + # -- Image repository + repository: aztecprotocol/aztec + # -- Image tag + tag: 2.1.0-rc.24 + # -- Container pull policy + pullPolicy: IfNotPresent + +# ===================================================================== +# NETWORK CONFIGURATION +# ===================================================================== + +# -- Pod management policy +podManagementPolicy: Parallel + +# -- Network name - this is a predefined network - testnet, devnet +network: + +# -- Network identifier used in resource naming (l2-{role}-node-{networkName}-{component}) +# This appears in service/statefulset names for easy identification +networkName: staging-public + +# -- Custom network - (not recommended) - Only for custom testnet usecases +# Must have deployed your own protocol contracts first +customNetwork: + l1ChainId: + registryContractAddress: + slashFactoryAddress: + feeAssetHandlerContractAddress: + +# -- Which rollup contract we want to follow from the registry +rollupVersion: "canonical" + +# -- Use host network - provides best P2P performance by binding directly to node's network +# This is the recommended configuration for Aztec nodes +hostNetwork: true + +# -- Pod annotations (e.g., for Keel auto-updates) +podAnnotations: {} + +# ===================================================================== +# NODE CONFIGURATION +# ===================================================================== + +# -- Aztec node configuration +node: + # -- Number of replicas + replicas: 1 + + # -- Log level - info, verbose, debug, trace + logLevel: "info" + + # -- L1 Ethereum configuration + # Ethereum execution layer RPC endpoint(s) - comma separated list + l1ExecutionUrls: ["http://l1-full-node-sepolia-execution.l1.svc.cluster.local:8545"] + # L1 consensus layer RPC endpoint(s) - comma separated list + l1ConsensusUrls: ["http://l1-full-node-sepolia-beacon.l1.svc.cluster.local:5052"] + ## Only when api key is required via header, otherwise just provide in l1ConsensusUrls + ## Example: "1234abcd" + l1ConsensusHostApiKeys: [] + ## Example: "X-API-KEY" + l1ConsensusHostApiKeyHeaders: [] + + # -- Pre-start script (runs before node starts) + preStartScript: "" + + # -- Start command flags + # Auto-generated based on role, but can be overridden for custom configurations + # Leave empty to use role-based defaults + startCmd: [] + + # -- Remote service URLs + remoteUrl: + archiver: + proverBroker: + proverCoordinationNodes: [] + blobSink: + + # 
-- Address that will receive block or proof rewards + # For prover roles, this is the PROVER_ID + coinbase: + + # -- Sentinel configuration - gathers slashing information + sentinel: + enabled: false + + # -- Metrics configuration + metrics: + # Exclude metrics - comma separated list of metrics to exclude + otelExcludeMetrics: "" + # Collector endpoint - e.g. http://localhost:4318 + otelCollectorEndpoint: "" + # Use GCP logging + useGcloudLogging: false + + # -- Storage configuration + storage: + # Data directory + dataDirectory: /data + # Data store map size in kB (per database) + dataStoreMapSize: "134217728" # 128 GB + # World state map size in kB (per merkle tree) + worldStateMapSize: "134217728" # 128 GB + # P2P storage map size (kB) + p2pStorageMapSize: + # Archive storage map size (kB) + archiveStorageMapSize: + + # -- Node.js options + nodeJsOptions: + - --no-warnings + - --max-old-space-size=4096 + + # -- Startup probe configuration + startupProbe: + # Period seconds + periodSeconds: 30 + # Failure threshold (10 minutes default - may need more if downloading many blocks) + failureThreshold: 20 + + # -- Resource requests and limits + resources: {} + +# ===================================================================== +# PERSISTENCE +# ===================================================================== + +persistence: + # -- Uses an emptyDir when not enabled + enabled: false + # -- Use an existing PVC + existingClaim: null + # -- AccessModes + accessModes: + - ReadWriteOnce + # -- Requested size + size: 100Gi + # -- Use a specific storage class + storageClassName: null + # -- Annotations for volume claim template + annotations: {} + # -- Selector for volume claim template + selector: {} + +# ===================================================================== +# STATEFULSET CONFIGURATION +# ===================================================================== + +# -- Update strategy for the statefulset +updateStrategy: + type: RollingUpdate + +# -- Additional init containers +initContainers: [] +# - name: my-init-container +# image: busybox:latest +# command: ['sh', '-c', 'echo hello'] + +# ===================================================================== +# SERVICE CONFIGURATION +# ===================================================================== + +service: + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.global-static-ip-name: my-static-ip + hosts: [] + # - node.example.com + + headless: + enabled: true + + p2p: + enabled: true + # NodePort is an alternative to hostNetwork for external P2P access + # Only used when hostNetwork: false + nodePortEnabled: false + port: 40400 + # NodePort must be in range 30000-32767 (Kubernetes limitation) + # This is the external port exposed on the node, which forwards to internal port 40400 + nodePort: 30400 + announcePort: 40400 + + admin: + enabled: true + port: 8081 + + httpPort: 8080 + +# ===================================================================== +# CERTIFICATE CONFIGURATION +# ===================================================================== + +# Certificate configuration +certificate: + enabled: false + domains: [] + # - example.com + # - api.example.com + +# ===================================================================== +# RBAC CONFIGURATION +# ===================================================================== + +rbac: + # -- Specifies whether RBAC resources are to be created + create: true + # -- Required ClusterRole rules + # @default -- See `values.yaml` + clusterRules: + # 
Required to obtain the nodes external IP + - apiGroups: [""] + resources: + - "nodes" + verbs: + - "get" + - "list" + - "watch" + # -- Required ClusterRole rules + # @default -- See `values.yaml` + rules: + # Required to get information about the services nodePort. + - apiGroups: [""] + resources: + - "services" + verbs: + - "get" + - "list" + - "watch" + +# ===================================================================== +# SERVICE ACCOUNT +# ===================================================================== + +serviceAccount: + # -- Create a service account + create: true + # -- Name of the service account - if not set, the fullname will be used + name: "" + # -- Annotations for the service account + annotations: {} diff --git a/scripts/helm-lint-pre-commit.sh b/scripts/helm-lint-pre-commit.sh new file mode 100755 index 0000000..60b446f --- /dev/null +++ b/scripts/helm-lint-pre-commit.sh @@ -0,0 +1,102 @@ +#!/usr/bin/env bash +# Helm lint pre-commit hook +# Mimics the CI lint workflow logic from .github/workflows/release.yml + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +SKIP_FILE="$REPO_ROOT/.github/helm-ci-values/skip-charts.txt" + +cd "$REPO_ROOT" + +# Read skip list +skip_list=() +if [[ -f "$SKIP_FILE" ]]; then + while IFS= read -r line; do + # Skip empty lines and comments + [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && continue + skip_list+=("$line") + done < "$SKIP_FILE" +fi + +echo "Linting Helm charts..." +failed_charts=() +linted_count=0 +skipped_count=0 + +for chart_path in charts/*/; do + chart=$(basename "$chart_path") + values_file=".github/helm-ci-values/values-$chart.yaml" + + # Check if chart should be skipped + skip_chart=false + if [[ ${#skip_list[@]} -gt 0 ]]; then + for skip in "${skip_list[@]}"; do + if [[ "$chart" == "$skip" ]]; then + skip_chart=true + break + fi + done + fi + + if $skip_chart; then + echo "⊘ Skipping $chart (listed in skip-charts.txt)" + skipped_count=$((skipped_count + 1)) + continue + fi + + # Run helm lint and check exit code + set +e + if [[ -f "$values_file" ]]; then + echo "→ Linting $chart with values file: $values_file" + lint_output=$(helm lint "$chart_path" --values "$values_file" 2>&1) + lint_exit=$? + else + echo "→ Linting $chart without custom values" + lint_output=$(helm lint "$chart_path" 2>&1) + lint_exit=$? + fi + set -e + + # Display output + echo "$lint_output" + + # Check if lint failed (exit code 1) and if it's NOT just a dependency warning + if [[ $lint_exit -ne 0 ]]; then + # If it's only a dependency warning, don't fail + if echo "$lint_output" | grep -q "chart metadata is missing these dependencies"; then + echo " ℹ Ignoring dependency warning (local dependencies present)" + else + failed_charts+=("$chart") + fi + fi + + linted_count=$((linted_count + 1)) +done + +echo "" +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "Summary:" +echo " Linted: $linted_count chart(s)" +echo " Skipped: $skipped_count chart(s)" + +if [[ ${#failed_charts[@]} -gt 0 ]]; then + echo " Failed: ${#failed_charts[@]} chart(s)" + echo "" + echo "Failed charts:" + for chart in "${failed_charts[@]}"; do + echo " ✗ $chart" + done + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + exit 1 +fi + +# Success if no failures (even if all charts were skipped) +if [[ $linted_count -eq 0 ]]; then + echo " All charts skipped (none to lint) ✓" +else + echo " All charts passed! ✓" +fi +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +exit 0
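
The lint script can also be run by hand, outside of pre-commit; a small usage sketch (assumes `helm` is on PATH, matching the CI workflow the script mirrors, and the chart name below is only a placeholder):

```bash
# Run the same checks the pre-commit hook runs (the script cds to the repo root itself)
scripts/helm-lint-pre-commit.sh

# Exclude a chart from linting by adding its directory name to the skip file
# (one name per line; blank lines and '#' comments are ignored)
echo "my-experimental-chart" >> .github/helm-ci-values/skip-charts.txt
```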