From 37ed2e1278cb70c6ff2c82b64852ff4c6324e969 Mon Sep 17 00:00:00 2001 From: kyscott18 <43524469+kyscott18@users.noreply.github.com> Date: Wed, 11 Dec 2024 13:10:29 -0500 Subject: [PATCH] v0.8 (#1235) * cached transport supports all methods * add more actions to context.client * fix: required block parameters for some methods * start new filter impl * debug rpc types * withLog * add debug helper functions * syncTrace in sync-historical added with filter matching * syncTrace filtering modified, blockwise processing added * sync-realtime: added tx/transfer factories * sync: fragments and encoding for transaction/transfer filters * sync-realtime: transaction/trasfer filter added * sync-realtime: handleBlock is modified for new filters * sync-realtime: fetchBlockEventData changed to debug and new filters for traces * update historical sync * build checkpoint for traces * record failed traces * implement insert traces db method * . * cleanup realtime * corrections: minor changes * sync-historical: traceCache added * sync-realtime: trace traversal added in fetchBlockEventData and handleBlock * sync: buildEvents and decodeEvents modfied * fix debug type * merge all interval tables into one * fix typo * filter/filterinterval migration into intervals * sync-store: fragment_id modified * fix fragment tests * stricter fragment id types * multi range * disable cache skips interval table * cleanup * batched getIntervals * fix: insert empty intervals * cleanup * migrations * order request * new rpc request table * notes * new filter types * decodeEvents + buildEvents * fix: range_agg * fix: range merging * fix: nits * more adjacent filters * rpc request migrations * required block actions * docs * fix * snake case * chore: changeset * preserve arrays * fix: ordering object arrays * chore: changeset * remove down migration * fix tests * fix: migration bug * fix: parse rpc response from cache * sync-historical: transaction filter * getEvents * account sources * accounts config * sync-realtime: fetchBlockEventData and handleBlock modified to handle account filters * cleanup realtime logs * minor fix * build accounts * start historical sync tests * update sync tests * sync: handle factory address * cleanup * fix transport tests * realtime sync tests * fix typecheck * account specific test * accounts example * fix virtual types * cleanup sync * note * transaction includeReverted * cleanup * back * factory fn * fix * filter * fix: config * sync: factory filter * sync: factory filter cont. * include * build * set * feature-accounts * cleanup * fix type * nits * nits * fragments * sync and events * update docs * update examples * update docs * cleanup * internal * trace type * nits * nits * remove transactionReceipt.logs * fix type error * fix * cleanup * fix config * fix query * normalize event * fix interval migrations * @/generated * fix accounts example * fix normalization * cleanup * fix * try fix actions * try fix actions * start migration guide * fix transaction event type * update mg * fix type test * use request hash to avoid large index * cleanup migration guide * fix migrations rounding * cleanup mg * config reference * cleanup * fix transfer txr type * update indexing functions docs * skip migrating checkpoint in trace migration * fix changeset ignore * docs * call traces * database schema manual deployments (#1312) * start deploy docs * more docs * cli * . 
* start new database service * test * dev * fix e2e * update direct sql * cli * fix schema bug * meta migration * explicit pre-build * database parameters * merge results * log * start list * db list * docs * improve error messages * cleanup * db list * cleanup list * dev fast path * accounts docs * remove as of * database docs mostly done * dev drop tables * direct sql * migration * fix: migrations compatible with postgres 14 * docs * error message tweaks --------- Co-authored-by: typedarray <90073088+0xOlias@users.noreply.github.com> * migration guide * changeset --------- Co-authored-by: Work Co-authored-by: typedarray <90073088+0xOlias@users.noreply.github.com> Co-authored-by: Kevin <90073088+typedarray@users.noreply.github.com> --- .changeset/config.json | 3 +- .changeset/early-days-turn.md | 5 + .changeset/rotten-socks-agree.md | 5 + .github/CONTRIBUTING.md | 4 +- .github/workflows/main.yml | 2 +- README.md | 14 +- benchmarks/apps/ponder-reth/ponder.config.ts | 2 +- benchmarks/apps/ponder-reth/ponder.schema.ts | 2 +- benchmarks/apps/ponder-reth/src/index.ts | 2 +- benchmarks/apps/ponder-univ2/ponder.config.ts | 2 +- benchmarks/apps/ponder-univ2/ponder.schema.ts | 2 +- benchmarks/apps/ponder-univ2/src/index.ts | 2 +- benchmarks/package.json | 2 +- docs/pages/docs/_meta.ts | 1 + docs/pages/docs/accounts.mdx | 98 + docs/pages/docs/advanced/foundry.mdx | 22 +- docs/pages/docs/advanced/telemetry.mdx | 4 +- docs/pages/docs/api-reference/config.mdx | 146 +- docs/pages/docs/api-reference/database.mdx | 76 +- .../docs/api-reference/indexing-functions.mdx | 237 ++- docs/pages/docs/api-reference/ponder-cli.mdx | 20 +- docs/pages/docs/api-reference/schema.mdx | 14 +- docs/pages/docs/block-intervals.mdx | 14 +- docs/pages/docs/call-traces.mdx | 18 +- docs/pages/docs/contracts-and-networks.mdx | 68 +- docs/pages/docs/getting-started/_meta.ts | 5 +- docs/pages/docs/getting-started/database.mdx | 62 + .../docs/getting-started/migrate-subgraph.mdx | 4 +- .../docs/getting-started/new-project.mdx | 6 +- ...stallation.mdx => system-requirements.mdx} | 15 +- docs/pages/docs/guides/time-series.mdx | 10 +- .../docs/indexing/read-contract-data.mdx | 56 +- .../docs/indexing/write-to-the-database.mdx | 34 +- docs/pages/docs/migration-guide.mdx | 213 ++ docs/pages/docs/production/_meta.ts | 1 - docs/pages/docs/production/deploy.mdx | 121 +- .../docs/production/horizontal-scaling.mdx | 73 - docs/pages/docs/query/api-functions.mdx | 32 +- docs/pages/docs/query/direct-sql.mdx | 109 +- docs/pages/docs/query/graphql.mdx | 6 +- docs/pages/docs/schema.mdx | 56 +- docs/pages/docs/utilities/merge-abis.mdx | 6 +- docs/pages/docs/utilities/replace-bigints.mdx | 12 +- docs/pages/docs/utilities/transports.mdx | 8 +- docs/pages/docs/utilities/types.mdx | 24 +- examples/feature-accounts/.env.example | 5 + examples/feature-accounts/.eslintrc.json | 3 + examples/feature-accounts/.gitignore | 18 + examples/feature-accounts/package.json | 28 + examples/feature-accounts/ponder-env.d.ts | 15 + examples/feature-accounts/ponder.config.ts | 22 + examples/feature-accounts/ponder.schema.ts | 7 + examples/feature-accounts/src/index.ts | 22 + examples/feature-accounts/tsconfig.json | 26 + examples/feature-api-functions/package.json | 5 +- .../feature-api-functions/ponder-env.d.ts | 34 +- .../feature-api-functions/ponder.config.ts | 2 +- .../feature-api-functions/ponder.schema.ts | 2 +- .../feature-api-functions/src/api/index.ts | 6 +- examples/feature-api-functions/src/index.ts | 4 +- examples/feature-blocks/package.json | 3 +- 
examples/feature-blocks/ponder-env.d.ts | 34 +- examples/feature-blocks/ponder.config.ts | 2 +- examples/feature-blocks/ponder.schema.ts | 2 +- examples/feature-blocks/src/index.ts | 4 +- examples/feature-call-traces/package.json | 3 +- examples/feature-call-traces/ponder-env.d.ts | 34 +- examples/feature-call-traces/ponder.config.ts | 4 +- examples/feature-call-traces/ponder.schema.ts | 2 +- examples/feature-call-traces/src/index.ts | 4 +- examples/feature-factory/package.json | 3 +- examples/feature-factory/ponder-env.d.ts | 34 +- examples/feature-factory/ponder.config.ts | 11 +- examples/feature-factory/ponder.schema.ts | 2 +- examples/feature-factory/src/LlamaCore.ts | 2 +- examples/feature-filter/package.json | 3 +- examples/feature-filter/ponder-env.d.ts | 34 +- examples/feature-filter/ponder.config.ts | 2 +- examples/feature-filter/ponder.schema.ts | 2 +- examples/feature-filter/src/index.ts | 4 +- examples/feature-multichain/package.json | 3 +- examples/feature-multichain/ponder-env.d.ts | 34 +- examples/feature-multichain/ponder.config.ts | 2 +- examples/feature-multichain/ponder.schema.ts | 2 +- examples/feature-multichain/src/index.ts | 4 +- examples/feature-proxy/package.json | 3 +- examples/feature-proxy/ponder-env.d.ts | 34 +- examples/feature-proxy/ponder.config.ts | 2 +- examples/feature-proxy/ponder.schema.ts | 2 +- examples/feature-proxy/src/index.ts | 4 +- examples/feature-read-contract/package.json | 3 +- .../feature-read-contract/ponder-env.d.ts | 34 +- .../feature-read-contract/ponder.config.ts | 2 +- .../feature-read-contract/ponder.schema.ts | 2 +- .../feature-read-contract/src/FileStore.ts | 4 +- examples/project-friendtech/package.json | 3 +- examples/project-friendtech/ponder-env.d.ts | 34 +- examples/project-friendtech/ponder.config.ts | 2 +- examples/project-friendtech/ponder.schema.ts | 2 +- .../src/FriendtechSharesV1.ts | 4 +- .../project-uniswap-v3-flash/package.json | 3 +- .../project-uniswap-v3-flash/ponder-env.d.ts | 34 +- .../project-uniswap-v3-flash/ponder.config.ts | 6 +- .../project-uniswap-v3-flash/ponder.schema.ts | 2 +- .../project-uniswap-v3-flash/src/index.ts | 4 +- examples/reference-erc1155/package.json | 3 +- examples/reference-erc1155/ponder-env.d.ts | 34 +- examples/reference-erc1155/ponder.config.ts | 2 +- examples/reference-erc1155/ponder.schema.ts | 2 +- examples/reference-erc1155/src/index.ts | 4 +- examples/reference-erc20/package.json | 3 +- examples/reference-erc20/ponder-env.d.ts | 34 +- examples/reference-erc20/ponder.config.ts | 2 +- examples/reference-erc20/ponder.schema.ts | 2 +- examples/reference-erc20/src/index.ts | 4 +- examples/reference-erc4626/package.json | 3 +- examples/reference-erc4626/ponder-env.d.ts | 34 +- examples/reference-erc4626/ponder.config.ts | 2 +- examples/reference-erc4626/ponder.schema.ts | 2 +- examples/reference-erc4626/src/index.ts | 4 +- examples/reference-erc721/package.json | 3 +- examples/reference-erc721/ponder-env.d.ts | 34 +- examples/reference-erc721/ponder.config.ts | 2 +- examples/reference-erc721/ponder.schema.ts | 2 +- examples/reference-erc721/src/index.ts | 4 +- examples/with-client/ponder/package.json | 3 +- examples/with-client/ponder/ponder-env.d.ts | 34 +- examples/with-client/ponder/ponder.config.ts | 2 +- examples/with-client/ponder/ponder.schema.ts | 2 +- examples/with-client/ponder/src/api/index.ts | 6 +- examples/with-client/ponder/src/index.ts | 4 +- examples/with-foundry/ponder/package.json | 2 +- examples/with-foundry/ponder/ponder-env.d.ts | 34 +- 
examples/with-foundry/ponder/ponder.config.ts | 2 +- examples/with-foundry/ponder/ponder.schema.ts | 2 +- examples/with-foundry/ponder/src/Counter.ts | 4 +- examples/with-nextjs/ponder/package.json | 3 +- examples/with-nextjs/ponder/ponder-env.d.ts | 34 +- examples/with-nextjs/ponder/ponder.config.ts | 2 +- examples/with-nextjs/ponder/ponder.schema.ts | 2 +- examples/with-nextjs/ponder/src/index.ts | 4 +- examples/with-trpc/ponder/package.json | 3 +- examples/with-trpc/ponder/ponder-env.d.ts | 34 +- examples/with-trpc/ponder/ponder.config.ts | 2 +- examples/with-trpc/ponder/ponder.schema.ts | 2 +- examples/with-trpc/ponder/src/api/index.ts | 6 +- examples/with-trpc/ponder/src/index.ts | 4 +- packages/client/dist/index.d.ts | 14 + packages/core/CHANGELOG.md | 10 +- packages/core/README.md | 14 +- packages/core/package.json | 11 +- packages/core/src/_test/constants.ts | 6 - .../core/src/_test/e2e/erc20/erc20.test.ts | 55 +- .../core/src/_test/e2e/erc20/ponder.config.ts | 8 +- .../core/src/_test/e2e/erc20/src/api/index.ts | 9 +- .../core/src/_test/e2e/erc20/src/index.ts | 14 +- .../src/_test/e2e/factory/factory.test.ts | 66 +- .../src/_test/e2e/factory/ponder.config.ts | 9 +- .../src/_test/e2e/factory/src/api/index.ts | 9 +- .../core/src/_test/e2e/factory/src/index.ts | 14 +- packages/core/src/_test/setup.ts | 83 +- packages/core/src/_test/simulate.ts | 197 +- packages/core/src/_test/utils.ts | 689 +----- packages/core/src/bin/commands/codegen.ts | 25 +- packages/core/src/bin/commands/dev.ts | 116 +- packages/core/src/bin/commands/list.ts | 141 ++ packages/core/src/bin/commands/serve.ts | 58 +- packages/core/src/bin/commands/start.ts | 67 +- packages/core/src/bin/ponder.ts | 21 + packages/core/src/bin/utils/run.test.ts | 105 +- packages/core/src/bin/utils/run.ts | 45 +- packages/core/src/bin/utils/runServer.ts | 22 +- .../build/configAndIndexingFunctions.test.ts | 337 +-- .../src/build/configAndIndexingFunctions.ts | 857 ++++---- packages/core/src/build/index.ts | 722 ++++++- packages/core/src/build/plugin.ts | 13 +- packages/core/src/build/pre.test.ts | 153 ++ packages/core/src/build/pre.ts | 141 ++ packages/core/src/build/schema.test.ts | 56 +- packages/core/src/build/schema.ts | 64 +- packages/core/src/build/service.ts | 806 ------- packages/core/src/common/codegen.ts | 36 +- packages/core/src/common/options.ts | 10 +- packages/core/src/common/telemetry.ts | 38 +- packages/core/src/config/address.test-d.ts | 79 +- packages/core/src/config/address.ts | 64 +- packages/core/src/config/config.test-d.ts | 54 +- packages/core/src/config/config.ts | 254 ++- packages/core/src/database/index.test.ts | 890 +++----- packages/core/src/database/index.ts | 848 +++----- packages/core/src/drizzle/index.test.ts | 16 +- packages/core/src/drizzle/index.ts | 178 +- packages/core/src/drizzle/kit/index.ts | 43 +- packages/core/src/graphql/middleware.ts | 4 +- packages/core/src/index.ts | 2 +- .../src/indexing-store/historical.test.ts | 1 - .../core/src/indexing-store/historical.ts | 39 +- .../core/src/indexing-store/metadata.test.ts | 41 +- packages/core/src/indexing-store/metadata.ts | 32 +- .../core/src/indexing-store/realtime.test.ts | 1 - packages/core/src/indexing-store/realtime.ts | 20 +- packages/core/src/indexing/ponderActions.ts | 310 +-- packages/core/src/indexing/service.test.ts | 571 ++--- packages/core/src/indexing/service.ts | 253 +-- packages/core/src/server/index.test.ts | 91 +- packages/core/src/server/index.ts | 46 +- .../core/src/sync-historical/index.test.ts | 615 +++++- 
packages/core/src/sync-historical/index.ts | 525 +++-- packages/core/src/sync-realtime/bloom.ts | 32 +- .../core/src/sync-realtime/filter.test.ts | 393 +++- packages/core/src/sync-realtime/filter.ts | 334 ++- packages/core/src/sync-realtime/index.test.ts | 878 ++++++-- packages/core/src/sync-realtime/index.ts | 318 ++- packages/core/src/sync-store/encoding.ts | 212 +- packages/core/src/sync-store/index.test.ts | 1874 +++++++++++------ packages/core/src/sync-store/index.ts | 1086 +++------- packages/core/src/sync-store/migrations.ts | 434 ++++ packages/core/src/sync/abi.ts | 36 +- packages/core/src/sync/events.test.ts | 908 ++++++-- packages/core/src/sync/events.ts | 528 +++-- packages/core/src/sync/fragments.test.ts | 176 +- packages/core/src/sync/fragments.ts | 459 ++-- packages/core/src/sync/index.test.ts | 399 +++- packages/core/src/sync/index.ts | 47 +- packages/core/src/sync/source.ts | 259 ++- packages/core/src/sync/transport.test.ts | 202 +- packages/core/src/sync/transport.ts | 115 +- packages/core/src/types.d.ts | 26 + packages/core/src/types/eth.ts | 99 +- packages/core/src/types/sync.ts | 85 +- packages/core/src/types/virtual.test-d.ts | 76 +- packages/core/src/types/virtual.ts | 112 +- packages/core/src/ui/Table.tsx | 22 +- packages/core/src/ui/app.tsx | 2 +- packages/core/src/utils/checkpoint.ts | 3 +- packages/core/src/utils/debug.ts | 110 + packages/core/src/utils/interval.ts | 6 + packages/core/src/utils/order.ts | 16 + packages/core/src/utils/requestQueue.test.ts | 7 +- packages/core/src/utils/requestQueue.ts | 14 +- packages/core/src/utils/result.ts | 24 + packages/core/src/utils/rpc.ts | 165 +- packages/core/tsup.config.ts | 2 +- packages/create-ponder/CHANGELOG.md | 2 +- packages/create-ponder/README.md | 14 +- packages/create-ponder/src/index.ts | 11 +- .../templates/empty/package.json | 3 +- .../templates/empty/ponder-env.d.ts | 35 +- .../templates/empty/ponder.config.ts | 2 +- .../templates/empty/ponder.schema.ts | 2 +- .../templates/empty/src/api/index.ts | 4 +- .../templates/empty/src/index.ts | 2 +- .../templates/etherscan/package.json | 3 +- .../templates/etherscan/ponder-env.d.ts | 35 +- .../templates/etherscan/ponder.schema.ts | 2 +- .../templates/subgraph/package.json | 3 +- .../templates/subgraph/ponder-env.d.ts | 40 +- .../templates/subgraph/ponder.schema.ts | 2 +- packages/eslint-config-ponder/README.md | 14 +- pnpm-lock.yaml | 158 +- 260 files changed, 12768 insertions(+), 9647 deletions(-) create mode 100644 .changeset/early-days-turn.md create mode 100644 .changeset/rotten-socks-agree.md create mode 100644 docs/pages/docs/accounts.mdx create mode 100644 docs/pages/docs/getting-started/database.mdx rename docs/pages/docs/getting-started/{installation.mdx => system-requirements.mdx} (68%) delete mode 100644 docs/pages/docs/production/horizontal-scaling.mdx create mode 100644 examples/feature-accounts/.env.example create mode 100644 examples/feature-accounts/.eslintrc.json create mode 100644 examples/feature-accounts/.gitignore create mode 100644 examples/feature-accounts/package.json create mode 100644 examples/feature-accounts/ponder-env.d.ts create mode 100644 examples/feature-accounts/ponder.config.ts create mode 100644 examples/feature-accounts/ponder.schema.ts create mode 100644 examples/feature-accounts/src/index.ts create mode 100644 examples/feature-accounts/tsconfig.json create mode 100644 packages/client/dist/index.d.ts create mode 100644 packages/core/src/bin/commands/list.ts create mode 100644 packages/core/src/build/pre.test.ts create mode 
100644 packages/core/src/build/pre.ts delete mode 100644 packages/core/src/build/service.ts create mode 100644 packages/core/src/types.d.ts create mode 100644 packages/core/src/utils/debug.ts create mode 100644 packages/core/src/utils/order.ts create mode 100644 packages/core/src/utils/result.ts diff --git a/.changeset/config.json b/.changeset/config.json index b6606880a..9197a6464 100644 --- a/.changeset/config.json +++ b/.changeset/config.json @@ -2,9 +2,10 @@ "$schema": "https://unpkg.com/@changesets/config@2.3.0/schema.json", "changelog": ["@changesets/changelog-github", { "repo": "ponder-sh/ponder" }], "commit": false, - "fixed": [["@ponder/core", "create-ponder", "eslint-config-ponder"]], + "fixed": [["ponder", "create-ponder", "eslint-config-ponder"]], "ignore": [ "@ponder/common", + "ponder-examples-feature-accounts", "ponder-examples-feature-blocks", "ponder-examples-feature-factory", "ponder-examples-feature-filter", diff --git a/.changeset/early-days-turn.md b/.changeset/early-days-turn.md new file mode 100644 index 000000000..a40afb734 --- /dev/null +++ b/.changeset/early-days-turn.md @@ -0,0 +1,5 @@ +--- +"ponder": minor +--- + +Released `0.8`. Visit the [migration guide](https://ponder.sh/docs/migration-guide) for a full list of changes. diff --git a/.changeset/rotten-socks-agree.md b/.changeset/rotten-socks-agree.md new file mode 100644 index 000000000..0fe5135b6 --- /dev/null +++ b/.changeset/rotten-socks-agree.md @@ -0,0 +1,5 @@ +--- +"@ponder/core": patch +--- + +Added support for caching all available rpc methods. Several new client actions are available, including `context.client.simulateCall()`, `context.client.getTransactionCount()`, `context.client.getTransactionReceipt()`, and `context.client.getBlockTransactionCount()`. See [docs](https://ponder.sh/docs/indexing/read-contract-data#supported-actions) for all actions. diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 26769b79d..4155fac8d 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -93,7 +93,7 @@ In the root directory, run the build command: pnpm build ``` -After the build completes, pnpm links packages across the project for development. This means that if you run any of the projects in the `examples/` directory, they will use the local version of `@ponder/core`. +After the build completes, pnpm links packages across the project for development. This means that if you run any of the projects in the `examples/` directory, they will use the local version of `ponder`.
↑ back to top @@ -169,7 +169,7 @@ Ponder is still in alpha, so all changes should be marked as a patch. ### Changesets workflow 1. Write a PR that includes a public API change or bug fix. -2. Create a changeset using `pnpm changeset`. The changesets CLI will ask you which package is affected (`@ponder/core` or `create-ponder`) and if the change is a patch, minor, or major release. +2. Create a changeset using `pnpm changeset`. The changesets CLI will ask you which package is affected (`ponder` or `create-ponder`) and if the change is a patch, minor, or major release. 3. The changesets CLI will generate a Markdown file in `.changeset/` that includes the details you provided. Commit this file to your PR branch (e.g. `git commit -m "chore: changeset"`). 4. When you push this commit to remote, a GitHub bot will detect the changeset and add a comment to your PR with a preview of the changelog. 5. Merge your PR. The changesets Github Action workflow will open (or update) a PR with the title `"chore: version packages"`. The changes in your PR **will not be released** until this PR is merged. diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 405dea166..0e9156711 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -60,7 +60,7 @@ jobs: - name: Setup Postgres if: ${{ matrix.database == 'Postgres' }} - uses: ikalnytskyi/action-setup-postgres@v4 + uses: ikalnytskyi/action-setup-postgres@v7 id: postgres - name: Setup diff --git a/README.md b/README.md index 1b71bc618..55bc9b214 100644 --- a/README.md +++ b/README.md @@ -62,7 +62,7 @@ Ponder fetches event logs for the contracts added to `ponder.config.ts`, and pas ```ts // ponder.config.ts -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { BaseRegistrarAbi } from "./abis/BaseRegistrar"; @@ -92,7 +92,7 @@ The `ponder.schema.ts` file contains the database schema, and defines the shape ```ts // ponder.schema.ts -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const ensName = onchainTable("ens_name", (t) => ({ name: p.text().primaryKey(), @@ -108,8 +108,8 @@ Files in the `src/` directory contain **indexing functions**, which are TypeScri ```ts // src/BaseRegistrar.ts -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("BaseRegistrar:NameRegistered", async ({ event, context }) => { const { name, owner } = event.params; @@ -167,7 +167,7 @@ If you're interested in contributing to Ponder, please read the [contribution gu ## Packages -- `@ponder/core` +- `ponder` - `@ponder/utils` - `create-ponder` - `eslint-config-ponder` @@ -180,7 +180,7 @@ Ponder is MIT-licensed open-source software. 
[ci-url]: https://github.com/ponder-sh/ponder/actions/workflows/main.yml [tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=Chat&url=https%3A%2F%2Fmogyo.ro%2Fquart-apis%2Ftgmembercount%3Fchat_id%3Dponder_sh [tg-url]: https://t.me/ponder_sh -[license-badge]: https://img.shields.io/npm/l/@ponder/core?label=License +[license-badge]: https://img.shields.io/npm/l/ponder?label=License [license-url]: https://github.com/ponder-sh/ponder/blob/main/LICENSE -[version-badge]: https://img.shields.io/npm/v/@ponder/core +[version-badge]: https://img.shields.io/npm/v/ponder [version-url]: https://github.com/ponder-sh/ponder/releases diff --git a/benchmarks/apps/ponder-reth/ponder.config.ts b/benchmarks/apps/ponder-reth/ponder.config.ts index eaaa63a6a..2d3193cb8 100644 --- a/benchmarks/apps/ponder-reth/ponder.config.ts +++ b/benchmarks/apps/ponder-reth/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { RocketTokenRETHAbi } from "./abis/RocketTokenRETH"; diff --git a/benchmarks/apps/ponder-reth/ponder.schema.ts b/benchmarks/apps/ponder-reth/ponder.schema.ts index 32e0ddfa4..c1e6d48bf 100644 --- a/benchmarks/apps/ponder-reth/ponder.schema.ts +++ b/benchmarks/apps/ponder-reth/ponder.schema.ts @@ -1,4 +1,4 @@ -import { createSchema } from "@ponder/core"; +import { createSchema } from "ponder"; export default createSchema((p) => ({ Account: p.createTable({ diff --git a/benchmarks/apps/ponder-reth/src/index.ts b/benchmarks/apps/ponder-reth/src/index.ts index f14d5f5ae..e80210155 100644 --- a/benchmarks/apps/ponder-reth/src/index.ts +++ b/benchmarks/apps/ponder-reth/src/index.ts @@ -2,7 +2,7 @@ import { ponder } from "@/generated"; // biome-ignore lint/suspicious/noRedeclare: :) -declare const ponder: import("@ponder/core").PonderApp< +declare const ponder: import("ponder").PonderApp< typeof import("../ponder.config.js").default, typeof import("../ponder.schema.js").default >; diff --git a/benchmarks/apps/ponder-univ2/ponder.config.ts b/benchmarks/apps/ponder-univ2/ponder.config.ts index 01fa71131..f33678ae5 100644 --- a/benchmarks/apps/ponder-univ2/ponder.config.ts +++ b/benchmarks/apps/ponder-univ2/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http, getAbiItem } from "viem"; import { FactoryAbi } from "./abis/FactoryAbi"; diff --git a/benchmarks/apps/ponder-univ2/ponder.schema.ts b/benchmarks/apps/ponder-univ2/ponder.schema.ts index 09c1c5240..07f3f7a0b 100644 --- a/benchmarks/apps/ponder-univ2/ponder.schema.ts +++ b/benchmarks/apps/ponder-univ2/ponder.schema.ts @@ -1,4 +1,4 @@ -import { createSchema } from "@ponder/core"; +import { createSchema } from "ponder"; export default createSchema((p) => ({ UniswapFactory: p.createTable({ diff --git a/benchmarks/apps/ponder-univ2/src/index.ts b/benchmarks/apps/ponder-univ2/src/index.ts index a2a28966c..9c167fd62 100644 --- a/benchmarks/apps/ponder-univ2/src/index.ts +++ b/benchmarks/apps/ponder-univ2/src/index.ts @@ -2,7 +2,7 @@ import { ponder } from "@/generated"; // biome-ignore lint/suspicious/noRedeclare: :) -declare const ponder: import("@ponder/core").PonderApp< +declare const ponder: import("ponder").PonderApp< typeof import("../ponder.config.js").default, typeof import("../ponder.schema.js").default >; diff --git a/benchmarks/package.json b/benchmarks/package.json index 37e0e569b..eaf362156 100644 --- a/benchmarks/package.json +++ 
b/benchmarks/package.json @@ -11,7 +11,7 @@ "devDependencies": { "@graphprotocol/graph-cli": "0.61.0", "@graphprotocol/graph-ts": "^0.31.0", - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "@types/node": "^20.10.0", "execa": "^8.0.1", "tsup": "^8.0.1", diff --git a/docs/pages/docs/_meta.ts b/docs/pages/docs/_meta.ts index 46260cada..00734fa50 100644 --- a/docs/pages/docs/_meta.ts +++ b/docs/pages/docs/_meta.ts @@ -16,6 +16,7 @@ export default { title: "Config", }, "contracts-and-networks": "Contracts & networks", + "accounts": "Accounts", "block-intervals": "Block intervals", "call-traces": "Call traces", diff --git a/docs/pages/docs/accounts.mdx b/docs/pages/docs/accounts.mdx new file mode 100644 index 000000000..f987ffa93 --- /dev/null +++ b/docs/pages/docs/accounts.mdx @@ -0,0 +1,98 @@ +--- +title: "Accounts" +description: "Learn how to index accounts in a Ponder app" +--- + +import { Callout, Steps } from "nextra/components"; + +# Accounts + +Account indexing is useful for activity that is not associated with a specific contract, like transactions and native transfers to and from an address. The key difference between `contracts` and `accounts` in `ponder.config.ts` is that accounts do not have an ABI. + + + The standard Ethereum RPC does not support filtering for transactions & native + transfers. So, account indexing uses a block-by-block approach which is often + slower than log indexing. + + +## Quick example + +In this example, we'll index transactions sent by the [Beaver](https://beaverbuild.org/) block builder account. + + + +### Add an account + +Add the network, address, and start block to the `accounts` field in `ponder.config.ts`. + +```ts filename="ponder.config.ts" +import { createConfig } from "ponder"; + +export default createConfig({ + // ... more config + accounts: { + BeaverBuild: { + network: "mainnet", + address: "0x95222290DD7278Aa3Ddd389Cc1E1d165CC4BAfe5", + startBlock: 20000000, + }, + }, +}); +``` + +### Register indexing functions + +Register an indexing function for the `transaction:from` event. The framework will fetch all transactions where `transaction.from` matches the account address, then process each one using your indexing function. + +```ts filename="src/index.ts" {4} +import { ponder } from "ponder:registry"; +import { transactions } from "ponder:schema"; + +ponder.on("BeaverBuilder:transaction:from", async ({ event, context }) => { + await context.db.insert(transactions).values({ + from: event.transaction.from, + to: event.transaction.to, + value: event.transaction.value, + gasUsed: event.transactionReceipt.gasUsed, + // ... more fields + }); +}); +``` + +Account indexing also supports the `transaction:to`, `transfer:from`, and `transfer:to` events. [Read more](/docs/api-reference/config#accounts) about event types. + + + +## Account name + +Every account must have a unique name, provided as a key to the `accounts` object. Names must be unique across accounts, contracts, and block intervals. + +```ts filename="ponder.config.ts" {9} +import { createConfig } from "ponder"; +import { http } from "viem"; + +export default createConfig({ + networks: { + mainnet: { chainId: 1, transport: http(process.env.PONDER_RPC_URL_1) }, + }, + accounts: { + BeaverBuilder: { + network: "mainnet", + address: "0x95222290DD7278Aa3Ddd389Cc1E1d165CC4BAfe5", + startBlock: 12439123, + }, + }, +}); +``` + +## Network + +The `network` option for accounts works the same way as it does for contracts. [Read more](/docs/contracts-and-networks#network). 
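+
+For example, here is a sketch of an account indexed on two networks at once. It assumes a second network named `base` is defined under `networks` and uses placeholder start blocks — adjust the values for your own setup.
+
+```ts filename="ponder.config.ts"
+import { createConfig } from "ponder";
+import { http } from "viem";
+
+export default createConfig({
+  networks: {
+    mainnet: { chainId: 1, transport: http(process.env.PONDER_RPC_URL_1) },
+    base: { chainId: 8453, transport: http(process.env.PONDER_RPC_URL_8453) },
+  },
+  accounts: {
+    BeaverBuilder: {
+      // Per-network overrides follow the same rules as contracts.
+      network: {
+        mainnet: {
+          address: "0x95222290DD7278Aa3Ddd389Cc1E1d165CC4BAfe5",
+          startBlock: 20000000, // placeholder start block
+        },
+        base: {
+          address: "0x95222290DD7278Aa3Ddd389Cc1E1d165CC4BAfe5",
+          startBlock: 18000000, // placeholder start block
+        },
+      },
+    },
+  },
+});
+```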
+ +## Address + +The `address` option for accounts works the same way as it does for contracts. You can provide a single address, a list of addresses, or an address factory. You can also specify network-specific overrides. [Read more](/docs/contracts-and-networks#address). + +## Block range + +The `startBlock` and `endBlock` options for accounts work the same way as it does for contracts. [Read more](/docs/contracts-and-networks#block-range). diff --git a/docs/pages/docs/advanced/foundry.mdx b/docs/pages/docs/advanced/foundry.mdx index ef657cdf8..e5ce7e4ad 100644 --- a/docs/pages/docs/advanced/foundry.mdx +++ b/docs/pages/docs/advanced/foundry.mdx @@ -20,12 +20,12 @@ Ponder's RPC request cache works well for live networks where the chain is gener Use the `disableCache` option to **disable RPC request caching** for the Anvil network. With this option set to true, Ponder will clear the cache on start up and between hot reloads. ```ts filename="ponder.config.ts" {9} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; export default createConfig({ networks: { - anvil: { + anvil: { chainId: 31337, transport: http("http://127.0.0.1:8545"), disableCache: true, @@ -43,7 +43,11 @@ We recommend using `31337` (the default Anvil chain ID) even when forking a live We recommend using [interval mining](https://book.getfoundry.sh/reference/anvil/#mining-modes) with a block time of ~2 seconds. This better simulates a live network. -Known issue: When indexing Anvil with auto mining enabled in an app with multiple networks, indexing progress will get "stuck" at the timestamp of the latest Anvil block. + + Known issue: When indexing Anvil with auto mining enabled in an app with + multiple networks, indexing progress will get "stuck" at the timestamp of the + latest Anvil block.{" "} + ## Generate ABI files @@ -74,7 +78,11 @@ export default defineConfig({ Foundry scripts write transaction inputs and receipts to JSON files in the `broadcast` directory. You can import these files directly into `ponder.config.ts` to automate address management and enable hot reloading. -Remember to enable [broadcast](https://book.getfoundry.sh/tutorials/solidity-scripting?highlight=deploy#deploying-locally) so that `forge script` submits transactions to Anvil. + + Remember to enable + [broadcast](https://book.getfoundry.sh/tutorials/solidity-scripting?highlight=deploy#deploying-locally) + so that `forge script` submits transactions to Anvil. + ### Automate address management @@ -83,7 +91,7 @@ To read the contract address and deployment block number from a broadcast file, The `ponder.config.ts` file from the Foundry [example project](https://github.com/ponder-sh/ponder/tree/main/examples/with-foundry) demonstrates this pattern. Here, the first transaction in the broadcast file deployed the `Counter.sol` contract. The location of the contract address and start block within the broadcast file depends on the order and number of transactions in your deployment script. ```ts filename="ponder.config.ts" {4, 6-7, 21-22} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http, getAddress, hexToNumber } from "viem"; import { counterABI } from "../abis/CounterAbi"; import CounterDeploy from "../foundry/broadcast/Deploy.s.sol/31337/run-latest.json"; @@ -115,7 +123,7 @@ export default createConfig({ If you import a JSON broadcast file in `ponder.config.ts`, the dev server will reload each time that file changes. 
This is a simple way to ensure that Ponder reloads every time you run a Foundry deployment script. ```ts filename="ponder.config.ts" {3-4} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import CounterDeploy from "../foundry/broadcast/Deploy.s.sol/31337/run-latest.json"; // ^ The development server detects changes to this file and triggers a hot reload. @@ -123,4 +131,4 @@ import CounterDeploy from "../foundry/broadcast/Deploy.s.sol/31337/run-latest.js export default createConfig({ // ... }); -``` \ No newline at end of file +``` diff --git a/docs/pages/docs/advanced/telemetry.mdx b/docs/pages/docs/advanced/telemetry.mdx index b08664bd5..ac8fed54b 100644 --- a/docs/pages/docs/advanced/telemetry.mdx +++ b/docs/pages/docs/advanced/telemetry.mdx @@ -7,7 +7,7 @@ import { Callout } from "nextra/components"; # Telemetry -As of version `0.0.79`, Ponder collects **completely anonymous** telemetry data about general usage. The developers use this data to prioritize new feature development, identify bugs, and improve performance & stability. +Ponder collects **completely anonymous** telemetry data about general usage. The developers use this data to prioritize new feature development, identify bugs, and improve performance & stability. ## Opt out @@ -20,6 +20,6 @@ PONDER_TELEMETRY_DISABLED = true ## Implementation -Ponder's telemetry implementation is 100% open-source. The [telemetry service](https://github.com/ponder-sh/ponder/blob/main/packages/core/src/common/telemetry.ts#L47) (part of `@ponder/core`) runs on the user's device and submits event data via HTTP POST requests to the [telemetry collection endpoint](https://github.com/ponder-sh/ponder/blob/main/docs/pages/api/telemetry/index.ts) hosted at `https://ponder.sh/api/telemetry`. +Ponder's telemetry implementation is 100% open-source. The [telemetry service](https://github.com/ponder-sh/ponder/blob/main/packages/core/src/common/telemetry.ts#L47) (part of `ponder`) runs on the user's device and submits event data via HTTP POST requests to the [telemetry collection endpoint](https://github.com/ponder-sh/ponder/blob/main/docs/pages/api/telemetry/index.ts) hosted at `https://ponder.sh/api/telemetry`. The implementation generates a stable anonymous unique identifier for the user's device and stores it at the [system default user config directory](https://github.com/sindresorhus/env-paths#pathsconfig). This config also stores the user's opt-out preference and a stable salt used to hash potentially sensitive data such as file paths and the git remote URL. diff --git a/docs/pages/docs/api-reference/config.mdx b/docs/pages/docs/api-reference/config.mdx index 97a9d4e85..f06487ddb 100644 --- a/docs/pages/docs/api-reference/config.mdx +++ b/docs/pages/docs/api-reference/config.mdx @@ -15,7 +15,7 @@ The `ponder.config.ts` file must **default export** the object returned by `crea {/* prettier-ignore */} ```ts filename="ponder.config.ts" {1,4} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; export default createConfig({ @@ -44,7 +44,7 @@ The `networks` field is an object where each key is a network name containing th | **disableCache** | `boolean \| undefined` | **Default: `false`**. Disables the RPC request cache. Use when indexing a [local node](/docs/advanced/foundry) like Anvil. 
| ```ts filename="ponder.config.ts" {7-12,16} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { BlitmapAbi } from "./abis/Blitmap"; @@ -74,32 +74,27 @@ export default createConfig({ the [contracts & networks](/docs/contracts-and-networks) guide. -The `contracts` field is an object where each key is a contract name containing that contract's configuration. Ponder will sync & index contract data according to the options you provide. - -| field | type | | -| :------------- | :--------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **name** | `string` | A unique name for the smart contract. Must be unique across all contracts. _Provided as an object property name._ | -| **abi** | `abitype.Abi` | The contract [ABI](https://docs.soliditylang.org/en/v0.8.17/abi-spec.html) as an array as const. Must be asserted as constant, see [ABIType documentation](https://abitype.dev/guide/getting-started#usage) for details. | -| **network** | `string` | The name of the network this contract is deployed to. References the `networks` field. | -| **address** | `0x{string} \| 0x{string}[]` | One more more contract addresses. Mutually exclusive with `factory`. | -| **factory** | [`Factory?`](#factory) | Factory pattern configuration. Mutually exclusive with `address`. | -| **filter** | [`Filter?`](#filter) | Event filter criteria. | -| **startBlock** | `number \| undefined` | **Default: `0`**. Block number to start syncing events. Usually set to the contract deployment block number. **Default: `0`** | -| **endBlock** | `number \| undefined` | **Default: `undefined`**. Block number to stop syncing events. If this field is specified, the contract will not be indexed in realtime. This field can be used alongside `startBlock` to index a specific block range. | - -```ts filename="ponder.config.ts" {14-19} -import { createConfig } from "@ponder/core"; -import { http } from "viem"; +The `contracts` field is an object where each key is a contract name containing that contract's configuration. Ponder will sync & index logs or call traces according to the options you provide. + +| field | type | | +| :----------------------------- | :--------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **name** | `string` | A unique name for the smart contract. Must be unique across all contracts. _Provided as an object property name._ | +| **abi** | `abitype.Abi` | The contract [ABI](https://docs.soliditylang.org/en/v0.8.17/abi-spec.html) as an array as const. Must be asserted as constant, see [ABIType documentation](https://abitype.dev/guide/getting-started#usage) for details. | +| **network** | `string` | The name of the network this contract is deployed to. References the `networks` field. Also supports [multiple networks](/docs/contracts-and-networks#multiple-networks). | +| **address** | `0x{string} \| 0x{string}[] \| Factory \| undefined` | One or more contract addresses or factory configuration. | +| **filter** | [`Filter`](#filter) | Event filter criteria. | +| **startBlock** | `number \| undefined` | **Default: `0`**. 
Block number to start syncing events. Usually set to the contract deployment block number. | +| **endBlock** | `number \| undefined` | **Default: `undefined`**. Block number to stop syncing events. If this field is specified, the contract will not be indexed in realtime. This field can be used alongside `startBlock` to index a specific block range. | +| **includeTransactionReceipts** | `boolean \| undefined` | **Default: `false`**. If this field is `true`, `transactionReceipt` will be included in `event`. | +| **includeCallTraces** | `boolean \| undefined` | **Default: `false`**. If this field is `true`, each function in the abi will be available as an indexing function event name. See the [call traces guide](/docs/call-traces#register-an-indexing-function) for details. | + +```ts filename="ponder.config.ts" {7-14} +import { createConfig } from "ponder"; import { BlitmapAbi } from "./abis/Blitmap"; export default createConfig({ - networks: { - mainnet: { - chainId: 1, - transport: http(process.env.PONDER_RPC_URL_1), - }, - }, + // ... more config contracts: { Blitmap: { abi: BlitmapAbi, @@ -111,24 +106,91 @@ export default createConfig({ }); ``` -### Factory +### Filter + +| field | type | | +| :-------- | :-------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------ | +| **event** | `string \| string[] \| undefined` | **Default: `undefined`**. One or more event names present in the provided ABI. | +| **args** | `object \| undefined` | **Default: `undefined`**. An object containing indexed argument values to filter for. Only allowed if **one** event name was provided in `event`. | + +[Read more](/docs/contracts-and-networks#event-filter) about event filters. + +## Accounts + +The `accounts` field is an object similar to `contracts` where each key is an account name containing that account's configuration. Accounts are used to index transactions or native transfers. + +| field | type | | +| :----------------------------- | :--------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **name** | `string` | A unique name for the smart contract. Must be unique across all contracts. _Provided as an object property name._ | +| **network** | `string` | The name of the network this contract is deployed to. References the `networks` field. Also supports [multiple networks](/docs/contracts-and-networks#multiple-networks). | +| **address** | `0x{string} \| 0x{string}[] \| Factory \| undefined` | Address or factory configuration. | +| **startBlock** | `number \| undefined` | **Default: `0`**. Block number to start syncing events. | +| **endBlock** | `number \| undefined` | **Default: `undefined`**. Block number to stop syncing events. If this field is specified, the contract will not be indexed in realtime. This field can be used alongside `startBlock` to index a specific block range. | +| **includeTransactionReceipts** | `boolean \| undefined` | **Default: `false`**. If this field is `true`, `transactionReceipt` will be included in `event`. | + +```ts filename="ponder.config.ts" {5-11} +import { createConfig } from "ponder"; + +export default createConfig({ + // ... 
more config + accounts: { + coinbasePrime: { + network: "mainnet", + address: "0xCD531Ae9EFCCE479654c4926dec5F6209531Ca7b", + startBlock: 12111233, + }, + }, +}); +``` + +## Blocks + +```ts filename="ponder.config.ts" {5-11} +import { createConfig } from "ponder"; + +export default createConfig({ + // ... more config + blocks: { + ChainlinkPriceOracle: { + network: "mainnet", + startBlock: 19_750_000, + interval: 5, // every minute + }, + }, +}); +``` + +## `factory()` + +The `factory()` function is used to specify if an address is derived from the log of another contract. Both [`contracts`](#contracts) and [`accounts`](#accounts) support `factory()` in their `address` field. | field | type | | | :------------ | :--------------------------------------------------: | :-------------------------------------------------------------------------------------------- | -| **address** | `string` | The address of the factory contract that creates instances of this contract. | +| **address** | `0x{string} \| 0x{string}[]` | The address of the factory contract that creates instances of this contract. | | **event** | [`AbiEvent`](https://abitype.dev/api/types#abievent) | The ABI item of the event that announces the creation of a new child contract. | | **parameter** | `string` | The name of the parameter within `event` that contains the address of the new child contract. | [Read more](/docs/contracts-and-networks#factory-contracts) about factory patterns. -### Filter +```ts filename="ponder.config.ts" {8-14} +import { createConfig, factory } from "ponder"; -| field | type | | -| :-------- | :-------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------ | -| **event** | `string \| string[] \| undefined` | **Default: `undefined`**. One or more event names present in the provided ABI. | -| **args** | `object \| undefined` | **Default: `undefined`**. An object containing indexed argument values to filter for. Only allowed if **one** event name was provided in `event`. | - -[Read more](/docs/contracts-and-networks#event-filter) about event filters. +export default createConfig({ + // ... more config + contracts: { + uniswapV2: { + // ... other contract options + address: factory({ + address: "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f", + event: parseAbiItem( + "event PairCreated(address indexed token0, address indexed token1, address pair, uint256)" + ), + parameter: "pair", + }), + }, + }, +}); +``` ## Database @@ -150,7 +212,7 @@ Here is the logic Ponder uses to determine which database to use:

```ts filename="ponder.config.ts" {4-7} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; export default createConfig({ database: {

```ts filename="ponder.config.ts" {4-10} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; export default createConfig({ database: { @@ -197,9 +259,11 @@ export default createConfig({ ### Basic example ```ts filename="ponder.config.ts" -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; +import { ArtGobblersAbi } from "./abis/ArtGobblers"; + export default createConfig({ networks: { mainnet: { @@ -210,7 +274,7 @@ export default createConfig({ contracts: { ArtGobblers: { network: "mainnet", - abi: "./abis/ArtGobblers.json", + abi: ArtGobblersAbi, address: "0x60bb1e2aa1c9acafb4d34f71585d7e959f387769", startBlock: 15863321, }, @@ -221,8 +285,10 @@ export default createConfig({ ### Using top-level await {/* prettier-ignore */} -```ts filename="ponder.config.ts" {3,17} -import { createConfig } from "@ponder/core"; +```ts filename="ponder.config.ts" {5,19} +import { createConfig } from "ponder"; + +import { ArtGobblersAbi } from "./abis/ArtGobblers"; const startBlock = await fetch("http://..."); @@ -236,7 +302,7 @@ export default createConfig({ contracts: { ArtGobblers: { network: "mainnet", - abi: "./abis/ArtGobblers.json", + abi: ArtGobblersAbi, address: "0x60bb1e2aa1c9acafb4d34f71585d7e959f387769", startBlock, }, diff --git a/docs/pages/docs/api-reference/database.mdx b/docs/pages/docs/api-reference/database.mdx index 41d302c5f..0bc886fd8 100644 --- a/docs/pages/docs/api-reference/database.mdx +++ b/docs/pages/docs/api-reference/database.mdx @@ -10,48 +10,82 @@ import { Callout } from "nextra/components"; This is a low-level reference designed to assist with advanced workflows. To - learn how to run basic queries for indexed data, visit the [query the + learn how to query indexed data, visit the [query the database](/docs/query/direct-sql) guide. -This reference describes how Ponder interacts with the database, which may be useful for advanced workflows around zero-downtime deployments, crash recovery, and database administration. +This reference documents the Ponder instance lifecycle, which may be useful for advanced workflows around zero-downtime deployments, crash recovery, and database administration. ## Instance lifecycle -When a Ponder instance starts up, it runs the following logic (simplified for clarity). +Here is a high-level overview of the Ponder instance lifecycle. Note that "instance" here refers to a Node.js process running `ponder dev` or `ponder start`. 1. Start returning `200` from the `/health` endpoint. Return `503` from the `/ready` endpoint. -2. Generate a new `instance_id` (a random four-character string). -3. For each table defined in `ponder.schema.ts`, create a new table in the database named `{instance_id}__{table_name}`. If the user has specified a custom database schema in `ponder.schema.ts`, use it. -4. Begin the historical backfill (starting from scratch) and write data to the tables created in the previous step. -5. When the historical backfill is complete, for each table create a view named `{table_name}` as `SELECT * FROM {instance_id}__{table_name}`. If a view with that name already exists, drop the old one first. -6. Start returning `200` from the `/ready` endpoint. -7. On shutdown (e.g. due to a process exit signal), **do not** drop the tables or views. +2. Build all user code (config, schema, indexing functions), then generate a `build_id` by hashing the build artifacts. +3. 
Follow the database schema flow chart below to validate and prepare the target database schema. +4. Begin historical indexing, either from scratch or from where the previous instance left off (crash recovery). +5. When historical indexing is complete, start returning `200` from the `/ready` endpoint. +6. On shutdown (e.g. due to a process exit signal), release the lock on the target schema. Do not drop any tables. -The following sections describe possible deviations from this standard lifecycle. +### Database schema rules -### Live views +- No two instances can use the same database schema at the same time. This prevents data corruption. +- Once an instance running `ponder start` uses a schema, no other instance can use that schema – even after the instance stops. This is a safety mechanism to prevent data loss in production. +- If the target schema was previously used by `ponder dev`, the new instance will drop the previous tables and start successfully. -Instances running `ponder dev` create the live views immediately. Instances running `ponder start` create the live views as described above, just before the instance begins responding as ready. +Here is the detailed flow chart. -### Build ID and crash recovery +```mermaid +flowchart TD +A[Target schema is currently in use?] -During the build step, each instance generates a content hash of the entire app (config, schema, and indexing function file contents). This is called the `build_id`. +C[Target schema previously used?] +F[Previous instance command?] +E[Previous build ID same as current?] + +V(Drop existing tables) +W(Recover from existing tables) +X(Create tables) +Y(Start) +Z1(Error) +Z2(Error) + + +V --> X +W --> Y +X --> Y + +A -- yes --> Z1 +A -- no --> C -Then, each instance checks the `_ponder_meta` registry table for any instances with the same `build_id` that are no longer running. If it finds one, the current instance will adopt the prior instance's `instance_id` and resume indexing where the prior instance left off. +C -- yes --> F +C -- no --> X -Crash recovery is disabled when running `ponder dev`. +F -- ponder start --> E +F -- ponder dev --> V -### Stale table cleanup +E -- no --> Z2 +E -- yes --> W -Shortly after startup, each instance checks the `_ponder_meta` registry table and drops any tables belonging to instances that are not currently running, aside from the 3 most recent instances. +style Y fill:#0a0,color:#fff +style Z1 fill:#f00,color:#fff +style Z2 fill:#f00,color:#fff +``` + +### Lock / metadata table + +The `_ponder_meta` table tracks the state of an instance running in a specific database schema. It stores the `build_id`, indexing checkpoint, and table names of the current instance, and uses a heartbeat locking mechanism to support the lifecycle logic described above. + +### Build ID and crash recovery + +During the build step, each instance generates a content hash of the entire app (config, schema, and indexing function file contents). This is called the `build_id`. -Tables belonging to non-running `ponder dev` instances will **always** get dropped. They do not count towards the limit. +Note that crash recovery is disabled when running `ponder dev`. ## RPC request cache -Ponder caches RPC request data to improve reindexing performance on hot reloads and redeployments. +Ponder caches RPC request data to improve reindexing performance on hot reloads and redeployments. This includes logs, blocks, transactions, traces, and any RPC requests made with `context.client`. 
- The RPC request cache is located in the `ponder_sync` schema. - The cache is durable, persists across instance restarts, and does not store unfinalized data. -- The cache query patterns are designed to be lock-free. It's safe for multiple instances to run in the same Postgres database at the same time (reading and writing to the same `ponder_sync` schema). +- The cache query patterns are lock-free. It's safe for multiple instances to run in the same Postgres database at the same time (reading and writing to the same `ponder_sync` schema). diff --git a/docs/pages/docs/api-reference/indexing-functions.mdx b/docs/pages/docs/api-reference/indexing-functions.mdx index 396600cd4..32fe96231 100644 --- a/docs/pages/docs/api-reference/indexing-functions.mdx +++ b/docs/pages/docs/api-reference/indexing-functions.mdx @@ -7,21 +7,20 @@ import { Callout } from "nextra/components"; # Indexing function API -Indexing functions are user-defined functions that receive blockchain data (a log, block, or trace) and insert data into the database. You can register indexing functions within any `.ts` file inside the `src/` directory. +Indexing functions are user-defined functions that receive blockchain data (a log, block, transaction, trace, or transfer) and insert data into the database. You can register indexing functions within any `.ts` file inside the `src/` directory. ## Registration -To register an indexing function, use the `.on(){:ts}` method of the `ponder` object exported from `"@/generated"{:ts}`. +To register an indexing function, use the `.on(){:ts}` method of the `ponder` object exported from `"ponder:registry"{:ts}`. Values returned by indexing functions are ignored. ```ts filename="src/index.ts" {3} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("ContractName:EventName", async ({ event, context }) => { - const { args, log, block, transaction } = event; const { db, network, client, contracts } = context; // ... @@ -30,6 +29,16 @@ ponder.on("ContractName:EventName", async ({ event, context }) => { ## Log event +Log events are specified with `"ContractName:EventName"`. + +```ts filename="src/index.ts" {3} +import { ponder } from "ponder:registry"; + +ponder.on("ContractName:EventName", async ({ event, context }) => { + // ... +}); +``` + The `event` argument contains the decoded log arguments and the raw log, block, and transaction. ```ts @@ -39,12 +48,14 @@ type LogEvent = { log: Log; block: Block; transaction: Transaction; + // Enabled using `includeTransactionReceipts` in contract config + transactionReceipt?: TransactionReceipt; }; ``` ### Log event arguments -The `event.args` object contains event log arguments decoded using Viem's [decodeEventLog](https://viem.sh/docs/contract/decodeEventLog.html) function. +The `event.args` object contains decoded `log.topics` and `log.data` decoded using Viem's [decodeEventLog](https://viem.sh/docs/contract/decodeEventLog.html) function. ```ts /** Sample `args` type for an ERC20 Transfer event. */ @@ -55,35 +66,117 @@ type Args = { }; ``` -### Raw log, block, and transaction +## Call trace event -The `event.block`, `event.transaction`, and `event.log` objects contain raw blockchain data. +Call trace events are specified using `"ContractName.functionName()"`. -```ts filename="Block, transaction, and log types" -/** The log being processed. */ -type Log = { - /** Globally unique identifier for this log (`${blockHash}-${logIndex}`). 
*/ - id: string; - /** The address from which this log originated */ - address: `0x${string}`; - /** Hash of block containing this log */ - blockHash: `0x${string}`; - /** Number of block containing this log */ - blockNumber: bigint; - /** Contains the non-indexed arguments of the log */ - data: `0x${string}`; - /** Index of this log within its block */ - logIndex: number; - /** `true` if this log has been removed in a chain reorganization */ - removed: boolean; - /** List of order-dependent topics */ - topics: [`0x${string}`, ...`0x${string}`[]] | []; - /** Hash of the transaction that created this log */ - transactionHash: `0x${string}`; - /** Index of the transaction that created this log */ - transactionIndex: number; + + The [`includeCallTraces` contract + option](/docs/contracts-and-networks#call-traces) must be enabled to use call + trace events. + + +```ts filename="src/index.ts" {3} +import { ponder } from "ponder:registry"; + +ponder.on("ContractName.functionName()", async ({ event, context }) => { + // ... +}); +``` + +The `event` argument contains the decoded call trace args and result and the raw trace, block, and transaction. + +```ts +type TraceEvent = { + name: string; + args: Args; + result: Result; + trace: Trace; + block: Block; + transaction: Transaction; + // Enabled using `includeTransactionReceipts` in contract config + transactionReceipt?: TransactionReceipt; +}; +``` + +### Call trace event arguments + +The `event.args` and `event.result` objects contain `trace.input` and `trace.output` decoded using Viem's [decodeFunctionData](https://viem.sh/docs/contract/decodeFunctionData.html) and [decodeFunctionResult](https://viem.sh/docs/contract/decodeFunctionResult.html) functions, respectively. + +## Transaction event + +Transaction events are specified using `"AccountName:transaction:from"` or `"AccountName:transaction:to"`. + +```ts filename="src/index.ts" {3} +import { ponder } from "ponder:registry"; + +ponder.on("AccountName:transaction:from", async ({ event, context }) => { + // ... +}); +``` + +The `event` argument contains the raw block, transaction, and transaction receipt. + +```ts +type TransactionEvent = { + block: Block; + transaction: Transaction; + transactionReceipt: TransactionReceipt; +}; +``` + +## Transfer event + +Native transfer events are specified using `"AccountName:transfer:from"` or `"AccountName:transfer:to"`. + +```ts filename="src/index.ts" {3} +import { ponder } from "ponder:registry"; + +ponder.on("AccountName:transfer:from", async ({ event, context }) => { + // ... +}); +``` + +The `event` argument contains the transfer and raw block, transaction, and trace. + +```ts +type TransferEvent = { + transfer: { + from: `0x${string}`; + to: `0x${string}`; + value: bigint; + }; + block: Block; + transaction: Transaction; + trace: Trace; + // Enabled using `includeTransactionReceipts` in account config + transactionReceipt?: TransactionReceipt; }; +``` + +## Block event + +Block events are specified using `"SourceName:block"`. +```ts filename="src/index.ts" {3} +import { ponder } from "ponder:registry"; + +ponder.on("SourceName:block", async ({ event, context }) => { + // ... +}); +``` + +The `event` argument contains the raw block. + +```ts +type BlockEvent = { + block: Block; +}; +``` + +## Event types + +```ts filename="Event types" /** The block containing the transaction that emitted the log being processed. */ type Block = { /** Base fee per gas */ @@ -120,10 +213,6 @@ type Block = { /** The transaction that emitted the log being processed. 
 */
type Transaction = {
-  /** Hash of block containing this transaction */
-  blockHash: `0x${string}`;
-  /** Number of block containing this transaction */
-  blockNumber: bigint;
  /** Transaction sender */
  from: `0x${string}`;
  /** Gas provided for transaction execution */
@@ -147,19 +236,9 @@ type Transaction = {
  /** Value in wei sent with this transaction */
  value: bigint;
};
-```

-### Transaction receipt
-
-The `event.transactionReceipt{:ts}` object contains details about the transaction associated with the current log or trace. To enable transaction receipts, set `includeTransactionReceipts{:ts}` to `true{:ts}` in the contract config. [Read more](/docs/contracts-and-networks#transaction-receipts).
-
-```ts filename="Block, transaction, and log types"
/** A confirmed Ethereum transaction receipt. */
-export type TransactionReceipt = {
-  /** Hash of block containing this transaction */
-  blockHash: Hash;
-  /** Number of block containing this transaction */
-  blockNumber: bigint;
+type TransactionReceipt = {
  /** Address of new contract or `null` if no contract was created */
  contractAddress: Address | null;
  /** Gas used by this and all preceding transactions in this block */
@@ -178,13 +257,61 @@ export type TransactionReceipt = {
  status: "success" | "reverted";
  /** Transaction recipient or `null` if deploying a contract */
  to: Address | null;
-  /** Hash of this transaction */
-  transactionHash: Hash;
-  /** Index of this transaction in the block */
-  transactionIndex: number;
  /** Transaction type */
  type: TransactionType;
};
+
+/** The log being processed. */
+type Log = {
+  /** Globally unique identifier for this log (`${blockHash}-${logIndex}`). */
+  id: string;
+  /** The address from which this log originated */
+  address: `0x${string}`;
+  /** Contains the non-indexed arguments of the log */
+  data: `0x${string}`;
+  /** Index of this log within its block */
+  logIndex: number;
+  /** `true` if this log has been removed in a chain reorganization */
+  removed: boolean;
+  /** List of order-dependent topics */
+  topics: [`0x${string}`, ...`0x${string}`[]] | [];
+};
+
+type Trace = {
+  /** Globally unique identifier for this trace (`${transactionHash}-${tracePosition}`) */
+  id: string;
+  /** The type of the call. */
+  type:
+    | "CALL"
+    | "CALLCODE"
+    | "DELEGATECALL"
+    | "STATICCALL"
+    | "CREATE"
+    | "CREATE2"
+    | "SELFDESTRUCT";
+  /** The address that initiated the call. */
+  from: Address;
+  /** The address of the contract that was called. */
+  to: Address | null;
+  /** How much gas was left before the call. */
+  gas: bigint;
+  /** How much gas was used by the call. */
+  gasUsed: bigint;
+  /** Calldata input. */
+  input: Hex;
+  /** Output of the call, if any. */
+  output?: Hex;
+  /** Error message, if any. */
+  error?: string;
+  /** Why this call reverted, if it reverted. */
+  revertReason?: string;
+  /** Value transferred. */
+  value: bigint | null;
+  /** Index of this trace in the transaction. */
+  traceIndex: number;
+  /** Number of subcalls. */
+  subcalls: number;
+};
```

## Context

@@ -211,8 +338,8 @@ type Context = {

The `context.db` object is a live database connection. [Read more](/docs/indexing/write-to-the-database) about writing to the database.
```ts filename="src/index.ts" {5-7} -import { ponder } from "@/generated"; -import { persons, dogs } from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import { persons, dogs } from "ponder:schema"; ponder.on("Neighborhood:NewNeighbor", async ({ event, context }) => { await context.db.insert(persons).values({ name: "bob", age: 30 }); @@ -260,8 +387,8 @@ You can also define a setup function for each contract that runs before indexing For example, you might have a singleton `World` record that occasionally gets updated in indexing functions. ```ts filename="src/index.ts" -import { ponder } from "@/generated"; -import { world } from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import { world } from "ponder:schema"; ponder.on("FunGame:NewPlayer", async ({ context }) => { await context.db @@ -277,8 +404,8 @@ Without the `"setup"` event, you need to upsert the record in each indexing func {/* prettier-ignore */} ```ts filename="src/index.ts" -import { ponder } from "@/generated"; -import { world } from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import { world } from "ponder:schema"; ponder.on("FunGame:setup", async ({ context }) => { await context.db.insert(world).values({ diff --git a/docs/pages/docs/api-reference/ponder-cli.mdx b/docs/pages/docs/api-reference/ponder-cli.mdx index 2b5ccc8bb..343703681 100644 --- a/docs/pages/docs/api-reference/ponder-cli.mdx +++ b/docs/pages/docs/api-reference/ponder-cli.mdx @@ -7,12 +7,13 @@ import { Callout } from "nextra/components"; # CLI commands -The CLI (provided by the `@ponder/core` package) is the entrypoint for your project. +The CLI (provided by the `ponder` package) is the entrypoint for your project. ```bash Usage: ponder [OPTIONS] Options: + --schema Database schema --root Path to the project root directory (default: working directory) --config Path to the project config file (default: "ponder.config.ts") -v, --debug Enable debug logs, e.g. realtime blocks, internal events @@ -26,6 +27,7 @@ Commands: dev [options] Start the development server with hot reloading start [options] Start the production server serve [options] Start the production HTTP server without the indexer + db list List all deployments codegen Generate the schema.graphql file, then exit ``` @@ -85,6 +87,22 @@ Options: -h, --help display help for command ``` +## db list + +List all deployments. + +```bash +Usage: ponder db list + +List all deployments + +│ Schema │ Active │ Last active │ Table count │ +├───────────────┼──────────┼────────────────┼─────────────┤ +│ indexer_prod │ yes │ --- │ 10 │ +│ test │ no │ 26m 58s ago │ 10 │ +│ demo │ no │ 1 day ago │ 5 │ +``` + ## codegen diff --git a/docs/pages/docs/api-reference/schema.mdx b/docs/pages/docs/api-reference/schema.mdx index 99ddc6242..2681cf7c8 100644 --- a/docs/pages/docs/api-reference/schema.mdx +++ b/docs/pages/docs/api-reference/schema.mdx @@ -11,10 +11,10 @@ The `ponder.schema.ts` file defines your database tables and their relationships ## File requirements -The `ponder.schema.ts` must use **named exports** for tables, enums, and relations, and these objects must be created using the corresponding functions exported by `@ponder/core`. +The `ponder.schema.ts` must use **named exports** for tables, enums, and relations, and these objects must be created using the corresponding functions exported by `ponder`. 
```ts filename="ponder.schema.ts" {1,3} -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const pets = onchainTable("pets", (t) => ({ name: t.text().primaryKey(), @@ -34,7 +34,7 @@ The `onchainTable` function accepts three positional arguments. {/* prettier-ignore */} ```ts filename="ponder.schema.ts" -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const transferEvents = onchainTable( "transfer_event", // SQL table name @@ -85,7 +85,7 @@ Column modifiers can be chained after column type definitions. Every table must have exactly one primary key defined using either the `.primaryKey()` column modifier or the `primaryKey()` function in the table constraints argument. ```ts filename="ponder.schema.ts" {1, 5, 16} -import { onchainTable, primaryKey } from "@ponder/core"; +import { onchainTable, primaryKey } from "ponder"; // Single column primary key export const tokens = onchainTable("tokens", (t) => ({ @@ -111,7 +111,7 @@ Create indexes using the `index()` function in the constraints & indexes argumen the app becomes healthy. ```ts filename="ponder.schema.ts" {1,10} -import { onchainTable, index } from "@ponder/core"; +import { onchainTable, index } from "ponder"; export const persons = onchainTable( "persons", @@ -135,7 +135,7 @@ The `onchainEnum` function accepts two positional arguments. It returns a functi | `values` | `string[]` | An array of strings representing the allowed values for the enum. | ```ts filename="ponder.schema.ts" {3} -import { onchainEnum, onchainTable } from "@ponder/core"; +import { onchainEnum, onchainTable } from "ponder"; export const color = onchainEnum("color", ["ORANGE", "BLACK"]); @@ -161,7 +161,7 @@ export const dogs = onchainTable("cats", (t) => ({ Use the `relations` function to define relationships between tables. ```ts filename="ponder.schema.ts" {1,7} -import { onchainTable, relations } from "@ponder/core"; +import { onchainTable, relations } from "ponder"; export const users = onchainTable("users", (t) => ({ id: t.text().primaryKey(), diff --git a/docs/pages/docs/block-intervals.mdx b/docs/pages/docs/block-intervals.mdx index 40406f197..734d5f5b2 100644 --- a/docs/pages/docs/block-intervals.mdx +++ b/docs/pages/docs/block-intervals.mdx @@ -16,7 +16,7 @@ This guide describes how to configure block intervals, and suggests patterns for Every block interval must have a name, provided as a key to the `blocks` object. The name must be unique across `blocks` and `contracts`. ```ts filename="ponder.config.ts" {9} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; export default createConfig({ @@ -40,9 +40,9 @@ export default createConfig({ This indexing function uses block events to index price chart data by reading the latest price from a Chainlink oracle contract every minute. ```ts filename="src/index.ts" -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; +import { prices } from "ponder:schema"; import { ChainlinkOracleAbi } from "../abis/ChainlinkOracle.ts"; -import { prices } from "../ponder.schema"; ponder.on("ChainlinkOracleUpdate:block", async ({ event, context }) => { // Fetch the price at the current block height. @@ -92,7 +92,7 @@ To find the block time of a specific chain, check the chain's documentation webs If you only need to index blocks from one network, pass the network name as a string to the `network` field. 
```ts filename="ponder.config.ts" {10} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; export default createConfig({ @@ -114,7 +114,7 @@ export default createConfig({ If you'd like to run the same block indexing function across multiple networks, pass an object to the `network` field containing network-specific options. ```ts filename="ponder.config.ts" {11-19} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; export default createConfig({ @@ -144,7 +144,7 @@ Now, the indexing functions you write for `ChainlinkOracleUpdate:block` will pro The `context.network` object contains information about which network the current block is from. ```ts filename="src/index.ts" -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("ChainlinkOracleUpdate:block", async ({ event, context }) => { context.network; @@ -165,7 +165,7 @@ The optional `startBlock` and `endBlock` options specify the block range to inde If `endBlock` is defined, no events will be indexed after that block number. This option is useful if you're only interested in a slice of historical data, or to enable faster feedback loops during development where it's not necessary to index the entire history. ```ts filename="ponder.config.ts" {9} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; export default createConfig({ diff --git a/docs/pages/docs/call-traces.mdx b/docs/pages/docs/call-traces.mdx index 86855f508..0f701f00b 100644 --- a/docs/pages/docs/call-traces.mdx +++ b/docs/pages/docs/call-traces.mdx @@ -7,12 +7,12 @@ import { Callout, Steps } from "nextra/components"; # Call traces -As of `0.4.26`, Ponder supports call trace indexing. Call traces are similar to logs, but they represent a _function call_ instead of an event log. +Call traces are similar to logs, but they represent a _function call_ instead of an event log. Call traces are slower, more expensive, and less widely supported than logs. For new chains, you might struggle to find an RPC provider that supports the - `trace_filter` and `trace_block` methods. + `debug_traceBlockByNumber` and `debug_traceBlockByHash` methods. ## Register an indexing function @@ -24,7 +24,7 @@ After enabling this option, each function in the contract ABI will become availa

```ts filename="ponder.config.ts" {11} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { BlitmapAbi } from "./abis/Blitmap"; export default createConfig({ @@ -42,7 +42,7 @@ export default createConfig({ ``` ```ts filename="src/index.ts" {15} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("Blitmap.mintOriginal()", async ({ event }) => { event.args; @@ -54,7 +54,6 @@ ponder.on("Blitmap.mintOriginal()", async ({ event }) => {
- ## What is a call trace? Let's define call traces from three different perspectives: @@ -81,12 +80,3 @@ ponder.on("ERC20.transfer()", async ({ event }) => { The `eth_call` RPC method **does not** produce a call trace. These calls do not occur during the execution of a transaction, so they are not recorded as call traces. However, calls made to `view` or `pure` functions **do** produce call traces if they are made during the execution of a transaction. These call traces are rarely useful for indexing, but they do happen. - -## RPC methods - -| | RPC methods | -| :--------- | :--------------------------------------------- | -| Historical | `trace_filter` and `eth_getTransactionReceipt` | -| Realtime | `trace_block` and `eth_getTransactionReceipt` | - -The sync engine fetches call traces using the `trace_filter` and `trace_block` RPC methods, and uses `eth_getTransactionReceipt` to confirm that a specific call trace did not occur within a reverted transaction. diff --git a/docs/pages/docs/contracts-and-networks.mdx b/docs/pages/docs/contracts-and-networks.mdx index 75f3dd659..aa564e69c 100644 --- a/docs/pages/docs/contracts-and-networks.mdx +++ b/docs/pages/docs/contracts-and-networks.mdx @@ -16,7 +16,7 @@ This guide explains how each contract configuration field works, and suggests pa Every contract must have a unique name, provided as a key to the `contracts` object. ```ts filename="ponder.config.ts" {11} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { BlitmapAbi } from "./abis/Blitmap"; @@ -59,7 +59,7 @@ export const BlitmapAbi = [ ``` ```ts filename="ponder.config.ts" {3, 9} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { BlitmapAbi } from "./abis/Blitmap"; @@ -83,7 +83,7 @@ export default createConfig({ It's occasionally useful to provide multiple ABIs for one contract, like when defining a proxy/upgradable contract that has gone through multiple implementation contracts. The [`mergeAbis`](/docs/utilities/abi) utility function safely removes duplicate ABI items and maintains strict types. ```ts filename="ponder.config.ts" {1,14} -import { createConfig, mergeAbis } from "@ponder/core"; +import { createConfig, mergeAbis } from "ponder"; import { http } from "viem"; import { ERC1967ProxyAbi } from "./abis/ERC1967Proxy"; @@ -112,7 +112,7 @@ export default createConfig({ If the contract is only deployed to one network, just pass the network name as a string to the `network` field. ```ts filename="ponder.config.ts" {8,16} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { BlitmapAbi } from "./abis/Blitmap"; @@ -140,7 +140,7 @@ export default createConfig({ If you'd like to index the same contract (having the same ABI) across multiple networks, pass an object to the `network` field containing network-specific options. ```ts filename="ponder.config.ts" {14-23} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { UniswapV3FactoryAbi } from "./abis/UniswapV3Factory"; @@ -173,7 +173,7 @@ Now, the indexing functions you write for `UniswapV3Factory` will process events The `event` and `context` objects are still strictly typed according to the configuration you provide. The `context.network` object contains information about which network the current event is from. 
```ts filename="src/index.ts" -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("UniswapV3Factory:Ownership", async ({ event, context }) => { context.network; @@ -195,7 +195,7 @@ Network-specific configuration uses an override pattern. Any options defined at For example, the Uniswap V3 factory contract is deployed to the same address on most chains, but has a different address on Base. ```ts filename="ponder.config.ts" -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { UniswapV3FactoryAbi } from "./abis/EntryPoint"; @@ -229,7 +229,7 @@ export default createConfig({ On the other hand, the ERC-4337 Entry Point contract is deployed to the same address on all networks, so you could define the `address` field at the top level. ```ts filename="ponder.config.ts" -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { EntryPointAbi } from "./abis/EntryPoint"; @@ -259,7 +259,7 @@ export default createConfig({ The simplest (and most common) option is to pass a single static address. ```ts filename="ponder.config.ts" {14} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { BlitmapAbi } from "./abis/Blitmap"; @@ -291,7 +291,7 @@ This option can be used to index multiple contracts with known addresses that ha ```ts filename="ponder.config.ts" {14-19} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { ERC721Abi } from "./abis/ERC721"; @@ -318,10 +318,10 @@ export default createConfig({ ### Factory contracts -The `factory` field specifies a set of contracts that are created by a factory. The `factory` and `address` fields are mutually exclusive. +The `factory()` function is used to specify a set of contracts that are created by a factory. ```ts filename="ponder.config.ts" {12-19} -import { createConfig } from "@ponder/core"; +import { createConfig, factory } from "ponder"; import { parseAbiItem } from "viem"; export default createConfig({ @@ -332,14 +332,14 @@ export default createConfig({ SudoswapPool: { abi: SudoswapPoolAbi, network: "mainnet", - factory: { + address: factory({ // The address of the factory contract that creates instances of this child contract. address: "0xb16c1342E617A5B6E4b631EB114483FDB289c0A4", // The event emitted by the factory that announces a new instance of this child contract. event: parseAbiItem("event NewPair(address poolAddress)"), // The name of the parameter that contains the address of the new child contract. parameter: "poolAddress", - }, + }), startBlock: 14645816, }, }, @@ -349,7 +349,7 @@ export default createConfig({ Now, the indexing functions you write for `SudoswapPool` will process events emitted by all child contracts that are created by the specified factory. The `event.log.address` field contains the address of the child contract that emitted the current event. ```ts filename="src/index.ts" -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("SudoswapPool:Transfer", async ({ event }) => { // This is the address of the child contract that emitted the event. @@ -363,7 +363,7 @@ To run an indexing function whenever a child contract is created (but before any
```ts filename="ponder.config.ts" {9-14} -import { createConfig } from "@ponder/core"; +import { createConfig, factory } from "ponder"; import { parseAbiItem } from "viem"; export default createConfig({ @@ -380,11 +380,11 @@ export default createConfig({ SudoswapPool: { abi: SudoswapPoolAbi, network: "mainnet", - factory: { + address: factory({ address: "0xb16c1342E617A5B6E4b631EB114483FDB289c0A4", event: parseAbiItem("event NewPair(address poolAddress)"), parameter: "poolAddress", - }, + }), startBlock: 14645816, }, }, @@ -392,7 +392,7 @@ export default createConfig({ ``` ```ts filename="src/index.ts" {3-8} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; // This function will run whenever a new child contract is created. ponder.on("SudoswapFactory:NewPair", async ({ event }) => { @@ -415,7 +415,7 @@ ponder.on("SudoswapPool:Transfer", async ({ event }) => { The factory `address` field also accepts a list of factory contract addresses. Use this option if there are multiple factory contracts on the same network that have the same ABI, factory event signature, and create the same kind of child contract. ```ts filename="ponder.config.ts" {12-15} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { parseAbiItem } from "viem"; export default createConfig({ @@ -424,15 +424,15 @@ export default createConfig({ SudoswapPool: { abi: SudoswapPoolAbi, network: "mainnet", - factory: { + address: factory({ // A list of factory contract addresses that all create SudoswapPool contracts. address: [ "0xb16c1342E617A5B6E4b631EB114483FDB289c0A4", - "0xb16c1342E617A5B6E4b631EB114483FDB289c0A4" + "0xb16c1342E617A5B6E4b631EB114483FDB289c0A4", ], event: parseAbiItem("event NewPair(address poolAddress)"), parameter: "poolAddress", - }, + }), }, }, }); @@ -462,7 +462,7 @@ event ChildCreated(ChildContract child); 2. **Nested factory patterns**: The sync engine doesn't support factory patterns that are nested beyond a single layer. -3. **Scaling**: As of `0.5.9`, the sync engine supports any number of child contracts. If a factory contract has more than 1,000 children, the sync engine omits the `address` argument when calling `eth_getLogs` or `trace_filter` and filters the result client-side. This can cause slow sync performance for very large factories. +3. **Scaling**: The sync engine supports any number of child contracts. If a factory contract has more than 1,000 children, the sync engine omits the `address` argument when calling `eth_getLogs` or `trace_filter` and filters the result client-side. This can cause slow sync performance for very large factories. ### Proxy & upgradable contracts @@ -484,7 +484,7 @@ The `filter` option filters for events by signature and indexed argument values. The `filter.event` option accepts an event name (or list of event names) present in the provided ABI. ```ts filename="ponder.config.ts" {14} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { ERC20Abi } from "./abis/ERC20"; @@ -509,7 +509,7 @@ export default createConfig({ The indexing functions you write will run for all events matching the filter, regardless of which contract emitted them. ```ts filename="src/index.ts" -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("ERC20:Transfer", async ({ event }) => { // This is the address of the contract that emitted the event. 
@@ -526,7 +526,7 @@ You can use the `filter.event` and `filter.args` options together to filter for This example filters for all ERC20 `Transfer` events where the `from` argument matches a specific address, and the `to` argument matches one of two addresses. ```ts filename="ponder.config.ts" {14-23} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { ERC20Abi } from "./abis/ERC20"; @@ -559,7 +559,7 @@ export default createConfig({ The indexing function will run for all events matching the filter. ```ts filename="src/index.ts" -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("ERC20:Transfer", async ({ event }) => { // This will always be "0xa0ee7a142d267c1f36714e4a8f75612f20a79720" @@ -578,7 +578,7 @@ After enabling this option, each function in the contract ABI will become availa
```ts filename="ponder.config.ts" {11} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { BlitmapAbi } from "./abis/Blitmap"; export default createConfig({ @@ -596,7 +596,7 @@ export default createConfig({ ``` ```ts filename="src/index.ts" {15} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("Blitmap.mintOriginal()", async ({ event }) => { event.args; @@ -617,7 +617,7 @@ After enabling this option, the `event.transactionReceipt{:ts}` property will be
```ts filename="ponder.config.ts" {11} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { BlitmapAbi } from "./abis/Blitmap"; export default createConfig({ @@ -635,7 +635,7 @@ export default createConfig({ ``` ```ts filename="src/index.ts" {15} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("Blitmap.mintOriginal()", async ({ event }) => { event.transactionReceipt.cumulativeGasUsed; @@ -647,8 +647,6 @@ ponder.on("Blitmap.mintOriginal()", async ({ event }) => {
- - ## Block range The optional `startBlock` and `endBlock` options specify the block range to index. @@ -661,7 +659,7 @@ The optional `startBlock` and `endBlock` options specify the block range to inde If `endBlock` is `undefined{:ts}`, the contract will be indexed in realtime. If `endBlock` is defined, no events will be indexed after that block. This option can be useful if you're only interested in a slice of historical data, or to enable faster feedback loops during development where it's not necessary to index the entire history. ```ts filename="ponder.config.ts" {15-16} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { BlitmapAbi } from "./abis/Blitmap"; diff --git a/docs/pages/docs/getting-started/_meta.ts b/docs/pages/docs/getting-started/_meta.ts index 44bfb80a7..2edaff239 100644 --- a/docs/pages/docs/getting-started/_meta.ts +++ b/docs/pages/docs/getting-started/_meta.ts @@ -1,5 +1,6 @@ export default { "new-project": "Create a new project", "migrate-subgraph": "Migrate a subgraph", - "installation": "Installation", -}; + "system-requirements": "System requirements", + "database": "Database", +}; \ No newline at end of file diff --git a/docs/pages/docs/getting-started/database.mdx b/docs/pages/docs/getting-started/database.mdx new file mode 100644 index 000000000..c2c2e731d --- /dev/null +++ b/docs/pages/docs/getting-started/database.mdx @@ -0,0 +1,62 @@ +--- +title: "Database" +description: "A guide to set up the Ponder database." +--- + +import { Tabs, Callout } from "nextra/components"; + +# Set up the database + +Ponder supports two database options. + +- **PostgreSQL**: A traditional Postgres database server. Required for production, can be used for local development. +- **PGlite**: An embedded Postgres database that runs in the same process as your Ponder app. Only suitable for local development. + +## Choose a database + +By default, Ponder uses PGlite with data stored in the `.ponder` directory. To use Postgres, set the `DATABASE_URL` environment variable to a Postgres connection string, or use explicit configuration in `ponder.config.ts`. + +```ts filename="ponder.config.ts" +import { createConfig } from "ponder"; + +export default createConfig({ + database: { + kind: "postgres", + connectionString: "postgresql://user:password@localhost:5432/dbname", + }, + // ... more config +}); +``` + +Visit the `ponder.config.ts` [API reference](/docs/api-reference/config#database) for more details. + +## Database schema + +When you start a Ponder app, you must specify which **database schema** it should use. This is where the app will create the tables defined in `ponder.schema.ts`. + +Use the `DATABASE_SCHEMA` environment variable or the `--schema` CLI option to configure it. + +
+ +```bash filename=".env.local" +DATABASE_SCHEMA=my_schema +``` + +```bash filename="shell" +ponder start --schema my_schema +``` + +
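In production, it is common to derive the schema name from a per-deployment identifier so that every new deployment gets a fresh, unused schema (this is the same pattern the Railway deployment instructions in these docs use). As a minimal sketch, assuming your platform exposes a unique deployment ID environment variable such as Railway's `RAILWAY_DEPLOYMENT_ID`:

```bash filename="shell"
# Sketch: any value that is unique per deployment works as the schema name.
ponder start --schema "$RAILWAY_DEPLOYMENT_ID"
```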
+ +### Guidelines + +Here are the key guidelines to keep in mind when selecting a database schema. + +- No two Ponder instances/deployments can use the same database schema at the same time. +- Tables created by `ponder start` are treated as valuable and will never be dropped automatically. +- The default schema for `ponder dev` is `public`. When using `ponder start`, you must explicitly set the database schema; there is no default. +- Use `ponder dev` for local development; `ponder start` is intended for production. + +{/* - Use `ponder db drop {schema name}` to reuse a schema previously used by `ponder start`. */} + +[Read more](/docs/api-reference/database) about the instance lifecycle and database schema rules. diff --git a/docs/pages/docs/getting-started/migrate-subgraph.mdx b/docs/pages/docs/getting-started/migrate-subgraph.mdx index 3fe4d2174..48d7a2b20 100644 --- a/docs/pages/docs/getting-started/migrate-subgraph.mdx +++ b/docs/pages/docs/getting-started/migrate-subgraph.mdx @@ -120,8 +120,8 @@ export function handleTransfer(event) { With Ponder, these objects are injected as properties of `context`. ```ts filename="src/MyNftContract.ts" -import { ponder } from "@/generated"; -import { tokens } from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import { tokens } from "ponder:schema"; ponder.on("MyNftContract:Transfer", async ({ event, context }) => { // Get a database record diff --git a/docs/pages/docs/getting-started/new-project.mdx b/docs/pages/docs/getting-started/new-project.mdx index 050a6a974..f3e630b6c 100644 --- a/docs/pages/docs/getting-started/new-project.mdx +++ b/docs/pages/docs/getting-started/new-project.mdx @@ -80,7 +80,7 @@ PONDER_RPC_URL_1 = "https://eth-mainnet.g.alchemy.com/v2/..." The `ponder.schema.ts` file contains the database schema, and defines the shape data that the GraphQL API serves. ```ts filename="ponder.schema.ts" -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const blitmapTokens = onchainTable("blitmap_tokens", (t) => ({ id: t.integer().primaryKey(), @@ -95,8 +95,8 @@ Read more about [designing your schema](/docs/schema). Files in the `src/` directory contain **indexing functions**, which are TypeScript functions that process a contract event. The purpose of these functions is to write indexed data to the database. ```ts filename="src/index.ts" -import { ponder } from "@/generated"; -import { blitmapTokens } from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import { blitmapTokens } from "ponder:schema"; ponder.on("Blitmap:Transfer", async ({ event, context }) => { await context.db.insert(blitmapTokens).values({ diff --git a/docs/pages/docs/getting-started/installation.mdx b/docs/pages/docs/getting-started/system-requirements.mdx similarity index 68% rename from docs/pages/docs/getting-started/installation.mdx rename to docs/pages/docs/getting-started/system-requirements.mdx index b6084e83a..4de5d00ea 100644 --- a/docs/pages/docs/getting-started/installation.mdx +++ b/docs/pages/docs/getting-started/system-requirements.mdx @@ -1,23 +1,24 @@ --- -title: "Installation" +title: "System requirements" description: "A guide for installing Ponder on any platform." --- import { Tabs, Callout } from "nextra/components"; -# Installation +# System requirements The [`create-ponder` CLI](/docs/getting-started/new-project) is the best way to get started. If `create-ponder` runs without error, your system meets the requirements. 
-## System requirements
+## Requirements

- MacOS, Linux, or Windows
- [Node.js](https://nodejs.org/en) >=18 (support for Bun is [in progress](https://github.com/ponder-sh/ponder/issues/442))
- [pnpm](https://pnpm.io/installation) (recommended), [yarn](https://yarnpkg.com/getting-started/install), or npm
+- [PostgreSQL](https://www.postgresql.org/download/) version 14, 15, 16 or 17.

### Windows

-As of `0.4.7`, Ponder supports Windows via PowerShell, command prompt (cmd.exe), or Windows Subsystem for Linux (WSL). If you encounter a Windows-specific problem, please [open an issue](https://github.com/ponder-sh/ponder/issues).
+Ponder supports Windows via PowerShell, command prompt (cmd.exe), or Windows Subsystem for Linux (WSL). If you encounter a Windows-specific problem, please [open an issue](https://github.com/ponder-sh/ponder/issues).

## TypeScript

@@ -25,15 +26,15 @@ Ponder uses advanced TypeScript features to offer end-to-end type safety without

### Requirements

-- TypeScript `>=5.0.4` and viem `>=1.16.0`
+- TypeScript `>=5.0.4`, viem `>=2`, and hono `>=4.5`
- ABIs must be asserted `as const` following [ABIType guidelines](https://abitype.dev/guide/getting-started#usage)
- The `ponder-env.d.ts` file must be present and up to date

### `ponder-env.d.ts`

-This "magical" file is what makes Ponder's zero-codegen type system possible. The file contains a module declaration for `"@/generated"{:ts}` that exports types derived from `ponder.config.ts` and `ponder.schema.ts`.
+This "magical" file is what makes Ponder's zero-codegen type system possible. The file contains a module declaration for `"ponder:registry"{:ts}` that exports types derived from `ponder.config.ts` and `ponder.schema.ts`.

-After upgrading to a new version of `@ponder/core`, the dev server might make changes to `ponder-env.d.ts`. When this happens, please accept and commit the changes.
+After upgrading to a new version of `ponder`, the dev server might make changes to `ponder-env.d.ts`. When this happens, please accept and commit the changes.

### VSCode

diff --git a/docs/pages/docs/guides/time-series.mdx b/docs/pages/docs/guides/time-series.mdx
index 315610280..510a439e4 100644
--- a/docs/pages/docs/guides/time-series.mdx
+++ b/docs/pages/docs/guides/time-series.mdx
@@ -14,7 +14,7 @@ This guide describes techniques for working with time-series data in your app.

To power a [candlestick](https://en.wikipedia.org/wiki/Candlestick_chart) or open-high-low-close chart, create a table that stores OHLC data for a specific time interval.

```ts filename="ponder.schema.ts"
-import { onchainTable } from "@ponder/core";
+import { onchainTable } from "ponder";

export const hourBuckets = onchainTable("hour_buckets", (t) => ({
  id: t.integer().primaryKey(),
@@ -30,7 +30,7 @@ export const hourBuckets = onchainTable("hour_buckets", (t) => ({

Then, in your indexing function, create or update the bucket record that the current event falls into.

```ts filename="src/index.ts"
-import { ponder, type Schema } from "@/generated";
+import { ponder, type Schema } from "ponder:registry";

const secondsInHour = 60 * 60;

@@ -97,7 +97,7 @@ The simplest way to add a time dimension to your data is to include the block nu
```ts filename="ponder.schema.ts" -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const swapEvents = onchainTable("swap_events", (t) => ({ id: t.text().primaryKey(), @@ -109,8 +109,8 @@ export const swapEvents = onchainTable("swap_events", (t) => ({ ``` ```ts filename="src/index.ts" {10} -import { ponder } from "@/generated"; -import { swapEvents } from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import { swapEvents } from "ponder:schema"; ponder.on("Token:Swap", async ({ event, context }) => { await context.db.insert(swapEvents).values({ diff --git a/docs/pages/docs/indexing/read-contract-data.mdx b/docs/pages/docs/indexing/read-contract-data.mdx index e2132b3b9..7e0603200 100644 --- a/docs/pages/docs/indexing/read-contract-data.mdx +++ b/docs/pages/docs/indexing/read-contract-data.mdx @@ -18,7 +18,7 @@ To read data from a contract, use `context.client.readContract(){:ts}` and inclu
```ts filename="ponder.config.ts" -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { BlitmapAbi } from "./abis/Blitmap"; @@ -42,8 +42,8 @@ export default createConfig({ ``` ```ts filename="src/index.ts" -import { ponder } from "@/generated"; -import { tokens } from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import { tokens } from "ponder:schema"; ponder.on("Blitmap:Mint", async ({ event, context }) => { const { client } = context; @@ -124,20 +124,36 @@ The custom client uses the transport you specify in `ponder.config.ts`. ### Supported actions -| name | description | Viem docs | -| :----------- | :-------------------------------------------------------- | :---------------------------------------------------------------- | -| readContract | Returns the result of a read-only function on a contract. | [readContract](https://viem.sh/docs/contract/readContract.html) | -| multicall | Similar to readContract, but batches requests. | [multicall](https://viem.sh/docs/contract/multicall.html) | -| getBalance | Returns the balance of an address in wei. | [getBalance](https://viem.sh/docs/actions/public/getBalance.html) | -| getBytecode | Returns the bytecode at an address. | [getBytecode](https://viem.sh/docs/contract/getBytecode.html) | -| getStorageAt | Returns the value from a storage slot at a given address. | [getStorageAt](https://viem.sh/docs/contract/getStorageAt.html) | +| name | description | Viem docs | +| :-------------------------- | :-------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- | +| readContract | Returns the result of a read-only function on a contract. | [readContract](https://viem.sh/docs/contract/readContract.html) | +| multicall | Similar to readContract, but batches requests. | [multicall](https://viem.sh/docs/contract/multicall.html) | +| simulateContract | Simulates & validates a contract interaction. | [simulateContract](https://viem.sh/docs/contract/simulateContract) | +| getBalance | Returns the balance of an address in wei. | [getBalance](https://viem.sh/docs/actions/public/getBalance.html) | +| getBytecode | Returns the bytecode at an address. | [getBytecode](https://viem.sh/docs/contract/getBytecode.html) | +| getStorageAt | Returns the value from a storage slot at a given address. | [getStorageAt](https://viem.sh/docs/contract/getStorageAt.html) | +| getBlock | Returns information about a block at a block number, hash or tag. | [getBlock](https://viem.sh/docs/actions/public/getBlock) | +| getTransactionCount | Returns the number of transactions an account has broadcast / sent. | [getTransactionCount](https://viem.sh/docs/actions/public/getTransactionCount) | +| getBlockTransactionCount | Returns the number of Transactions at a block number, hash or tag. | [getBlockTransactionCount](https://viem.sh/docs/actions/public/getBlockTransactionCount) | +| getTransaction | Returns information about a transaction given a hash or block identifier. | [getTransaction](https://viem.sh/docs/actions/public/getTransaction) | +| getTransactionReceipt | Returns the transaction receipt given a transaction hash. | [getTransactionReceipt](https://viem.sh/docs/actions/public/getTransactionReceipt) | +| getTransactionConfirmations | Returns the number of blocks passed (confirmations) since the transaction was processed on a block. 
| [getTransactionConfirmations](https://viem.sh/docs/actions/public/getTransactionConfirmations) | +| call | An Action for executing a new message call. | [call](https://viem.sh/docs/actions/public/call) | +| estimateGas | An Action for estimating gas for a transaction. | [estimateGas](https://viem.sh/docs/actions/public/estimateGas) | +| getFeeHistory | Returns a collection of historical gas information. | [getFeeHistory](https://viem.sh/docs/actions/public/getFeeHistory) | +| getProof | Returns the account and storage values of the specified account including the Merkle-proof. | [getProof](https://viem.sh/docs/actions/public/getProof) | +| getEnsAddress | Gets address for ENS name. | [getEnsAddress](https://viem.sh/docs/ens/actions/getEnsAddress) | +| getEnsAvatar | Gets the avatar of an ENS name. | [getEnsAvatar](https://viem.sh/docs/ens/actions/getEnsAvatar) | +| getEnsName | Gets primary name for specified address. | [getEnsName](https://viem.sh/docs/ens/actions/getEnsName) | +| getEnsResolver | Gets resolver for ENS name. | [getEnsResolver](https://viem.sh/docs/ens/actions/getEnsResolver) | +| getEnsText | Gets a text record for specified ENS name. | [getEnsText](https://viem.sh/docs/ens/actions/getEnsText) | ### Block number By default, the `blockNumber` option is set to the block number of the current event (`event.block.number`). ```ts filename="src/index.ts" {8-9} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("Blitmap:Mint", async ({ event, context }) => { const totalSupply = await context.client.readContract({ @@ -153,7 +169,7 @@ ponder.on("Blitmap:Mint", async ({ event, context }) => { You can also specify a `blockNumber` to read data at a specific block height. It will still be cached. ```ts filename="src/index.ts" {8} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("Blitmap:Mint", async ({ event, context }) => { const totalSupply = await context.client.readContract({ @@ -190,7 +206,7 @@ If a contract is configured to run on multiple networks, `context.contracts` con
```ts filename="ponder.config.ts" {14-23} -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { UniswapV3FactoryAbi } from "./abis/UniswapV3Factory"; @@ -219,7 +235,7 @@ export default createConfig({ ``` ```ts filename="src/index.ts" -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("UniswapV3Factory:FeeAmountEnabled", async ({ event, context }) => { const tickSpacing = await context.client.readContract({ @@ -238,7 +254,7 @@ ponder.on("UniswapV3Factory:FeeAmountEnabled", async ({ event, context }) => { Contracts that are created by a factory have a dynamic address, so the `context.contracts` object does not have an `address` property. To read data from the contract that emitted the current event, use `event.log.address`. ```ts filename="src/index.ts" {9} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("SudoswapPool:Transfer", async ({ event, context }) => { const { SudoswapPool } = context.contracts; @@ -255,7 +271,7 @@ ponder.on("SudoswapPool:Transfer", async ({ event, context }) => { To call a factory contract child from an indexing function for a _different_ contract, use your application logic to determine the correct address to enter. For example, the address might come from `event.args`. ```ts filename="src/index.ts" {6} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("FancyLendingProtocol:RegisterPool", async ({ event, context }) => { const totalSupply = await context.client.readContract({ @@ -275,7 +291,7 @@ To read from a contract without syncing & indexing event logs from it, import th
```ts filename="ponder.config.ts" -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { AaveTokenAbi } from "./abis/AaveToken"; @@ -292,7 +308,7 @@ export default createConfig({ ``` ```ts filename="src/index.ts" -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; import { ChainlinkPriceFeedAbi } from "../abis/ChainlinkPriceFeed"; @@ -332,8 +348,8 @@ contract ZorbNft is ERC721 { Every Zorb has a gradient, but the contract doesn't emit gradient data in any event logs (it only emits events required by ERC721). The gradient data for a given Zorb can, however, be accessed using the `gradientForAddress` function. ```ts filename="src/index.ts" {6-11} -import { ponder } from "@/generated"; -import { zorbs } from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import { zorbs } from "ponder:schema"; ponder.on("ZorbNft:Transfer", async ({ event, context }) => { if (event.args.from === ZERO_ADDRESS) { diff --git a/docs/pages/docs/indexing/write-to-the-database.mdx b/docs/pages/docs/indexing/write-to-the-database.mdx index 98819708e..48206abef 100644 --- a/docs/pages/docs/indexing/write-to-the-database.mdx +++ b/docs/pages/docs/indexing/write-to-the-database.mdx @@ -18,7 +18,7 @@ The store API is a SQL-like query builder optimized for EVM indexing workloads.

```ts filename="ponder.schema.ts" -import { onchainTable, primaryKey } from "@ponder/core"; +import { onchainTable, primaryKey } from "ponder"; export const accounts = onchainTable("accounts", (t) => ({ address: t.hex().primaryKey(), @@ -47,7 +47,7 @@ Insert one or many rows into the database. Returns the inserted rows, **includin {/* prettier-ignore */} ```ts filename="src/index.ts" -import { accounts } from "../ponder.schema"; +import { accounts } from "ponder:schema"; const row = await db.insert(accounts).values({ address: "0x7Df1", balance: 0n @@ -62,7 +62,7 @@ const rows = await db.insert(accounts).values([ If you insert a row that violates a not null constraint, `insert` will reject with an error. ```ts filename="src/index.ts" {7} -import { tokens } from "../ponder.schema"; +import { tokens } from "ponder:schema"; const row = await db.insert(accounts).values({ address: "0x7Df1", @@ -76,7 +76,7 @@ const row = await db.insert(accounts).values({ Find a single row by primary key. Returns the row, or `null` if no matching row is found. ```ts filename="src/index.ts" -import { accounts } from "../ponder.schema"; +import { accounts } from "ponder:schema"; const row = await db.find(accounts, { address: "0x7Df1" }); ``` @@ -84,7 +84,7 @@ const row = await db.find(accounts, { address: "0x7Df1" }); If the table has a composite primary key, the second argument is an object including all the primary key values. ```ts filename="src/index.ts" -import { allowances } from "../ponder.schema"; +import { allowances } from "ponder:schema"; const row = await db.find(allowances, { owner: "0x7Df1", spender: "0x7Df2" }); ``` @@ -94,7 +94,7 @@ const row = await db.find(allowances, { owner: "0x7Df1", spender: "0x7Df2" }); Update a row by primary key. Returns the updated row. ```ts filename="src/index.ts" -import { accounts } from "../ponder.schema"; +import { accounts } from "ponder:schema"; const row = await db .update(accounts, { address: "0x7Df1" }) @@ -104,7 +104,7 @@ const row = await db You can also pass a function to `set` which receives the existing row and returns the update object. ```ts filename="src/index.ts" {5} -import { accounts } from "../ponder.schema"; +import { accounts } from "ponder:schema"; const row = await db .update(accounts, { address: "0x7Df1" }) @@ -114,7 +114,7 @@ const row = await db If the target row is not found, `update` will reject with an error. ```ts filename="src/index.ts" {7} -import { tokens } from "../ponder.schema"; +import { tokens } from "ponder:schema"; const row = await db .update(accounts, { address: "0x7Df1" }) @@ -126,7 +126,7 @@ const row = await db If the new row violates a not null constraint, `update` will reject with an error. ```ts filename="src/index.ts" {7} -import { tokens } from "../ponder.schema"; +import { tokens } from "ponder:schema"; const row = await db .update(accounts, { address: "0x7Df1" }) @@ -140,7 +140,7 @@ const row = await db Delete a row by primary key. Returns `true` if the row was deleted, or `false` if no matching row was found. ```ts filename="src/index.ts" -import { accounts } from "../ponder.schema"; +import { accounts } from "ponder:schema"; const deleted = await db.delete(accounts, { address: "0x7Df1" }); ``` @@ -150,7 +150,7 @@ const deleted = await db.delete(accounts, { address: "0x7Df1" }); If you insert a duplicate row that violates the table's primary key constraint, `insert` will reject with an error. Use `onConflictDoNothing` to skip the insert operation if a row with the same primary key already exists. 
```ts filename="src/index.ts" {6} -import { accounts } from "../ponder.schema"; +import { accounts } from "ponder:schema"; const row = await db .insert(accounts) @@ -161,7 +161,7 @@ const row = await db Or, perform an **upsert** with `onConflictDoUpdate`. ```ts filename="src/index.ts" {6} -import { accounts } from "../ponder.schema"; +import { accounts } from "ponder:schema"; const row = await db .insert(accounts) @@ -172,7 +172,7 @@ const row = await db Like `update`, you can pass a function to `onConflictDoUpdate` which receives the existing row and returns the update object. ```ts filename="src/index.ts" {6} -import { accounts } from "../ponder.schema"; +import { accounts } from "ponder:schema"; const row = await db .insert(accounts) @@ -183,7 +183,7 @@ const row = await db Both `onConflictDoNothing` and `onConflictDoUpdate` also work when inserting many rows at once. The conflict resolution logic gets applied to each row individually. ```ts filename="src/index.ts" -import { accounts } from "../ponder.schema"; +import { accounts } from "ponder:schema"; const rows = await db .insert(accounts) @@ -216,8 +216,8 @@ The `db.sql` object exposes the raw Drizzle PostgreSQL query builder, including Here's an example that uses the raw SQL `update` function to execute a complex bulk update query. ```ts filename="src/index.ts" -import { ponder } from "@/generated"; -import { accounts, tradeEvents } from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import { accounts, tradeEvents } from "ponder:schema"; import { eq, and, gte, inArray, sql } from "drizzle-orm"; // Add 100 points to all accounts that submitted a trade in the last 24 hours. @@ -246,8 +246,8 @@ Drizzle's relational query builder (AKA Drizzle Queries) offers a great develope Here's an example that uses the relational query builder in an API function to find the 10 largest trades in the past hour joined with the account that made the trade. ```ts filename="src/api/index.ts" -import { accounts, tradeEvents } from "../ponder.schema"; import { eq, and, gte, inArray, sql } from "drizzle-orm"; +import { accounts, tradeEvents } from "ponder:schema"; ponder.get("/hot-trades", async (c) => { const trades = await c.db.query.tradeEvents.findMany({ diff --git a/docs/pages/docs/migration-guide.mdx b/docs/pages/docs/migration-guide.mdx index a392a7f72..6dd85a076 100644 --- a/docs/pages/docs/migration-guide.mdx +++ b/docs/pages/docs/migration-guide.mdx @@ -7,6 +7,219 @@ import { FileTree, Steps, Tabs, Callout } from "nextra/components"; # Migration guide +## 0.8 + + + This release includes an irreversible migration to the `ponder_sync` schema (RPC request cache). Once you run a `0.8` app against a database, you can no longer run `<=0.7` apps against the same database. + + +### Database management + +Ponder now requires the database schema to be explicitly specified with an environment variable or CLI flag. **`onchainSchema()` is removed.** + +

+ +```bash filename=".env.local" +DATABASE_SCHEMA=my_schema +``` + +```bash filename="shell" +ponder start --schema my_schema +``` + +
+
+  Each deployment/instance of a Ponder app must have its own schema, with some
+  exceptions for `ponder dev` and crash recovery. [Read
+  more](/docs/getting-started/database#database-schema).
+
+
+#### Railway
+
+Railway users should [update the start command](/docs/production/deploy#create-a-ponder-app-service) to include a database schema.
+
+{/* prettier-ignore */}
+
+
+```text filename="Start command"
+pnpm start --schema $RAILWAY_DEPLOYMENT_ID
+```
+
+```text filename="Start command"
+yarn start --schema $RAILWAY_DEPLOYMENT_ID
+```
+
+```text filename="Start command"
+npm run start --schema $RAILWAY_DEPLOYMENT_ID
+```
+
+
+#### `ponder db list`
+
+A new command was added for more visibility into which database schemas are being used.
+
+```bash filename="shell"
+$ ponder db list
+
+│ Schema        │ Active   │ Last active    │ Table count │
+├───────────────┼──────────┼────────────────┼─────────────┤
+│ indexer_prod  │ yes      │ ---            │ 10          │
+│ test          │ no       │ 26m 58s ago    │ 10          │
+│ demo          │ no       │ 1 day ago      │ 5           │
+```
+
+### New features
+
+#### Account indexing
+
+A new event source `accounts` is available. Accounts can be used to index transactions and native transfers to and from an address. [Read more](/docs/accounts).
+
+#### `ponder:schema` alias
+
+The `ponder:schema` virtual module was added. It is an alias for `ponder.schema.ts`.
+
+```diff filename="src/index.ts"
+- import { accounts } from "../ponder.schema";
++ import { accounts } from "ponder:schema";
+```
+
+It also contains a default export of all the exported table objects from `ponder.schema.ts`.
+
+{/* prettier-ignore */}
+```ts filename="src/index.ts" {1,3}
+import schema from "ponder:schema";
+
+const row = await db.insert(schema.accounts).values({
+  address: "0x7Df1", balance: 0n
+});
+```
+
+### Breaking changes
+
+#### `@ponder/core` → `ponder`
+
+New versions will be published to `ponder` and not `@ponder/core`.
+
+{/* prettier-ignore */}
+
+
+```bash filename="shell"
+pnpm remove @ponder/core
+pnpm add ponder
+```
+
+```bash filename="shell"
+yarn remove @ponder/core
+yarn add ponder
+```
+
+```bash filename="shell"
+npm remove @ponder/core
+npm add ponder
+```
+
+
+#### `@/generated` → `ponder:registry`
+
+The virtual module `@/generated` was replaced with `ponder:registry`.
+
+```diff filename="src/index.ts"
+- import { ponder } from "@/generated";
++ import { ponder } from "ponder:registry";
+```
+
+#### `factory()` function
+
+The `factory()` function replaces the `factory` property in the contract config. The result should be passed to the `address` property.
+ +```ts filename="ponder.config.ts (<=0.7)" {6-10} +import { createConfig } from "@ponder/core"; + +export default createConfig({ + contracts: { + uniswap: { + factory: { + address: "0x1F98431c8aD98523631AE4a59f267346ea31F984", + event: getAbiItem({ abi: UniswapV3FactoryAbi, name: "PoolCreated" }), + parameter: "pool", + }, + }, + }, +}); +``` + +{/* prettier-ignore */} +```ts filename="ponder.config.ts (0.8)" {6-10} +import { createConfig, factory } from "ponder"; + +export default createConfig({ + contracts: { + uniswap: { + address: factory({ + address: "0x1F98431c8aD98523631AE4a59f267346ea31F984", + event: getAbiItem({ abi: UniswapV3FactoryAbi, name: "PoolCreated" }), + parameter: "pool", + }), + }, + }, +}); +``` + +
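+Indexing functions for factory-based contracts are registered the same way as for statically addressed contracts. A minimal sketch, assuming the child contract's ABI is supplied via the `abi` option and using a hypothetical `Swap` event:
+
+{/* prettier-ignore */}
+```ts filename="src/index.ts"
+import { ponder } from "ponder:registry";
+
+ponder.on("uniswap:Swap", async ({ event }) => {
+  // `event.log.address` is the address of the specific child contract
+  // (here, a pool created by the factory) that emitted this event.
+  console.log(event.log.address);
+});
+```
+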
+ +#### `ponder-env.d.ts` + +A new `ponder-env.d.ts` is required. The new file uses [triple slash directives](https://www.typescriptlang.org/docs/handbook/triple-slash-directives.html#-reference-types-) for (hopefully) less frequent updates. + +{/* prettier-ignore */} + + +```bash filename="shell" +pnpm codegen +``` + + +```bash filename="shell" +yarn codegen +``` + + +```bash filename="shell" +npm run codegen +``` + + + +#### Removed `transactionReceipt.logs` + +The `transactionReceipt.logs` property was removed from the `event` object. + +#### Removed redundant properties from `event` + +The following properties were removed from the `event` object. + +```diff +- event.log.blockNumber; +- event.log.blockHash; +- event.log.transactionHash; +- event.log.transactionIndex; +- event.transaction.blockNumber; +- event.transaction.blockHash; +- event.transactionReceipt.transactionHash; +- event.transactionReceipt.transactionIndex; +``` + +All of the data is still available on other properties of the `event` object, such as `event.transaction.hash` or `event.block.number`. + ## 0.7 The `0.7` release includes several breaking changes. diff --git a/docs/pages/docs/production/_meta.ts b/docs/pages/docs/production/_meta.ts index a115253a6..ee918759e 100644 --- a/docs/pages/docs/production/_meta.ts +++ b/docs/pages/docs/production/_meta.ts @@ -1,4 +1,3 @@ export default { deploy: "Deploy", - "horizontal-scaling": "Horizontal scaling", }; diff --git a/docs/pages/docs/production/deploy.mdx b/docs/pages/docs/production/deploy.mdx index e4a4d4eac..4cf170e77 100644 --- a/docs/pages/docs/production/deploy.mdx +++ b/docs/pages/docs/production/deploy.mdx @@ -3,8 +3,7 @@ title: "Deploy to production" description: "A guide for deploying Ponder apps to production" --- -import { Steps } from "nextra/components"; -import { Callout } from "nextra/components"; +import { Steps, Tabs, Callout } from "nextra/components"; # Deploy @@ -30,13 +29,34 @@ From the Railway console: 1. Click **New Project** → **Deploy from GitHub repo** and select your repo from the list 2. Click **Add variables**, then add RPC URLs (e.g. `PONDER_RPC_URL_1`) and other environment variables 3. Create a public domain. In **Settings** → **Networking**, click **Generate Domain** -4. Set the healthcheck path and timeout. In **Settings** → **Deploy**, set the **Healthcheck Path** to `/ready` and the **Healthcheck Timeout** to `86400` seconds (1 day) +4. Update the start command. In **Settings** → **Deploy**, set the **Custom Start Command** to include the `--schema` option. This is required to enable zero-downtime deployments. [Read more](/docs/getting-started/database#database-schema). - - _Monorepo users:_ Configure the **Root Directory** and **Start Command** such - that `ponder start` runs at the Ponder project root. For example, set the root - directory to `packages/ponder` or set the start command to `cd packages/ponder - && pnpm start`. +{/* prettier-ignore */} + + +```text filename="Start command" +pnpm start --schema $RAILWAY_DEPLOYMENT_ID +``` + + +```text filename="Start command" +yarn start --schema $RAILWAY_DEPLOYMENT_ID +``` + + +```text filename="Start command" +npm run start --schema $RAILWAY_DEPLOYMENT_ID +``` + + + +5. Set the healthcheck path and timeout. In **Settings** → **Deploy**, set the **Healthcheck Path** to `/ready` and the **Healthcheck Timeout** to `86400` seconds (1 day) + + + _Monorepo users:_ Use the **Root Directory** and/or **Start Command** options + to run `ponder start` at the Ponder project root, e.g. 
set the root directory
+ to `packages/ponder` or set the start command to `cd packages/ponder && pnpm
+ start`.

### Create a Postgres database

@@ -46,33 +66,26 @@ From the new project dashboard:

1. Click **Create** → **Database** → **Add PostgreSQL**
2. Open the **Variables** tab for the Ponder app service, click **New Variable** → **Add Reference** → select `DATABASE_URL` and click **Add**

-After a moment, the Ponder app service should redeploy successfully. Check the **Build Logs** and **Deploy Logs** tabs to debug any issues.
-
-## Self hosting
-
-In general, hosting a Ponder app is similar to hosting a normal Node.js HTTP server. Rather than offer a step-by-step guide, this section describes the key Ponder-specific quirks to consider when self-hosting.
+After a moment, the service running `ponder start` should redeploy successfully. Check the **Build Logs** and **Deploy Logs** tabs to debug any issues.

-### Health checks & probes
+## Self hosting

-Use the `/health` and `/ready` endpoints to configure health checks or [probes](https://kubernetes.io/docs/concepts/configuration/liveness-readiness-startup-probes/).
-
-- **`/health`**: Returns an HTTP `200` response immediately after the process starts.
-- **`/ready`**: Returns an HTTP `200` response once indexing progress has reached realtime across all chains. During the historical backfill, the endpoint returns an HTTP `503` response.
+In general, hosting a Ponder app is similar to hosting a normal Node.js web server. This section describes the key Ponder-specific quirks to consider when self-hosting.

### Database connection

-
- Your app will have performance issues if the roundtrip database latency exceeds
- ~20 milliseconds. This is common when using a database in different private
- network or region.
+
+ Your app will run into performance issues if the roundtrip database latency
+ exceeds ~20 milliseconds. This is common when using a database in a different
+ private network or cloud region.

In production, Ponder works best with a Postgres database in the same private network. Set the `DATABASE_URL` environment variable to the connection string of your Postgres database, or manually override the `database.connectionString` option in `ponder.config.ts`.

```ts filename="ponder.config.ts" {6}
-import { createConfig } from "@ponder/core";
+import { createConfig } from "ponder";

export default createConfig({
  database: {
@@ -83,29 +96,57 @@ export default createConfig({
});
```

-### Table names and live views
+### Database schema

-When a Ponder app starts up, it creates a table in the database for each table exported from `ponder.schema.ts`. To avoid naming conflicts with prior instances of the same app, the tables are prefixed with a random four-character `instance_id`.
+Each deployment must use a different database schema. Use the `DATABASE_SCHEMA` environment variable or the `--schema` CLI argument passed to `ponder start` to specify which database schema each deployment should use. [Read more](/docs/api-reference/database#database-schema-rules) about database schema selection rules.

-When the app finishes the historical backfill (or immediately after startup when using `ponder dev`) it creates a view for each table in `ponder.schema.ts` using this command.
+The best choice depends on your deployment strategy – here are a few common options.
-```sql -CREATE VIEW {table_name} AS ( SELECT * FROM {instance_id}__{table_name} ); -``` +- Kubernetes pod name +- Railway deployment ID +- Git branch name or commit hash -The **live view** mechanism is essential for zero-downtime deployments, horizontal scaling, and direct SQL queries. - -#### Example +### Health checks & probes -Here's a zero-downtime redeployment scenario, where views continously serve data from the most recent instance to go live. +Use the `/health` and `/ready` endpoints to configure health checks or [probes](https://kubernetes.io/docs/concepts/configuration/liveness-readiness-startup-probes/). -1. App `1234` starts up. It creates and begins writing to a table named `1234__account`. -2. App `1234` completes the historical backfill. It creates a view named `account` that points at `1234__account`. -3. App `5678` starts up. It creates and begins writing to a table named `5678__account`. -4. App `5678` completes the historical backfill. It updates the `account` view to point at `5678__account`. -5. App `1234` shuts down. -6. App `5678` continues to serve live data via the `account` view. +- **`/health`**: Returns an HTTP `200` response immediately after the process starts. +- **`/ready`**: Returns an HTTP `200` response once indexing progress has reached realtime across all chains. During the historical backfill, the endpoint returns an HTTP `503` response. ### Crash recovery -If a Ponder app running `ponder start` crashes and restarts, it will attempt to resume indexing where it left off. [Read more](/docs/api-reference/database) about the instance lifecycle and crash recovery mechanism. +If a Ponder app running `ponder start` crashes and restarts using the same database schema, it will attempt to resume indexing where it left off. [Read more](/docs/api-reference/database#build-id-and-crash-recovery) about the instance lifecycle and crash recovery mechanism. + +## Advanced + +### Scale the HTTP server + +If a `ponder start` instance receives a large volume of HTTP traffic (e.g. GraphQL requests), the HTTP server will contend with the indexing engine for CPU and memory resources. This can lead to degraded indexing performance and might ultimately crash the instance. + +To solve this problem, you can use `ponder serve` to run additional instances of the HTTP server without the indexing engine. Here are a few things to keep in mind. + +- The `ponder serve` instance should use the same [database schema](#database-schema) as the `ponder start` instance that you'd like to scale. +- If one `ponder serve` instance is not enough, you can safely run multiple replicas behind a proxy using an architecture like this. + +```mermaid +graph LR + A[Clients] + B[Proxy] + C["HTTP (ponder serve)"] + D["HTTP (ponder serve)"] + E["HTTP (ponder serve)"] + F["Database"] + G["Indexer (ponder start)"] + A --> B + B --> C + B --> D + B --> E + C --> F + D --> F + E --> F + F <--> G +``` + +### Database maintenance + +The `ponder db` CLI entrypoint offers a set of commands useful for observing and maintaining your database. [Read more](/docs/api-reference/ponder-cli#db). 
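+
+For example, `ponder db list` (shown in the migration guide) reports which database schemas are in use, whether each one has an active instance, and when it was last active. A minimal sketch of a routine check:
+
+```bash filename="shell"
+# List the database schemas used by Ponder instances, with activity info.
+ponder db list
+```
+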
diff --git a/docs/pages/docs/production/horizontal-scaling.mdx b/docs/pages/docs/production/horizontal-scaling.mdx deleted file mode 100644 index 764679e6c..000000000 --- a/docs/pages/docs/production/horizontal-scaling.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: "Deploy to production" -description: "A guide for deploying Ponder apps to production" ---- - -import { Steps } from "nextra/components"; -import { Callout } from "nextra/components"; - -# Horizontal scaling - -By default, Ponder runs the sync engine, indexing engine, and HTTP server (GraphQL and API functions) in the same Node.js process. This works well for small apps, but can lead to performance issues as your app grows. - -If the HTTP server experiences high request volume, it contends with the indexing services for CPU and memory resources on the host, which can lead to degraded indexing performance. - -To solve this, you can use the `ponder serve` command to horizontally scale the HTTP service. `ponder serve` is similar to `ponder start`, but only runs the HTTP server. It doesn't run the indexing or sync engines. - -```mermaid -graph LR - A[Clients] - B[Proxy] - C["HTTP (ponder serve)"] - D["HTTP (ponder serve)"] - E["HTTP (ponder serve)"] - F["Database"] - G["Indexer (ponder start)"] - A --> B - B --> C - B --> D - B --> E - C --> F - D --> F - E --> F - F <--> G -``` - - - At this time, `ponder serve` only works with Postgres. PGlite is not - supported. - - -## Railway Replicas - -Railway makes it easy to horizontally scale `ponder serve` instances using their [Replicas](https://docs.railway.app/reference/scaling#horizontal-scaling-with-replicas) feature. This feature uses Railway's proxy to distribute incoming requests across multiple instances of your service using a round-robin strategy. - - - -### Create a new service for `ponder serve` - -Other than the start command, the `ponder serve` service should have the same config as the original `ponder start` service. From your project dashboard: - -1. Click **New** → **GitHub repo** and select your Ponder repo from the list. -2. Open the **Settings** tab for the new service and update the **Start Command** to `ponder serve`. -3. Open the **Variables** tab, click on **New Variable** → **Add Reference** and select `DATABASE_URL`. Be sure to use the same database as the `ponder start` service. -4. [set schema] -5. Set a healthcheck path. On the **Settings** tab, go to **Deploy** → **Healthcheck Path** and enter `/health`. - -### Create replicas - -Once the new service is deployed and healthy, go the **Settings** tab and set the **Replicas** count to 2 or more. - -### Direct traffic to `ponder serve` - -Update any clients of this Ponder app to send API requests to the public domain of the new `ponder serve` service. If you're using a custom domain, you can simply update that to point to the `ponder serve` service. - -### Remove public domain from the `ponder start` service - -Now that the replicated `ponder serve` service is handling all incoming requests, you can remove the public URL for the original service running `ponder serve`. - - - -## Other cloud environments - -Ponder has not been thoroughly tested on cloud providers other than Railway. However, Ponder apps should work in any environment that supports Node.js and can connect to a Postgres database. 
diff --git a/docs/pages/docs/query/api-functions.mdx b/docs/pages/docs/query/api-functions.mdx index 2cf4180da..47cd70b76 100644 --- a/docs/pages/docs/query/api-functions.mdx +++ b/docs/pages/docs/query/api-functions.mdx @@ -31,7 +31,7 @@ API functions are available starting from version `0.5.0`. Read the [migration g To enable API functions, create a file named `src/api/index.ts` with the following code. You can register API functions in any `.ts` file in the `src/api/` directory. ```ts filename="src/api/index.ts" -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.get("/hello", (c) => { return c.text("Hello, world!"); @@ -53,11 +53,11 @@ Hello, world! your app **will not** serve the standard GraphQL API by default. -To continue using the standard GraphQL API, register the `graphql` middleware exported from `@ponder/core`. +To continue using the standard GraphQL API, register the `graphql` middleware exported from `ponder`. ```ts filename="src/api/index.ts" {2,4-5} -import { ponder } from "@/generated"; -import { graphql } from "@ponder/core"; +import { ponder } from "ponder:registry"; +import { graphql } from "ponder"; ponder.use("/", graphql()); ponder.use("/graphql", graphql()); @@ -76,8 +76,8 @@ The API function context includes a ready-to-use Drizzle database client at `c.d Here's a simple query using the Drizzle `select` query builder. ```ts filename="src/api/index.ts" -import { ponder } from "@/generated"; -import { accounts } from "../../ponder.schema"; +import { ponder } from "ponder:registry"; +import { accounts } from "ponder:schema"; ponder.get("/account/:address", async (c) => { const address = c.req.param("address"); @@ -92,7 +92,7 @@ ponder.get("/account/:address", async (c) => { }); ``` -To build more complex queries, use `join`, `groupBy`, `where`, `orderBy`, `limit`, and other methods. Drizzle's filter & conditional operators (like `eq`, `gte`, and `or`) are re-exported by `@ponder/core`. Visit the Drizzle [documentation](https://orm.drizzle.team/docs/select) for more details. +To build more complex queries, use `join`, `groupBy`, `where`, `orderBy`, `limit`, and other methods. Drizzle's filter & conditional operators (like `eq`, `gte`, and `or`) are re-exported by `ponder`. Visit the Drizzle [documentation](https://orm.drizzle.team/docs/select) for more details. ### Relational queries @@ -101,7 +101,7 @@ Drizzle's relational query builder (AKA Drizzle Queries) offers a great develope Here's an example that uses the relational query builder in an API function to find the 10 largest trades in the past hour joined with the account that made the trade. Visit the Drizzle Queries [documentation](https://orm.drizzle.team/docs/rqb) for more details. ```ts filename="src/api/index.ts" -import { accounts, tradeEvents } from "../ponder.schema"; +import { accounts, tradeEvents } from "ponder:schema"; import { eq, and, gte, inArray, sql } from "drizzle-orm"; ponder.get("/hot-trades", async (c) => { @@ -126,9 +126,9 @@ ponder.get("/hot-trades", async (c) => { Use `ponder.get()` to handle HTTP `GET` requests. The `c` context object contains the request, response helpers, and the database connection. 
```ts filename="src/api/index.ts" {5} -import { ponder } from "@/generated"; -import { eq } from "@ponder/core"; -import { accounts } from "../../ponder.schema"; +import { ponder } from "ponder:registry"; +import { accounts } from "ponder:schema"; +import { eq } from "ponder"; ponder.get("/account/:address", async (c) => { const address = c.req.param("address"); @@ -159,9 +159,9 @@ Use `ponder.post()` to handle HTTP `POST` requests. In this example, we calculate the volume of transfers for each recipient within a given time range. The `fromTimestamp` and `toTimestamp` parameters are passed in the request body. ```ts filename="src/api/index.ts" {5} -import { ponder } from "@/generated"; -import { and, gte, sum } from "@ponder/core"; -import { transferEvents } from "../../ponder.schema"; +import { ponder } from "ponder:registry"; +import { transferEvents } from "ponder:schema"; +import { and, gte, sum } from "ponder"; ponder.post("/volume", async (c) => { const body = await c.req.json(); @@ -191,7 +191,7 @@ ponder.post("/volume", async (c) => { Use `ponder.use(...){:ts}` to add middleware to your API functions. Middleware functions can modify the request and response objects, add logs, authenticate requests, and more. [Read more](https://hono.dev/docs/guides/middleware) about Hono middleware. ```ts filename="src/api/index.ts" {3} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.use((c, next) => { console.log("Request received:", c.req.url); @@ -204,7 +204,7 @@ ponder.use((c, next) => { Use `ponder.hono` to access the underlying Hono instance. ```ts filename="src/api/index.ts" {3} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.hono.notFound((c) => { return c.text("Custom 404 Message", 404); diff --git a/docs/pages/docs/query/direct-sql.mdx b/docs/pages/docs/query/direct-sql.mdx index eb2368594..b3064cca9 100644 --- a/docs/pages/docs/query/direct-sql.mdx +++ b/docs/pages/docs/query/direct-sql.mdx @@ -7,7 +7,7 @@ import { Callout, FileTree, Steps } from "nextra/components"; # Direct SQL queries -This guide describes how to query the database directly using Drizzle or psql. +This guide describes how to query the database directly using psql or Drizzle. Don't write to onchain tables from external clients. Only write to onchain @@ -20,38 +20,6 @@ This guide describes how to query the database directly using Drizzle or psql. but the methods described here do not work out of the box. -## Drizzle - -The `onchainTable` objects exported by `ponder.schema.ts` are valid Drizzle table objects. You can import them from TypeScript files outside the Ponder `src/` directory and use them with the Drizzle query builder. - - - When you use `onchainTable` objects externally, they query the live views - automatically. [Read more](/docs/api-reference/database) about table names and - live views. - - -Here's a script that creates a Drizzle client and runs a query against the Ponder tables. Be sure to connect to the database using the same `DATABASE_URL` as the Ponder app. 
- -```ts filename="query.ts" -import { drizzle } from "drizzle-orm/node-postgres"; -import * as schema from "../../ponder/ponder.schema"; - -const db = drizzle(process.env.DATABASE_URL, { schema, casing: "snake_case" }); - -// Select -const oldAccounts = await db - .select() - .from(schema.accounts) - .orderBy(asc(schema.accounts.createdAt)) - .limit(100); - -// Query -const whalesWithTransfers = await db.query.accounts.findMany({ - where: (accounts, { eq }) => eq(accounts.balance, 1_000_000n), - with: { transferEvents: true }, -}); -``` - ## psql You can also use [psql](https://www.postgresql.org/docs/current/app-psql.html), a terminal-based Postgres front-end, to query the database from the command line. @@ -82,54 +50,23 @@ Use the `\dt` command to list all tables in the `public` schema. If you are usin
-Notice that the tables have an unfamiliar four-character prefix. This prefix mechanism is used to avoid table name conflicts during hot reloads and redeployments. - The `reorg` tables are used by Ponder internally during reorg reconciliation, and the `_ponder_meta` table is used to store metadata about the database state. ```plaintext filename="psql (result)" List of relations Schema | Name | Type | Owner --------+-----------------------------+-------+---------- - public | b83f__accounts | table | username - public | b83f__transfer_events | table | username - public | b83f_reorg__accounts | table | username - public | b83f_reorg__transfer_events | table | username + public | accounts | table | username + public | transfer_events | table | username public | _ponder_meta | table | username + public | _reorg__accounts | table | username + public | _reorg__transfer_events | table | username (5 rows) ``` -Querying the tables directly is not recommended, because the table names are subject to change. To work around this problem, use the **live views** instead. - -### Display views - -Now, for a more intuitive experience, run the `\dv` command to list all views in the schema. - -
- -```bash filename="psql" -\dv -``` - -```bash filename="psql" -\dv my_schema.* -``` - -
-
-The `accounts` and `transfer_events` views are **live views**. They proxy queries to the underlying `b83f__accounts` and `b83f__transfer_events` tables. Read more about [live views](/docs/production/deploy#table-names-and-live-views).
-
-```plaintext filename="psql (result)"
-  List of relations
- Schema |      Name       | Type  |  Owner
---------+-----------------+-------+----------
- public | accounts        | view  | username
- public | transfer_events | view  | username
-(2 rows)
-```
-

### Select rows

-Select a few rows from the `accounts` view.
+Select a few rows from the `accounts` table.

```sql filename="psql"
SELECT * FROM accounts LIMIT 5;
@@ -170,3 +107,37 @@ SELECT "to", COUNT(*) AS transfer_count
```
+
+## Drizzle
+
+
+  As of `0.8`, the `onchainTable` objects are not automatically aware of the
+  database schema that your instance is using. To get this working, you'll need
+  to 1) use the `public` schema or 2) set the connection search path to the
+  schema that your instance is using. We're tracking this issue
+  [here](https://github.com/ponder-sh/ponder/issues/1325).
+
+
+The `onchainTable` objects exported by `ponder.schema.ts` are valid Drizzle table objects. You can import them from TypeScript files outside the Ponder `src/` directory and use them with the Drizzle query builder.
+
+Here's a script that creates a Drizzle client and runs a query against the Ponder tables. Be sure to connect to the database using the same `DATABASE_URL` as the Ponder app.
+
+```ts filename="query.ts"
+import { asc } from "drizzle-orm";
+import { drizzle } from "drizzle-orm/node-postgres";
+import * as schema from "../../ponder/ponder.schema";
+
+const db = drizzle(process.env.DATABASE_URL, { schema, casing: "snake_case" });
+
+// Select
+const oldAccounts = await db
+  .select()
+  .from(schema.accounts)
+  .orderBy(asc(schema.accounts.createdAt))
+  .limit(100);
+
+// Query
+const whalesWithTransfers = await db.query.accounts.findMany({
+  where: (accounts, { eq }) => eq(accounts.balance, 1_000_000n),
+  with: { transferEvents: true },
+});
+```
diff --git a/docs/pages/docs/query/graphql.mdx b/docs/pages/docs/query/graphql.mdx
index 9d1e39678..1716c25fb 100644
--- a/docs/pages/docs/query/graphql.mdx
+++ b/docs/pages/docs/query/graphql.mdx
@@ -27,7 +27,7 @@ If your schema contains a `person` table, the GraphQL schema will include a `per
```ts filename="ponder.schema.ts" -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const person = onchainTable("person", (t) => ({ id: t.integer().primaryKey(), @@ -241,7 +241,7 @@ Plural fields and `p.many()` relationship fields each return a `Page` type. This
```ts filename="ponder.schema.ts" -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const pet = onchainTable("pet", (t) => ({ id: t.text().primaryKey(), @@ -463,7 +463,7 @@ The GraphQL schema includes a relationship field for each `one` or `many` relati
```ts filename="ponder.schema.ts" -import { onchainTable, relations } from "@ponder/core"; +import { onchainTable, relations } from "ponder"; export const pet = onchainTable("pet", (t) => ({ id: t.text().primaryKey(), diff --git a/docs/pages/docs/schema.mdx b/docs/pages/docs/schema.mdx index 8642b512c..e15016256 100644 --- a/docs/pages/docs/schema.mdx +++ b/docs/pages/docs/schema.mdx @@ -9,10 +9,10 @@ import { Callout } from "nextra/components"; Ponder's schema definition API is built on [Drizzle](https://orm.drizzle.team/), a modern TypeScript ORM. -To create a table, use the `onchainTable` function exported by `@ponder/core` and include column definitions. +To create a table, use the `onchainTable` function exported by `ponder` and include column definitions. ```ts filename="ponder.schema.ts" {1,3} -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const pets = onchainTable("pets", (t) => ({ name: t.text().primaryKey(), @@ -52,7 +52,7 @@ Ponder also includes a few extra column types built specifically for EVM indexin The `bigint` column type can hold any EVM `uint256` or `int256` value. ```ts filename="ponder.schema.ts" {5} -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const accounts = onchainTable("accounts", (t) => ({ address: t.hex().primaryKey(), @@ -72,7 +72,7 @@ export const accounts = onchainTable("accounts", (t) => ({ The `hex` column type is useful for EVM `address`, `bytes`, or any other hex-encoded value. ```ts filename="ponder.schema.ts" {4} -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const accounts = onchainTable("accounts", (t) => ({ address: t.hex().primaryKey(), @@ -82,10 +82,10 @@ export const accounts = onchainTable("accounts", (t) => ({ ### Enums -To define an enum, use the `onchainEnum` function exported by `@ponder/core`. Then, use the value returned by `onchainEnum` as a column type. Under the hood, `onchainEnum` uses a PostgreSQL [enumerated type](https://www.postgresql.org/docs/current/datatype-enum.html). +To define an enum, use the `onchainEnum` function exported by `ponder`. Then, use the value returned by `onchainEnum` as a column type. Under the hood, `onchainEnum` uses a PostgreSQL [enumerated type](https://www.postgresql.org/docs/current/datatype-enum.html). ```ts filename="ponder.schema.ts" {3,7} -import { onchainEnum, onchainTable } from "@ponder/core"; +import { onchainEnum, onchainTable } from "ponder"; export const color = onchainEnum("color", ["ORANGE", "BLACK"]); @@ -100,7 +100,7 @@ export const cats = onchainTable("cats", (t) => ({ To define an array column, use the `.array(){:ts}` modifier. Arrays are a good fit for small one-dimensional collections, not [relationships](#relationships) between records. ```ts filename="ponder.schema.ts" {5} -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const cats = onchainTable("cats", (t) => ({ name: t.text().primaryKey(), @@ -113,7 +113,7 @@ export const cats = onchainTable("cats", (t) => ({ To mark a column as not null, use the `.notNull(){:ts}` modifier. If you attempt to insert a row that does not include a value for a `NOT NULL{:sql}` column, the database will throw an error. 
```ts filename="ponder.schema.ts" {5} -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const cats = onchainTable("cats", (t) => ({ name: t.text().primaryKey(), @@ -126,7 +126,7 @@ export const cats = onchainTable("cats", (t) => ({ To set a default value for a column, use the `.default(){:ts}` modifier and pass a string, number, boolean, or `null`. ```ts filename="ponder.schema.ts" {5} -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const cats = onchainTable("cats", (t) => ({ name: t.text().primaryKey(), @@ -137,7 +137,7 @@ export const cats = onchainTable("cats", (t) => ({ Alternatively, use the `.$default(){:ts}` modifier to specify a JavaScript function that returns the default value. With this approach, the database driver calls the function before inserting a row into this table that does not include a value for this column. ```ts filename="ponder.schema.ts" {6} -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; import { generateId } from "../utils"; export const cats = onchainTable("cats", (t) => ({ @@ -159,12 +159,12 @@ Ponder uses Drizzle Relations to define relationships between tables. Here are s ### One-to-one -Use the `relations` function exported by `@ponder/core` to define the relationships for a table. +Use the `relations` function exported by `ponder` to define the relationships for a table. To define a one-to-one relationship, use the `one()` operator and specify which columns relate the two tables. In this example, each user has a profile and each profile belongs to one user. ```ts filename="ponder.schema.ts" {1,7-9} -import { onchainTable, relations } from "@ponder/core"; +import { onchainTable, relations } from "ponder"; export const users = onchainTable("users", (t) => ({ id: t.text().primaryKey(), @@ -184,7 +184,7 @@ export const profiles = onchainTable("profiles", (t) => ({ Now that you've defined the relationship, the `profile` field will become available in the Query API (`findMany` and `findFirst`) using the `with` option. ```ts filename="src/index.ts" {8,11} -import { users, profiles } from "../ponder.schema"; +import { users, profiles } from "ponder:schema"; await db.insert(users).values({ id: "hunter42" }); await db.insert(profiles).values({ userId: "hunter42", age: 29 }); @@ -203,7 +203,7 @@ console.log(user.profile.age); To define a one-to-many relationship, use the `one()` and `many()` operators to define both sides of the relationship. In this example, each dog has one owner and each person can own many dogs. ```ts filename="ponder.schema.ts" {7-9,16-18} -import { onchainTable, relations } from "@ponder/core"; +import { onchainTable, relations } from "ponder"; export const persons = onchainTable("persons", (t) => ({ name: t.text().primaryKey(), @@ -226,7 +226,7 @@ export const dogsRelations = relations(dogs, ({ one }) => ({ Now, any row inserted into the `dogs` table with `ownerName: "Bob"` will become available in Bob's `dogs` field. 
```ts filename="src/index.ts" {11,15} -import { persons, dogs } from "../ponder.schema"; +import { persons, dogs } from "ponder:schema"; await db.insert(persons).values({ name: "Bob" }); await db.insert(dogs).values([ @@ -255,7 +255,7 @@ To define a many-to-many relationship, create a "join table" that relates the tw {/* prettier-ignore */} ```ts filename="ponder.schema.ts" {7-9,16-18,30-33} -import { onchainTable, relations } from "@ponder/core"; +import { onchainTable, relations } from "ponder"; export const users = onchainTable("users", (t) => ({ id: t.text().primaryKey(), @@ -294,7 +294,7 @@ Now, any row inserted into the `userTeams` table will create a relationship betw {/* prettier-ignore */} ```ts filename="src/index.ts" {19} -import { users, teams, userTeams } from "../ponder.schema"; +import { users, teams, userTeams } from "ponder:schema"; await db.insert(users).values([ { id: "ron" }, { id: "harry" }, { id: "hermione" } @@ -371,17 +371,17 @@ query { Every table **must** have a primary key. To create a single-column primary key, use the `.primaryKey(){:ts}` modifier. ```ts filename="ponder.schema.ts" {4} -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const tokens = onchainTable("tokens", (t) => ({ id: t.bigint().primaryKey(), })); ``` -To create a composite primary key, use the `primaryKey()` function exported by `@ponder/core`. All components of the primary key constraint must be unique and not null. +To create a composite primary key, use the `primaryKey()` function exported by `ponder`. All components of the primary key constraint must be unique and not null. ```ts filename="ponder.schema.ts" {11} -import { onchainTable, primaryKey } from "@ponder/core"; +import { onchainTable, primaryKey } from "ponder"; export const poolStates = onchainTable( "pool_states", @@ -398,10 +398,10 @@ export const poolStates = onchainTable( ### Indexes -To create a database index, use the `index(){:ts}` function exported by `@ponder/core`. The following example creates an index on the `persons.name` column to speed up search queries, and an index on the `dogs.ownerId` column to speed up the `persons.dogs` relational query. +To create a database index, use the `index(){:ts}` function exported by `ponder`. The following example creates an index on the `persons.name` column to speed up search queries, and an index on the `dogs.ownerId` column to speed up the `persons.dogs` relational query. ```ts filename="ponder.schema.ts" {10,25} -import { onchainTable, relations, index } from "@ponder/core"; +import { onchainTable, relations, index } from "ponder"; export const persons = onchainTable( "persons", @@ -450,7 +450,7 @@ If a table has two or more columns that together form a unique identifier for a Consider an `allowances` table storing ERC20 token allowances. Each row in this table represents the allowance granted by one owner to one spender. ```ts filename="ponder.schema.ts" {10} -import { onchainTable, primaryKey } from "@ponder/core"; +import { onchainTable, primaryKey } from "ponder"; export const allowances = onchainTable( "allowances", @@ -468,7 +468,7 @@ export const allowances = onchainTable( Use the `bigint` column type to store block timestamps using their EVM-native Unix timestamp representation. This maintains consistency with Viem's approach, and avoids error-prone timezone manipulation code. 
```ts filename="ponder.schema.ts" {5} -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const events = onchainTable("events", (t) => ({ id: t.text().primaryKey(), @@ -479,7 +479,7 @@ export const events = onchainTable("events", (t) => ({ If you strongly prefer working with JavaScript `Date` objects, you can also use the `timestamp` column type, but we recommend doing this conversion in the view layer. ```ts filename="ponder.schema.ts" {5} -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const events = onchainTable("events", (t) => ({ id: t.text().primaryKey(), @@ -493,7 +493,7 @@ Use the `.$type()` modifier to customize the TypeScript type for a column. Note {/* prettier-ignore */} ```ts filename="ponder.schema.ts" {5} -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const tokens = onchainTable("tokens", (t) => ({ id: t.bigint().primaryKey(), @@ -506,7 +506,7 @@ export const tokens = onchainTable("tokens", (t) => ({ Use `camelCase` for TypeScript names and `snake_case` for SQL names. This guideline applies to all database objects and properties, including tables, columns, relations, and indexes. ```ts filename="ponder.schema.ts" -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const registrationEvents = onchainTable( "registration_events", // Use snake_case for the SQL table name @@ -525,7 +525,7 @@ export const registrationEvents = onchainTable( Here's a schema for a simple ERC20 app. ```ts filename="ponder.schema.ts" -import { index, onchainTable, primaryKey } from "@ponder/core"; +import { index, onchainTable, primaryKey } from "ponder"; export const account = onchainTable("account", (t) => ({ address: t.hex().primaryKey(), diff --git a/docs/pages/docs/utilities/merge-abis.mdx b/docs/pages/docs/utilities/merge-abis.mdx index 7250cab25..37b3d87c2 100644 --- a/docs/pages/docs/utilities/merge-abis.mdx +++ b/docs/pages/docs/utilities/merge-abis.mdx @@ -22,10 +22,10 @@ const tokenAbi = mergeAbis([erc20Abi, erc4626Abi]); Use `mergeAbis` to preserve type safety when combining multiple ABIs for a single contract. This is especially useful for proxy contracts that have had several different implementation ABIs. -For convenience, `@ponder/core` re-exports `mergeAbis` from `@ponder/utils`. +For convenience, `ponder` re-exports `mergeAbis` from `@ponder/utils`. ```ts filename="ponder.config.ts" {1,14} -import { createConfig, mergeAbis } from "@ponder/core"; +import { createConfig, mergeAbis } from "ponder"; import { http } from "viem"; import { ERC1967ProxyAbi } from "./abis/ERC1967Proxy"; @@ -59,4 +59,4 @@ A list of ABIs to merge. - Type: `Abi{:ts}` -A new ABI including all items from the input list, with duplicates removed. \ No newline at end of file +A new ABI including all items from the input list, with duplicates removed. diff --git a/docs/pages/docs/utilities/replace-bigints.mdx b/docs/pages/docs/utilities/replace-bigints.mdx index 02b688510..879eddcd2 100644 --- a/docs/pages/docs/utilities/replace-bigints.mdx +++ b/docs/pages/docs/utilities/replace-bigints.mdx @@ -31,10 +31,10 @@ Here are a few common scenarios where you might want to replace `BigInt` values The [`json` column type](/docs/schema#json-type) does not support `BigInt` values. Use `replaceBigInts` to prepare objects containing `BigInt` values for insertion. 
```ts filename="src/index.ts" {2,9} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; +import { userOperations } from "ponder:schema"; import { replaceBigInts } from "@ponder/utils"; import { toHex } from "viem"; -import { userOperations } from "../ponder.schema"; ponder.on("EntryPoint:UserOp", async ({ event, context }) => { await context.db.insert(userOperations).values({ @@ -46,8 +46,8 @@ ponder.on("EntryPoint:UserOp", async ({ event, context }) => { To maintain type safety for column values, use the `ReplaceBigInts{:ts}` helper type in the column `$type` annotation. -```ts filename="ponder.schema.ts" {2,7} -import { onchainTable } from "@ponder/core"; +```ts filename="ponder.schema.ts" {1,7} +import { onchainTable } from "ponder"; import type { ReplaceBigInts } from "@ponder/utils"; import type { TransactionReceipt, Hex } from "viem"; @@ -62,10 +62,10 @@ export const userOperations = onchainTable("user_operations", (t) => ({ The GraphQL API automatically serializes `BigInt` values to strings before returning them in HTTP responses. In API functions, you need to handle this serialization process manually. ```ts filename="src/api/index.ts" {2,16} -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; +import { accounts } from "ponder:schema"; import { replaceBigInts } from "@ponder/utils"; import { numberToHex } from "viem"; -import { accounts } from "../../ponder.schema"; ponder.get("/whale-balances", async (c) => { const rows = await c.db diff --git a/docs/pages/docs/utilities/transports.mdx b/docs/pages/docs/utilities/transports.mdx index 203e1396b..d09a0a863 100644 --- a/docs/pages/docs/utilities/transports.mdx +++ b/docs/pages/docs/utilities/transports.mdx @@ -33,10 +33,10 @@ const client = createPublicClient({ ### Usage in Ponder -For convenience, `@ponder/core` re-exports `loadBalance` from `@ponder/utils`. +For convenience, `ponder` re-exports `loadBalance` from `@ponder/utils`. ```ts filename="ponder.config.ts" {8-13} -import { createConfig, loadBalance } from "@ponder/core"; +import { createConfig, loadBalance } from "ponder"; import { http } from "viem"; export default createConfig({ @@ -86,10 +86,10 @@ const client = createPublicClient({ ### Usage in Ponder -For convenience, `@ponder/core` re-exports `rateLimit` from `@ponder/utils`. +For convenience, `ponder` re-exports `rateLimit` from `@ponder/utils`. ```ts filename="ponder.config.ts" {8-10} -import { createConfig, rateLimit } from "@ponder/core"; +import { createConfig, rateLimit } from "ponder"; import { http } from "viem"; export default createConfig({ diff --git a/docs/pages/docs/utilities/types.mdx b/docs/pages/docs/utilities/types.mdx index 8b4d4fba8..dc9d8def5 100644 --- a/docs/pages/docs/utilities/types.mdx +++ b/docs/pages/docs/utilities/types.mdx @@ -11,14 +11,14 @@ To enable code reuse and maintain type safety for advanced use cases, Ponder off ## Indexing function types -The `"@/generated"{:ts}` module exports utility types that are useful for creating reusable helper functions in your indexing files. +The `"ponder:registry"{:ts}` module exports utility types that are useful for creating reusable helper functions in your indexing files. ### EventNames A union of all event names that are available from the contracts defined in `ponder.config.ts`. 
```ts filename="src/helpers.ts" {1} -import { ponder, type EventNames } from "@/generated"; +import { ponder, type EventNames } from "ponder:registry"; function helper(eventName: EventNames) { eventName; @@ -31,7 +31,7 @@ function helper(eventName: EventNames) { A generic type that optionally accepts an event name and returns the `event` object type for that event. ```ts filename="src/helpers.ts" {1} -import { ponder, type Event } from "@/generated"; +import { ponder, type Event } from "ponder:registry"; function helper(event: Event<"Weth:Deposit">) { event; @@ -48,7 +48,7 @@ function helper(event: Event<"Weth:Deposit">) { If no event name is provided, `Event` is the union of all event types. This can be useful if all you need is the `block`, `transaction`, and `log` types which are the same for all events. ```ts filename="src/helpers.ts" {1} -import { ponder, type Event } from "@/generated"; +import { ponder, type Event } from "ponder:registry"; function helper(event: Event) { event; @@ -63,7 +63,7 @@ function helper(event: Event) { A generic type that optionally accepts an event name and returns the `context` object type. ```ts filename="src/helpers.ts" {1} -import { ponder, type Context } from "@/generated"; +import { ponder, type Context } from "ponder:registry"; function helper(context: Context<"Weth:Deposit">) { event; @@ -83,7 +83,7 @@ If no event name is provided, `Context` returns the union of all context types. A generic type that optionally accepts an event name and returns the indexing function argument type. ```ts filename="src/helpers.ts" {1} -import { ponder, type IndexingFunctionArgs } from "@/generated"; +import { ponder, type IndexingFunctionArgs } from "ponder:registry"; function helper(args: IndexingFunctionArgs<"Weth:Deposit">) { args; @@ -101,7 +101,7 @@ Like `Event` and `Context`, `IndexingFunctionArgs` returns the union of all inde Use the [Drizzle type helpers](https://orm.drizzle.team/docs/goodies#type-api) to create custom types for database records. ```ts filename="src/helpers.ts" {3} -import { accounts } from "../ponder.schema"; +import { accounts } from "ponder:schema"; function helper(account: typeof accounts.$inferSelect) { account; @@ -116,14 +116,14 @@ function helper(account: typeof accounts.$inferSelect) { ## Config types -The `@ponder/core` package exports a utility type for each option passed to `createConfig(){:ts}`. +The `ponder` package exports a utility type for each option passed to `createConfig(){:ts}`. ### ContractConfig The type of a contract in `createConfig(){:ts}`. ```ts filename="ponder.config.ts" {1} -import { createConfig, type ContractConfig } from "@ponder/core"; +import { createConfig, type ContractConfig } from "ponder"; import { Erc20Abi } from "./abis/Erc20Abi.ts"; const Erc20 = { @@ -145,7 +145,7 @@ export default createConfig({ The type of a network in `createConfig(){:ts}`. ```ts filename="ponder.config.ts" {1} -import { createConfig, type NetworkConfig } from "@ponder/core"; +import { createConfig, type NetworkConfig } from "ponder"; import { http } from "viem"; const mainnet = { @@ -166,7 +166,7 @@ export default createConfig({ The type of a block source in `createConfig(){:ts}`. ```ts filename="ponder.config.ts" {1} -import { createconfig, type blockconfig } from "@ponder/core"; +import { createconfig, type blockconfig } from "ponder"; const ChainlinkPriceOracle = { network: "mainnet", @@ -187,7 +187,7 @@ export default createConfig({ The type of a database in `createConfig(){:ts}`. 
```ts filename="ponder.config.ts" {1,6} -import { createConfig, type DatabaseConfig } from "@ponder/core"; +import { createConfig, type DatabaseConfig } from "ponder"; const database = { kind: "postgres", diff --git a/examples/feature-accounts/.env.example b/examples/feature-accounts/.env.example new file mode 100644 index 000000000..f7745c21c --- /dev/null +++ b/examples/feature-accounts/.env.example @@ -0,0 +1,5 @@ +# Mainnet RPC URL used for fetching blockchain data. Alchemy is recommended. +PONDER_RPC_URL_1=https://eth-mainnet.g.alchemy.com/v2/... + +# (Optional) Postgres database URL. If not provided, SQLite will be used. +DATABASE_URL= \ No newline at end of file diff --git a/examples/feature-accounts/.eslintrc.json b/examples/feature-accounts/.eslintrc.json new file mode 100644 index 000000000..359e2bbfa --- /dev/null +++ b/examples/feature-accounts/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "ponder" +} diff --git a/examples/feature-accounts/.gitignore b/examples/feature-accounts/.gitignore new file mode 100644 index 000000000..f0c7e1177 --- /dev/null +++ b/examples/feature-accounts/.gitignore @@ -0,0 +1,18 @@ +# Dependencies +/node_modules + +# Debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.pnpm-debug.log* + +# Misc +.DS_Store + +# Env files +.env*.local + +# Ponder +/generated/ +/.ponder/ diff --git a/examples/feature-accounts/package.json b/examples/feature-accounts/package.json new file mode 100644 index 000000000..137e335e5 --- /dev/null +++ b/examples/feature-accounts/package.json @@ -0,0 +1,28 @@ +{ + "name": "ponder-examples-feature-accounts", + "private": true, + "type": "module", + "scripts": { + "dev": "ponder dev", + "start": "ponder start", + "db": "ponder db", + "codegen": "ponder codegen", + "serve": "ponder serve", + "lint": "eslint .", + "typecheck": "tsc" + }, + "dependencies": { + "ponder": "workspace:*", + "hono": "^4.5.0", + "viem": "^2.21.3" + }, + "devDependencies": { + "@types/node": "^20.10.0", + "eslint": "^8.54.0", + "eslint-config-ponder": "workspace:*", + "typescript": "^5.3.2" + }, + "engines": { + "node": ">=18.14" + } +} diff --git a/examples/feature-accounts/ponder-env.d.ts b/examples/feature-accounts/ponder-env.d.ts new file mode 100644 index 000000000..b8c6a630d --- /dev/null +++ b/examples/feature-accounts/ponder-env.d.ts @@ -0,0 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + +// This file enables type checking and editor autocomplete for this Ponder project. +// After upgrading, you may find that changes have been made to this file. +// If this happens, please commit the changes. Do not manually edit this file. +// See https://ponder.sh/docs/getting-started/installation#typescript for more information. 
diff --git a/examples/feature-accounts/ponder.config.ts b/examples/feature-accounts/ponder.config.ts new file mode 100644 index 000000000..0ba6e4282 --- /dev/null +++ b/examples/feature-accounts/ponder.config.ts @@ -0,0 +1,22 @@ +import { createConfig } from "ponder"; +import { http, createPublicClient } from "viem"; + +const latestBlockMainnet = await createPublicClient({ + transport: http(process.env.PONDER_RPC_URL_1), +}).getBlock(); + +export default createConfig({ + networks: { + mainnet: { + chainId: 1, + transport: http(process.env.PONDER_RPC_URL_1), + }, + }, + accounts: { + BeaverBuilder: { + network: "mainnet", + startBlock: Number(latestBlockMainnet.number) - 100, + address: "0x95222290DD7278Aa3Ddd389Cc1E1d165CC4BAfe5", + }, + }, +}); diff --git a/examples/feature-accounts/ponder.schema.ts b/examples/feature-accounts/ponder.schema.ts new file mode 100644 index 000000000..e6e8df9ff --- /dev/null +++ b/examples/feature-accounts/ponder.schema.ts @@ -0,0 +1,7 @@ +import { onchainTable } from "ponder"; + +export const transactionEvents = onchainTable("transaction_events", (t) => ({ + to: t.hex().primaryKey(), + value: t.bigint().notNull(), + data: t.hex().notNull(), +})); diff --git a/examples/feature-accounts/src/index.ts b/examples/feature-accounts/src/index.ts new file mode 100644 index 000000000..0eea6613c --- /dev/null +++ b/examples/feature-accounts/src/index.ts @@ -0,0 +1,22 @@ +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; + +ponder.on("BeaverBuilder:transaction:from", async ({ event, context }) => { + if (event.transaction.to === null) return; + + await context.db + .insert(schema.transactionEvents) + .values({ + to: event.transaction.to, + value: event.transaction.value, + data: event.transaction.input, + }) + .onConflictDoUpdate((row) => ({ + value: row.value + event.transaction.value, + data: event.transaction.input, + })); +}); + +ponder.on("BeaverBuilder:transfer:to", async ({ event }) => { + console.log("sent", event.transfer); +}); diff --git a/examples/feature-accounts/tsconfig.json b/examples/feature-accounts/tsconfig.json new file mode 100644 index 000000000..592b9a939 --- /dev/null +++ b/examples/feature-accounts/tsconfig.json @@ -0,0 +1,26 @@ +{ + "compilerOptions": { + // Type checking + "strict": true, + "noUncheckedIndexedAccess": true, + + // Interop constraints + "verbatimModuleSyntax": false, + "esModuleInterop": true, + "isolatedModules": true, + "allowSyntheticDefaultImports": true, + "resolveJsonModule": true, + + // Language and environment + "moduleResolution": "bundler", + "module": "ESNext", + "noEmit": true, + "lib": ["ES2022"], + "target": "ES2022", + + // Skip type checking for node modules + "skipLibCheck": true + }, + "include": ["./**/*.ts"], + "exclude": ["node_modules"] +} diff --git a/examples/feature-api-functions/package.json b/examples/feature-api-functions/package.json index 436f16671..13c94384f 100644 --- a/examples/feature-api-functions/package.json +++ b/examples/feature-api-functions/package.json @@ -5,16 +5,15 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", - "generate": "drizzle-kit generate --dialect postgresql --schema ./ponder.schema.ts --out migrations", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", - "drizzle-kit": "0.25.0", "hono": "^4.5.0", + "ponder": "workspace:*", "viem": "^2.21.3" }, "devDependencies": { diff --git 
a/examples/feature-api-functions/ponder-env.d.ts b/examples/feature-api-functions/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/feature-api-functions/ponder-env.d.ts +++ b/examples/feature-api-functions/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. - -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/feature-api-functions/ponder.config.ts b/examples/feature-api-functions/ponder.config.ts index 32ae5a2ba..9d14961e7 100644 --- a/examples/feature-api-functions/ponder.config.ts +++ b/examples/feature-api-functions/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { erc20ABI } from "./abis/erc20ABI"; diff --git a/examples/feature-api-functions/ponder.schema.ts b/examples/feature-api-functions/ponder.schema.ts index 0c3d400be..67e5d1f59 100644 --- a/examples/feature-api-functions/ponder.schema.ts +++ b/examples/feature-api-functions/ponder.schema.ts @@ -1,4 +1,4 @@ -import { index, onchainTable, primaryKey } from "@ponder/core"; +import { index, onchainTable, primaryKey } from "ponder"; export const account = onchainTable("account", (p) => ({ address: p.hex().primaryKey(), diff --git a/examples/feature-api-functions/src/api/index.ts b/examples/feature-api-functions/src/api/index.ts index 9043b59e4..fbb15644c 100644 --- a/examples/feature-api-functions/src/api/index.ts +++ b/examples/feature-api-functions/src/api/index.ts @@ -1,7 +1,7 @@ -import { ponder } from "@/generated"; -import { count, desc, eq, graphql, or, replaceBigInts } from "@ponder/core"; +import { ponder } from "ponder:registry"; +import { account, transferEvent } from "ponder:schema"; +import { count, desc, eq, graphql, or, replaceBigInts } from "ponder"; import { formatEther, getAddress } from "viem"; -import { account, transferEvent } from "../../ponder.schema"; ponder.use("/graphql", graphql()); diff --git a/examples/feature-api-functions/src/index.ts b/examples/feature-api-functions/src/index.ts index 6eb45c6e3..08be07a4b 100644 --- a/examples/feature-api-functions/src/index.ts +++ b/examples/feature-api-functions/src/index.ts @@ -1,10 +1,10 @@ -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; import { account, allowance, approvalEvent, transferEvent, -} from "../ponder.schema"; +} from "ponder:schema"; ponder.on("ERC20:Transfer", async ({ event, context }) => { await context.db diff --git a/examples/feature-blocks/package.json 
b/examples/feature-blocks/package.json index 26be4aceb..70b2182d9 100644 --- a/examples/feature-blocks/package.json +++ b/examples/feature-blocks/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/feature-blocks/ponder-env.d.ts b/examples/feature-blocks/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/feature-blocks/ponder-env.d.ts +++ b/examples/feature-blocks/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. - -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/feature-blocks/ponder.config.ts b/examples/feature-blocks/ponder.config.ts index 656779c29..582c9e6e7 100644 --- a/examples/feature-blocks/ponder.config.ts +++ b/examples/feature-blocks/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; export default createConfig({ diff --git a/examples/feature-blocks/ponder.schema.ts b/examples/feature-blocks/ponder.schema.ts index 7f394f808..5d1e9f52a 100644 --- a/examples/feature-blocks/ponder.schema.ts +++ b/examples/feature-blocks/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const chainlinkPrice = onchainTable("chainlink_price", (t) => ({ timestamp: t.bigint().primaryKey(), diff --git a/examples/feature-blocks/src/index.ts b/examples/feature-blocks/src/index.ts index 6f7e15df6..32319dacb 100644 --- a/examples/feature-blocks/src/index.ts +++ b/examples/feature-blocks/src/index.ts @@ -1,6 +1,6 @@ -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; import { parseAbi } from "viem"; -import * as schema from "../ponder.schema"; ponder.on("ChainlinkPriceOracle:block", async ({ event, context }) => { const price = await context.client.readContract({ diff --git a/examples/feature-call-traces/package.json b/examples/feature-call-traces/package.json index 54c4132eb..29f93180e 100644 --- a/examples/feature-call-traces/package.json +++ b/examples/feature-call-traces/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder 
serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/feature-call-traces/ponder-env.d.ts b/examples/feature-call-traces/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/feature-call-traces/ponder-env.d.ts +++ b/examples/feature-call-traces/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. - -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/feature-call-traces/ponder.config.ts b/examples/feature-call-traces/ponder.config.ts index 022cac91c..dec67d178 100644 --- a/examples/feature-call-traces/ponder.config.ts +++ b/examples/feature-call-traces/ponder.config.ts @@ -1,5 +1,5 @@ -import { createConfig } from "@ponder/core"; -import { http, Abi, multicall3Abi } from "viem"; +import { createConfig } from "ponder"; +import { http, multicall3Abi } from "viem"; import { mainnet } from "viem/chains"; export default createConfig({ diff --git a/examples/feature-call-traces/ponder.schema.ts b/examples/feature-call-traces/ponder.schema.ts index aa6489c78..158ba4997 100644 --- a/examples/feature-call-traces/ponder.schema.ts +++ b/examples/feature-call-traces/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const multicall = onchainTable("multicall", (t) => ({ from: t.hex().primaryKey(), diff --git a/examples/feature-call-traces/src/index.ts b/examples/feature-call-traces/src/index.ts index e949d278d..36a864762 100644 --- a/examples/feature-call-traces/src/index.ts +++ b/examples/feature-call-traces/src/index.ts @@ -1,5 +1,5 @@ -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("multicall3.aggregate3()", async ({ event, context }) => { await context.db diff --git a/examples/feature-factory/package.json b/examples/feature-factory/package.json index 614780096..18b2985ab 100644 --- a/examples/feature-factory/package.json +++ b/examples/feature-factory/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "abitype": "^0.10.2", "hono": "^4.5.0", "viem": "^2.21.3" diff --git 
a/examples/feature-factory/ponder-env.d.ts b/examples/feature-factory/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/feature-factory/ponder-env.d.ts +++ b/examples/feature-factory/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. - -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/feature-factory/ponder.config.ts b/examples/feature-factory/ponder.config.ts index e317b1253..0a7a1ce36 100644 --- a/examples/feature-factory/ponder.config.ts +++ b/examples/feature-factory/ponder.config.ts @@ -1,5 +1,6 @@ -import { createConfig } from "@ponder/core"; import { parseAbiItem } from "abitype"; +import { createConfig, factory } from "ponder"; + import { http } from "viem"; import { LlamaCoreAbi } from "./abis/LlamaCoreAbi"; @@ -20,21 +21,21 @@ export default createConfig({ LlamaCore: { network: "sepolia", abi: LlamaCoreAbi, - factory: { + address: factory({ address: "0xFf5d4E226D9A3496EECE31083a8F493edd79AbEB", event: llamaFactoryEvent, parameter: "llamaCore", - }, + }), startBlock: 4121269, }, LlamaPolicy: { network: "sepolia", abi: LlamaPolicyAbi, - factory: { + address: factory({ address: "0xFf5d4E226D9A3496EECE31083a8F493edd79AbEB", event: llamaFactoryEvent, parameter: "llamaPolicy", - }, + }), startBlock: 4121269, }, }, diff --git a/examples/feature-factory/ponder.schema.ts b/examples/feature-factory/ponder.schema.ts index 2e318dbf2..8f349ba3a 100644 --- a/examples/feature-factory/ponder.schema.ts +++ b/examples/feature-factory/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const llama = onchainTable("llama", (t) => ({ id: t.text().primaryKey(), diff --git a/examples/feature-factory/src/LlamaCore.ts b/examples/feature-factory/src/LlamaCore.ts index c7a5b6797..08ad1f418 100644 --- a/examples/feature-factory/src/LlamaCore.ts +++ b/examples/feature-factory/src/LlamaCore.ts @@ -1,4 +1,4 @@ -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; ponder.on("LlamaCore:ActionCreated", async ({ event }) => { console.log( diff --git a/examples/feature-filter/package.json b/examples/feature-filter/package.json index f9ddd77f1..b6c0f0878 100644 --- a/examples/feature-filter/package.json +++ b/examples/feature-filter/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - 
"@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/feature-filter/ponder-env.d.ts b/examples/feature-filter/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/feature-filter/ponder-env.d.ts +++ b/examples/feature-filter/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. - -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/feature-filter/ponder.config.ts b/examples/feature-filter/ponder.config.ts index 379522302..177244be5 100644 --- a/examples/feature-filter/ponder.config.ts +++ b/examples/feature-filter/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { PrimitiveManagerAbi } from "./abis/PrimitiveManagerAbi"; diff --git a/examples/feature-filter/ponder.schema.ts b/examples/feature-filter/ponder.schema.ts index 17841db7b..194979eec 100644 --- a/examples/feature-filter/ponder.schema.ts +++ b/examples/feature-filter/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const swapEvent = onchainTable("swapEvent", (t) => ({ id: t.text().primaryKey(), diff --git a/examples/feature-filter/src/index.ts b/examples/feature-filter/src/index.ts index b1f6009c8..16a75be7d 100644 --- a/examples/feature-filter/src/index.ts +++ b/examples/feature-filter/src/index.ts @@ -1,5 +1,5 @@ -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("PrimitiveManager:Swap", async ({ event, context }) => { await context.db.insert(schema.swapEvent).values({ diff --git a/examples/feature-multichain/package.json b/examples/feature-multichain/package.json index d0d694bfc..9aa5fc877 100644 --- a/examples/feature-multichain/package.json +++ b/examples/feature-multichain/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/feature-multichain/ponder-env.d.ts b/examples/feature-multichain/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/feature-multichain/ponder-env.d.ts +++ b/examples/feature-multichain/ponder-env.d.ts 
@@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. - -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/feature-multichain/ponder.config.ts b/examples/feature-multichain/ponder.config.ts index e50840ed4..b66ded766 100644 --- a/examples/feature-multichain/ponder.config.ts +++ b/examples/feature-multichain/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http, createPublicClient } from "viem"; import { weth9Abi } from "./abis/weth9Abi"; diff --git a/examples/feature-multichain/ponder.schema.ts b/examples/feature-multichain/ponder.schema.ts index a24bd8c45..dc0d61b80 100644 --- a/examples/feature-multichain/ponder.schema.ts +++ b/examples/feature-multichain/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const account = onchainTable("account", (t) => ({ address: t.hex().primaryKey(), diff --git a/examples/feature-multichain/src/index.ts b/examples/feature-multichain/src/index.ts index ef5b90bbf..191095578 100644 --- a/examples/feature-multichain/src/index.ts +++ b/examples/feature-multichain/src/index.ts @@ -1,5 +1,5 @@ -import { ponder } from "@/generated"; -import { account } from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import { account } from "ponder:schema"; ponder.on("weth9:Deposit", async ({ event, context }) => { await context.db diff --git a/examples/feature-proxy/package.json b/examples/feature-proxy/package.json index 6786bd479..ed0910d0b 100644 --- a/examples/feature-proxy/package.json +++ b/examples/feature-proxy/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/feature-proxy/ponder-env.d.ts b/examples/feature-proxy/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/feature-proxy/ponder-env.d.ts +++ b/examples/feature-proxy/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. 
// After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. - -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/feature-proxy/ponder.config.ts b/examples/feature-proxy/ponder.config.ts index 0fe0cb356..e90469b87 100644 --- a/examples/feature-proxy/ponder.config.ts +++ b/examples/feature-proxy/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig, mergeAbis } from "@ponder/core"; +import { createConfig, mergeAbis } from "ponder"; import { http } from "viem"; import { RouterImplAbi } from "./abis/RouterImplAbi"; diff --git a/examples/feature-proxy/ponder.schema.ts b/examples/feature-proxy/ponder.schema.ts index 08277d05a..3307af9a0 100644 --- a/examples/feature-proxy/ponder.schema.ts +++ b/examples/feature-proxy/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const liquidationEvent = onchainTable("liquidation_event", (t) => ({ id: t.text().primaryKey(), diff --git a/examples/feature-proxy/src/index.ts b/examples/feature-proxy/src/index.ts index d070aad8f..e94fad377 100644 --- a/examples/feature-proxy/src/index.ts +++ b/examples/feature-proxy/src/index.ts @@ -1,5 +1,5 @@ -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("AstariaRouter:Liquidation", async ({ event, context }) => { await context.db diff --git a/examples/feature-read-contract/package.json b/examples/feature-read-contract/package.json index 642ff39e7..a7b40bdcc 100644 --- a/examples/feature-read-contract/package.json +++ b/examples/feature-read-contract/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/feature-read-contract/ponder-env.d.ts b/examples/feature-read-contract/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/feature-read-contract/ponder-env.d.ts +++ b/examples/feature-read-contract/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. 
- -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/feature-read-contract/ponder.config.ts b/examples/feature-read-contract/ponder.config.ts index 5689c42fb..80b33d015 100644 --- a/examples/feature-read-contract/ponder.config.ts +++ b/examples/feature-read-contract/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { FileStoreAbi } from "./abis/FileStoreAbi"; diff --git a/examples/feature-read-contract/ponder.schema.ts b/examples/feature-read-contract/ponder.schema.ts index 13bfb29bc..aa0d95965 100644 --- a/examples/feature-read-contract/ponder.schema.ts +++ b/examples/feature-read-contract/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const file = onchainTable("file", (t) => ({ name: t.text().primaryKey(), diff --git a/examples/feature-read-contract/src/FileStore.ts b/examples/feature-read-contract/src/FileStore.ts index 30f45eea5..ae2b78def 100644 --- a/examples/feature-read-contract/src/FileStore.ts +++ b/examples/feature-read-contract/src/FileStore.ts @@ -1,8 +1,8 @@ -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; import type { Hex } from "viem"; import { fromHex } from "viem"; import { FileStoreFrontendAbi } from "../abis/FileStoreFrontendAbi"; -import * as schema from "../ponder.schema"; const parseJson = (encodedJson: string, defaultValue: any = null) => { try { diff --git a/examples/project-friendtech/package.json b/examples/project-friendtech/package.json index abe25004e..38f7f45b5 100644 --- a/examples/project-friendtech/package.json +++ b/examples/project-friendtech/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/project-friendtech/ponder-env.d.ts b/examples/project-friendtech/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/project-friendtech/ponder-env.d.ts +++ b/examples/project-friendtech/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. 
- -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/project-friendtech/ponder.config.ts b/examples/project-friendtech/ponder.config.ts index 6e14a3bad..fab8dcff0 100644 --- a/examples/project-friendtech/ponder.config.ts +++ b/examples/project-friendtech/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { FriendtechSharesV1Abi } from "./abis/FriendtechSharesV1Abi"; diff --git a/examples/project-friendtech/ponder.schema.ts b/examples/project-friendtech/ponder.schema.ts index 5ca93ad4a..1fdd9212f 100644 --- a/examples/project-friendtech/ponder.schema.ts +++ b/examples/project-friendtech/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainEnum, onchainTable, primaryKey } from "@ponder/core"; +import { onchainEnum, onchainTable, primaryKey } from "ponder"; export const tradeType = onchainEnum("trade_type", ["BUY", "SELL"]); diff --git a/examples/project-friendtech/src/FriendtechSharesV1.ts b/examples/project-friendtech/src/FriendtechSharesV1.ts index f28be4801..ff2096c40 100644 --- a/examples/project-friendtech/src/FriendtechSharesV1.ts +++ b/examples/project-friendtech/src/FriendtechSharesV1.ts @@ -1,5 +1,5 @@ -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("FriendtechSharesV1:Trade", async ({ event, context }) => { // Skip phantom events diff --git a/examples/project-uniswap-v3-flash/package.json b/examples/project-uniswap-v3-flash/package.json index 8d2ddd162..2728985c4 100644 --- a/examples/project-uniswap-v3-flash/package.json +++ b/examples/project-uniswap-v3-flash/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/project-uniswap-v3-flash/ponder-env.d.ts b/examples/project-uniswap-v3-flash/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/project-uniswap-v3-flash/ponder-env.d.ts +++ b/examples/project-uniswap-v3-flash/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. 
- -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/project-uniswap-v3-flash/ponder.config.ts b/examples/project-uniswap-v3-flash/ponder.config.ts index 0b4b80121..8a958916a 100644 --- a/examples/project-uniswap-v3-flash/ponder.config.ts +++ b/examples/project-uniswap-v3-flash/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig, factory } from "ponder"; import { http, getAbiItem } from "viem"; import { UniswapV3FactoryAbi } from "./abis/UniswapV3FactoryAbi"; @@ -15,11 +15,11 @@ export default createConfig({ UniswapV3Pool: { network: "mainnet", abi: UniswapV3PoolAbi, - factory: { + address: factory({ address: "0x1F98431c8aD98523631AE4a59f267346ea31F984", event: getAbiItem({ abi: UniswapV3FactoryAbi, name: "PoolCreated" }), parameter: "pool", - }, + }), startBlock: 12369621, filter: { event: "Flash", diff --git a/examples/project-uniswap-v3-flash/ponder.schema.ts b/examples/project-uniswap-v3-flash/ponder.schema.ts index 915d42841..14464ae19 100644 --- a/examples/project-uniswap-v3-flash/ponder.schema.ts +++ b/examples/project-uniswap-v3-flash/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const tokenPaid = onchainTable("token_paid", (t) => ({ address: t.hex().primaryKey(), diff --git a/examples/project-uniswap-v3-flash/src/index.ts b/examples/project-uniswap-v3-flash/src/index.ts index e4d6cb127..ff6608ae5 100644 --- a/examples/project-uniswap-v3-flash/src/index.ts +++ b/examples/project-uniswap-v3-flash/src/index.ts @@ -1,5 +1,5 @@ -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("UniswapV3Pool:Flash", async ({ event, context }) => { const poolAddress = event.log.address; diff --git a/examples/reference-erc1155/package.json b/examples/reference-erc1155/package.json index db8025375..ada04aa72 100644 --- a/examples/reference-erc1155/package.json +++ b/examples/reference-erc1155/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/reference-erc1155/ponder-env.d.ts b/examples/reference-erc1155/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/reference-erc1155/ponder-env.d.ts +++ b/examples/reference-erc1155/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. 
// If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. - -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/reference-erc1155/ponder.config.ts b/examples/reference-erc1155/ponder.config.ts index 601944165..4107eadb7 100644 --- a/examples/reference-erc1155/ponder.config.ts +++ b/examples/reference-erc1155/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { erc1155ABI } from "./abis/erc1155Abi"; diff --git a/examples/reference-erc1155/ponder.schema.ts b/examples/reference-erc1155/ponder.schema.ts index 649590ac8..910317235 100644 --- a/examples/reference-erc1155/ponder.schema.ts +++ b/examples/reference-erc1155/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable, primaryKey } from "@ponder/core"; +import { onchainTable, primaryKey } from "ponder"; export const account = onchainTable("account", (t) => ({ address: t.hex().primaryKey(), diff --git a/examples/reference-erc1155/src/index.ts b/examples/reference-erc1155/src/index.ts index 4e14cc280..2ffdde6be 100644 --- a/examples/reference-erc1155/src/index.ts +++ b/examples/reference-erc1155/src/index.ts @@ -1,5 +1,5 @@ -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("ERC1155:TransferSingle", async ({ event, context }) => { // Create an Account for the sender, or update the balance if it already exists. diff --git a/examples/reference-erc20/package.json b/examples/reference-erc20/package.json index b5819ad48..303fa830a 100644 --- a/examples/reference-erc20/package.json +++ b/examples/reference-erc20/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "drizzle-kit": "0.22.8", "hono": "^4.5.0", "viem": "^2.21.3" diff --git a/examples/reference-erc20/ponder-env.d.ts b/examples/reference-erc20/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/reference-erc20/ponder-env.d.ts +++ b/examples/reference-erc20/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. 
- -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/reference-erc20/ponder.config.ts b/examples/reference-erc20/ponder.config.ts index 32ae5a2ba..9d14961e7 100644 --- a/examples/reference-erc20/ponder.config.ts +++ b/examples/reference-erc20/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { erc20ABI } from "./abis/erc20ABI"; diff --git a/examples/reference-erc20/ponder.schema.ts b/examples/reference-erc20/ponder.schema.ts index 10013b068..d5cfe2118 100644 --- a/examples/reference-erc20/ponder.schema.ts +++ b/examples/reference-erc20/ponder.schema.ts @@ -1,4 +1,4 @@ -import { index, onchainTable, primaryKey, relations } from "@ponder/core"; +import { index, onchainTable, primaryKey, relations } from "ponder"; export const account = onchainTable("account", (t) => ({ address: t.hex().primaryKey(), diff --git a/examples/reference-erc20/src/index.ts b/examples/reference-erc20/src/index.ts index 8862bf914..1cb307ed6 100644 --- a/examples/reference-erc20/src/index.ts +++ b/examples/reference-erc20/src/index.ts @@ -1,10 +1,10 @@ -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; import { account, allowance, approvalEvent, transferEvent, -} from "../ponder.schema"; +} from "ponder:schema"; ponder.on("ERC20:Transfer", async ({ event, context }) => { await context.db diff --git a/examples/reference-erc4626/package.json b/examples/reference-erc4626/package.json index 247eee09d..83fd6838d 100644 --- a/examples/reference-erc4626/package.json +++ b/examples/reference-erc4626/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/reference-erc4626/ponder-env.d.ts b/examples/reference-erc4626/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/reference-erc4626/ponder-env.d.ts +++ b/examples/reference-erc4626/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. 
- -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/reference-erc4626/ponder.config.ts b/examples/reference-erc4626/ponder.config.ts index 71720ba5c..04d07b465 100644 --- a/examples/reference-erc4626/ponder.config.ts +++ b/examples/reference-erc4626/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { erc4626ABI } from "./abis/erc4626ABI"; diff --git a/examples/reference-erc4626/ponder.schema.ts b/examples/reference-erc4626/ponder.schema.ts index 01f2dfa52..73e875533 100644 --- a/examples/reference-erc4626/ponder.schema.ts +++ b/examples/reference-erc4626/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable, primaryKey } from "@ponder/core"; +import { onchainTable, primaryKey } from "ponder"; export const account = onchainTable("account", (t) => ({ address: t.hex().primaryKey(), diff --git a/examples/reference-erc4626/src/index.ts b/examples/reference-erc4626/src/index.ts index ccca4bed2..8d6fe7e14 100644 --- a/examples/reference-erc4626/src/index.ts +++ b/examples/reference-erc4626/src/index.ts @@ -1,5 +1,5 @@ -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("ERC4626:Transfer", async ({ event, context }) => { // Create an Account for the sender, or update the balance if it already exists. diff --git a/examples/reference-erc721/package.json b/examples/reference-erc721/package.json index 36b152aea..3feebec5e 100644 --- a/examples/reference-erc721/package.json +++ b/examples/reference-erc721/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "serve": "ponder serve", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/reference-erc721/ponder-env.d.ts b/examples/reference-erc721/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/reference-erc721/ponder-env.d.ts +++ b/examples/reference-erc721/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. 
- -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/reference-erc721/ponder.config.ts b/examples/reference-erc721/ponder.config.ts index 48b5f92c8..82526c030 100644 --- a/examples/reference-erc721/ponder.config.ts +++ b/examples/reference-erc721/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { erc721ABI } from "./abis/erc721ABI"; diff --git a/examples/reference-erc721/ponder.schema.ts b/examples/reference-erc721/ponder.schema.ts index 6c084093e..313f276e0 100644 --- a/examples/reference-erc721/ponder.schema.ts +++ b/examples/reference-erc721/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const account = onchainTable("account", (t) => ({ address: t.hex().primaryKey(), diff --git a/examples/reference-erc721/src/index.ts b/examples/reference-erc721/src/index.ts index 36bf44af8..35240e40b 100644 --- a/examples/reference-erc721/src/index.ts +++ b/examples/reference-erc721/src/index.ts @@ -1,5 +1,5 @@ -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("ERC721:Transfer", async ({ event, context }) => { // Create an Account for the sender, or update the balance if it already exists. diff --git a/examples/with-client/ponder/package.json b/examples/with-client/ponder/package.json index 0c70fdab7..401bd4293 100644 --- a/examples/with-client/ponder/package.json +++ b/examples/with-client/ponder/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { "@hono/trpc-server": "^0.3.2", - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "@trpc/server": "^10.45.2", "hono": "^4.5.0", "viem": "^2.21.3", diff --git a/examples/with-client/ponder/ponder-env.d.ts b/examples/with-client/ponder/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/with-client/ponder/ponder-env.d.ts +++ b/examples/with-client/ponder/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. 
- -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/with-client/ponder/ponder.config.ts b/examples/with-client/ponder/ponder.config.ts index 32ae5a2ba..9d14961e7 100644 --- a/examples/with-client/ponder/ponder.config.ts +++ b/examples/with-client/ponder/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { erc20ABI } from "./abis/erc20ABI"; diff --git a/examples/with-client/ponder/ponder.schema.ts b/examples/with-client/ponder/ponder.schema.ts index 9ce5a1106..f36486fe6 100644 --- a/examples/with-client/ponder/ponder.schema.ts +++ b/examples/with-client/ponder/ponder.schema.ts @@ -1,4 +1,4 @@ -import { index, onchainTable, primaryKey } from "@ponder/core"; +import { index, onchainTable, primaryKey } from "ponder"; export const account = onchainTable("account", (t) => ({ address: t.hex().primaryKey(), diff --git a/examples/with-client/ponder/src/api/index.ts b/examples/with-client/ponder/src/api/index.ts index 39d1f7954..2ab49f1d7 100644 --- a/examples/with-client/ponder/src/api/index.ts +++ b/examples/with-client/ponder/src/api/index.ts @@ -1,10 +1,10 @@ -import { type ApiContext, ponder } from "@/generated"; +import { type ApiContext, ponder } from "ponder:registry"; +import schema from "ponder:schema"; import { trpcServer } from "@hono/trpc-server"; -import { eq } from "@ponder/core"; import { initTRPC } from "@trpc/server"; +import { eq } from "ponder"; import type { Address } from "viem"; import { z } from "zod"; -import * as schema from "../../ponder.schema"; const t = initTRPC.context().create(); diff --git a/examples/with-client/ponder/src/index.ts b/examples/with-client/ponder/src/index.ts index 8862bf914..1cb307ed6 100644 --- a/examples/with-client/ponder/src/index.ts +++ b/examples/with-client/ponder/src/index.ts @@ -1,10 +1,10 @@ -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; import { account, allowance, approvalEvent, transferEvent, -} from "../ponder.schema"; +} from "ponder:schema"; ponder.on("ERC20:Transfer", async ({ event, context }) => { await context.db diff --git a/examples/with-foundry/ponder/package.json b/examples/with-foundry/ponder/package.json index 138b2fa88..a4b286901 100644 --- a/examples/with-foundry/ponder/package.json +++ b/examples/with-foundry/ponder/package.json @@ -10,7 +10,7 @@ "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/with-foundry/ponder/ponder-env.d.ts b/examples/with-foundry/ponder/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/with-foundry/ponder/ponder-env.d.ts +++ b/examples/with-foundry/ponder/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables 
type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. - -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/with-foundry/ponder/ponder.config.ts b/examples/with-foundry/ponder/ponder.config.ts index e9be8d2e1..218c7ea0a 100644 --- a/examples/with-foundry/ponder/ponder.config.ts +++ b/examples/with-foundry/ponder/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http, getAddress, hexToNumber } from "viem"; import type { Hex } from "viem"; import { counterABI } from "../abis/CounterAbi"; diff --git a/examples/with-foundry/ponder/ponder.schema.ts b/examples/with-foundry/ponder/ponder.schema.ts index 9202a5b26..913b051d5 100644 --- a/examples/with-foundry/ponder/ponder.schema.ts +++ b/examples/with-foundry/ponder/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const counter = onchainTable("counter", (t) => ({ value: t.integer().primaryKey(), diff --git a/examples/with-foundry/ponder/src/Counter.ts b/examples/with-foundry/ponder/src/Counter.ts index 769d64b35..e28710988 100644 --- a/examples/with-foundry/ponder/src/Counter.ts +++ b/examples/with-foundry/ponder/src/Counter.ts @@ -1,5 +1,5 @@ -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("Counter:Incremented", async ({ event, context }) => { await context.db.insert(schema.counter).values({ diff --git a/examples/with-nextjs/ponder/package.json b/examples/with-nextjs/ponder/package.json index 3468e1a95..3d67c5296 100644 --- a/examples/with-nextjs/ponder/package.json +++ b/examples/with-nextjs/ponder/package.json @@ -5,12 +5,13 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/examples/with-nextjs/ponder/ponder-env.d.ts b/examples/with-nextjs/ponder/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/with-nextjs/ponder/ponder-env.d.ts +++ b/examples/with-nextjs/ponder/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. 
// See https://ponder.sh/docs/getting-started/installation#typescript for more information. - -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} diff --git a/examples/with-nextjs/ponder/ponder.config.ts b/examples/with-nextjs/ponder/ponder.config.ts index 0f71c4e9f..393154522 100644 --- a/examples/with-nextjs/ponder/ponder.config.ts +++ b/examples/with-nextjs/ponder/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http, createPublicClient } from "viem"; import { Weth9Abi } from "./abis/Weth9Abi"; diff --git a/examples/with-nextjs/ponder/ponder.schema.ts b/examples/with-nextjs/ponder/ponder.schema.ts index a2e13de2d..8c2e8ebdb 100644 --- a/examples/with-nextjs/ponder/ponder.schema.ts +++ b/examples/with-nextjs/ponder/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const depositEvent = onchainTable("deposit_event", (t) => ({ id: t.text().primaryKey(), diff --git a/examples/with-nextjs/ponder/src/index.ts b/examples/with-nextjs/ponder/src/index.ts index f48a9e2ca..d94ddc008 100644 --- a/examples/with-nextjs/ponder/src/index.ts +++ b/examples/with-nextjs/ponder/src/index.ts @@ -1,5 +1,5 @@ -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("WETH:Deposit", async ({ event, context }) => { await context.db.insert(schema.depositEvent).values({ diff --git a/examples/with-trpc/ponder/package.json b/examples/with-trpc/ponder/package.json index 0c70fdab7..401bd4293 100644 --- a/examples/with-trpc/ponder/package.json +++ b/examples/with-trpc/ponder/package.json @@ -5,13 +5,14 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { "@hono/trpc-server": "^0.3.2", - "@ponder/core": "workspace:*", + "ponder": "workspace:*", "@trpc/server": "^10.45.2", "hono": "^4.5.0", "viem": "^2.21.3", diff --git a/examples/with-trpc/ponder/ponder-env.d.ts b/examples/with-trpc/ponder/ponder-env.d.ts index e7f300973..b8c6a630d 100644 --- a/examples/with-trpc/ponder/ponder-env.d.ts +++ b/examples/with-trpc/ponder/ponder-env.d.ts @@ -1,27 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. 
- -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry<config, schema>; - - export type EventNames = Virtual.EventNames<config>; - export type Event<name extends EventNames = EventNames> = Virtual.Event< - config, - name - >; - export type Context<name extends EventNames = EventNames> = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext<schema>; - export type IndexingFunctionArgs<name extends EventNames = EventNames> = - Virtual.IndexingFunctionArgs<config, schema, name>; -} diff --git a/examples/with-trpc/ponder/ponder.config.ts b/examples/with-trpc/ponder/ponder.config.ts index 32ae5a2ba..9d14961e7 100644 --- a/examples/with-trpc/ponder/ponder.config.ts +++ b/examples/with-trpc/ponder/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { erc20ABI } from "./abis/erc20ABI"; diff --git a/examples/with-trpc/ponder/ponder.schema.ts b/examples/with-trpc/ponder/ponder.schema.ts index 9ce5a1106..f36486fe6 100644 --- a/examples/with-trpc/ponder/ponder.schema.ts +++ b/examples/with-trpc/ponder/ponder.schema.ts @@ -1,4 +1,4 @@ -import { index, onchainTable, primaryKey } from "@ponder/core"; +import { index, onchainTable, primaryKey } from "ponder"; export const account = onchainTable("account", (t) => ({ address: t.hex().primaryKey(), diff --git a/examples/with-trpc/ponder/src/api/index.ts b/examples/with-trpc/ponder/src/api/index.ts index 39d1f7954..2ab49f1d7 100644 --- a/examples/with-trpc/ponder/src/api/index.ts +++ b/examples/with-trpc/ponder/src/api/index.ts @@ -1,10 +1,10 @@ -import { type ApiContext, ponder } from "@/generated"; +import { type ApiContext, ponder } from "ponder:registry"; +import schema from "ponder:schema"; import { trpcServer } from "@hono/trpc-server"; -import { eq } from "@ponder/core"; import { initTRPC } from "@trpc/server"; +import { eq } from "ponder"; import type { Address } from "viem"; import { z } from "zod"; -import * as schema from "../../ponder.schema"; const t = initTRPC.context<ApiContext>().create(); diff --git a/examples/with-trpc/ponder/src/index.ts b/examples/with-trpc/ponder/src/index.ts index 8862bf914..1cb307ed6 100644 --- a/examples/with-trpc/ponder/src/index.ts +++ b/examples/with-trpc/ponder/src/index.ts @@ -1,10 +1,10 @@ -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; import { account, allowance, approvalEvent, transferEvent, -} from "../ponder.schema"; +} from "ponder:schema"; ponder.on("ERC20:Transfer", async ({ event, context }) => { await context.db diff --git a/packages/client/dist/index.d.ts b/packages/client/dist/index.d.ts new file mode 100644 index 000000000..1d90ecb31 --- /dev/null +++ b/packages/client/dist/index.d.ts @@ -0,0 +1,14 @@ +import { PgRemoteDatabase } from 'drizzle-orm/pg-proxy'; + +type Schema = { + [name: string]: unknown; +}; +type Client<schema extends Schema = Schema> = { + db: PgRemoteDatabase<schema>; +}; +declare const createClient: <schema extends Schema>({ url, schema, }: { + url: string; + schema: schema; +}) => Client<schema>; + +export { type Client, createClient }; diff --git a/packages/core/CHANGELOG.md b/packages/core/CHANGELOG.md index 242985fc4..06d30e3fd 100644 --- a/packages/core/CHANGELOG.md +++ b/packages/core/CHANGELOG.md @@ -1347,7 +1347,7 @@ ### Patch Changes -- [#270](https://github.com/0xOlias/ponder/pull/270) [`9919db8`](https://github.com/0xOlias/ponder/commit/9919db807e546d220d92706f00910afaa4424ea2) Thanks [@0xOlias](https://github.com/0xOlias)!
- Fixed a bug where the server would crash if no event handlers were registered in a file that had `import { ponder } from "@/generated"` +- [#270](https://github.com/0xOlias/ponder/pull/270) [`9919db8`](https://github.com/0xOlias/ponder/commit/9919db807e546d220d92706f00910afaa4424ea2) Thanks [@0xOlias](https://github.com/0xOlias)! - Fixed a bug where the server would crash if no event handlers were registered in a file that had `import { ponder } from "ponder:registry"` ## 0.0.75 @@ -1575,7 +1575,7 @@ - [#151](https://github.com/0xOlias/ponder/pull/151) [`ace6a36`](https://github.com/0xOlias/ponder/commit/ace6a3664c2e1354701e2225d0f5c92c3eae9a28) Thanks [@0xOlias](https://github.com/0xOlias)! - Added support for a "setup" event which is processed before all log events. The "setup" event handler argument only includes `context` (no `event` property). Example: ```ts - import { ponder } from "@/generated"; + import { ponder } from "ponder:registry"; ponder.on("setup", async ({ context }) => { const { MyEntity } = context.entities; @@ -1683,18 +1683,18 @@ - [#118](https://github.com/0xOlias/ponder/pull/118) [`84b4ca0`](https://github.com/0xOlias/ponder/commit/84b4ca0b7e3b4e73ff6daa8c317b48a22b4ca652) Thanks [@0xOlias](https://github.com/0xOlias)! - Updated package exports to include cjs and esm -- [#118](https://github.com/0xOlias/ponder/pull/118) [`84b4ca0`](https://github.com/0xOlias/ponder/commit/84b4ca0b7e3b4e73ff6daa8c317b48a22b4ca652) Thanks [@0xOlias](https://github.com/0xOlias)! - Added support for a path alias `@/generated` in Ponder project `src` files. +- [#118](https://github.com/0xOlias/ponder/pull/118) [`84b4ca0`](https://github.com/0xOlias/ponder/commit/84b4ca0b7e3b4e73ff6daa8c317b48a22b4ca652) Thanks [@0xOlias](https://github.com/0xOlias)! - Added support for a path alias `ponder:registry` in Ponder project `src` files. ```ts // src/SomeContract.ts - import { ponder } from "@/generated"; + import { ponder } from "ponder:registry"; ponder.on(...) ``` ```ts // src/nested/AnotherContract.ts - import { ponder } from "@/generated"; + import { ponder } from "ponder:registry"; ponder.on(...) 
diff --git a/packages/core/README.md b/packages/core/README.md index 1b71bc618..55bc9b214 100644 --- a/packages/core/README.md +++ b/packages/core/README.md @@ -62,7 +62,7 @@ Ponder fetches event logs for the contracts added to `ponder.config.ts`, and pas ```ts // ponder.config.ts -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { BaseRegistrarAbi } from "./abis/BaseRegistrar"; @@ -92,7 +92,7 @@ The `ponder.schema.ts` file contains the database schema, and defines the shape ```ts // ponder.schema.ts -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const ensName = onchainTable("ens_name", (t) => ({ name: p.text().primaryKey(), @@ -108,8 +108,8 @@ Files in the `src/` directory contain **indexing functions**, which are TypeScri ```ts // src/BaseRegistrar.ts -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("BaseRegistrar:NameRegistered", async ({ event, context }) => { const { name, owner } = event.params; @@ -167,7 +167,7 @@ If you're interested in contributing to Ponder, please read the [contribution gu ## Packages -- `@ponder/core` +- `ponder` - `@ponder/utils` - `create-ponder` - `eslint-config-ponder` @@ -180,7 +180,7 @@ Ponder is MIT-licensed open-source software. [ci-url]: https://github.com/ponder-sh/ponder/actions/workflows/main.yml [tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=Chat&url=https%3A%2F%2Fmogyo.ro%2Fquart-apis%2Ftgmembercount%3Fchat_id%3Dponder_sh [tg-url]: https://t.me/ponder_sh -[license-badge]: https://img.shields.io/npm/l/@ponder/core?label=License +[license-badge]: https://img.shields.io/npm/l/ponder?label=License [license-url]: https://github.com/ponder-sh/ponder/blob/main/LICENSE -[version-badge]: https://img.shields.io/npm/v/@ponder/core +[version-badge]: https://img.shields.io/npm/v/ponder [version-url]: https://github.com/ponder-sh/ponder/releases diff --git a/packages/core/package.json b/packages/core/package.json index 92b928258..27b6e6b07 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -1,5 +1,5 @@ { - "name": "@ponder/core", + "name": "ponder", "version": "0.7.17", "description": "An open-source framework for crypto application backends", "license": "MIT", @@ -23,8 +23,13 @@ "types": "./dist/index.d.ts", "typings": "./dist/index.d.ts", "exports": { - "types": "./dist/index.d.ts", - "import": "./dist/index.js" + ".": { + "types": "./dist/index.d.ts", + "import": "./dist/index.js" + }, + "./virtual": { + "types": "./src/types.d.ts" + } }, "scripts": { "build": "tsup", diff --git a/packages/core/src/_test/constants.ts b/packages/core/src/_test/constants.ts index f7ca3d2bf..6d5adfcfc 100644 --- a/packages/core/src/_test/constants.ts +++ b/packages/core/src/_test/constants.ts @@ -6,9 +6,3 @@ export const ACCOUNTS = [ // Named accounts export const [ALICE, BOB] = ACCOUNTS; - -// Deployed contract addresses. 
-export const CONTRACTS = { - erc20Address: "0x5fbdb2315678afecb367f032d93f642f64180aa3", - factoryAddress: "0xe7f1725e7734ce288f8367e1bb143e90bb3f0512", -} as const; diff --git a/packages/core/src/_test/e2e/erc20/erc20.test.ts b/packages/core/src/_test/e2e/erc20/erc20.test.ts index c5067eccc..51af3a904 100644 --- a/packages/core/src/_test/e2e/erc20/erc20.test.ts +++ b/packages/core/src/_test/e2e/erc20/erc20.test.ts @@ -1,11 +1,11 @@ import path from "node:path"; -import { ALICE, BOB } from "@/_test/constants.js"; +import { ALICE } from "@/_test/constants.js"; import { setupAnvil, setupCommon, setupIsolatedDatabase, } from "@/_test/setup.js"; -import { simulate } from "@/_test/simulate.js"; +import { deployErc20, mintErc20 } from "@/_test/simulate.js"; import { getFreePort, postGraphql, @@ -13,9 +13,8 @@ import { } from "@/_test/utils.js"; import { serve } from "@/bin/commands/serve.js"; import { start } from "@/bin/commands/start.js"; -import { range } from "@/utils/range.js"; import { rimrafSync } from "rimraf"; -import { zeroAddress } from "viem"; +import { parseEther, zeroAddress } from "viem"; import { beforeEach, describe, expect, test } from "vitest"; const rootDir = path.join(".", "src", "_test", "e2e", "erc20"); @@ -29,6 +28,7 @@ beforeEach(setupAnvil); beforeEach(setupIsolatedDatabase); const cliOptions = { + schema: "public", root: rootDir, config: "ponder.config.ts", logLevel: "error", @@ -37,7 +37,7 @@ const cliOptions = { test( "erc20", - async (context) => { + async () => { const port = await getFreePort(); const cleanup = await start({ @@ -48,12 +48,16 @@ test( }, }); - await simulate({ - erc20Address: context.erc20.address, - factoryAddress: context.factory.address, + const { address } = await deployErc20({ sender: ALICE }); + + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, }); - await waitForIndexedBlock(port, "mainnet", 8); + await waitForIndexedBlock(port, "mainnet", 2); const response = await postGraphql( port, @@ -66,22 +70,18 @@ test( } `, ); - expect(response.status).toBe(200); const body = (await response.json()) as any; + expect(body.errors).toBe(undefined); const accounts = body.data.accounts.items; expect(accounts[0]).toMatchObject({ address: zeroAddress, - balance: (-2 * 10 ** 18).toString(), + balance: (-1 * 10 ** 18).toString(), }); expect(accounts[1]).toMatchObject({ - address: BOB.toLowerCase(), - balance: (2 * 10 ** 18).toString(), - }); - expect(accounts[2]).toMatchObject({ address: ALICE.toLowerCase(), - balance: "0", + balance: (10 ** 18).toString(), }); await cleanup(); @@ -93,7 +93,7 @@ const isPglite = !!process.env.DATABASE_URL; // Fix this once it's easier to have per-command kill functions in Ponder.ts. 
describe.skipIf(isPglite)("postgres database", () => { - test.todo("ponder serve", async (context) => { + test.todo("ponder serve", async () => { const startPort = await getFreePort(); const cleanupStart = await start({ @@ -104,13 +104,14 @@ describe.skipIf(isPglite)("postgres database", () => { }, }); - for (const _ in range(0, 3)) { - await simulate({ - erc20Address: context.erc20.address, - factoryAddress: context.factory.address, - }); - } + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); const servePort = await getFreePort(); const cleanupServe = await serve({ @@ -141,15 +142,11 @@ describe.skipIf(isPglite)("postgres database", () => { expect(accounts).toHaveLength(3); expect(accounts[0]).toMatchObject({ address: zeroAddress, - balance: (-4 * 10 ** 18).toString(), + balance: (-1 * 10 ** 18).toString(), }); expect(accounts[1]).toMatchObject({ - address: BOB.toLowerCase(), - balance: (4 * 10 ** 18).toString(), - }); - expect(accounts[2]).toMatchObject({ address: ALICE.toLowerCase(), - balance: "0", + balance: (10 ** 18).toString(), }); await cleanupServe(); diff --git a/packages/core/src/_test/e2e/erc20/ponder.config.ts b/packages/core/src/_test/e2e/erc20/ponder.config.ts index ce217d77e..dc014fde4 100644 --- a/packages/core/src/_test/e2e/erc20/ponder.config.ts +++ b/packages/core/src/_test/e2e/erc20/ponder.config.ts @@ -1,7 +1,5 @@ import { http } from "viem"; - import { createConfig } from "../../../config/config.js"; -import { CONTRACTS } from "../../constants.js"; import { erc20ABI } from "../../generated.js"; const poolId = Number(process.env.VITEST_POOL_ID ?? 1); @@ -29,11 +27,7 @@ export default createConfig({ Erc20: { network: "mainnet", abi: erc20ABI, - address: CONTRACTS.erc20Address, - filter: { - event: - "Transfer(address indexed from, address indexed to, uint256 amount)", - }, + address: "0x5fbdb2315678afecb367f032d93f642f64180aa3", }, }, }); diff --git a/packages/core/src/_test/e2e/erc20/src/api/index.ts b/packages/core/src/_test/e2e/erc20/src/api/index.ts index d300a24f9..e03167573 100644 --- a/packages/core/src/_test/e2e/erc20/src/api/index.ts +++ b/packages/core/src/_test/e2e/erc20/src/api/index.ts @@ -1,11 +1,4 @@ -// @ts-ignore -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; import { graphql } from "@/index.js"; -// biome-ignore lint/suspicious/noRedeclare: :) -declare const ponder: import("@/index.js").Virtual.Registry< - typeof import("../../ponder.config.js").default, - typeof import("../../ponder.schema.js") ->; - ponder.use("/graphql", graphql()); diff --git a/packages/core/src/_test/e2e/erc20/src/index.ts b/packages/core/src/_test/e2e/erc20/src/index.ts index 82d3d7d61..37cb0e751 100644 --- a/packages/core/src/_test/e2e/erc20/src/index.ts +++ b/packages/core/src/_test/e2e/erc20/src/index.ts @@ -1,13 +1,17 @@ -// @ts-ignore -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema.js"; - -// biome-ignore lint/suspicious/noRedeclare: :) declare const ponder: import("@/index.js").Virtual.Registry< typeof import("../ponder.config.js").default, typeof import("../ponder.schema.js") >; +declare const schema: typeof import("../ponder.schema.js"); + +// @ts-ignore +// biome-ignore lint/suspicious/noRedeclare: +import { ponder } from "ponder:registry"; +// @ts-ignore +// biome-ignore lint/suspicious/noRedeclare: +import schema from "ponder:schema"; + ponder.on( "Erc20:Transfer(address indexed from, 
address indexed to, uint256 amount)", async ({ event, context }) => { diff --git a/packages/core/src/_test/e2e/factory/factory.test.ts b/packages/core/src/_test/e2e/factory/factory.test.ts index 9f542eb9b..1c6b4303e 100644 --- a/packages/core/src/_test/e2e/factory/factory.test.ts +++ b/packages/core/src/_test/e2e/factory/factory.test.ts @@ -5,7 +5,9 @@ import { setupCommon, setupIsolatedDatabase, } from "@/_test/setup.js"; -import { simulatePairSwap } from "@/_test/simulate.js"; +import { deployFactory } from "@/_test/simulate.js"; +import { createPair } from "@/_test/simulate.js"; +import { swapPair } from "@/_test/simulate.js"; import { getFreePort, postGraphql, @@ -26,6 +28,7 @@ beforeEach(setupAnvil); beforeEach(setupIsolatedDatabase); const cliOptions = { + schema: "public", root: "./src/_test/e2e/factory", config: "ponder.config.ts", logLevel: "error", @@ -34,7 +37,7 @@ const cliOptions = { test( "factory", - async (context) => { + async () => { const port = await getFreePort(); const cleanup = await start({ @@ -45,20 +48,33 @@ test( }, }); - await waitForIndexedBlock(port, "mainnet", 5); + const { address } = await deployFactory({ sender: ALICE }); + const { result: pair } = await createPair({ + factory: address, + sender: ALICE, + }); + await swapPair({ + pair, + amount0Out: 1n, + amount1Out: 1n, + to: ALICE, + sender: ALICE, + }); + + await waitForIndexedBlock(port, "mainnet", 3); let response = await postGraphql( port, ` - swapEvents { - items { - id - pair - from - to + swapEvents { + items { + id + pair + from + to + } } - } - `, + `, ); expect(response.status).toBe(200); @@ -71,25 +87,31 @@ test( id: expect.any(String), from: ALICE.toLowerCase(), to: ALICE.toLowerCase(), - pair: context.factory.pair.toLowerCase(), + pair, }); - await simulatePairSwap(context.factory.pair); + await swapPair({ + pair, + amount0Out: 1n, + amount1Out: 1n, + to: ALICE, + sender: ALICE, + }); - await waitForIndexedBlock(port, "mainnet", 6); + await waitForIndexedBlock(port, "mainnet", 4); response = await postGraphql( port, ` - swapEvents { - items { - id - pair - from - to + swapEvents { + items { + id + pair + from + to + } } - } - `, + `, ); expect(response.status).toBe(200); diff --git a/packages/core/src/_test/e2e/factory/ponder.config.ts b/packages/core/src/_test/e2e/factory/ponder.config.ts index 875fc0392..a0d6b7fdf 100644 --- a/packages/core/src/_test/e2e/factory/ponder.config.ts +++ b/packages/core/src/_test/e2e/factory/ponder.config.ts @@ -1,7 +1,6 @@ +import { factory } from "@/config/address.js"; import { http, getAbiItem } from "viem"; - import { createConfig } from "../../../config/config.js"; -import { CONTRACTS } from "../../constants.js"; import { factoryABI, pairABI } from "../../generated.js"; const poolId = Number(process.env.VITEST_POOL_ID ?? 
1); @@ -29,11 +28,11 @@ export default createConfig({ Pair: { network: "mainnet", abi: pairABI, - factory: { - address: CONTRACTS.factoryAddress, + address: factory({ + address: "0x5fbdb2315678afecb367f032d93f642f64180aa3", event: getAbiItem({ abi: factoryABI, name: "PairCreated" }), parameter: "pair", - }, + }), }, }, }); diff --git a/packages/core/src/_test/e2e/factory/src/api/index.ts b/packages/core/src/_test/e2e/factory/src/api/index.ts index d300a24f9..e03167573 100644 --- a/packages/core/src/_test/e2e/factory/src/api/index.ts +++ b/packages/core/src/_test/e2e/factory/src/api/index.ts @@ -1,11 +1,4 @@ -// @ts-ignore -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; import { graphql } from "@/index.js"; -// biome-ignore lint/suspicious/noRedeclare: :) -declare const ponder: import("@/index.js").Virtual.Registry< - typeof import("../../ponder.config.js").default, - typeof import("../../ponder.schema.js") ->; - ponder.use("/graphql", graphql()); diff --git a/packages/core/src/_test/e2e/factory/src/index.ts b/packages/core/src/_test/e2e/factory/src/index.ts index fd35fa901..11381b966 100644 --- a/packages/core/src/_test/e2e/factory/src/index.ts +++ b/packages/core/src/_test/e2e/factory/src/index.ts @@ -1,13 +1,17 @@ -// @ts-ignore -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema.js"; - -// biome-ignore lint/suspicious/noRedeclare: :) declare const ponder: import("@/index.js").Virtual.Registry< typeof import("../ponder.config.js").default, typeof import("../ponder.schema.js") >; +declare const schema: typeof import("../ponder.schema.js"); + +// @ts-ignore +// biome-ignore lint/suspicious/noRedeclare: +import { ponder } from "ponder:registry"; +// @ts-ignore +// biome-ignore lint/suspicious/noRedeclare: +import schema from "ponder:schema"; + ponder.on("Pair:Swap", async ({ event, context }) => { await context.db.insert(schema.swapEvent).values({ id: event.log.id, diff --git a/packages/core/src/_test/setup.ts b/packages/core/src/_test/setup.ts index 07ce0a74b..1eef0f293 100644 --- a/packages/core/src/_test/setup.ts +++ b/packages/core/src/_test/setup.ts @@ -4,9 +4,7 @@ import { createLogger } from "@/common/logger.js"; import { MetricsService } from "@/common/metrics.js"; import { buildOptions } from "@/common/options.js"; import { createTelemetry } from "@/common/telemetry.js"; -import type { Config } from "@/config/config.js"; import type { DatabaseConfig } from "@/config/database.js"; -import type { Network } from "@/config/networks.js"; import { type Database, createDatabase } from "@/database/index.js"; import type { Schema } from "@/drizzle/index.js"; import type { IndexingStore } from "@/indexing-store/index.js"; @@ -16,37 +14,16 @@ import { } from "@/indexing-store/metadata.js"; import { createRealtimeIndexingStore } from "@/indexing-store/realtime.js"; import { type SyncStore, createSyncStore } from "@/sync-store/index.js"; -import type { BlockSource, ContractSource, LogFactory } from "@/sync/source.js"; import { createPglite } from "@/utils/pglite.js"; -import type { RequestQueue } from "@/utils/requestQueue.js"; import type { PGlite } from "@electric-sql/pglite"; import pg from "pg"; -import type { Address } from "viem"; import { type TestContext, afterAll } from "vitest"; -import { deploy, simulate } from "./simulate.js"; -import { - getConfig, - getNetworkAndSources, - poolId, - testClient, -} from "./utils.js"; +import { poolId, testClient } from "./utils.js"; declare module "vitest" { export interface 
TestContext { common: Common; databaseConfig: DatabaseConfig; - sources: [ - ContractSource<"log", undefined>, - ContractSource<"log", LogFactory>, - ContractSource<"trace", LogFactory>, - ContractSource<"trace", undefined>, - BlockSource, - ]; - networks: [Network]; - requestQueues: [RequestQueue]; - config: Config; - erc20: { address: Address }; - factory: { address: Address; pair: Address }; } } @@ -168,20 +145,15 @@ export async function setupIsolatedDatabase(context: TestContext) { type DatabaseServiceSetup = { buildId: string; - instanceId: string; schema: Schema; indexing: "realtime" | "historical"; }; const defaultDatabaseServiceSetup: DatabaseServiceSetup = { buildId: "abc", - instanceId: "1234", schema: {}, indexing: "historical", }; -// @ts-ignore -globalThis.__PONDER_INSTANCE_ID = "1234"; - export async function setupDatabaseServices( context: TestContext, overrides: Partial = {}, @@ -194,22 +166,23 @@ export async function setupDatabaseServices( }> { const config = { ...defaultDatabaseServiceSetup, ...overrides }; - const { statements, namespace } = buildSchema({ + const { statements } = buildSchema({ schema: config.schema, - instanceId: config.instanceId, }); const database = createDatabase({ common: context.common, - databaseConfig: context.databaseConfig, - schema: config.schema, - instanceId: config.instanceId, - buildId: config.buildId, - statements, - namespace, + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { + schema: config.schema, + statements, + }, }); - await database.setup(); + await database.setup({ buildId: config.buildId }); await database.migrateSync().catch((err) => { console.log(err); @@ -229,7 +202,6 @@ export async function setupDatabaseServices( const metadataStore = getMetadataStore({ db: database.qb.readonly, - instanceId: config.instanceId, }); const cleanup = () => database.kill(); @@ -244,42 +216,17 @@ export async function setupDatabaseServices( } /** - * Sets up an isolated Ethereum client on the test context, with the appropriate Erc20 + Factory state. + * Sets up an isolated Ethereum client. * + * @example * ```ts * // Add this to any test suite that uses the Ethereum client. - * beforeEach((context) => setupAnvil(context)) + * beforeEach(setupAnvil) * ``` */ -export async function setupAnvil(context: TestContext) { +export async function setupAnvil() { const emptySnapshotId = await testClient.snapshot(); - // Chain state setup shared across all tests. 
- const addresses = await deploy(); - const pair = await simulate(addresses); - await testClient.mine({ blocks: 1 }); - - context.config = getConfig(addresses); - - const { networks, sources, requestQueues } = await getNetworkAndSources( - addresses, - context.common, - ); - context.networks = networks as [Network]; - context.requestQueues = requestQueues as [RequestQueue]; - context.sources = sources as [ - ContractSource<"log", undefined>, - ContractSource<"log", LogFactory>, - ContractSource<"trace", LogFactory>, - ContractSource<"trace", undefined>, - BlockSource, - ]; - context.erc20 = { address: addresses.erc20Address }; - context.factory = { - address: addresses.factoryAddress, - pair: pair.toLowerCase() as Address, - }; - return async () => { await testClient.revert({ id: emptySnapshotId }); }; diff --git a/packages/core/src/_test/simulate.ts b/packages/core/src/_test/simulate.ts index cc05fe033..08cf37a89 100644 --- a/packages/core/src/_test/simulate.ts +++ b/packages/core/src/_test/simulate.ts @@ -1,115 +1,178 @@ -import { type Address, type Hex, parseEther } from "viem"; - -import { ALICE, BOB } from "./constants.js"; +import { toLowerCase } from "@/utils/lowercase.js"; +import { http, type Address, type Hex, createWalletClient } from "viem"; import Erc20Bytecode from "./contracts/out/ERC20.sol/ERC20.json"; import FactoryBytecode from "./contracts/out/Factory.sol/Factory.json"; import { erc20ABI, factoryABI, pairABI } from "./generated.js"; -import { publicClient, testClient, walletClient } from "./utils.js"; +import { anvil, publicClient, testClient } from "./utils.js"; + +/** Deploy Erc20 contract and mine block. */ +export const deployErc20 = async (params: { sender: Address }) => { + const walletClient = createWalletClient({ + chain: anvil, + transport: http(), + account: params.sender, + }); -/** - * Deploy Two ERC20 tokens and a Factory contract. All happens in one block. - */ -export const deploy = async () => { - const deployHashErc20 = await walletClient.deployContract({ + const hash = await walletClient.deployContract({ abi: erc20ABI, bytecode: Erc20Bytecode.bytecode.object as Hex, args: ["name", "symbol", 18], }); - const deployHashFactory = await walletClient.deployContract({ + await testClient.mine({ blocks: 1 }); + const { contractAddress } = await publicClient.waitForTransactionReceipt({ + hash, + }); + + return { address: contractAddress!, hash }; +}; + +/** Deploy Factory contract and mine block. */ +export const deployFactory = async (params: { sender: Address }) => { + const walletClient = createWalletClient({ + chain: anvil, + transport: http(), + account: params.sender, + }); + + const hash = await walletClient.deployContract({ abi: factoryABI, bytecode: FactoryBytecode.bytecode.object as Hex, }); await testClient.mine({ blocks: 1 }); + const { contractAddress } = await publicClient.waitForTransactionReceipt({ + hash, + }); - const { contractAddress: erc20Address } = - await publicClient.waitForTransactionReceipt({ - hash: deployHashErc20, - }); - const { contractAddress: factoryAddress } = - await publicClient.waitForTransactionReceipt({ - hash: deployHashFactory, - }); - return { - erc20Address: erc20Address!, - factoryAddress: factoryAddress!, - }; + return { address: contractAddress!, hash }; }; -/** - * Simulate network activity - * - * 1) Mint one tokens to Alice - * 2) Transfer one token from Alice to Bob - * 3) Create a pair - * 4) Swap on created pair - * - * Blocks are created after 2, 3, and 4. 
- * - * @returns The pair address - */ -export const simulate = async ( -  addresses: Awaited<ReturnType<typeof deploy>>, -): Promise<Address>
=> { - await simulateErc20(addresses.erc20Address); - const pairAddress = await simulateFactoryDeploy(addresses.factoryAddress); - await simulatePairSwap(pairAddress); - - return pairAddress; -}; +/** Mint Erc20 tokens and mine block. */ +export const mintErc20 = async (params: { + erc20: Address; + to: Address; + amount: bigint; + sender: Address; +}) => { + const walletClient = createWalletClient({ + chain: anvil, + transport: http(), + account: params.sender, + }); -export const simulateErc20 = async (erc20Address: Address) => { - // Mint 1 token to ALICE - const mintHash = await walletClient.writeContract({ + const hash = await walletClient.writeContract({ abi: erc20ABI, functionName: "mint", - address: erc20Address, - args: [ALICE, parseEther("1")], + address: params.erc20, + args: [params.to, params.amount], + }); + + await testClient.mine({ blocks: 1 }); + await publicClient.waitForTransactionReceipt({ hash }); + + return { hash }; +}; + +/** Transfer Erc20 tokens and mine block. */ +export const transferErc20 = async (params: { + erc20: Address; + to: Address; + amount: bigint; + sender: Address; +}) => { + const walletClient = createWalletClient({ + chain: anvil, + transport: http(), + account: params.sender, }); - // Transfer 1 token from ALICE to BOB - const transferHash = await walletClient.writeContract({ + const hash = await walletClient.writeContract({ abi: erc20ABI, functionName: "transfer", - address: erc20Address, - args: [BOB, parseEther("1")], + address: params.erc20, + args: [params.to, params.amount], }); await testClient.mine({ blocks: 1 }); + await publicClient.waitForTransactionReceipt({ hash }); - await publicClient.waitForTransactionReceipt({ hash: mintHash }); - await publicClient.waitForTransactionReceipt({ hash: transferHash }); + return { hash }; }; -export const simulateFactoryDeploy = async ( - factoryAddress: Address, -): Promise
=> { +/** Create pair and mine block. */ +export const createPair = async (params: { + factory: Address; + sender: Address; +}) => { + const walletClient = createWalletClient({ + chain: anvil, + transport: http(), + account: params.sender, + }); + const { result, request } = await publicClient.simulateContract({ abi: factoryABI, functionName: "createPair", - address: factoryAddress, + address: params.factory, }); - const createPairHash = await walletClient.writeContract(request); - await testClient.mine({ blocks: 1 }); + const hash = await walletClient.writeContract(request); + await testClient.mine({ blocks: 1 }); await publicClient.waitForTransactionReceipt({ - hash: createPairHash, + hash, }); - return result; + return { result: toLowerCase(result), hash }; }; -export const simulatePairSwap = async (pairAddress: Address) => { - const swapHash = await walletClient.writeContract({ +/** Swap tokens in pair and mine block. */ +export const swapPair = async (params: { + pair: Address; + amount0Out: bigint; + amount1Out: bigint; + to: Address; + sender: Address; +}) => { + const walletClient = createWalletClient({ + chain: anvil, + transport: http(), + account: params.sender, + }); + + const hash = await walletClient.writeContract({ abi: pairABI, functionName: "swap", - address: pairAddress, - args: [1n, 2n, ALICE], + address: params.pair, + args: [params.amount0Out, params.amount1Out, params.to], + }); + + await testClient.mine({ blocks: 1 }); + await publicClient.waitForTransactionReceipt({ hash }); + + return { hash }; +}; + +/** Transfer native tokens and mine block. */ +export const transferEth = async (params: { + to: Address; + amount: bigint; + sender: Address; +}) => { + const walletClient = createWalletClient({ + chain: anvil, + transport: http(), + account: params.sender, + }); + + const hash = await walletClient.sendTransaction({ + to: params.to, + value: params.amount, }); await testClient.mine({ blocks: 1 }); + await publicClient.waitForTransactionReceipt({ hash }); - await publicClient.waitForTransactionReceipt({ hash: swapHash }); + return { hash }; }; diff --git a/packages/core/src/_test/utils.ts b/packages/core/src/_test/utils.ts index 31d66cb61..b0f6db682 100644 --- a/packages/core/src/_test/utils.ts +++ b/packages/core/src/_test/utils.ts @@ -1,52 +1,12 @@ import { type AddressInfo, createServer } from "node:net"; -import { buildConfigAndIndexingFunctions } from "@/build/configAndIndexingFunctions.js"; -import type { Common } from "@/common/common.js"; +import { factory } from "@/config/address.js"; import { createConfig } from "@/config/config.js"; -import type { RawEvent } from "@/sync/events.js"; +import type { Network } from "@/config/networks.js"; import type { Status } from "@/sync/index.js"; -import type { Source } from "@/sync/source.js"; -import type { - SyncBlock, - SyncCallTrace, - SyncCreateTrace, - SyncLog, - SyncTransaction, - SyncTransactionReceipt, -} from "@/types/sync.js"; -import { - encodeCheckpoint, - maxCheckpoint, - zeroCheckpoint, -} from "@/utils/checkpoint.js"; -import { createRequestQueue } from "@/utils/requestQueue.js"; -import { - type Chain, - type Hash, - type Hex, - encodeFunctionData, - encodeFunctionResult, - formatTransactionReceipt, - hexToBigInt, - hexToNumber, - parseEther, -} from "viem"; -import { - http, - checksumAddress, - createPublicClient, - createTestClient, - createWalletClient, - formatBlock, - formatLog, - formatTransaction, - getAbiItem, - slice, - toHex, -} from "viem"; +import type { Address, Chain } from "viem"; +import 
{ http, createPublicClient, createTestClient, getAbiItem } from "viem"; import { mainnet } from "viem/chains"; -import { ALICE, BOB } from "./constants.js"; import { erc20ABI, factoryABI, pairABI } from "./generated.js"; -import type { deploy } from "./simulate.js"; // Anvil test setup adapted from @viem/anvil `example-vitest` repository. // https://github.com/wagmi-dev/anvil.js/tree/main/examples/example-vitest @@ -80,18 +40,15 @@ export const publicClient = createPublicClient({ transport: http(), }); -export const walletClient = createWalletClient({ - chain: anvil, - transport: http(), - account: ALICE, -}); +export const getBlockNumber = async () => + publicClient.getBlockNumber().then(Number); -/** - * Returns the config for the local anvil testing suite. - * The suite contains an erc20 and mock factory + pair event sources. - */ -export const getConfig = (addresses: Awaited>) => - createConfig({ +export const getErc20ConfigAndIndexingFunctions = (params: { + address: Address; + includeCallTraces?: boolean; + includeTransactionReceipts?: boolean; +}) => { + const config = createConfig({ networks: { mainnet: { chainId: 1, @@ -102,554 +59,132 @@ export const getConfig = (addresses: Awaited>) => Erc20: { abi: erc20ABI, network: "mainnet", - address: addresses.erc20Address, - filter: { - event: [ - "Transfer(address indexed from, address indexed to, uint256 amount)", - "Approval", - ], + address: params.address, + includeCallTraces: params.includeCallTraces, + includeTransactionReceipts: params.includeTransactionReceipts, + }, + }, + }); + + const rawIndexingFunctions = params.includeCallTraces + ? [ + { name: "Erc20.transfer()", fn: () => {} }, + { + name: "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)", + fn: () => {}, }, + ] + : [ + { + name: "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)", + fn: () => {}, + }, + ]; + + return { config, rawIndexingFunctions }; +}; + +export const getPairWithFactoryConfigAndIndexingFunctions = (params: { + address: Address; + includeCallTraces?: boolean; + includeTransactionReceipts?: boolean; +}) => { + const config = createConfig({ + networks: { + mainnet: { + chainId: 1, + transport: http(`http://127.0.0.1:8545/${poolId}`), }, + }, + contracts: { Pair: { abi: pairABI, network: "mainnet", - factory: { - address: addresses.factoryAddress, + address: factory({ + address: params.address, event: getAbiItem({ abi: factoryABI, name: "PairCreated" }), parameter: "pair", - }, - includeCallTraces: true, - filter: { - event: ["Swap"], - }, - }, - Factory: { - abi: factoryABI, - network: "mainnet", - address: addresses.factoryAddress, - includeCallTraces: true, - }, - }, - blocks: { - OddBlocks: { - startBlock: 1, - interval: 2, - network: "mainnet", + }), + includeCallTraces: params.includeCallTraces, + includeTransactionReceipts: params.includeTransactionReceipts, }, }, }); -/** - * Returns a network representing the local anvil chain. - * Set `finalityBlockCount` to 4 because `deploy()` + `simulate()` is 4 blocks. 
- */ -export const getNetworkAndSources = async ( - addresses: Awaited>, - common: Common, -) => { - const config = getConfig(addresses); - const { networks, sources } = await buildConfigAndIndexingFunctions({ - config, - rawIndexingFunctions: [ - { - name: "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)", - fn: () => {}, - }, - { name: "Pair:Swap", fn: () => {} }, - { name: "Pair.swap()", fn: () => {} }, - { name: "OddBlocks:block", fn: () => {} }, - { name: "Factory.createPair()", fn: () => {} }, - ], - options: common.options, - }); - const mainnet = { ...networks[0], finalityBlockCount: 4 }; + const rawIndexingFunctions = params.includeCallTraces + ? [ + { name: "Pair.swap()", fn: () => {} }, + { name: "Pair:Swap", fn: () => {} }, + ] + : [{ name: "Pair:Swap", fn: () => {} }]; - const requestQueue = createRequestQueue({ - network: networks[0]!, - common, - }); - - return { - networks: [mainnet], - sources, - requestQueues: [requestQueue], - }; + return { config, rawIndexingFunctions }; }; -/** - * Returns the logs, block, traces, and transaction data for blocks 1, 2, 3, 4, 5. - * Block 2 has two contract creations. - * Block 2 has two erc20 transfer events. - * Block 3 has a pair creation event. - * Block 4 has a swap event from the newly created pair. - * Block 5 is empty. - */ -export const getRawRPCData = async () => { - const latestBlock = await publicClient.getBlockNumber(); - const logs = await publicClient.request({ - method: "eth_getLogs", - params: [ - { - fromBlock: toHex(latestBlock - 3n), +export const getBlocksConfigAndIndexingFunctions = (params: { + interval: number; +}) => { + const config = createConfig({ + networks: { + mainnet: { + chainId: 1, + transport: http(`http://127.0.0.1:8545/${poolId}`), + }, + }, + blocks: { + Blocks: { + network: "mainnet", + interval: params.interval, }, - ], + }, }); - // Manually add the child address log - logs.push( - ...(await publicClient.request({ - method: "eth_getLogs", - params: [ - { - address: slice(logs[2]!.topics[1]!, 12), - fromBlock: toHex(latestBlock - 3n), - }, - ], - })), - ); - - // Dedupe any repeated blocks and txs - const blockNumbers: Set = new Set(); - const txHashes: Set = new Set(); - for (const log of logs) { - if (log.blockNumber) blockNumbers.add(log.blockNumber); - if (log.transactionHash) txHashes.add(log.transactionHash); - } - const blocks = await Promise.all( - [1, 2, 3, 4, 5].map( - (bn) => - publicClient.request({ - method: "eth_getBlockByNumber", - params: [toHex(bn), true], - }) as Promise, - ), - ); - const transactionReceipts = await Promise.all( - [...txHashes].map((tx) => - publicClient.request({ - method: "eth_getTransactionReceipt", - params: [tx], - }), - ), - ); + const rawIndexingFunctions = [{ name: "Blocks:block", fn: () => {} }]; - return { - block1: { - logs: [], - block: blocks[0], - transactions: [], - transactionReceipts: [], - callTraces: [ - { - action: { - from: ALICE, - gas: "0x0", - init: "0x0", - value: "0x0", - }, - blockHash: blocks[0]!.hash, - blockNumber: blocks[0]!.number, - result: { - address: "0x0", - code: "0x0", - gasUsed: "0x0", - }, - subtraces: 0, - traceAddress: [0], - transactionHash: blocks[0]!.transactions[0]!.hash, - transactionPosition: hexToNumber( - blocks[0]!.transactions[0]!.transactionIndex, - ), - type: "create", - }, - { - action: { - from: ALICE, - gas: "0x0", - init: "0x0", - value: "0x0", - }, - blockHash: blocks[0]!.hash, - blockNumber: blocks[0]!.number, - result: { - address: "0x0", - code: "0x0", - gasUsed: "0x0", - }, - 
subtraces: 0, - traceAddress: [0], - transactionHash: blocks[0]!.transactions[1]!.hash, - transactionPosition: hexToNumber( - blocks[0]!.transactions[1]!.transactionIndex, - ), - type: "create", - }, - ], - }, - block2: { - logs: [logs[0]!, logs[1]!], - block: blocks[1]!, - transactions: blocks[1]!.transactions, - transactionReceipts: transactionReceipts.filter( - (tr) => tr?.blockNumber === blocks[1]?.number, - ), - callTraces: [ - { - action: { - callType: "call", - from: ALICE, - gas: "0x0", - input: encodeFunctionData({ - abi: erc20ABI, - functionName: "mint", - args: [ALICE, parseEther("1")], - }), - to: logs[0]!.address, - value: "0x0", - }, - blockHash: blocks[1]!.hash, - blockNumber: blocks[1]!.number, - result: { - gasUsed: "0x0", - output: encodeFunctionResult({ - abi: erc20ABI, - functionName: "mint", - }), - }, - subtraces: 0, - traceAddress: [0], - transactionHash: blocks[1]!.transactions[0]!.hash, - transactionPosition: hexToNumber( - blocks[1]!.transactions[0]!.transactionIndex, - ), - type: "call", - }, - { - action: { - callType: "call", - from: ALICE, - gas: "0x0", - input: encodeFunctionData({ - abi: erc20ABI, - functionName: "mint", - args: [BOB, parseEther("1")], - }), - to: logs[1]!.address, - value: "0x0", - }, - blockHash: blocks[1]!.hash, - blockNumber: blocks[1]!.number, - result: { - gasUsed: "0x0", - output: encodeFunctionResult({ - abi: erc20ABI, - functionName: "mint", - }), - }, - subtraces: 0, - traceAddress: [0], - transactionHash: blocks[1]!.transactions[1]!.hash, - transactionPosition: hexToNumber( - blocks[1]!.transactions[1]!.transactionIndex, - ), - type: "call", - }, - ], - }, - block3: { - logs: [logs[2]], - block: blocks[2], - transactions: blocks[2]!.transactions, - transactionReceipts: transactionReceipts.filter( - (tr) => tr?.blockNumber === blocks[2]?.number, - ), - callTraces: [ - { - action: { - callType: "call", - from: ALICE, - gas: "0x0", - input: encodeFunctionData({ - abi: factoryABI, - functionName: "createPair", - }), - to: logs[2]!.address, - value: "0x0", - }, - blockHash: blocks[2]!.hash, - blockNumber: blocks[2]!.number, - result: { - gasUsed: "0x0", - output: encodeFunctionResult({ - abi: factoryABI, - functionName: "createPair", - result: logs[3]!.address, - }), - }, - subtraces: 0, - traceAddress: [0], - transactionHash: blocks[2]!.transactions[0]!.hash, - transactionPosition: hexToNumber( - blocks[2]!.transactions[0]!.transactionIndex, - ), - type: "call", - }, - ], - }, - block4: { - logs: [logs[3]], - block: blocks[3], - transactions: blocks[3]!.transactions, - transactionReceipts: transactionReceipts.filter( - (tr) => tr?.blockNumber === blocks[3]?.number, - ), - callTraces: [ - { - action: { - callType: "call", - from: ALICE, - gas: "0x0", - input: encodeFunctionData({ - abi: pairABI, - functionName: "swap", - args: [1n, 2n, ALICE], - }), - to: logs[3]!.address, - value: "0x0", - }, - blockHash: blocks[3]!.hash, - blockNumber: blocks[3]!.number, - result: { - gasUsed: "0x0", - output: encodeFunctionResult({ - abi: pairABI, - functionName: "swap", - }), - }, - subtraces: 0, - traceAddress: [0], - transactionHash: blocks[3]!.transactions[0]!.hash, - transactionPosition: hexToNumber( - blocks[3]!.transactions[0]!.transactionIndex, - ), - type: "call", - }, - ], - }, - block5: { - logs: [], - block: blocks[4]!, - transactions: [], - transactionReceipts: [], - callTraces: [], - }, - } as unknown as { - block1: { - logs: []; - block: SyncBlock; - transactions: []; - transactionReceipts: []; - callTraces: [SyncCreateTrace, 
SyncCreateTrace]; - }; - block2: { - logs: [SyncLog, SyncLog]; - block: SyncBlock; - transactions: [SyncTransaction, SyncTransaction]; - transactionReceipts: [SyncTransactionReceipt, SyncTransactionReceipt]; - callTraces: [SyncCallTrace, SyncCallTrace]; - }; - block3: { - logs: [SyncLog]; - block: SyncBlock; - transactions: [SyncTransaction]; - transactionReceipts: [SyncTransactionReceipt]; - callTraces: [SyncCallTrace]; - }; - block4: { - logs: [SyncLog]; - block: SyncBlock; - transactions: [SyncTransaction]; - transactionReceipts: [SyncTransactionReceipt]; - callTraces: [SyncCallTrace]; - }; - block5: { - logs: []; - block: SyncBlock; - transactions: []; - transactionReceipts: []; - callTraces: []; - }; - }; + return { config, rawIndexingFunctions }; }; -/** - * Mock function for `getEvents` that specifically returns the event data for the log and factory sources. - */ -export const getEventsLog = async (sources: Source[]): Promise => { - const rpcData = await getRawRPCData(); - - return [ - { - log: rpcData.block2.logs[0], - block: rpcData.block2.block, - transaction: rpcData.block2.transactions[0]!, - transactionReceipt: rpcData.block2.transactionReceipts[0]!, - }, - { - log: rpcData.block2.logs[1], - block: rpcData.block2.block, - transaction: rpcData.block2.transactions[1]!, - transactionReceipt: rpcData.block2.transactionReceipts[1]!, - }, - { - log: rpcData.block4.logs[0], - block: rpcData.block4.block, - transaction: rpcData.block4.transactions[0]!, - transactionReceipt: rpcData.block4.transactionReceipts[0]!, - }, - ] - .map((e) => ({ - log: formatLog(e.log), - block: formatBlock(e.block), - transaction: formatTransaction(e.transaction), - transactionReceipt: formatTransactionReceipt(e.transactionReceipt), - })) - .map(({ log, block, transaction, transactionReceipt }, i) => ({ - sourceIndex: i === 0 || i === 1 ? 0 : 1, - chainId: sources[0]!.filter.chainId, - checkpoint: encodeCheckpoint({ - blockTimestamp: Number(block.timestamp), - chainId: BigInt(sources[0]!.filter.chainId), - blockNumber: block.number!, - transactionIndex: BigInt(transaction.transactionIndex!), - eventType: 5, - eventIndex: BigInt(log.logIndex!), - }), - log: { - ...log, - id: `${log.blockHash}-${toHex(log.logIndex!)}`, - address: checksumAddress(log.address), - }, - block: { ...block, miner: checksumAddress(block.miner) }, - transaction: { - ...transaction, - from: checksumAddress(transaction.from), - to: transaction.to ? checksumAddress(transaction.to) : transaction.to, +export const getAccountsConfigAndIndexingFunctions = (params: { + address: Address; +}) => { + const config = createConfig({ + networks: { + mainnet: { + chainId: 1, + transport: http(`http://127.0.0.1:8545/${poolId}`), }, - transactionReceipt: { - ...transactionReceipt, - from: checksumAddress(transactionReceipt.from), - to: transactionReceipt.to - ? checksumAddress(transactionReceipt.to) - : transactionReceipt.to, - logs: transactionReceipt.logs.map((l) => ({ - ...l, - id: `${l.blockHash}-${toHex(l.logIndex!)}`, - })), + }, + accounts: { + Accounts: { + network: "mainnet", + address: params.address, }, - })) as RawEvent[]; -}; - -/** - * Mock function for `getEvents` that specifically returns the event data for the block sources. 
- */ -export const getEventsBlock = async ( - sources: Source[], -): Promise => { - const rpcData = await getRawRPCData(); - - return [ - { - block: rpcData.block3.block, }, - ] - .map((e) => ({ - block: formatBlock(e.block), - })) - .map(({ block }) => ({ - sourceIndex: 4, - chainId: sources[4]!.filter.chainId, - checkpoint: encodeCheckpoint({ - blockTimestamp: Number(block.timestamp), - chainId: BigInt(sources[0]!.filter.chainId), - blockNumber: block.number!, - transactionIndex: maxCheckpoint.transactionIndex, - eventType: 5, - eventIndex: zeroCheckpoint.eventIndex, - }), + }); - block: { ...block, miner: checksumAddress(block.miner) }, - })) as RawEvent[]; -}; + const rawIndexingFunctions = [ + { name: "Accounts:transaction:from", fn: () => {} }, + { name: "Accounts:transaction:to", fn: () => {} }, + { name: "Accounts:transfer:from", fn: () => {} }, + { name: "Accounts:transfer:to", fn: () => {} }, + ]; -/** - * Mock function for `getEvents` that specifically returns the event data for the trace sources. - */ -export const getEventsTrace = async ( - sources: Source[], -): Promise => { - const rpcData = await getRawRPCData(); + return { config, rawIndexingFunctions }; +}; - return [ - { - trace: rpcData.block3.callTraces[0], - block: rpcData.block3.block, - transaction: rpcData.block3.transactions[0]!, - transactionReceipt: rpcData.block3.transactionReceipts[0]!, - }, - ] - .map((e) => ({ - trace: e.trace, - block: formatBlock(e.block), - transaction: formatTransaction(e.transaction), - transactionReceipt: formatTransactionReceipt(e.transactionReceipt), - })) - .map(({ trace, block, transaction, transactionReceipt }) => ({ - sourceIndex: 3, - chainId: sources[3]!.filter.chainId, - checkpoint: encodeCheckpoint({ - blockTimestamp: Number(block.timestamp), - chainId: BigInt(sources[0]!.filter.chainId), - blockNumber: block.number!, - transactionIndex: BigInt(transaction.transactionIndex!), - eventType: 7, - eventIndex: 0n, - }), - trace: { - id: `${trace.transactionHash}-${JSON.stringify(trace.traceAddress)}`, - from: checksumAddress(trace.action.from), - to: checksumAddress(trace.action.to), - gas: hexToBigInt(trace.action.gas), - value: hexToBigInt(trace.action.value), - input: trace.action.input, - output: trace.result!.output, - gasUsed: hexToBigInt(trace.result!.gasUsed), - subtraces: trace.subtraces, - traceAddress: trace.traceAddress, - blockHash: trace.blockHash, - blockNumber: hexToBigInt(trace.blockNumber), - transactionHash: trace.transactionHash, - transactionIndex: trace.transactionPosition, - callType: trace.action.callType, - }, - block: { ...block, miner: checksumAddress(block.miner) }, - transaction: { - ...transaction, - from: checksumAddress(transaction.from), - to: transaction.to ? checksumAddress(transaction.to) : transaction.to, - }, - transactionReceipt: { - ...transactionReceipt, - from: checksumAddress(transactionReceipt.from), - to: transactionReceipt.to - ? checksumAddress(transactionReceipt.to) - : transactionReceipt.to, - logs: transactionReceipt.logs.map((l) => ({ - ...l, - id: `${l.blockHash}-${toHex(l.logIndex!)}`, - })), - }, - })) as RawEvent[]; +export const getNetwork = (params?: { + finalityBlockCount?: number; +}) => { + return { + name: "mainnet", + chainId: 1, + chain: anvil, + transport: http(`http://127.0.0.1:8545/${poolId}`)({ chain: anvil }), + maxRequestsPerSecond: 50, + pollingInterval: 1_000, + finalityBlockCount: params?.finalityBlockCount ?? 
1, + disableCache: false, + } satisfies Network; }; export function getFreePort(): Promise { diff --git a/packages/core/src/bin/commands/codegen.ts b/packages/core/src/bin/commands/codegen.ts index 66b0e0805..2d57ae9fe 100644 --- a/packages/core/src/bin/commands/codegen.ts +++ b/packages/core/src/bin/commands/codegen.ts @@ -1,4 +1,4 @@ -import { createBuildService } from "@/build/index.js"; +import { createBuild } from "@/build/index.js"; import { runCodegen } from "@/common/codegen.js"; import { createLogger } from "@/common/logger.js"; import { MetricsService } from "@/common/metrics.js"; @@ -31,23 +31,25 @@ export async function codegen({ cliOptions }: { cliOptions: CliOptions }) { const telemetry = createTelemetry({ options, logger }); const common = { options, logger, metrics, telemetry }; - const buildService = await createBuildService({ common }); + const build = await createBuild({ common }); const cleanup = async () => { - await buildService.kill(); + await build.kill(); await telemetry.kill(); }; const shutdown = setupShutdown({ common, cleanup }); - const buildResult = await buildService.start({ watch: false }); + const executeResult = await build.execute(); + if (executeResult.schemaResult.status === "error") { + await shutdown({ reason: "Failed schema build", code: 1 }); + return; + } + const schemaBuildResult = build.compileSchema( + executeResult.schemaResult.result, + ); - if (buildResult.status === "error") { - logger.error({ - service: "process", - msg: "Failed schema build", - error: buildResult.error, - }); + if (schemaBuildResult.status === "error") { await shutdown({ reason: "Failed schema build", code: 1 }); return; } @@ -57,8 +59,7 @@ export async function codegen({ cliOptions }: { cliOptions: CliOptions }) { properties: { cli_command: "codegen" }, }); - const graphqlSchema = buildResult.indexingBuild.graphqlSchema; - runCodegen({ common, graphqlSchema }); + runCodegen({ common, graphqlSchema: schemaBuildResult.result.graphqlSchema }); logger.info({ service: "codegen", msg: "Wrote ponder-env.d.ts" }); logger.info({ service: "codegen", msg: "Wrote schema.graphql" }); diff --git a/packages/core/src/bin/commands/dev.ts b/packages/core/src/bin/commands/dev.ts index 980e48a0a..407ed7854 100644 --- a/packages/core/src/bin/commands/dev.ts +++ b/packages/core/src/bin/commands/dev.ts @@ -1,13 +1,17 @@ -import { existsSync } from "node:fs"; +import fs from "node:fs"; import path from "node:path"; -import { createBuildService } from "@/build/index.js"; -import type { BuildResultDev } from "@/build/service.js"; +import { + type BuildResultDev, + type SchemaBuild, + createBuild, +} from "@/build/index.js"; import { createLogger } from "@/common/logger.js"; import { MetricsService } from "@/common/metrics.js"; import { buildOptions } from "@/common/options.js"; import { buildPayload, createTelemetry } from "@/common/telemetry.js"; import { type Database, createDatabase } from "@/database/index.js"; import { createUi } from "@/ui/service.js"; +import { mergeResults } from "@/utils/result.js"; import { createQueue } from "@ponder/common"; import type { CliOptions } from "../ponder.js"; import { run } from "../utils/run.js"; @@ -34,7 +38,7 @@ export async function dev({ cliOptions }: { cliOptions: CliOptions }) { process.exit(1); } - if (!existsSync(path.join(options.rootDir, ".env.local"))) { + if (!fs.existsSync(path.join(options.rootDir, ".env.local"))) { logger.warn({ service: "app", msg: "Local environment file (.env.local) not found", @@ -51,7 +55,7 @@ export async function 
dev({ cliOptions }: { cliOptions: CliOptions }) { const telemetry = createTelemetry({ options, logger }); const common = { options, logger, metrics, telemetry }; - const buildService = await createBuildService({ common }); + const build = await createBuild({ common }); const ui = createUi({ common }); @@ -64,13 +68,15 @@ export async function dev({ cliOptions }: { cliOptions: CliOptions }) { if (database) { await database.kill(); } - await buildService.kill(); + await build.kill(); await telemetry.kill(); ui.kill(); }; const shutdown = setupShutdown({ common, cleanup }); + let schemaBuild: SchemaBuild | undefined; + const buildQueue = createQueue({ initialStart: true, concurrency: 1, @@ -88,20 +94,19 @@ export async function dev({ cliOptions }: { cliOptions: CliOptions }) { await database.kill(); } + schemaBuild = result.result.schemaBuild; + database = createDatabase({ common, - schema: result.indexingBuild.schema, - databaseConfig: result.indexingBuild.databaseConfig, - buildId: result.indexingBuild.buildId, - instanceId: result.indexingBuild.instanceId, - namespace: result.indexingBuild.namespace, - statements: result.indexingBuild.statements, + preBuild: result.result.preBuild, + schemaBuild: result.result.schemaBuild, }); indexingCleanupReloadable = await run({ common, - build: result.indexingBuild, database, + schemaBuild: result.result.schemaBuild, + indexingBuild: result.result.indexingBuild, onFatalError: () => { shutdown({ reason: "Received fatal error", code: 1 }); }, @@ -110,14 +115,25 @@ export async function dev({ cliOptions }: { cliOptions: CliOptions }) { buildQueue.add({ status: "error", kind: "indexing", error }); }, }); - } - metrics.resetApiMetrics(); - apiCleanupReloadable = await runServer({ - common, - build: result.apiBuild, - database: database!, - }); + metrics.resetApiMetrics(); + + apiCleanupReloadable = await runServer({ + common, + database: database!, + schemaBuild: result.result.schemaBuild, + apiBuild: result.result.apiBuild, + }); + } else { + metrics.resetApiMetrics(); + + apiCleanupReloadable = await runServer({ + common, + database: database!, + schemaBuild: schemaBuild!, + apiBuild: result.result, + }); + } } else { // This handles indexing function build failures on hot reload. 
metrics.ponder_indexing_has_error.set(1); @@ -131,28 +147,70 @@ export async function dev({ cliOptions }: { cliOptions: CliOptions }) { let database: Database | undefined; - const buildResult = await buildService.start({ - watch: true, + const executeResult = await build.execute(); + + if (executeResult.configResult.status === "error") { + await shutdown({ reason: "Failed intial build", code: 1 }); + return cleanup; + } + if (executeResult.schemaResult.status === "error") { + await shutdown({ reason: "Failed intial build", code: 1 }); + return cleanup; + } + if (executeResult.indexingResult.status === "error") { + await shutdown({ reason: "Failed intial build", code: 1 }); + return cleanup; + } + if (executeResult.apiResult.status === "error") { + await shutdown({ reason: "Failed intial build", code: 1 }); + return cleanup; + } + + const initialBuildResult = mergeResults([ + build.preCompile(executeResult.configResult.result), + build.compileSchema(executeResult.schemaResult.result), + await build.compileIndexing({ + configResult: executeResult.configResult.result, + schemaResult: executeResult.schemaResult.result, + indexingResult: executeResult.indexingResult.result, + }), + build.compileApi({ apiResult: executeResult.apiResult.result }), + ]); + + if (initialBuildResult.status === "error") { + await shutdown({ reason: "Failed intial build", code: 1 }); + return cleanup; + } + + build.startDev({ onBuild: (buildResult) => { buildQueue.clear(); buildQueue.add(buildResult); }, }); - if (buildResult.status === "error") { - await shutdown({ reason: "Failed intial build", code: 1 }); - return cleanup; - } - telemetry.record({ name: "lifecycle:session_start", properties: { cli_command: "dev", - ...buildPayload(buildResult.indexingBuild), + ...buildPayload({ + preBuild: initialBuildResult.result[0], + schemaBuild: initialBuildResult.result[1], + indexingBuild: initialBuildResult.result[2], + }), }, }); - buildQueue.add({ ...buildResult, kind: "indexing" }); + buildQueue.add({ + status: "success", + kind: "indexing", + result: { + preBuild: initialBuildResult.result[0], + schemaBuild: initialBuildResult.result[1], + indexingBuild: initialBuildResult.result[2], + apiBuild: initialBuildResult.result[3], + }, + }); return async () => { buildQueue.pause(); diff --git a/packages/core/src/bin/commands/list.ts b/packages/core/src/bin/commands/list.ts new file mode 100644 index 000000000..e6ecbb3c9 --- /dev/null +++ b/packages/core/src/bin/commands/list.ts @@ -0,0 +1,141 @@ +import { createBuild } from "@/build/index.js"; +import { createLogger } from "@/common/logger.js"; +import { MetricsService } from "@/common/metrics.js"; +import { buildOptions } from "@/common/options.js"; +import { createTelemetry } from "@/common/telemetry.js"; +import { + type PonderApp, + type PonderInternalSchema, + createDatabase, +} from "@/database/index.js"; +import { printTable } from "@/ui/Table.js"; +import { formatEta } from "@/utils/format.js"; +import { type SelectQueryBuilder, sql } from "kysely"; +import type { CliOptions } from "../ponder.js"; +import { setupShutdown } from "../utils/shutdown.js"; + +const emptySchemaBuild = { + schema: {}, + statements: { + tables: { sql: [], json: [] }, + enums: { sql: [], json: [] }, + indexes: { sql: [], json: [] }, + }, +}; + +export async function list({ cliOptions }: { cliOptions: CliOptions }) { + const options = buildOptions({ cliOptions }); + + const logger = createLogger({ + level: options.logLevel, + mode: options.logFormat, + }); + + const metrics = new 
MetricsService(); + const telemetry = createTelemetry({ options, logger }); + const common = { options, logger, metrics, telemetry }; + + const build = await createBuild({ common }); + + const cleanup = async () => { + await build.kill(); + await telemetry.kill(); + }; + + const shutdown = setupShutdown({ common, cleanup }); + + const executeResult = await build.execute(); + + if (executeResult.configResult.status === "error") { + await shutdown({ reason: "Failed intial build", code: 1 }); + return; + } + + const buildResult = build.preCompile(executeResult.configResult.result); + + if (buildResult.status === "error") { + await shutdown({ reason: "Failed intial build", code: 1 }); + return; + } + + const database = createDatabase({ + common, + preBuild: buildResult.result, + schemaBuild: emptySchemaBuild, + }); + + const ponderSchemas = await database.qb.internal + .selectFrom("information_schema.tables") + // @ts-ignore + .select(["table_name", "table_schema"]) + // @ts-ignore + .where("table_name", "=", "_ponder_meta") + .where( + // @ts-ignore + "table_schema", + "in", + database.qb.internal + // @ts-ignore + .selectFrom("information_schema.schemata") + // @ts-ignore + .select("schema_name"), + ) + .execute(); + + let union: + | SelectQueryBuilder< + PonderInternalSchema, + "_ponder_meta", + { + value: PonderApp; + schema: string; + } + > + | undefined; + + for (const row of ponderSchemas) { + // @ts-ignore + const query = database.qb.internal + .selectFrom(`${row.table_schema}._ponder_meta`) + .select(["value", sql`${row.table_schema}`.as("schema")]) + // @ts-ignore + .where("key", "=", "app") as NonNullable; + + if (union === undefined) { + union = query; + } else { + union = union.unionAll(query); + } + } + + const result = ponderSchemas.length === 0 ? [] : await union!.execute(); + + printTable({ + columns: [ + { title: "Schema", key: "table_schema", align: "left" }, + { title: "Active", key: "active", align: "right" }, + { title: "Last active", key: "last_active", align: "right" }, + { title: "Table count", key: "table_count", align: "right" }, + ], + rows: result + .filter((row) => row.value.is_dev === 0) + .map((row) => ({ + table_schema: row.schema, + active: + row.value.is_locked === 1 && + row.value.heartbeat_at + common.options.databaseHeartbeatTimeout > + Date.now() + ? "yes" + : "no", + last_active: + row.value.is_locked === 1 + ? 
"---" + : `${formatEta(Date.now() - row.value.heartbeat_at)} ago`, + table_count: row.value.table_names.length, + })), + }); + + await database.kill(); + + await shutdown({ reason: "Success", code: 0 }); +} diff --git a/packages/core/src/bin/commands/serve.ts b/packages/core/src/bin/commands/serve.ts index a088c1237..64fab0350 100644 --- a/packages/core/src/bin/commands/serve.ts +++ b/packages/core/src/bin/commands/serve.ts @@ -1,11 +1,12 @@ import path from "node:path"; -import { createBuildService } from "@/build/index.js"; +import { createBuild } from "@/build/index.js"; import { createLogger } from "@/common/logger.js"; import { MetricsService } from "@/common/metrics.js"; import { buildOptions } from "@/common/options.js"; import { buildPayload, createTelemetry } from "@/common/telemetry.js"; import { createDatabase } from "@/database/index.js"; import { createServer } from "@/server/index.js"; +import { mergeResults } from "@/utils/result.js"; import type { CliOptions } from "../ponder.js"; import { setupShutdown } from "../utils/shutdown.js"; @@ -39,7 +40,7 @@ export async function serve({ cliOptions }: { cliOptions: CliOptions }) { const telemetry = createTelemetry({ options, logger }); const common = { options, logger, metrics, telemetry }; - const buildService = await createBuildService({ common }); + const build = await createBuild({ common }); let cleanupReloadable = () => Promise.resolve(); @@ -50,27 +51,47 @@ export async function serve({ cliOptions }: { cliOptions: CliOptions }) { const shutdown = setupShutdown({ common, cleanup }); - const buildResult = await buildService.start({ watch: false }); - // Once we have the initial build, we can kill the build service. - await buildService.kill(); + const executeResult = await build.execute(); + await build.kill(); + + if (executeResult.configResult.status === "error") { + await shutdown({ reason: "Failed intial build", code: 1 }); + return cleanup; + } + if (executeResult.schemaResult.status === "error") { + await shutdown({ reason: "Failed intial build", code: 1 }); + return cleanup; + } + if (executeResult.apiResult.status === "error") { + await shutdown({ reason: "Failed intial build", code: 1 }); + return cleanup; + } + + const buildResult = mergeResults([ + build.preCompile(executeResult.configResult.result), + build.compileSchema(executeResult.schemaResult.result), + build.compileApi({ apiResult: executeResult.apiResult.result }), + ]); if (buildResult.status === "error") { await shutdown({ reason: "Failed intial build", code: 1 }); return cleanup; } + const [preBuild, schemaBuild, apiBuild] = buildResult.result; + telemetry.record({ name: "lifecycle:session_start", properties: { cli_command: "serve", - ...buildPayload(buildResult.indexingBuild), + ...buildPayload({ + preBuild, + schemaBuild, + }), }, }); - const { databaseConfig, schema, instanceId, buildId, statements, namespace } = - buildResult.apiBuild; - - if (databaseConfig.kind === "pglite") { + if (preBuild.databaseConfig.kind === "pglite") { await shutdown({ reason: "The 'ponder serve' command does not support PGlite", code: 1, @@ -80,24 +101,15 @@ export async function serve({ cliOptions }: { cliOptions: CliOptions }) { const database = createDatabase({ common, - schema, - databaseConfig, - instanceId, - buildId, - statements, - namespace, + preBuild, + schemaBuild, }); const server = await createServer({ common, - app: buildResult.apiBuild.app, - routes: buildResult.apiBuild.routes, - graphqlSchema: buildResult.indexingBuild.graphqlSchema, database, - 
instanceId: - process.env.PONDER_EXPERIMENTAL_INSTANCE_ID === undefined - ? undefined - : instanceId, + schemaBuild, + apiBuild, }); cleanupReloadable = async () => { diff --git a/packages/core/src/bin/commands/start.ts b/packages/core/src/bin/commands/start.ts index e99797d69..b15109927 100644 --- a/packages/core/src/bin/commands/start.ts +++ b/packages/core/src/bin/commands/start.ts @@ -1,10 +1,11 @@ import path from "node:path"; -import { createBuildService } from "@/build/index.js"; +import { createBuild } from "@/build/index.js"; import { createLogger } from "@/common/logger.js"; import { MetricsService } from "@/common/metrics.js"; import { buildOptions } from "@/common/options.js"; import { buildPayload, createTelemetry } from "@/common/telemetry.js"; -import { createDatabase } from "@/database/index.js"; +import { type Database, createDatabase } from "@/database/index.js"; +import { mergeResults } from "@/utils/result.js"; import type { CliOptions } from "../ponder.js"; import { run } from "../utils/run.js"; import { runServer } from "../utils/runServer.js"; @@ -40,11 +41,14 @@ export async function start({ cliOptions }: { cliOptions: CliOptions }) { const telemetry = createTelemetry({ options, logger }); const common = { options, logger, metrics, telemetry }; - const buildService = await createBuildService({ common }); + const build = await createBuild({ common }); let cleanupReloadable = () => Promise.resolve(); let cleanupReloadableServer = () => Promise.resolve(); + // biome-ignore lint/style/useConst: + let database: Database | undefined; + const cleanup = async () => { await cleanupReloadable(); await cleanupReloadableServer(); @@ -56,37 +60,67 @@ export async function start({ cliOptions }: { cliOptions: CliOptions }) { const shutdown = setupShutdown({ common, cleanup }); - const buildResult = await buildService.start({ watch: false }); - // Once we have the initial build, we can kill the build service. 
- await buildService.kill(); + const executeResult = await build.execute(); + await build.kill(); + + if (executeResult.configResult.status === "error") { + await shutdown({ reason: "Failed initial build", code: 1 }); + return cleanup; + } + if (executeResult.schemaResult.status === "error") { + await shutdown({ reason: "Failed initial build", code: 1 }); + return cleanup; + } + if (executeResult.indexingResult.status === "error") { + await shutdown({ reason: "Failed initial build", code: 1 }); + return cleanup; + } + if (executeResult.apiResult.status === "error") { + await shutdown({ reason: "Failed initial build", code: 1 }); + return cleanup; + } + + const buildResult = mergeResults([ + build.preCompile(executeResult.configResult.result), + build.compileSchema(executeResult.schemaResult.result), + await build.compileIndexing({ + configResult: executeResult.configResult.result, + schemaResult: executeResult.schemaResult.result, + indexingResult: executeResult.indexingResult.result, + }), + build.compileApi({ apiResult: executeResult.apiResult.result }), + ]); if (buildResult.status === "error") { await shutdown({ reason: "Failed initial build", code: 1 }); return cleanup; } + const [preBuild, schemaBuild, indexingBuild, apiBuild] = buildResult.result; + telemetry.record({ name: "lifecycle:session_start", properties: { cli_command: "start", - ...buildPayload(buildResult.indexingBuild), + ...buildPayload({ + preBuild, + schemaBuild, + indexingBuild, + }), }, }); - const database = createDatabase({ + database = createDatabase({ common, - schema: buildResult.indexingBuild.schema, - databaseConfig: buildResult.indexingBuild.databaseConfig, - buildId: buildResult.indexingBuild.buildId, - instanceId: buildResult.indexingBuild.instanceId, - namespace: buildResult.indexingBuild.namespace, - statements: buildResult.indexingBuild.statements, + preBuild, + schemaBuild, }); cleanupReloadable = await run({ common, - build: buildResult.indexingBuild!, database, + schemaBuild, + indexingBuild, onFatalError: () => { shutdown({ reason: "Received fatal error", code: 1 }); }, @@ -97,8 +131,9 @@ export async function start({ cliOptions }: { cliOptions: CliOptions }) { cleanupReloadableServer = await runServer({ common, - build: buildResult.apiBuild, database, + schemaBuild, + apiBuild, }); return cleanup; diff --git a/packages/core/src/bin/ponder.ts b/packages/core/src/bin/ponder.ts index 08a1829fd..2d3bd9c95 100644 --- a/packages/core/src/bin/ponder.ts +++ b/packages/core/src/bin/ponder.ts @@ -7,6 +7,7 @@ import { Command } from "@commander-js/extra-typings"; import dotenv from "dotenv"; import { codegen } from "./commands/codegen.js"; import { dev } from "./commands/dev.js"; +import { list } from "./commands/list.js"; import { serve } from "./commands/serve.js"; import { start } from "./commands/start.js"; @@ -60,6 +61,7 @@ type GlobalOptions = { const devCommand = new Command("dev") .description("Start the development server with hot reloading") + .option("--schema ", "Database schema", String) .option("-p, --port ", "Port for the web server", Number, 42069) // NOTE: Do not set a default for hostname.
We currently rely on the Node.js // default behavior when passing undefined to http.Server.listen(), which @@ -80,6 +82,7 @@ const devCommand = new Command("dev") const startCommand = new Command("start") .description("Start the production server") + .option("--schema ", "Database schema", String) .option("-p, --port ", "Port for the web server", Number, 42069) .option( "-H, --hostname ", @@ -96,6 +99,7 @@ const startCommand = new Command("start") const serveCommand = new Command("serve") .description("Start the production HTTP server without the indexer") + .option("--schema ", "Database schema", String) .option("-p, --port ", "Port for the web server", Number, 42069) .option( "-H, --hostname ", @@ -110,6 +114,19 @@ const serveCommand = new Command("serve") await serve({ cliOptions }); }); +const dbCommand = new Command("db").description("Database management commands"); + +const listCommand = new Command("list") + .description("List all deployments") + .showHelpAfterError() + .action(async (_, command) => { + const cliOptions = { + ...command.optsWithGlobals(), + command: command.name(), + } as GlobalOptions & ReturnType; + await list({ cliOptions }); + }); + const codegenCommand = new Command("codegen") .description("Generate the schema.graphql file, then exit") .showHelpAfterError() @@ -141,9 +158,12 @@ const codegenCommand = new Command("codegen") // console.log("ponder cache prune"); // }); +dbCommand.addCommand(listCommand); + ponder.addCommand(devCommand); ponder.addCommand(startCommand); ponder.addCommand(serveCommand); +ponder.addCommand(dbCommand); ponder.addCommand(codegenCommand); export type CliOptions = Prettify< @@ -152,6 +172,7 @@ export type CliOptions = Prettify< ReturnType & ReturnType & ReturnType & + ReturnType & ReturnType > >; diff --git a/packages/core/src/bin/utils/run.test.ts b/packages/core/src/bin/utils/run.test.ts index a36b532fc..844fa9e1f 100644 --- a/packages/core/src/bin/utils/run.test.ts +++ b/packages/core/src/bin/utils/run.test.ts @@ -1,9 +1,14 @@ +import { ALICE } from "@/_test/constants.js"; import { setupAnvil, setupCommon, setupIsolatedDatabase, } from "@/_test/setup.js"; -import type { IndexingBuild } from "@/build/index.js"; +import { deployErc20 } from "@/_test/simulate.js"; +import { getErc20ConfigAndIndexingFunctions } from "@/_test/utils.js"; +import { getNetwork } from "@/_test/utils.js"; +import { buildConfigAndIndexingFunctions } from "@/build/configAndIndexingFunctions.js"; +import type { IndexingBuild, SchemaBuild } from "@/build/index.js"; import { buildSchema } from "@/build/schema.js"; import { createDatabase } from "@/database/index.js"; import { onchainTable } from "@/drizzle/index.js"; @@ -25,42 +30,57 @@ const schema = { account }; const graphqlSchema = buildGraphQLSchema(schema); test("run() setup", async (context) => { + const network = getNetwork(); + + const { address } = await deployErc20({ sender: ALICE }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + const indexingFunctions = { "Erc20:setup": vi.fn(), }; - const { statements, namespace } = buildSchema({ + const { statements } = buildSchema({ schema, - instanceId: "1234", }); - const build: IndexingBuild = { - buildId: "buildId", - instanceId: "1234", + const schemaBuild: SchemaBuild = { schema, + statements, graphqlSchema, - databaseConfig: context.databaseConfig, - networks: context.networks, - sources: 
context.sources, + }; + + const indexingBuild: IndexingBuild = { + buildId: "buildId", + networks: [network], + sources, indexingFunctions, - statements, - namespace, }; const database = createDatabase({ common: context.common, - schema, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "buildId", - statements, - namespace, + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { + schema, + statements, + }, }); const kill = await run({ common: context.common, - build, database, + schemaBuild, + indexingBuild, onFatalError: vi.fn(), onReloadableError: vi.fn(), }); @@ -73,45 +93,60 @@ test("run() setup", async (context) => { }); test("run() setup error", async (context) => { + const network = getNetwork(); + + const { address } = await deployErc20({ sender: ALICE }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + const indexingFunctions = { "Erc20:setup": vi.fn(), }; const onReloadableErrorPromiseResolver = promiseWithResolvers(); - const { statements, namespace } = buildSchema({ + const { statements } = buildSchema({ schema, - instanceId: "1234", }); - const build: IndexingBuild = { - buildId: "buildId", - instanceId: "1234", + const schemaBuild: SchemaBuild = { schema, + statements, graphqlSchema, - databaseConfig: context.databaseConfig, - networks: context.networks, - sources: context.sources, + }; + + const indexingBuild: IndexingBuild = { + buildId: "buildId", + networks: [network], + sources, indexingFunctions, - statements, - namespace, }; const database = createDatabase({ common: context.common, - schema, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "buildId", - statements, - namespace, + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { + schema, + statements, + }, }); indexingFunctions["Erc20:setup"].mockRejectedValue(new Error()); const kill = await run({ common: context.common, - build, database, + schemaBuild, + indexingBuild, onFatalError: vi.fn(), onReloadableError: () => { onReloadableErrorPromiseResolver.resolve(); diff --git a/packages/core/src/bin/utils/run.ts b/packages/core/src/bin/utils/run.ts index f08d57f81..a2c4d9ca0 100644 --- a/packages/core/src/bin/utils/run.ts +++ b/packages/core/src/bin/utils/run.ts @@ -1,4 +1,4 @@ -import type { IndexingBuild } from "@/build/index.js"; +import type { IndexingBuild, SchemaBuild } from "@/build/index.js"; import { runCodegen } from "@/common/codegen.js"; import type { Common } from "@/common/common.js"; import type { Database } from "@/database/index.js"; @@ -21,29 +21,22 @@ import { createQueue } from "@ponder/common"; /** Starts the sync and indexing services for the specified build. 
*/ export async function run({ common, - build, + schemaBuild, + indexingBuild, database, onFatalError, onReloadableError, }: { common: Common; - build: IndexingBuild; + schemaBuild: SchemaBuild; + indexingBuild: IndexingBuild; database: Database; onFatalError: (error: Error) => void; onReloadableError: (error: Error) => void; }) { - const { - instanceId, - networks, - sources, - schema, - indexingFunctions, - graphqlSchema, - } = build; - let isKilled = false; - const { checkpoint: initialCheckpoint } = await database.setup(); + const { checkpoint: initialCheckpoint } = await database.setup(indexingBuild); const syncStore = createSyncStore({ common, @@ -52,21 +45,20 @@ export async function run({ const metadataStore = getMetadataStore({ db: database.qb.user, - instanceId, }); // This can be a long-running operation, so it's best to do it after // starting the server so the app can become responsive more quickly. await database.migrateSync(); - runCodegen({ common, graphqlSchema }); + runCodegen({ common, graphqlSchema: schemaBuild.graphqlSchema }); // Note: can throw const sync = await createSync({ common, syncStore, - networks, - sources, + networks: indexingBuild.networks, + sources: indexingBuild.sources, // Note: this is not great because it references the // `realtimeQueue` which isn't defined yet onRealtimeEvent: (realtimeEvent) => { @@ -95,7 +87,7 @@ export async function run({ // update the temporary `checkpoint` value set in the trigger. for (const events of splitEvents(event.events)) { const result = await handleEvents( - decodeEvents(common, sources, events), + decodeEvents(common, indexingBuild.sources, events), event.checkpoint, ); @@ -127,17 +119,17 @@ export async function run({ }); const indexingService = createIndexingService({ - indexingFunctions, + indexingFunctions: indexingBuild.indexingFunctions, common, - sources, - networks, + sources: indexingBuild.sources, + networks: indexingBuild.networks, sync, }); const historicalIndexingStore = createHistoricalIndexingStore({ common, database, - schema, + schema: schemaBuild.schema, initialCheckpoint, }); @@ -149,8 +141,8 @@ export async function run({ // If the initial checkpoint is zero, we need to run setup events. 
if (encodeCheckpoint(zeroCheckpoint) === initialCheckpoint) { const result = await indexingService.processSetupEvents({ - sources, - networks, + sources: indexingBuild.sources, + networks: indexingBuild.networks, }); if (result.status === "killed") { return; @@ -169,7 +161,7 @@ export async function run({ end = checkpoint; const result = await handleEvents( - decodeEvents(common, sources, events), + decodeEvents(common, indexingBuild.sources, events), checkpoint, ); @@ -240,13 +232,12 @@ export async function run({ }); await database.createIndexes(); - await database.createLiveViews(); await database.createTriggers(); indexingService.setIndexingStore( createRealtimeIndexingStore({ database, - schema, + schema: schemaBuild.schema, common, }), ); diff --git a/packages/core/src/bin/utils/runServer.ts b/packages/core/src/bin/utils/runServer.ts index 9e0eb0163..10524e5a6 100644 --- a/packages/core/src/bin/utils/runServer.ts +++ b/packages/core/src/bin/utils/runServer.ts @@ -1,4 +1,4 @@ -import type { ApiBuild } from "@/build/index.js"; +import type { ApiBuild, SchemaBuild } from "@/build/index.js"; import type { Common } from "@/common/common.js"; import type { Database } from "@/database/index.js"; import { createServer } from "@/server/index.js"; @@ -6,25 +6,13 @@ import { createServer } from "@/server/index.js"; /** * Starts the server for the specified build. */ -export async function runServer({ - common, - build, - database, -}: { +export async function runServer(params: { common: Common; - build: ApiBuild; + schemaBuild: SchemaBuild; + apiBuild: ApiBuild; database: Database; }) { - const { instanceId, graphqlSchema } = build; - - const server = await createServer({ - app: build.app, - routes: build.routes, - common, - graphqlSchema, - database, - instanceId, - }); + const server = await createServer(params); return async () => { await server.kill(); diff --git a/packages/core/src/build/configAndIndexingFunctions.test.ts b/packages/core/src/build/configAndIndexingFunctions.test.ts index 518ebb997..8d474080e 100644 --- a/packages/core/src/build/configAndIndexingFunctions.test.ts +++ b/packages/core/src/build/configAndIndexingFunctions.test.ts @@ -1,15 +1,19 @@ -import path from "node:path"; -import type { Options } from "@/common/options.js"; -import type { CallTraceFilter, LogFactory, LogFilter } from "@/sync/source.js"; +import { factory } from "@/config/address.js"; +import { + type LogFactory, + type LogFilter, + type TraceFilter, + shouldGetTransactionReceipt, +} from "@/sync/source.js"; import { http, type Address, - getEventSelector, - getFunctionSelector, parseAbiItem, + toEventSelector, + toFunctionSelector, zeroAddress, } from "viem"; -import { expect, test, vi } from "vitest"; +import { expect, test } from "vitest"; import { type Config, createConfig } from "../config/config.js"; import { buildConfigAndIndexingFunctions, @@ -30,10 +34,6 @@ const bytes1 = "0x0000000000000000000000000000000000000000000000000000000000000001"; const bytes2 = "0x0000000000000000000000000000000000000000000000000000000000000002"; -const options = { - ponderDir: ".ponder", - rootDir: "rootDir", -} as const satisfies Pick; test("buildConfigAndIndexingFunctions() builds topics for multiple events", async () => { const config = createConfig({ @@ -58,11 +58,11 @@ test("buildConfigAndIndexingFunctions() builds topics for multiple events", asyn { name: "a:Event0", fn: () => {} }, { name: "a:Event1", fn: () => {} }, ], - options, }); - expect((sources[0]!.filter as LogFilter).topics).toMatchObject([ - 
[getEventSelector(event0), getEventSelector(event1)], + expect((sources[0]!.filter as LogFilter).topic0).toMatchObject([ + toEventSelector(event0), + toEventSelector(event1), ]); }); @@ -91,11 +91,11 @@ test("buildConfigAndIndexingFunctions() handles overloaded event signatures and { name: "a:Event1()", fn: () => {} }, { name: "a:Event1(bytes32 indexed)", fn: () => {} }, ], - options, }); - expect((sources[0]!.filter as LogFilter).topics).toMatchObject([ - [getEventSelector(event1), getEventSelector(event1Overloaded)], + expect((sources[0]!.filter as LogFilter).topic0).toMatchObject([ + toEventSelector(event1), + toEventSelector(event1Overloaded), ]); }); @@ -116,7 +116,6 @@ test("buildConfigAndIndexingFunctions() creates a source for each network for mu const { sources } = await buildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); expect(sources.length).toBe(2); @@ -147,13 +146,12 @@ test("buildConfigAndIndexingFunctions() builds topics for event with args", asyn const { sources } = await buildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); - expect((sources[0]!.filter as LogFilter).topics).toMatchObject([ - [getEventSelector(event0)], - bytes1, + expect((sources[0]!.filter as LogFilter).topic0).toMatchObject([ + toEventSelector(event0), ]); + expect((sources[0]!.filter as LogFilter).topic1).toMatchObject(bytes1); }); test("buildConfigAndIndexingFunctions() builds topics for event with unnamed parameters", async () => { @@ -179,12 +177,14 @@ test("buildConfigAndIndexingFunctions() builds topics for event with unnamed par const { sources } = await buildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event1", fn: () => {} }], - options, }); - expect((sources[0]!.filter as LogFilter).topics).toMatchObject([ - [getEventSelector(event1Overloaded)], - [bytes1, bytes2], + expect((sources[0]!.filter as LogFilter).topic0).toMatchObject([ + toEventSelector(event1Overloaded), + ]); + expect((sources[0]!.filter as LogFilter).topic1).toMatchObject([ + bytes1, + bytes2, ]); }); @@ -212,7 +212,6 @@ test("buildConfigAndIndexingFunctions() overrides default values with network-sp const { sources } = await buildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); expect((sources[0]!.filter as LogFilter).address).toBe(address2); @@ -238,7 +237,6 @@ test("buildConfigAndIndexingFunctions() handles network name shortcut", async () const { sources } = await buildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); expect(sources[0]!.networkName).toBe("mainnet"); @@ -262,12 +260,11 @@ test("buildConfigAndIndexingFunctions() validates network name", async () => { const result = await safeBuildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); expect(result.status).toBe("error"); expect(result.error?.message).toBe( - "Validation failed: Invalid network for contract 'a'. Got 'mainnetz', expected one of ['mainnet'].", + "Validation failed: Invalid network for 'a'. 
Got 'mainnetz', expected one of ['mainnet'].", ); }); @@ -288,7 +285,6 @@ test("buildConfigAndIndexingFunctions() warns for public RPC URL", async () => { const result = await safeBuildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); expect(result.status).toBe("success"); @@ -321,7 +317,6 @@ test("buildConfigAndIndexingFunctions() validates against multiple events and in const result = await safeBuildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); expect(result.status).toBe("error"); @@ -350,7 +345,6 @@ test("buildConfigAndIndexingFunctions() validates event filter event name must b const result = await safeBuildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); expect(result.status).toBe("error"); @@ -359,38 +353,6 @@ test("buildConfigAndIndexingFunctions() validates event filter event name must b ); }); -test("buildConfigAndIndexingFunctions() validates against specifying both factory and address", async () => { - const config = createConfig({ - networks: { - mainnet: { chainId: 1, transport: http("https://cloudflare-eth.com") }, - }, - contracts: { - a: { - network: "mainnet", - abi: [event0], - // @ts-expect-error - address: address1, - factory: { - address: address2, - event: eventFactory, - parameter: "child", - }, - }, - }, - }); - - const result = await safeBuildConfigAndIndexingFunctions({ - config, - rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, - }); - - expect(result.status).toBe("error"); - expect(result.error?.message).toBe( - "Validation failed: Contract 'a' cannot specify both 'factory' and 'address' options.", - ); -}); - test("buildConfigAndIndexingFunctions() validates address empty string", async () => { const config = createConfig({ networks: { @@ -408,7 +370,6 @@ test("buildConfigAndIndexingFunctions() validates address empty string", async ( const result = await safeBuildConfigAndIndexingFunctions({ config: config as unknown as Config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); expect(result.status).toBe("error"); @@ -435,7 +396,6 @@ test("buildConfigAndIndexingFunctions() validates address prefix", async () => { const result = await safeBuildConfigAndIndexingFunctions({ config: config as unknown as Config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); expect(result.status).toBe("error"); @@ -461,7 +421,6 @@ test("buildConfigAndIndexingFunctions() validates address length", async () => { const result = await safeBuildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); expect(result.status).toBe("error"); @@ -470,7 +429,7 @@ test("buildConfigAndIndexingFunctions() validates address length", async () => { ); }); -test("buildConfigAndIndexingFunctions() coerces NaN startBlock to 0", async () => { +test("buildConfigAndIndexingFunctions() coerces NaN startBlock to undefined", async () => { const config = createConfig({ networks: { mainnet: { chainId: 1, transport: http("http://127.0.0.1:8545") }, @@ -487,10 +446,9 @@ test("buildConfigAndIndexingFunctions() coerces NaN startBlock to 0", async () = const { sources } = await buildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); - expect(sources[0]?.filter.fromBlock).toBe(0); + expect(sources[0]?.filter.fromBlock).toBe(undefined); }); 
test("buildConfigAndIndexingFunctions() includeTransactionReceipts", async () => { @@ -510,19 +468,13 @@ test("buildConfigAndIndexingFunctions() includeTransactionReceipts", async () => }, }, }); - const { sources } = await buildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); - expect((sources[0]!.filter as LogFilter).includeTransactionReceipts).toBe( - true, - ); - expect((sources[1]!.filter as LogFilter).includeTransactionReceipts).toBe( - false, - ); + expect(shouldGetTransactionReceipt(sources[0]!.filter)).toBe(true); + expect(shouldGetTransactionReceipt(sources[1]!.filter)).toBe(false); }); test("buildConfigAndIndexingFunctions() includeCallTraces", async () => { @@ -547,21 +499,18 @@ test("buildConfigAndIndexingFunctions() includeCallTraces", async () => { const { sources } = await buildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a.func0()", fn: () => {} }], - options, }); expect(sources).toHaveLength(1); - expect((sources[0]!.filter as CallTraceFilter).fromAddress).toBeUndefined(); - expect((sources[0]!.filter as CallTraceFilter).toAddress).toMatchObject([ + expect((sources[0]!.filter as TraceFilter).fromAddress).toBeUndefined(); + expect((sources[0]!.filter as TraceFilter).toAddress).toMatchObject([ zeroAddress, ]); - expect( - (sources[0]!.filter as CallTraceFilter).functionSelectors, - ).toMatchObject([getFunctionSelector(func0)]); - expect( - (sources[0]!.filter as CallTraceFilter).includeTransactionReceipts, - ).toBe(false); + expect((sources[0]!.filter as TraceFilter).functionSelector).toMatchObject([ + toFunctionSelector(func0), + ]); + expect(shouldGetTransactionReceipt(sources[0]!.filter)).toBe(false); }); test("buildConfigAndIndexingFunctions() includeCallTraces with factory", async () => { @@ -577,11 +526,11 @@ test("buildConfigAndIndexingFunctions() includeCallTraces with factory", async ( mainnet: {}, optimism: { includeCallTraces: false }, }, - factory: { + address: factory({ address: address2, event: eventFactory, parameter: "child", - }, + }), abi: [func0], }, }, @@ -590,21 +539,18 @@ test("buildConfigAndIndexingFunctions() includeCallTraces with factory", async ( const { sources } = await buildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a.func0()", fn: () => {} }], - options, }); expect(sources).toHaveLength(1); - expect((sources[0]!.filter as CallTraceFilter).fromAddress).toBeUndefined(); + expect((sources[0]!.filter as TraceFilter).fromAddress).toBeUndefined(); expect( - ((sources[0]!.filter as CallTraceFilter).toAddress as LogFactory).address, + ((sources[0]!.filter as TraceFilter).toAddress as LogFactory).address, ).toMatchObject(address2); - expect( - (sources[0]!.filter as CallTraceFilter).functionSelectors, - ).toMatchObject([getFunctionSelector(func0)]); - expect( - (sources[0]!.filter as CallTraceFilter).includeTransactionReceipts, - ).toBe(false); + expect((sources[0]!.filter as TraceFilter).functionSelector).toMatchObject([ + toFunctionSelector(func0), + ]); + expect(shouldGetTransactionReceipt(sources[0]!.filter)).toBe(false); }); test("buildConfigAndIndexingFunctions() coerces NaN endBlock to undefined", async () => { @@ -624,177 +570,78 @@ test("buildConfigAndIndexingFunctions() coerces NaN endBlock to undefined", asyn const { sources } = await buildConfigAndIndexingFunctions({ config, rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, }); expect(sources[0]!.filter.toBlock).toBe(undefined); }); 
-test("buildConfigAndIndexingFunctions() database uses pglite by default", async () => { +test("buildConfigAndIndexingFunctions() account source", async () => { const config = createConfig({ - networks: { mainnet: { chainId: 1, transport: http() } }, - contracts: { a: { network: "mainnet", abi: [event0] } }, - }); - - const prev = process.env.DATABASE_URL; - // biome-ignore lint/performance/noDelete: Required to test default behavior. - delete process.env.DATABASE_URL; - - const { databaseConfig } = await buildConfigAndIndexingFunctions({ - config, - rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, - }); - expect(databaseConfig).toMatchObject({ - kind: "pglite", - options: { - dataDir: expect.stringContaining(path.join(".ponder", "pglite")), - }, - }); - - process.env.DATABASE_URL = prev; -}); - -test("buildConfigAndIndexingFunctions() database respects custom pglite path", async () => { - const config = createConfig({ - database: { kind: "pglite", directory: "custom-pglite/directory" }, - networks: { mainnet: { chainId: 1, transport: http() } }, - contracts: { a: { network: "mainnet", abi: [event0] } }, - }); - - const { databaseConfig } = await buildConfigAndIndexingFunctions({ - config, - rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, - }); - - expect(databaseConfig).toMatchObject({ - kind: "pglite", - options: { - dataDir: expect.stringContaining(path.join("custom-pglite", "directory")), + networks: { + mainnet: { chainId: 1, transport: http("http://127.0.0.1:8545") }, }, - }); -}); - -test("buildConfigAndIndexingFunctions() database uses pglite if specified even if DATABASE_URL env var present", async () => { - const config = createConfig({ - database: { kind: "pglite" }, - networks: { mainnet: { chainId: 1, transport: http() } }, - contracts: { a: { network: "mainnet", abi: [event0] } }, - }); - - vi.stubEnv("DATABASE_URL", "postgres://username@localhost:5432/database"); - - const { databaseConfig } = await buildConfigAndIndexingFunctions({ - config, - rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, - }); - expect(databaseConfig).toMatchObject({ - kind: "pglite", - options: { - dataDir: expect.stringContaining(path.join(".ponder", "pglite")), + accounts: { + a: { + network: { mainnet: {} }, + address: address1, + startBlock: 16370000, + endBlock: 16370020, + }, }, }); - vi.unstubAllEnvs(); -}); - -test("buildConfigAndIndexingFunctions() database uses postgres if DATABASE_URL env var present", async () => { - const config = createConfig({ - networks: { mainnet: { chainId: 1, transport: http() } }, - contracts: { a: { network: "mainnet", abi: [event0] } }, - }); - - vi.stubEnv("DATABASE_URL", "postgres://username@localhost:5432/database"); - - const { databaseConfig } = await buildConfigAndIndexingFunctions({ + const { sources } = await buildConfigAndIndexingFunctions({ config, - rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, - }); - expect(databaseConfig).toMatchObject({ - kind: "postgres", - poolConfig: { - connectionString: "postgres://username@localhost:5432/database", - }, + rawIndexingFunctions: [ + { name: "a:transfer:from", fn: () => {} }, + { name: "a:transaction:to", fn: () => {} }, + ], }); - vi.unstubAllEnvs(); -}); + expect(sources).toHaveLength(2); -test("buildConfigAndIndexingFunctions() database uses postgres if DATABASE_PRIVATE_URL env var present", async () => { - const config = createConfig({ - networks: { mainnet: { chainId: 1, transport: http() } }, - contracts: { a: { 
network: "mainnet", abi: [event0] } }, - }); + expect(sources[0]?.networkName).toBe("mainnet"); + expect(sources[1]?.networkName).toBe("mainnet"); - vi.stubEnv("DATABASE_URL", "postgres://username@localhost:5432/database"); - vi.stubEnv( - "DATABASE_PRIVATE_URL", - "postgres://username@localhost:5432/better_database", - ); + expect(sources[0]?.name).toBe("a"); + expect(sources[1]?.name).toBe("a"); - const { databaseConfig } = await buildConfigAndIndexingFunctions({ - config, - rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, - }); - expect(databaseConfig).toMatchObject({ - kind: "postgres", - poolConfig: { - connectionString: "postgres://username@localhost:5432/better_database", - }, - }); + expect(sources[0]?.filter.type).toBe("transaction"); + expect(sources[1]?.filter.type).toBe("transfer"); - vi.unstubAllEnvs(); -}); + expect(sources[0]?.filter.fromBlock).toBe(16370000); + expect(sources[1]?.filter.fromBlock).toBe(16370000); -test("buildConfigAndIndexingFunctions() throws for postgres database with no connection string", async () => { - const config = createConfig({ - database: { kind: "postgres" }, - networks: { mainnet: { chainId: 1, transport: http() } }, - contracts: { a: { network: "mainnet", abi: [event0] } }, - }); - - const prev = process.env.DATABASE_URL; - // biome-ignore lint/performance/noDelete: Required to test default behavior. - delete process.env.DATABASE_URL; - - await expect(() => - buildConfigAndIndexingFunctions({ - config, - rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, - }), - ).rejects.toThrow( - "Invalid database configuration: 'kind' is set to 'postgres' but no connection string was provided.", - ); - - process.env.DATABASE_URL = prev; + expect(sources[0]?.filter.toBlock).toBe(16370020); + expect(sources[1]?.filter.toBlock).toBe(16370020); }); -test("buildConfigAndIndexingFunctions() database with postgres uses pool config", async () => { +test("buildConfigAndIndexingFunctions() block source", async () => { const config = createConfig({ - database: { - kind: "postgres", - connectionString: "postgres://username@localhost:5432/database", - poolConfig: { max: 100 }, + networks: { + mainnet: { chainId: 1, transport: http("http://127.0.0.1:8545") }, + }, + blocks: { + a: { + network: { mainnet: {} }, + startBlock: 16370000, + endBlock: 16370020, + }, }, - networks: { mainnet: { chainId: 1, transport: http() } }, - contracts: { a: { network: "mainnet", abi: [event0] } }, }); - const { databaseConfig } = await buildConfigAndIndexingFunctions({ + const { sources } = await buildConfigAndIndexingFunctions({ config, - rawIndexingFunctions: [{ name: "a:Event0", fn: () => {} }], - options, - }); - expect(databaseConfig).toMatchObject({ - kind: "postgres", - poolConfig: { - connectionString: "postgres://username@localhost:5432/database", - max: 100, - }, + rawIndexingFunctions: [{ name: "a:block", fn: () => {} }], }); + + expect(sources).toHaveLength(1); + + expect(sources[0]?.networkName).toBe("mainnet"); + expect(sources[0]?.name).toBe("a"); + expect(sources[0]?.filter.type).toBe("block"); + // @ts-ignore + expect(sources[0]?.filter.interval).toBe(1); + expect(sources[0]?.filter.fromBlock).toBe(16370000); + expect(sources[0]?.filter.toBlock).toBe(16370020); }); diff --git a/packages/core/src/build/configAndIndexingFunctions.ts b/packages/core/src/build/configAndIndexingFunctions.ts index 479abcb39..84134ac2f 100644 --- a/packages/core/src/build/configAndIndexingFunctions.ts +++ 
b/packages/core/src/build/configAndIndexingFunctions.ts @@ -1,8 +1,6 @@ -import path from "node:path"; import { BuildError } from "@/common/errors.js"; -import type { Options } from "@/common/options.js"; +import type { Factory } from "@/config/address.js"; import type { Config } from "@/config/config.js"; -import type { DatabaseConfig } from "@/config/database.js"; import { type Network, getFinalityBlockCount, @@ -10,12 +8,21 @@ import { isRpcUrlPublic, } from "@/config/networks.js"; import { buildAbiEvents, buildAbiFunctions, buildTopics } from "@/sync/abi.js"; -import type { BlockSource, ContractSource } from "@/sync/source.js"; +import { + type AccountSource, + type BlockSource, + type ContractSource, + type Source, + defaultBlockFilterInclude, + defaultLogFilterInclude, + defaultTraceFilterInclude, + defaultTransactionFilterInclude, + defaultTransactionReceiptInclude, + defaultTransferFilterInclude, +} from "@/sync/source.js"; import { chains } from "@/utils/chains.js"; import { toLowerCase } from "@/utils/lowercase.js"; -import { dedupe } from "@ponder/common"; -import parse from "pg-connection-string"; -import type { Hex, LogTopic } from "viem"; +import type { Address, Hex, LogTopic } from "viem"; import { buildLogFactory } from "./factory.js"; export type RawIndexingFunctions = { @@ -27,108 +34,50 @@ export type IndexingFunctions = { [eventName: string]: (...args: any) => any; }; +const flattenSources = < + T extends Config["contracts"] | Config["accounts"] | Config["blocks"], +>( + config: T, +): (Omit & { name: string; network: string })[] => { + return Object.entries(config).flatMap( + ([name, source]: [string, T[string]]) => { + if (typeof source.network === "string") { + return { + name, + ...source, + }; + } else { + return Object.entries(source.network).map( + ([network, sourceOverride]) => { + const { network: _network, ...base } = source; + + return { + name, + network, + ...base, + ...sourceOverride, + }; + }, + ); + } + }, + ); +}; + export async function buildConfigAndIndexingFunctions({ config, rawIndexingFunctions, - options: { rootDir, ponderDir }, }: { config: Config; rawIndexingFunctions: RawIndexingFunctions; - options: Pick; }): Promise<{ - databaseConfig: DatabaseConfig; networks: Network[]; - sources: (BlockSource | ContractSource)[]; + sources: Source[]; indexingFunctions: IndexingFunctions; logs: { level: "warn" | "info" | "debug"; msg: string }[]; }> { const logs: { level: "warn" | "info" | "debug"; msg: string }[] = []; - // Build database. - let databaseConfig: DatabaseConfig; - - // Determine PGlite directory, preferring config.database.directory if available - const pgliteDir = - config.database?.kind === "pglite" && config.database.directory - ? config.database.directory === "memory://" - ? "memory://" - : path.resolve(config.database.directory) - : path.join(ponderDir, "pglite"); - - const pglitePrintPath = - pgliteDir === "memory://" ? 
"memory://" : path.relative(rootDir, pgliteDir); - - if (config.database?.kind) { - if (config.database.kind === "postgres") { - let connectionString: string | undefined = undefined; - let source: string | undefined = undefined; - - if (config.database.connectionString) { - connectionString = config.database.connectionString; - source = "from ponder.config.ts"; - } else if (process.env.DATABASE_PRIVATE_URL) { - connectionString = process.env.DATABASE_PRIVATE_URL; - source = "from DATABASE_PRIVATE_URL env var"; - } else if (process.env.DATABASE_URL) { - connectionString = process.env.DATABASE_URL; - source = "from DATABASE_URL env var"; - } else { - throw new Error( - `Invalid database configuration: 'kind' is set to 'postgres' but no connection string was provided.`, - ); - } - - logs.push({ - level: "info", - msg: `Using Postgres database '${getDatabaseName(connectionString)}' (${source})`, - }); - - const poolConfig = { - max: config.database.poolConfig?.max ?? 30, - connectionString, - }; - - databaseConfig = { kind: "postgres", poolConfig }; - } else { - logs.push({ - level: "info", - msg: `Using PGlite database in '${pglitePrintPath}' (from ponder.config.ts)`, - }); - - databaseConfig = { kind: "pglite", options: { dataDir: pgliteDir } }; - } - } else { - let connectionString: string | undefined = undefined; - let source: string | undefined = undefined; - if (process.env.DATABASE_PRIVATE_URL) { - connectionString = process.env.DATABASE_PRIVATE_URL; - source = "from DATABASE_PRIVATE_URL env var"; - } else if (process.env.DATABASE_URL) { - connectionString = process.env.DATABASE_URL; - source = "from DATABASE_URL env var"; - } - - // If either of the DATABASE_URL env vars are set, use Postgres. - if (connectionString !== undefined) { - logs.push({ - level: "info", - msg: `Using Postgres database ${getDatabaseName(connectionString)} (${source})`, - }); - - const poolConfig = { max: 30, connectionString }; - - databaseConfig = { kind: "postgres", poolConfig }; - } else { - // Fall back to PGlite. - logs.push({ - level: "info", - msg: `Using PGlite database at ${pglitePrintPath} (default)`, - }); - - databaseConfig = { kind: "pglite", options: { dataDir: pgliteDir } }; - } - } - const networks: Network[] = await Promise.all( Object.entries(config.networks).map(async ([networkName, network]) => { const { chainId, transport } = network; @@ -172,6 +121,20 @@ export async function buildConfigAndIndexingFunctions({ }), ); + const sourceNames = new Set(); + for (const source of [ + ...Object.keys(config.contracts ?? {}), + ...Object.keys(config.accounts ?? {}), + ...Object.keys(config.blocks ?? {}), + ]) { + if (sourceNames.has(source)) { + throw new Error( + `Validation failed: Duplicate source name '${source}' not allowed.`, + ); + } + sourceNames.add(source); + } + // Validate and build indexing functions let indexingFunctionCount = 0; const indexingFunctions: IndexingFunctions = {}; @@ -180,10 +143,37 @@ export async function buildConfigAndIndexingFunctions({ const eventNameComponents = eventName.includes(".") ? 
eventName.split(".") : eventName.split(":"); - const [sourceName, sourceEventName] = eventNameComponents; - if (eventNameComponents.length !== 2 || !sourceName || !sourceEventName) { + + const [sourceName] = eventNameComponents; + + if (!sourceName) { throw new Error( - `Validation failed: Invalid event '${eventName}', expected format '{sourceName}:{eventName}' or '{sourceName}.{eventName}'.`, + `Validation failed: Invalid event '${eventName}', expected format '{sourceName}:{eventName}' or '{sourceName}.{functionName}'.`, + ); + } + + if (eventNameComponents.length === 3) { + const [, sourceType, fromOrTo] = eventNameComponents; + + if ( + (sourceType !== "transaction" && sourceType !== "transfer") || + (fromOrTo !== "from" && fromOrTo !== "to") + ) { + throw new Error( + `Validation failed: Invalid event '${eventName}', expected format '{sourceName}:transaction:from', '{sourceName}:transaction:to', '{sourceName}:transfer:from', or '{sourceName}:transfer:to'.`, + ); + } + } else if (eventNameComponents.length === 2) { + const [, sourceEventName] = eventNameComponents; + + if (!sourceEventName) { + throw new Error( + `Validation failed: Invalid event '${eventName}', expected format '{sourceName}:{eventName}' or '{sourceName}.{functionName}'.`, + ); + } + } else { + throw new Error( + `Validation failed: Invalid event '${eventName}', expected format '{sourceName}:{eventName}' or '{sourceName}.{functionName}'.`, ); } @@ -196,16 +186,15 @@ export async function buildConfigAndIndexingFunctions({ // Validate that the indexing function uses a sourceName that is present in the config. const matchedSourceName = Object.keys({ ...(config.contracts ?? {}), + ...(config.accounts ?? {}), ...(config.blocks ?? {}), }).find((_sourceName) => _sourceName === sourceName); if (!matchedSourceName) { - // Multi-network has N sources, but the hint here should not have duplicates. - const uniqueSourceNames = dedupe( - Object.keys({ ...(config.contracts ?? {}), ...(config.blocks ?? {}) }), - ); throw new Error( - `Validation failed: Invalid source name '${sourceName}'. Got '${sourceName}', expected one of [${uniqueSourceNames + `Validation failed: Invalid source name '${sourceName}'. Got '${sourceName}', expected one of [${Array.from( + sourceNames, + ) .map((n) => `'${n}'`) .join(", ")}].`, ); @@ -219,118 +208,56 @@ export async function buildConfigAndIndexingFunctions({ logs.push({ level: "warn", msg: "No indexing functions were registered." }); } - const contractSources: ContractSource[] = Object.entries( - config.contracts ?? {}, - ) - // First, apply any network-specific overrides and flatten the result. - .flatMap(([contractName, contract]) => { - if (contract.network === null || contract.network === undefined) { - throw new Error( - `Validation failed: Network for contract '${contractName}' is null or undefined. Expected one of [${networks - .map((n) => `'${n.name}'`) - .join(", ")}].`, - ); - } - - const startBlockMaybeNan = contract.startBlock ?? 0; - const startBlock = Number.isNaN(startBlockMaybeNan) - ? 0 - : startBlockMaybeNan; - const endBlockMaybeNan = contract.endBlock; - const endBlock = Number.isNaN(endBlockMaybeNan) - ? undefined - : endBlockMaybeNan; - - if (endBlock !== undefined && endBlock < startBlock) { - throw new Error( - `Validation failed: Start block for contract '${contractName}' is after end block (${startBlock} > ${endBlock}).`, - ); - } - - // Single network case. 
- if (typeof contract.network === "string") { - return { - id: `log_${contractName}_${contract.network}`, - name: contractName, - networkName: contract.network, - abi: contract.abi, - - address: "address" in contract ? contract.address : undefined, - factory: "factory" in contract ? contract.factory : undefined, - filter: contract.filter, - - includeTransactionReceipts: - contract.includeTransactionReceipts ?? false, - includeCallTraces: contract.includeCallTraces ?? false, + // common validation for all sources + for (const source of [ + ...flattenSources(config.contracts ?? {}), + ...flattenSources(config.accounts ?? {}), + ...flattenSources(config.blocks ?? {}), + ]) { + if (source.network === null || source.network === undefined) { + throw new Error( + `Validation failed: Network for '${source.name}' is null or undefined. Expected one of [${networks + .map((n) => `'${n.name}'`) + .join(", ")}].`, + ); + } - startBlock, - endBlock, - }; - } + const startBlockMaybeNan = source.startBlock; + const startBlock = Number.isNaN(startBlockMaybeNan) + ? undefined + : startBlockMaybeNan; + const endBlockMaybeNan = source.endBlock; + const endBlock = Number.isNaN(endBlockMaybeNan) + ? undefined + : endBlockMaybeNan; + + if ( + startBlock !== undefined && + endBlock !== undefined && + endBlock < startBlock + ) { + throw new Error( + `Validation failed: Start block for '${source.name}' is after end block (${startBlock} > ${endBlock}).`, + ); + } - type DefinedNetworkOverride = NonNullable< - Exclude[string] - >; - - // Multiple networks case. - return Object.entries(contract.network) - .filter((n): n is [string, DefinedNetworkOverride] => !!n[1]) - .map(([networkName, overrides]) => { - const startBlockMaybeNan = - overrides.startBlock ?? contract.startBlock ?? 0; - const startBlock = Number.isNaN(startBlockMaybeNan) - ? 0 - : startBlockMaybeNan; - const endBlockMaybeNan = overrides.endBlock ?? contract.endBlock; - const endBlock = Number.isNaN(endBlockMaybeNan) - ? undefined - : endBlockMaybeNan; - - if (endBlock !== undefined && endBlock < startBlock) { - throw new Error( - `Validation failed: Start block for contract '${contractName}' is after end block (${startBlock} > ${endBlock}).`, - ); - } + const network = networks.find((n) => n.name === source.network); + if (!network) { + throw new Error( + `Validation failed: Invalid network for '${ + source.name + }'. Got '${source.network}', expected one of [${networks + .map((n) => `'${n.name}'`) + .join(", ")}].`, + ); + } + } - return { - name: contractName, - networkName, - abi: contract.abi, - - address: - ("address" in overrides ? overrides?.address : undefined) ?? - ("address" in contract ? contract.address : undefined), - factory: - ("factory" in overrides ? overrides.factory : undefined) ?? - ("factory" in contract ? contract.factory : undefined), - filter: overrides.filter ?? contract.filter, - - includeTransactionReceipts: - overrides.includeTransactionReceipts ?? - contract.includeTransactionReceipts ?? - false, - includeCallTraces: - overrides.includeCallTraces ?? - contract.includeCallTraces ?? - false, - - startBlock, - endBlock, - }; - }); - }) - // Second, build and validate the factory or log source. - .flatMap((rawContract): ContractSource[] => { - const network = networks.find((n) => n.name === rawContract.networkName); - if (!network) { - throw new Error( - `Validation failed: Invalid network for contract '${ - rawContract.name - }'. 
Got '${rawContract.networkName}', expected one of [${networks - .map((n) => `'${n.name}'`) - .join(", ")}].`, - ); - } + const contractSources: ContractSource[] = flattenSources( + config.contracts ?? {}, + ) + .flatMap((source): ContractSource[] => { + const network = networks.find((n) => n.name === source.network)!; // Get indexing function that were registered for this contract const registeredLogEvents: string[] = []; @@ -342,29 +269,26 @@ export async function buildConfigAndIndexingFunctions({ string, string, ]; - if ( - logContractName === rawContract.name && - logEventName !== "setup" - ) { + if (logContractName === source.name && logEventName !== "setup") { registeredLogEvents.push(logEventName); } } - // call trace event + // trace event if (eventName.includes(".")) { const [functionContractName, functionName] = eventName.split(".") as [ string, string, ]; - if (functionContractName === rawContract.name) { + if (functionContractName === source.name) { registeredCallTraceEvents.push(functionName); } } } // Note: This can probably throw for invalid ABIs. Consider adding explicit ABI validation before this line. - const abiEvents = buildAbiEvents({ abi: rawContract.abi }); - const abiFunctions = buildAbiFunctions({ abi: rawContract.abi }); + const abiEvents = buildAbiEvents({ abi: source.abi }); + const abiFunctions = buildAbiFunctions({ abi: source.abi }); const registeredEventSelectors: Hex[] = []; // Validate that the registered log events exist in the abi @@ -399,28 +323,31 @@ export async function buildConfigAndIndexingFunctions({ registeredFunctionSelectors.push(abiFunction.selector); } - let topics: LogTopic[] = [registeredEventSelectors]; + let topic0: LogTopic = registeredEventSelectors; + let topic1: LogTopic = null; + let topic2: LogTopic = null; + let topic3: LogTopic = null; - if (rawContract.filter !== undefined) { + if (source.filter !== undefined) { if ( - Array.isArray(rawContract.filter.event) && - rawContract.filter.args !== undefined + Array.isArray(source.filter.event) && + source.filter.args !== undefined ) { throw new Error( - `Validation failed: Event filter for contract '${rawContract.name}' cannot contain indexed argument values if multiple events are provided.`, + `Validation failed: Event filter for contract '${source.name}' cannot contain indexed argument values if multiple events are provided.`, ); } - const filterSafeEventNames = Array.isArray(rawContract.filter.event) - ? rawContract.filter.event - : [rawContract.filter.event]; + const filterSafeEventNames = Array.isArray(source.filter.event) + ? source.filter.event + : [source.filter.event]; for (const filterSafeEventName of filterSafeEventNames) { const abiEvent = abiEvents.bySafeName[filterSafeEventName]; if (!abiEvent) { throw new Error( `Validation failed: Invalid filter for contract '${ - rawContract.name + source.name }'. Got event name '${filterSafeEventName}', expected one of [${Object.keys( abiEvents.bySafeName, ) @@ -434,10 +361,12 @@ export async function buildConfigAndIndexingFunctions({ // The first element of the array return from `buildTopics` being defined // is an invariant of the current filter design. // Note: This can throw. 
- const [topic0FromFilter, ...topicsFromFilter] = buildTopics( - rawContract.abi, - rawContract.filter, - ) as [Exclude, ...LogTopic[]]; + + const topics = buildTopics(source.abi, source.filter); + const topic0FromFilter = topics.topic0; + topic1 = topics.topic1; + topic2 = topics.topic2; + topic3 = topics.topic3; const filteredEventSelectors = Array.isArray(topic0FromFilter) ? topic0FromFilter @@ -453,7 +382,7 @@ export async function buildConfigAndIndexingFunctions({ throw new Error( `Validation failed: Event '${logEventName}' is excluded by the event filter defined on the contract '${ - rawContract.name + source.name }'. Got '${logEventName}', expected one of [${filteredEventSelectors .map((s) => abiEvents.bySelector[s]!.safeName) .map((eventName) => `'${eventName}'`) @@ -462,32 +391,34 @@ export async function buildConfigAndIndexingFunctions({ } } - topics = [registeredEventSelectors, ...topicsFromFilter]; + topic0 = registeredEventSelectors; } + const startBlockMaybeNan = source.startBlock; + const fromBlock = Number.isNaN(startBlockMaybeNan) + ? undefined + : startBlockMaybeNan; + const endBlockMaybeNan = source.endBlock; + const toBlock = Number.isNaN(endBlockMaybeNan) + ? undefined + : endBlockMaybeNan; + const contractMetadata = { type: "contract", - abi: rawContract.abi, + abi: source.abi, abiEvents, abiFunctions, - name: rawContract.name, - networkName: rawContract.networkName, + name: source.name, + networkName: source.network, } as const; - const resolvedFactory = rawContract?.factory; - const resolvedAddress = rawContract?.address; + const resolvedAddress = source?.address; - if (resolvedFactory !== undefined && resolvedAddress !== undefined) { - throw new Error( - `Validation failed: Contract '${contractMetadata.name}' cannot specify both 'factory' and 'address' options.`, - ); - } - - if (resolvedFactory) { + if (typeof resolvedAddress === "object") { // Note that this can throw. const logFactory = buildLogFactory({ chainId: network.chainId, - ...resolvedFactory, + ...(resolvedAddress as Factory), }); const logSource = { @@ -496,58 +427,68 @@ export async function buildConfigAndIndexingFunctions({ type: "log", chainId: network.chainId, address: logFactory, - topics, - includeTransactionReceipts: rawContract.includeTransactionReceipts, - fromBlock: rawContract.startBlock, - toBlock: rawContract.endBlock, + topic0, + topic1, + topic2, + topic3, + fromBlock, + toBlock, + include: defaultLogFilterInclude.concat( + source.includeTransactionReceipts + ? defaultTransactionReceiptInclude + : [], + ), }, } satisfies ContractSource; - if (rawContract.includeCallTraces) { + if (source.includeCallTraces) { return [ logSource, { ...contractMetadata, filter: { - type: "callTrace", + type: "trace", chainId: network.chainId, fromAddress: undefined, toAddress: logFactory, - functionSelectors: registeredFunctionSelectors, - includeTransactionReceipts: - rawContract.includeTransactionReceipts, - fromBlock: rawContract.startBlock, - toBlock: rawContract.endBlock, + callType: "CALL", + functionSelector: registeredFunctionSelectors, + includeReverted: false, + fromBlock, + toBlock, + include: defaultTraceFilterInclude.concat( + source.includeTransactionReceipts + ? defaultTransactionReceiptInclude + : [], + ), }, } satisfies ContractSource, ]; } return [logSource]; - } - - if (resolvedAddress !== undefined) { + } else if (resolvedAddress !== undefined) { for (const address of Array.isArray(resolvedAddress) ? 
resolvedAddress : [resolvedAddress]) { - if (!address.startsWith("0x")) + if (!address!.startsWith("0x")) throw new Error( - `Validation failed: Invalid prefix for address '${address}'. Got '${address.slice( + `Validation failed: Invalid prefix for address '${address}'. Got '${address!.slice( 0, 2, )}', expected '0x'.`, ); - if (address.length !== 42) + if (address!.length !== 42) throw new Error( - `Validation failed: Invalid length for address '${address}'. Got ${address.length}, expected 42 characters.`, + `Validation failed: Invalid length for address '${address}'. Got ${address!.length}, expected 42 characters.`, ); } } const validatedAddress = Array.isArray(resolvedAddress) - ? resolvedAddress.map((r) => toLowerCase(r)) + ? (resolvedAddress.map((r) => toLowerCase(r)) as Address[]) : resolvedAddress !== undefined - ? toLowerCase(resolvedAddress) + ? (toLowerCase(resolvedAddress) as Address) : undefined; const logSource = { @@ -556,20 +497,27 @@ export async function buildConfigAndIndexingFunctions({ type: "log", chainId: network.chainId, address: validatedAddress, - topics, - includeTransactionReceipts: rawContract.includeTransactionReceipts, - fromBlock: rawContract.startBlock, - toBlock: rawContract.endBlock, + topic0, + topic1, + topic2, + topic3, + fromBlock, + toBlock, + include: defaultLogFilterInclude.concat( + source.includeTransactionReceipts + ? defaultTransactionReceiptInclude + : [], + ), }, } satisfies ContractSource; - if (rawContract.includeCallTraces) { + if (source.includeCallTraces) { return [ logSource, { ...contractMetadata, filter: { - type: "callTrace", + type: "trace", chainId: network.chainId, fromAddress: undefined, toAddress: Array.isArray(validatedAddress) @@ -577,145 +525,290 @@ export async function buildConfigAndIndexingFunctions({ : validatedAddress === undefined ? undefined : [validatedAddress], - functionSelectors: registeredFunctionSelectors, - includeTransactionReceipts: - rawContract.includeTransactionReceipts, - fromBlock: rawContract.startBlock, - toBlock: rawContract.endBlock, + callType: "CALL", + functionSelector: registeredFunctionSelectors, + includeReverted: false, + fromBlock, + toBlock, + include: defaultTraceFilterInclude.concat( + source.includeTransactionReceipts + ? defaultTransactionReceiptInclude + : [], + ), }, } satisfies ContractSource, ]; } else return [logSource]; - }) - // Remove sources with no registered indexing functions + }) // Remove sources with no registered indexing functions .filter((source) => { const hasRegisteredIndexingFunctions = - source.filter.type === "callTrace" - ? source.filter.functionSelectors.length !== 0 - : source.filter.topics[0]?.length !== 0; + source.filter.type === "trace" + ? Array.isArray(source.filter.functionSelector) && + source.filter.functionSelector.length > 0 + : Array.isArray(source.filter.topic0) && + source.filter.topic0?.length > 0; if (!hasRegisteredIndexingFunctions) { logs.push({ level: "debug", msg: `No indexing functions were registered for '${ source.name - }' ${source.filter.type === "callTrace" ? "call traces" : "logs"}`, + }' ${source.filter.type === "trace" ? "traces" : "logs"}`, }); } return hasRegisteredIndexingFunctions; }); - const blockSources: BlockSource[] = Object.entries(config.blocks ?? {}) - .flatMap(([sourceName, blockSourceConfig]) => { - const startBlockMaybeNan = blockSourceConfig.startBlock ?? 0; - const startBlock = Number.isNaN(startBlockMaybeNan) - ? 0 + const accountSources: AccountSource[] = flattenSources(config.accounts ?? 
{}) + .flatMap((source): AccountSource[] => { + const network = networks.find((n) => n.name === source.network)!; + + const startBlockMaybeNan = source.startBlock; + const fromBlock = Number.isNaN(startBlockMaybeNan) + ? undefined : startBlockMaybeNan; - const endBlockMaybeNan = blockSourceConfig.endBlock; - const endBlock = Number.isNaN(endBlockMaybeNan) + const endBlockMaybeNan = source.endBlock; + const toBlock = Number.isNaN(endBlockMaybeNan) ? undefined : endBlockMaybeNan; - if (endBlock !== undefined && endBlock < startBlock) { + const resolvedAddress = source?.address; + + if (resolvedAddress === undefined) { throw new Error( - `Validation failed: Start block for block source '${sourceName}' is after end block (${startBlock} > ${endBlock}).`, + `Validation failed: Account '${source.name}' must specify an 'address'.`, ); } - if (typeof blockSourceConfig.network === "string") { - const network = networks.find( - (n) => n.name === blockSourceConfig.network, - ); - if (!network) { - throw new Error( - `Validation failed: Invalid network for block source '${sourceName}'. Got '${ - blockSourceConfig.network - }', expected one of [${networks.map((n) => `'${n.name}'`).join(", ")}].`, - ); - } + if (typeof resolvedAddress === "object") { + // Note that this can throw. + const logFactory = buildLogFactory({ + chainId: network.chainId, + ...(resolvedAddress as Factory), + }); - const intervalMaybeNan = blockSourceConfig.interval ?? 1; - const interval = Number.isNaN(intervalMaybeNan) ? 0 : intervalMaybeNan; + return [ + { + type: "account", + name: source.name, + networkName: source.network, + filter: { + type: "transaction", + chainId: network.chainId, + fromAddress: undefined, + toAddress: logFactory, + includeReverted: false, + fromBlock, + toBlock, + include: defaultTransactionFilterInclude, + }, + } satisfies AccountSource, + { + type: "account", + name: source.name, + networkName: source.network, + filter: { + type: "transaction", + chainId: network.chainId, + fromAddress: logFactory, + toAddress: undefined, + includeReverted: false, + fromBlock, + toBlock, + include: defaultTransactionFilterInclude, + }, + } satisfies AccountSource, + { + type: "account", + name: source.name, + networkName: source.network, + filter: { + type: "transfer", + chainId: network.chainId, + fromAddress: undefined, + toAddress: logFactory, + includeReverted: false, + fromBlock, + toBlock, + include: defaultTransferFilterInclude.concat( + source.includeTransactionReceipts + ? defaultTransactionReceiptInclude + : [], + ), + }, + } satisfies AccountSource, + { + type: "account", + name: source.name, + networkName: source.network, + filter: { + type: "transfer", + chainId: network.chainId, + fromAddress: logFactory, + toAddress: undefined, + includeReverted: false, + fromBlock, + toBlock, + include: defaultTransferFilterInclude.concat( + source.includeTransactionReceipts + ? defaultTransactionReceiptInclude + : [], + ), + }, + } satisfies AccountSource, + ]; + } - if (!Number.isInteger(interval) || interval === 0) { + for (const address of Array.isArray(resolvedAddress) + ? resolvedAddress + : [resolvedAddress]) { + if (!address!.startsWith("0x")) throw new Error( - `Validation failed: Invalid interval for block source '${sourceName}'. Got ${interval}, expected a non-zero integer.`, + `Validation failed: Invalid prefix for address '${address}'. 
Got '${address!.slice( + 0, + 2, + )}', expected '0x'.`, ); - } + if (address!.length !== 42) + throw new Error( + `Validation failed: Invalid length for address '${address}'. Got ${address!.length}, expected 42 characters.`, + ); + } - return { - type: "block", - name: sourceName, - networkName: blockSourceConfig.network, + const validatedAddress = Array.isArray(resolvedAddress) + ? (resolvedAddress.map((r) => toLowerCase(r)) as Address[]) + : resolvedAddress !== undefined + ? (toLowerCase(resolvedAddress) as Address) + : undefined; + + return [ + { + type: "account", + name: source.name, + + networkName: source.network, filter: { - type: "block", + type: "transaction", chainId: network.chainId, - interval: interval, - offset: startBlock % interval, - fromBlock: startBlock, - toBlock: endBlock, + fromAddress: undefined, + toAddress: validatedAddress, + includeReverted: false, + fromBlock, + toBlock, + include: defaultTransactionFilterInclude, }, - } satisfies BlockSource; + } satisfies AccountSource, + { + type: "account", + name: source.name, + networkName: source.network, + filter: { + type: "transaction", + chainId: network.chainId, + fromAddress: validatedAddress, + toAddress: undefined, + includeReverted: false, + fromBlock, + toBlock, + include: defaultTransactionFilterInclude, + }, + } satisfies AccountSource, + { + type: "account", + name: source.name, + networkName: source.network, + filter: { + type: "transfer", + chainId: network.chainId, + fromAddress: undefined, + toAddress: validatedAddress, + includeReverted: false, + fromBlock, + toBlock, + include: defaultTransferFilterInclude.concat( + source.includeTransactionReceipts + ? defaultTransactionReceiptInclude + : [], + ), + }, + } satisfies AccountSource, + { + type: "account", + name: source.name, + networkName: source.network, + filter: { + type: "transfer", + chainId: network.chainId, + fromAddress: validatedAddress, + toAddress: undefined, + includeReverted: false, + fromBlock, + toBlock, + include: defaultTransferFilterInclude.concat( + source.includeTransactionReceipts + ? defaultTransactionReceiptInclude + : [], + ), + }, + } satisfies AccountSource, + ]; + }) + .filter((source) => { + const eventName = + source.filter.type === "transaction" + ? source.filter.fromAddress === undefined + ? `${source.name}:transaction:to` + : `${source.name}:transaction:from` + : source.filter.fromAddress === undefined + ? `${source.name}:transfer:to` + : `${source.name}:transfer:from`; + + const hasRegisteredIndexingFunction = + indexingFunctions[eventName] !== undefined; + if (!hasRegisteredIndexingFunction) { + logs.push({ + level: "debug", + msg: `No indexing functions were registered for '${eventName}'`, + }); } + return hasRegisteredIndexingFunction; + }); - type DefinedNetworkOverride = NonNullable< - Exclude[string] - >; + const blockSources: BlockSource[] = flattenSources(config.blocks ?? {}) + .map((source) => { + const network = networks.find((n) => n.name === source.network)!; - return Object.entries(blockSourceConfig.network) - .filter((n): n is [string, DefinedNetworkOverride] => !!n[1]) - .map(([networkName, overrides]) => { - const network = networks.find((n) => n.name === networkName); - if (!network) { - throw new Error( - `Validation failed: Invalid network for block source '${sourceName}'. Got '${networkName}', expected one of [${networks - .map((n) => `'${n.name}'`) - .join(", ")}].`, - ); - } + const intervalMaybeNan = source.interval ?? 1; + const interval = Number.isNaN(intervalMaybeNan) ? 
0 : intervalMaybeNan; - const startBlockMaybeNan = - overrides.startBlock ?? blockSourceConfig.startBlock ?? 0; - const startBlock = Number.isNaN(startBlockMaybeNan) - ? 0 - : startBlockMaybeNan; - const endBlockMaybeNan = - overrides.endBlock ?? blockSourceConfig.endBlock; - const endBlock = Number.isNaN(endBlockMaybeNan) - ? undefined - : endBlockMaybeNan; - - if (endBlock !== undefined && endBlock < startBlock) { - throw new Error( - `Validation failed: Start block for block source '${sourceName}' is after end block (${startBlock} > ${endBlock}).`, - ); - } - - const intervalMaybeNan = - overrides.interval ?? blockSourceConfig.interval ?? 0; - const interval = Number.isNaN(intervalMaybeNan) - ? 0 - : intervalMaybeNan; + if (!Number.isInteger(interval) || interval === 0) { + throw new Error( + `Validation failed: Invalid interval for block source '${source.name}'. Got ${interval}, expected a non-zero integer.`, + ); + } - if (!Number.isInteger(interval) || interval === 0) { - throw new Error( - `Validation failed: Invalid interval for block source '${sourceName}'. Got ${interval}, expected a non-zero integer.`, - ); - } + const startBlockMaybeNan = source.startBlock; + const fromBlock = Number.isNaN(startBlockMaybeNan) + ? undefined + : startBlockMaybeNan; + const endBlockMaybeNan = source.endBlock; + const toBlock = Number.isNaN(endBlockMaybeNan) + ? undefined + : endBlockMaybeNan; - return { - type: "block", - name: sourceName, - networkName, - filter: { - type: "block", - chainId: network.chainId, - interval: interval, - offset: startBlock % interval, - fromBlock: startBlock, - toBlock: endBlock, - }, - } satisfies BlockSource; - }); + return { + type: "block", + name: source.name, + networkName: source.network, + filter: { + type: "block", + chainId: network.chainId, + interval: interval, + offset: (fromBlock ?? 0) % interval, + fromBlock, + toBlock, + include: defaultBlockFilterInclude, + }, + } satisfies BlockSource; }) .filter((blockSource) => { const hasRegisteredIndexingFunction = @@ -729,7 +822,7 @@ export async function buildConfigAndIndexingFunctions({ return hasRegisteredIndexingFunction; }); - const sources = [...contractSources, ...blockSources]; + const sources = [...contractSources, ...accountSources, ...blockSources]; // Filter out any networks that don't have any sources registered. 
const networksWithSources = networks.filter((network) => { @@ -752,7 +845,6 @@ export async function buildConfigAndIndexingFunctions({ } return { - databaseConfig, networks: networksWithSources, sources, indexingFunctions, @@ -763,17 +855,14 @@ export async function buildConfigAndIndexingFunctions({ export async function safeBuildConfigAndIndexingFunctions({ config, rawIndexingFunctions, - options, }: { config: Config; rawIndexingFunctions: RawIndexingFunctions; - options: Pick; }) { try { const result = await buildConfigAndIndexingFunctions({ config, rawIndexingFunctions, - options, }); return { @@ -781,7 +870,6 @@ export async function safeBuildConfigAndIndexingFunctions({ sources: result.sources, networks: result.networks, indexingFunctions: result.indexingFunctions, - databaseConfig: result.databaseConfig, logs: result.logs, } as const; } catch (_error) { @@ -790,8 +878,3 @@ export async function safeBuildConfigAndIndexingFunctions({ return { status: "error", error: buildError } as const; } } - -function getDatabaseName(connectionString: string) { - const parsed = (parse as unknown as typeof parse.parse)(connectionString); - return `${parsed.host}:${parsed.port}/${parsed.database}`; -} diff --git a/packages/core/src/build/index.ts b/packages/core/src/build/index.ts index 33ab57a28..88031e899 100644 --- a/packages/core/src/build/index.ts +++ b/packages/core/src/build/index.ts @@ -1,11 +1,719 @@ -import { type Extend, extend } from "@/utils/extend.js"; -import { create, kill, start } from "./service.js"; -import type { ApiBuild, IndexingBuild, Service } from "./service.js"; +import crypto from "node:crypto"; +import fs from "node:fs"; +import path from "node:path"; +import type { Common } from "@/common/common.js"; +import { BuildError } from "@/common/errors.js"; +import type { Config } from "@/config/config.js"; +import type { DatabaseConfig } from "@/config/database.js"; +import type { Network } from "@/config/networks.js"; +import type { Schema } from "@/drizzle/index.js"; +import type { SqlStatements } from "@/drizzle/kit/index.js"; +import type { PonderRoutes } from "@/hono/index.js"; +import type { Source } from "@/sync/source.js"; +import { type Result, mergeResults } from "@/utils/result.js"; +import { serialize } from "@/utils/serialize.js"; +import { glob } from "glob"; +import type { GraphQLSchema } from "graphql"; +import type { Hono } from "hono"; +import { createServer } from "vite"; +import { ViteNodeRunner } from "vite-node/client"; +import { ViteNodeServer } from "vite-node/server"; +import { installSourcemapsSupport } from "vite-node/source-map"; +import { normalizeModuleId, toFilePath } from "vite-node/utils"; +import viteTsconfigPathsPlugin from "vite-tsconfig-paths"; +import { + type IndexingFunctions, + type RawIndexingFunctions, + safeBuildConfigAndIndexingFunctions, +} from "./configAndIndexingFunctions.js"; +import { vitePluginPonder } from "./plugin.js"; +import { safeBuildPre } from "./pre.js"; +import { safeBuildSchema } from "./schema.js"; +import { parseViteNodeError } from "./stacktrace.js"; -const methods = { start, kill }; +const BUILD_ID_VERSION = "1"; -export const createBuildService = extend(create, methods); +export type PreBuild = { + databaseConfig: DatabaseConfig; + namespace: string; +}; -export type BuildService = Extend; +export type SchemaBuild = { + schema: Schema; + statements: SqlStatements; + graphqlSchema: GraphQLSchema; +}; -export type { IndexingBuild, ApiBuild }; +export type IndexingBuild = { + buildId: string; + sources: Source[]; + 
networks: Network[];
+ indexingFunctions: IndexingFunctions;
+};
+
+export type ApiBuild = {
+ app: Hono;
+ routes: PonderRoutes;
+};
+
+export type BuildResultDev =
+ | (Result<{
+ preBuild: PreBuild;
+ schemaBuild: SchemaBuild;
+ indexingBuild: IndexingBuild;
+ apiBuild: ApiBuild;
+ }> & { kind: "indexing" })
+ | (Result<ApiBuild> & { kind: "api" });
+
+type ExecuteResult = {
+ configResult: Result<{ config: Config; contentHash: string }>;
+ schemaResult: Result<{ schema: Schema; contentHash: string }>;
+ indexingResult: Result<{
+ indexingFunctions: RawIndexingFunctions;
+ contentHash: string;
+ }>;
+ apiResult: Result<{ app: Hono; routes: PonderRoutes }>;
+};
+
+export type Build = {
+ execute: () => Promise<ExecuteResult>;
+ preCompile: (params: { config: Config }) => Result<PreBuild>;
+ compileSchema: (params: { schema: Schema }) => Result<SchemaBuild>;
+ compileIndexing: (params: {
+ configResult: Extract<
+ ExecuteResult["configResult"],
+ { status: "success" }
+ >["result"];
+ schemaResult: Extract<
+ ExecuteResult["schemaResult"],
+ { status: "success" }
+ >["result"];
+ indexingResult: Extract<
+ ExecuteResult["indexingResult"],
+ { status: "success" }
+ >["result"];
+ }) => Promise<Result<IndexingBuild>>;
+ compileApi: (params: {
+ apiResult: Extract<
+ ExecuteResult["apiResult"],
+ { status: "success" }
+ >["result"];
+ }) => Result<ApiBuild>;
+ startDev: (params: {
+ onBuild: (buildResult: BuildResultDev) => void;
+ }) => void;
+ kill: () => Promise<void>;
+};
+
+export const createBuild = async ({
+ common,
+}: {
+ common: Common;
+}): Promise<Build> => {
+ const escapeRegex = /[.*+?^${}()|[\]\\]/g;
+
+ const escapedIndexingDir = common.options.indexingDir
+ // If on Windows, use a POSIX path for this regex.
+ .replace(/\\/g, "/")
+ // Escape special characters in the path.
+ .replace(escapeRegex, "\\$&");
+ const indexingRegex = new RegExp(`^${escapedIndexingDir}/.*\\.(ts|js)$`);
+
+ const escapedApiDir = common.options.apiDir
+ // If on Windows, use a POSIX path for this regex.
+ .replace(/\\/g, "/")
+ // Escape special characters in the path.
+ .replace(escapeRegex, "\\$&");
+ const apiRegex = new RegExp(`^${escapedApiDir}/.*\\.(ts|js)$`);
+
+ const indexingPattern = path
+ .join(common.options.indexingDir, "**/*.{js,mjs,ts,mts}")
+ .replace(/\\/g, "/");
+
+ const apiPattern = path
+ .join(common.options.apiDir, "**/*.{js,mjs,ts,mts}")
+ .replace(/\\/g, "/");
+
+ const viteLogger = {
+ warnedMessages: new Set(),
+ loggedErrors: new WeakSet(),
+ hasWarned: false,
+ clearScreen() {},
+ hasErrorLogged: (error: Error) => viteLogger.loggedErrors.has(error),
+ info: (msg: string) => {
+ common.logger.trace({ service: "build(vite)", msg });
+ },
+ warn: (msg: string) => {
+ viteLogger.hasWarned = true;
+ common.logger.trace({ service: "build(vite)", msg });
+ },
+ warnOnce: (msg: string) => {
+ if (viteLogger.warnedMessages.has(msg)) return;
+ viteLogger.hasWarned = true;
+ common.logger.trace({ service: "build(vite)", msg });
+ viteLogger.warnedMessages.add(msg);
+ },
+ error: (msg: string) => {
+ viteLogger.hasWarned = true;
+ common.logger.trace({ service: "build(vite)", msg });
+ },
+ };
+
+ const viteDevServer = await createServer({
+ root: common.options.rootDir,
+ cacheDir: path.join(common.options.ponderDir, "vite"),
+ publicDir: false,
+ customLogger: viteLogger,
+ server: { hmr: false },
+ plugins: [viteTsconfigPathsPlugin(), vitePluginPonder(common.options)],
+ });
+
+ // This is Vite boilerplate (initializes the Rollup container).
+ await viteDevServer.pluginContainer.buildStart({}); + + const viteNodeServer = new ViteNodeServer(viteDevServer); + installSourcemapsSupport({ + getSourceMap: (source) => viteNodeServer.getSourceMap(source), + }); + + const viteNodeRunner = new ViteNodeRunner({ + root: viteDevServer.config.root, + fetchModule: (id) => viteNodeServer.fetchModule(id, "ssr"), + resolveId: (id, importer) => viteNodeServer.resolveId(id, importer, "ssr"), + }); + + const executeFile = async ({ + file, + }: { file: string }): Promise< + { status: "success"; exports: any } | { status: "error"; error: Error } + > => { + try { + const exports = await viteNodeRunner.executeFile(file); + return { status: "success", exports } as const; + } catch (error_) { + const relativePath = path.relative(common.options.rootDir, file); + const error = parseViteNodeError(relativePath, error_ as Error); + return { status: "error", error } as const; + } + }; + + const executeConfig = async (): Promise< + Awaited>["configResult"] + > => { + const executeResult = await executeFile({ + file: common.options.configFile, + }); + + if (executeResult.status === "error") { + common.logger.error({ + service: "build", + msg: "Error while executing 'ponder.config.ts':", + error: executeResult.error, + }); + + return executeResult; + } + + const config = executeResult.exports.default as Config; + + const contentHash = crypto + .createHash("sha256") + .update(serialize(config)) + .digest("hex"); + + return { + status: "success", + result: { config, contentHash }, + } as const; + }; + + const executeSchema = async (): Promise< + Awaited>["schemaResult"] + > => { + const executeResult = await executeFile({ + file: common.options.schemaFile, + }); + + if (executeResult.status === "error") { + common.logger.error({ + service: "build", + msg: "Error while executing 'ponder.schema.ts':", + error: executeResult.error, + }); + + return executeResult; + } + + const schema = executeResult.exports; + + const contents = fs.readFileSync(common.options.schemaFile, "utf-8"); + return { + status: "success", + result: { + schema, + contentHash: crypto.createHash("sha256").update(contents).digest("hex"), + }, + } as const; + }; + + const executeIndexingFunctions = async (): Promise< + Awaited>["indexingResult"] + > => { + const files = glob.sync(indexingPattern, { + ignore: apiPattern, + }); + const executeResults = await Promise.all( + files.map(async (file) => ({ + ...(await executeFile({ file })), + file, + })), + ); + + for (const executeResult of executeResults) { + if (executeResult.status === "error") { + common.logger.error({ + service: "build", + msg: `Error while executing '${path.relative( + common.options.rootDir, + executeResult.file, + )}':`, + error: executeResult.error, + }); + + return executeResult; + } + } + + // Note that we are only hashing the file contents, not the exports. This is + // different from the config/schema, where we include the serializable object itself. 
+ const hash = crypto.createHash("sha256");
+ for (const file of files) {
+ try {
+ const contents = fs.readFileSync(file, "utf-8");
+ hash.update(contents);
+ } catch (e) {
+ common.logger.warn({
+ service: "build",
+ msg: `Unable to read contents of file '${file}' while constructing build ID`,
+ });
+ hash.update(file);
+ }
+ }
+ const contentHash = hash.digest("hex");
+
+ const exports = await viteNodeRunner.executeId("ponder:registry");
+
+ return {
+ status: "success",
+ result: {
+ indexingFunctions: exports.ponder.fns,
+ contentHash,
+ },
+ };
+ };
+
+ const executeApiRoutes = async (): Promise<
+ Awaited<ReturnType<Build["execute"]>>["apiResult"]
+ > => {
+ const files = glob.sync(apiPattern);
+ const executeResults = await Promise.all(
+ files.map(async (file) => ({
+ ...(await executeFile({ file })),
+ file,
+ })),
+ );
+
+ for (const executeResult of executeResults) {
+ if (executeResult.status === "error") {
+ common.logger.error({
+ service: "build",
+ msg: `Error while executing '${path.relative(
+ common.options.rootDir,
+ executeResult.file,
+ )}':`,
+ error: executeResult.error,
+ });
+
+ return executeResult;
+ }
+ }
+
+ const exports = await viteNodeRunner.executeId("ponder:registry");
+
+ return {
+ status: "success",
+ result: {
+ app: exports.ponder.hono,
+ routes: exports.ponder.routes,
+ },
+ };
+ };
+
+ let namespace = common.options.schema ?? process.env.DATABASE_SCHEMA;
+
+ const build = {
+ async execute(): Promise<ExecuteResult> {
+ if (namespace === undefined) {
+ if (
+ common.options.command === "start" ||
+ common.options.command === "serve"
+ ) {
+ const error = new BuildError(
+ "Database schema required. Specify with 'DATABASE_SCHEMA' env var or '--schema' CLI flag. Read more: https://ponder.sh/docs/getting-started/database#database-schema",
+ );
+ error.stack = undefined;
+ common.logger.error({
+ service: "build",
+ msg: "Failed build",
+ error,
+ });
+ return {
+ configResult: { status: "error", error },
+ schemaResult: { status: "error", error },
+ indexingResult: { status: "error", error },
+ apiResult: { status: "error", error },
+ } as const;
+ } else {
+ namespace = "public";
+ }
+ }
+
+ process.env.PONDER_DATABASE_SCHEMA = namespace;
+
+ // Note: Don't run these in parallel. If there are circular imports in user code,
+ // it's possible for ViteNodeRunner to return exports as undefined (a race condition).
+ const configResult = await executeConfig(); + const schemaResult = await executeSchema(); + const indexingResult = await executeIndexingFunctions(); + const apiResult = await executeApiRoutes(); + + return { + configResult, + schemaResult, + indexingResult, + apiResult, + }; + }, + preCompile({ config }): Result { + const preBuild = safeBuildPre({ + config, + options: common.options, + }); + if (preBuild.status === "error") { + common.logger.error({ + service: "build", + msg: "Failed build", + error: preBuild.error, + }); + + return preBuild; + } + + for (const log of preBuild.logs) { + common.logger[log.level]({ service: "build", msg: log.msg }); + } + + return { + status: "success", + result: { + databaseConfig: preBuild.databaseConfig, + namespace: namespace!, + }, + } as const; + }, + compileSchema({ schema }) { + const buildSchemaResult = safeBuildSchema({ + schema, + }); + + if (buildSchemaResult.status === "error") { + common.logger.error({ + service: "build", + msg: "Error while building schema:", + error: buildSchemaResult.error, + }); + + return buildSchemaResult; + } + + return { + status: "success", + result: { + schema, + statements: buildSchemaResult.statements, + graphqlSchema: buildSchemaResult.graphqlSchema, + }, + } as const; + }, + async compileIndexing({ configResult, schemaResult, indexingResult }) { + // Validates and build the config + const buildConfigAndIndexingFunctionsResult = + await safeBuildConfigAndIndexingFunctions({ + config: configResult.config, + rawIndexingFunctions: indexingResult.indexingFunctions, + }); + if (buildConfigAndIndexingFunctionsResult.status === "error") { + common.logger.error({ + service: "build", + msg: "Failed build", + error: buildConfigAndIndexingFunctionsResult.error, + }); + + return buildConfigAndIndexingFunctionsResult; + } + + for (const log of buildConfigAndIndexingFunctionsResult.logs) { + common.logger[log.level]({ service: "build", msg: log.msg }); + } + + const buildId = crypto + .createHash("sha256") + .update(BUILD_ID_VERSION) + .update(configResult.contentHash) + .update(schemaResult.contentHash) + .update(indexingResult.contentHash) + .digest("hex") + .slice(0, 10); + + return { + status: "success", + result: { + buildId, + sources: buildConfigAndIndexingFunctionsResult.sources, + networks: buildConfigAndIndexingFunctionsResult.networks, + indexingFunctions: + buildConfigAndIndexingFunctionsResult.indexingFunctions, + }, + } as const; + }, + compileApi({ apiResult }) { + for (const { + pathOrHandlers: [maybePathOrHandler], + } of apiResult.routes) { + if (typeof maybePathOrHandler === "string") { + if ( + maybePathOrHandler === "/status" || + maybePathOrHandler === "/metrics" || + maybePathOrHandler === "/health" + ) { + const error = new BuildError( + `Validation failed: API route "${maybePathOrHandler}" is reserved for internal use.`, + ); + error.stack = undefined; + common.logger.error({ + service: "build", + msg: "Failed build", + error, + }); + return { status: "error", error } as const; + } + } + } + + return { + status: "success", + result: { + app: apiResult.app, + routes: apiResult.routes, + }, + }; + }, + async startDev({ onBuild }) { + // Define the directories and files to ignore + const ignoredDirs = [ + common.options.generatedDir, + common.options.ponderDir, + ]; + const ignoredFiles = [ + path.join(common.options.rootDir, "ponder-env.d.ts"), + path.join(common.options.rootDir, ".env.local"), + ]; + + const isFileIgnored = (filePath: string) => { + const isInIgnoredDir = ignoredDirs.some((dir) => { + 
const rel = path.relative(dir, filePath); + return !rel.startsWith("..") && !path.isAbsolute(rel); + }); + + const isIgnoredFile = ignoredFiles.includes(filePath); + return isInIgnoredDir || isIgnoredFile; + }; + + const onFileChange = async (_file: string) => { + if (isFileIgnored(_file)) return; + + // Note that `toFilePath` always returns a POSIX path, even if you pass a Windows path. + const file = toFilePath( + normalizeModuleId(_file), + common.options.rootDir, + ).path; + + // Invalidate all modules that depend on the updated files. + // Note that `invalidateDepTree` accepts and returns POSIX paths, even on Windows. + const invalidated = viteNodeRunner.moduleCache.invalidateDepTree([ + file, + ]); + + // If no files were invalidated, no need to reload. + if (invalidated.size === 0) return; + + // Note that the paths in `invalidated` are POSIX, so we need to + // convert the paths in `options` to POSIX for this comparison. + // The `srcDir` regex is already converted to POSIX. + const hasConfigUpdate = invalidated.has( + common.options.configFile.replace(/\\/g, "/"), + ); + const hasSchemaUpdate = invalidated.has( + common.options.schemaFile.replace(/\\/g, "/"), + ); + + const hasIndexingUpdate = Array.from(invalidated).some( + (file) => indexingRegex.test(file) && !apiRegex.test(file), + ); + const hasApiUpdate = Array.from(invalidated).some((file) => + apiRegex.test(file), + ); + + // This branch could trigger if you change a `note.txt` file within `src/`. + // Note: We could probably do a better job filtering out files in `isFileIgnored`. + if ( + !hasConfigUpdate && + !hasSchemaUpdate && + !hasIndexingUpdate && + !hasApiUpdate + ) { + return; + } + + common.logger.info({ + service: "build", + msg: `Hot reload ${Array.from(invalidated) + .map((f) => `'${path.relative(common.options.rootDir, f)}'`) + .join(", ")}`, + }); + + // Fast path for when only the api has changed. 
+ if ( + hasApiUpdate === true && + hasConfigUpdate === false && + hasSchemaUpdate === false && + hasIndexingUpdate === false + ) { + const files = glob.sync(apiPattern); + viteNodeRunner.moduleCache.invalidateDepTree(files); + viteNodeRunner.moduleCache.deleteByModuleId("ponder:registry"); + + const executeResult = await executeApiRoutes(); + if (executeResult.status === "error") { + onBuild({ + status: "error", + kind: "api", + error: executeResult.error, + }); + return; + } + + onBuild({ + ...this.compileApi({ apiResult: executeResult.result }), + kind: "api", + }); + } else { + // re-execute all files + viteNodeRunner.moduleCache.invalidateDepTree([ + common.options.configFile, + ]); + viteNodeRunner.moduleCache.invalidateDepTree([ + common.options.schemaFile, + ]); + viteNodeRunner.moduleCache.invalidateDepTree( + glob.sync(indexingPattern, { + ignore: apiPattern, + }), + ); + viteNodeRunner.moduleCache.invalidateDepTree(glob.sync(apiPattern)); + viteNodeRunner.moduleCache.deleteByModuleId("ponder:registry"); + + const configResult = await executeConfig(); + const schemaResult = await executeSchema(); + const indexingResult = await executeIndexingFunctions(); + const apiResult = await executeApiRoutes(); + + if (configResult.status === "error") { + onBuild({ + status: "error", + kind: "indexing", + error: configResult.error, + }); + return; + } + if (schemaResult.status === "error") { + onBuild({ + status: "error", + kind: "indexing", + error: schemaResult.error, + }); + return; + } + if (indexingResult.status === "error") { + onBuild({ + status: "error", + kind: "indexing", + error: indexingResult.error, + }); + return; + } + if (apiResult.status === "error") { + onBuild({ + status: "error", + kind: "indexing", + error: apiResult.error, + }); + return; + } + + const compileResult = mergeResults([ + build.preCompile(configResult.result), + build.compileSchema(schemaResult.result), + await build.compileIndexing({ + configResult: configResult.result, + schemaResult: schemaResult.result, + indexingResult: indexingResult.result, + }), + build.compileApi({ apiResult: apiResult.result }), + ]); + + if (compileResult.status === "error") { + onBuild({ + status: "error", + kind: "indexing", + error: compileResult.error, + }); + return; + } + + onBuild({ + status: "success", + kind: "indexing", + result: { + preBuild: compileResult.result[0], + schemaBuild: compileResult.result[1], + indexingBuild: compileResult.result[2], + apiBuild: compileResult.result[3], + }, + }); + } + }; + + viteDevServer.watcher.on("change", onFileChange); + }, + async kill() { + await viteDevServer?.close(); + common.logger.debug({ + service: "build", + msg: "Killed build service", + }); + }, + } satisfies Build; + + return build; +}; diff --git a/packages/core/src/build/plugin.ts b/packages/core/src/build/plugin.ts index 905fb0454..1955bec33 100644 --- a/packages/core/src/build/plugin.ts +++ b/packages/core/src/build/plugin.ts @@ -1,3 +1,4 @@ +import type { Common } from "@/common/common.js"; import type { Plugin } from "vite"; const virtualModule = () => `import { Hono } from "hono"; @@ -30,11 +31,19 @@ const ponder = { export { ponder }; `; -export const vitePluginPonder = (): Plugin => { +const schemaModule = ( + schemaPath: string, +) => `import * as schema from "${schemaPath}"; +export * from "${schemaPath}"; +export default schema; +`; + +export const vitePluginPonder = (options: Common["options"]): Plugin => { return { name: "ponder", load: (id) => { - if (id === "@/generated") return virtualModule(); + if (id 
=== "ponder:registry") return virtualModule(); + if (id === "ponder:schema") return schemaModule(options.schemaFile); return null; }, }; diff --git a/packages/core/src/build/pre.test.ts b/packages/core/src/build/pre.test.ts new file mode 100644 index 000000000..1739d6ee0 --- /dev/null +++ b/packages/core/src/build/pre.test.ts @@ -0,0 +1,153 @@ +import path from "node:path"; +import type { Options } from "@/common/options.js"; +import { http } from "viem"; +import { expect, test, vi } from "vitest"; +import { createConfig } from "../config/config.js"; +import { buildPre } from "./pre.js"; + +const options = { + ponderDir: ".ponder", + rootDir: "rootDir", +} as const satisfies Pick; + +test("buildPre() database uses pglite by default", () => { + const config = createConfig({ + networks: { mainnet: { chainId: 1, transport: http() } }, + contracts: { a: { network: "mainnet", abi: [] } }, + }); + + const prev = process.env.DATABASE_URL; + // biome-ignore lint/performance/noDelete: Required to test default behavior. + delete process.env.DATABASE_URL; + + const { databaseConfig } = buildPre({ + config, + options, + }); + expect(databaseConfig).toMatchObject({ + kind: "pglite", + options: { + dataDir: expect.stringContaining(path.join(".ponder", "pglite")), + }, + }); + + process.env.DATABASE_URL = prev; +}); + +test("buildPre() database respects custom pglite path", async () => { + const config = createConfig({ + database: { kind: "pglite", directory: "custom-pglite/directory" }, + networks: { mainnet: { chainId: 1, transport: http() } }, + contracts: { a: { network: "mainnet", abi: [] } }, + }); + + const { databaseConfig } = buildPre({ config, options }); + + expect(databaseConfig).toMatchObject({ + kind: "pglite", + options: { + dataDir: expect.stringContaining(path.join("custom-pglite", "directory")), + }, + }); +}); + +test("buildPre() database uses pglite if specified even if DATABASE_URL env var present", async () => { + const config = createConfig({ + database: { kind: "pglite" }, + networks: { mainnet: { chainId: 1, transport: http() } }, + contracts: { a: { network: "mainnet", abi: [] } }, + }); + + vi.stubEnv("DATABASE_URL", "postgres://username@localhost:5432/database"); + + const { databaseConfig } = buildPre({ config, options }); + expect(databaseConfig).toMatchObject({ + kind: "pglite", + options: { + dataDir: expect.stringContaining(path.join(".ponder", "pglite")), + }, + }); + + vi.unstubAllEnvs(); +}); + +test("buildPre() database uses postgres if DATABASE_URL env var present", async () => { + const config = createConfig({ + networks: { mainnet: { chainId: 1, transport: http() } }, + contracts: { a: { network: "mainnet", abi: [] } }, + }); + + vi.stubEnv("DATABASE_URL", "postgres://username@localhost:5432/database"); + + const { databaseConfig } = buildPre({ config, options }); + expect(databaseConfig).toMatchObject({ + kind: "postgres", + poolConfig: { + connectionString: "postgres://username@localhost:5432/database", + }, + }); + + vi.unstubAllEnvs(); +}); + +test("buildPre() database uses postgres if DATABASE_PRIVATE_URL env var present", async () => { + const config = createConfig({ + networks: { mainnet: { chainId: 1, transport: http() } }, + contracts: { a: { network: "mainnet", abi: [] } }, + }); + + vi.stubEnv("DATABASE_URL", "postgres://username@localhost:5432/database"); + vi.stubEnv( + "DATABASE_PRIVATE_URL", + "postgres://username@localhost:5432/better_database", + ); + + const { databaseConfig } = buildPre({ config, options }); + 
expect(databaseConfig).toMatchObject({ + kind: "postgres", + poolConfig: { + connectionString: "postgres://username@localhost:5432/better_database", + }, + }); + + vi.unstubAllEnvs(); +}); + +test("buildPre() throws for postgres database with no connection string", async () => { + const config = createConfig({ + database: { kind: "postgres" }, + networks: { mainnet: { chainId: 1, transport: http() } }, + contracts: { a: { network: "mainnet", abi: [] } }, + }); + + const prev = process.env.DATABASE_URL; + // biome-ignore lint/performance/noDelete: Required to test default behavior. + delete process.env.DATABASE_URL; + + expect(() => buildPre({ config, options })).toThrow( + "Invalid database configuration: 'kind' is set to 'postgres' but no connection string was provided.", + ); + + process.env.DATABASE_URL = prev; +}); + +test("buildPre() database with postgres uses pool config", async () => { + const config = createConfig({ + database: { + kind: "postgres", + connectionString: "postgres://username@localhost:5432/database", + poolConfig: { max: 100 }, + }, + networks: { mainnet: { chainId: 1, transport: http() } }, + contracts: { a: { network: "mainnet", abi: [] } }, + }); + + const { databaseConfig } = buildPre({ config, options }); + expect(databaseConfig).toMatchObject({ + kind: "postgres", + poolConfig: { + connectionString: "postgres://username@localhost:5432/database", + max: 100, + }, + }); +}); diff --git a/packages/core/src/build/pre.ts b/packages/core/src/build/pre.ts new file mode 100644 index 000000000..c9fcf15e7 --- /dev/null +++ b/packages/core/src/build/pre.ts @@ -0,0 +1,141 @@ +import path from "node:path"; +import { BuildError } from "@/common/errors.js"; +import type { Options } from "@/common/options.js"; +import type { Config } from "@/config/config.js"; +import type { DatabaseConfig } from "@/config/database.js"; +import parse from "pg-connection-string"; + +function getDatabaseName(connectionString: string) { + const parsed = (parse as unknown as typeof parse.parse)(connectionString); + return `${parsed.host}:${parsed.port}/${parsed.database}`; +} + +export function buildPre({ + config, + options, +}: { + config: Config; + options: Pick; +}): { + databaseConfig: DatabaseConfig; + logs: { level: "warn" | "info" | "debug"; msg: string }[]; +} { + const logs: { level: "warn" | "info" | "debug"; msg: string }[] = []; + + // Build database. + let databaseConfig: DatabaseConfig; + + // Determine PGlite directory, preferring config.database.directory if available + const pgliteDir = + config.database?.kind === "pglite" && config.database.directory + ? config.database.directory === "memory://" + ? "memory://" + : path.resolve(config.database.directory) + : path.join(options.ponderDir, "pglite"); + + const pglitePrintPath = + pgliteDir === "memory://" + ? 
"memory://" + : path.relative(options.rootDir, pgliteDir); + + if (config.database?.kind) { + if (config.database.kind === "postgres") { + let connectionString: string | undefined = undefined; + let source: string | undefined = undefined; + + if (config.database.connectionString) { + connectionString = config.database.connectionString; + source = "from ponder.config.ts"; + } else if (process.env.DATABASE_PRIVATE_URL) { + connectionString = process.env.DATABASE_PRIVATE_URL; + source = "from DATABASE_PRIVATE_URL env var"; + } else if (process.env.DATABASE_URL) { + connectionString = process.env.DATABASE_URL; + source = "from DATABASE_URL env var"; + } else { + throw new Error( + `Invalid database configuration: 'kind' is set to 'postgres' but no connection string was provided.`, + ); + } + + logs.push({ + level: "info", + msg: `Using Postgres database '${getDatabaseName(connectionString)}' (${source})`, + }); + + const poolConfig = { + max: config.database.poolConfig?.max ?? 30, + connectionString, + }; + + databaseConfig = { kind: "postgres", poolConfig }; + } else { + logs.push({ + level: "info", + msg: `Using PGlite database in '${pglitePrintPath}' (from ponder.config.ts)`, + }); + + databaseConfig = { kind: "pglite", options: { dataDir: pgliteDir } }; + } + } else { + let connectionString: string | undefined = undefined; + let source: string | undefined = undefined; + if (process.env.DATABASE_PRIVATE_URL) { + connectionString = process.env.DATABASE_PRIVATE_URL; + source = "from DATABASE_PRIVATE_URL env var"; + } else if (process.env.DATABASE_URL) { + connectionString = process.env.DATABASE_URL; + source = "from DATABASE_URL env var"; + } + + // If either of the DATABASE_URL env vars are set, use Postgres. + if (connectionString !== undefined) { + logs.push({ + level: "info", + msg: `Using Postgres database ${getDatabaseName(connectionString)} (${source})`, + }); + + const poolConfig = { max: 30, connectionString }; + + databaseConfig = { kind: "postgres", poolConfig }; + } else { + // Fall back to PGlite. 
+ logs.push({ + level: "info", + msg: `Using PGlite database at ${pglitePrintPath} (default)`, + }); + + databaseConfig = { kind: "pglite", options: { dataDir: pgliteDir } }; + } + } + + return { + databaseConfig, + logs, + }; +} + +export function safeBuildPre({ + config, + options, +}: { + config: Config; + options: Pick; +}) { + try { + const result = buildPre({ + config, + options, + }); + + return { + status: "success", + databaseConfig: result.databaseConfig, + logs: result.logs, + } as const; + } catch (_error) { + const buildError = new BuildError((_error as Error).message); + buildError.stack = undefined; + return { status: "error", error: buildError } as const; + } +} diff --git a/packages/core/src/build/schema.test.ts b/packages/core/src/build/schema.test.ts index 19bccb742..d251c6774 100644 --- a/packages/core/src/build/schema.test.ts +++ b/packages/core/src/build/schema.test.ts @@ -1,4 +1,4 @@ -import { onchainSchema, onchainTable } from "@/index.js"; +import { onchainEnum, onchainTable } from "@/index.js"; import { sql } from "drizzle-orm"; import { check, @@ -10,8 +10,6 @@ import { import { expect, test } from "vitest"; import { buildSchema } from "./schema.js"; -const instanceId = "1234"; - test("buildSchema() success", () => { const schema = { account: onchainTable("account", (p) => ({ @@ -20,19 +18,7 @@ test("buildSchema() success", () => { })), }; - buildSchema({ schema, instanceId }); -}); - -test("buildSchema() error with schema", () => { - const schema = { - ponder: onchainSchema("ponder"), - account: onchainTable("account", (p) => ({ - address: p.hex().primaryKey(), - balance: p.bigint().notNull(), - })), - }; - - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + buildSchema({ schema }); }); test("buildSchema() error with multiple primary key", () => { @@ -43,7 +29,7 @@ test("buildSchema() error with multiple primary key", () => { })), }; - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildSchema() error with no primary key", () => { @@ -54,7 +40,7 @@ test("buildSchema() error with no primary key", () => { })), }; - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildSchema() success with composite primary key", () => { @@ -71,7 +57,7 @@ test("buildSchema() success with composite primary key", () => { ), }; - buildSchema({ schema, instanceId }); + buildSchema({ schema }); }); test("buildScheama() error with view", () => { @@ -84,7 +70,7 @@ test("buildScheama() error with view", () => { v: pgView("v").as((qb) => qb.select().from(account)), }; - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildScheama() error with sequences", () => { @@ -96,7 +82,7 @@ test("buildScheama() error with sequences", () => { seq: pgSequence("seq"), }; - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildScheama() error with generated", () => { @@ -107,7 +93,7 @@ test("buildScheama() error with generated", () => { })), }; - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildScheama() error with generated identity", () => { @@ -121,7 +107,7 @@ test("buildScheama() error with generated identity", () => { })), }; - expect(() => buildSchema({ schema, 
instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildScheama() error with serial", () => { @@ -132,7 +118,7 @@ test("buildScheama() error with serial", () => { })), }; - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildScheama() success with default", () => { @@ -143,7 +129,7 @@ test("buildScheama() success with default", () => { })), }; - buildSchema({ schema, instanceId }); + buildSchema({ schema }); }); test("buildScheama() error with default sql", () => { @@ -154,7 +140,7 @@ test("buildScheama() error with default sql", () => { })), }; - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildScheama() error with $defaultFn sql", () => { @@ -165,7 +151,7 @@ test("buildScheama() error with $defaultFn sql", () => { })), }; - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildScheama() error with $onUpdateFn sql", () => { @@ -176,7 +162,7 @@ test("buildScheama() error with $onUpdateFn sql", () => { })), }; - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildScheama() error with foreign key", () => { @@ -191,7 +177,7 @@ test("buildScheama() error with foreign key", () => { })), }; - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildScheama() error with unique", () => { @@ -202,7 +188,7 @@ test("buildScheama() error with unique", () => { })), }; - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildScheama() error with check", () => { @@ -219,20 +205,18 @@ test("buildScheama() error with check", () => { ), }; - expect(() => buildSchema({ schema, instanceId })).toThrowError(); + expect(() => buildSchema({ schema })).toThrowError(); }); test("buildScheama() success with enum", () => { - const p = onchainSchema("p"); - const mood = p.enum("mood", ["good", "bad"]); + const mood = onchainEnum("mood", ["good", "bad"]); const schema = { - p, mood, - account: p.table("account", (p) => ({ + account: onchainTable("account", (p) => ({ address: p.hex().primaryKey(), m: mood().notNull(), })), }; - buildSchema({ schema, instanceId }); + buildSchema({ schema }); }); diff --git a/packages/core/src/build/schema.ts b/packages/core/src/build/schema.ts index 6d2e431f0..69e545106 100644 --- a/packages/core/src/build/schema.ts +++ b/packages/core/src/build/schema.ts @@ -1,13 +1,11 @@ import { BuildError } from "@/common/errors.js"; -import { type Schema, isPgEnumSym } from "@/drizzle/index.js"; +import type { Schema } from "@/drizzle/index.js"; import { getSql } from "@/drizzle/kit/index.js"; import { buildGraphQLSchema } from "@/graphql/index.js"; import { SQL, getTableColumns, is } from "drizzle-orm"; import { PgBigSerial53, PgBigSerial64, - type PgEnum, - PgSchema, PgSequence, PgSerial, PgSmallSerial, @@ -16,40 +14,11 @@ import { getTableConfig, } from "drizzle-orm/pg-core"; -export const buildSchema = ({ - schema, - instanceId, -}: { schema: Schema; instanceId: string }) => { - const statements = getSql(schema, instanceId); - - // find and validate namespace - - let namespace: string; - - for (const maybeSchema of Object.values(schema)) { - if 
(is(maybeSchema, PgSchema)) { - namespace = maybeSchema.schemaName; - break; - } - } - - if (namespace! === undefined) { - namespace = "public"; - } +export const buildSchema = ({ schema }: { schema: Schema }) => { + const statements = getSql(schema); for (const [name, s] of Object.entries(schema)) { if (is(s, PgTable)) { - if (namespace === "public" && getTableConfig(s).schema !== undefined) { - throw new Error( - `Schema validation failed: All tables must use the same schema and ${name} uses a different schema '${getTableConfig(s).schema}' than '${namespace}'.`, - ); - } - if (namespace !== "public" && getTableConfig(s).schema !== namespace) { - throw new Error( - `Schema validation failed: All tables must use the same schema and ${name} uses a different schema '${getTableConfig(s).schema ?? "public"}' than '${namespace}'.`, - ); - } - let hasPrimaryKey = false; for (const [columnName, column] of Object.entries(getTableColumns(s))) { @@ -164,35 +133,14 @@ export const buildSchema = ({ `Schema validation failed: '${name}' is a view and views are unsupported.`, ); } - - // @ts-ignore - if (isPgEnumSym in s) { - // @ts-ignore - if (namespace === "public" && (s as PgEnum).schema !== undefined) { - throw new Error( - // @ts-ignore - `Schema validation failed: All enums must use the same schema and ${name} uses a different schema '${(s as PgEnum).schema}' than '${namespace}'.`, - ); - } - // @ts-ignore - if (namespace !== "public" && (s as PgEnum).schema !== namespace) { - throw new Error( - // @ts-ignore - `Schema validation failed: All enums must use the same schema and ${name} uses a different schema '${(s as PgEnum).schema ?? "public"}' than '${namespace}'.`, - ); - } - } } - return { statements, namespace }; + return { statements }; }; -export const safeBuildSchema = ({ - schema, - instanceId, -}: { schema: Schema; instanceId: string }) => { +export const safeBuildSchema = ({ schema }: { schema: Schema }) => { try { - const result = buildSchema({ schema, instanceId }); + const result = buildSchema({ schema }); const graphqlSchema = buildGraphQLSchema(schema); return { diff --git a/packages/core/src/build/service.ts b/packages/core/src/build/service.ts deleted file mode 100644 index 28f411555..000000000 --- a/packages/core/src/build/service.ts +++ /dev/null @@ -1,806 +0,0 @@ -import { createHash } from "node:crypto"; -import crypto from "node:crypto"; -import fs from "node:fs"; -import path from "node:path"; -import type { Common } from "@/common/common.js"; -import { BuildError } from "@/common/errors.js"; -import type { Config } from "@/config/config.js"; -import type { DatabaseConfig } from "@/config/database.js"; -import type { Network } from "@/config/networks.js"; -import type { Schema } from "@/drizzle/index.js"; -import type { SqlStatements } from "@/drizzle/kit/index.js"; -import type { PonderRoutes } from "@/hono/index.js"; -import type { Source } from "@/sync/source.js"; -import { serialize } from "@/utils/serialize.js"; -import { glob } from "glob"; -import type { GraphQLSchema } from "graphql"; -import type { Hono } from "hono"; -import { type ViteDevServer, createServer } from "vite"; -import { ViteNodeRunner } from "vite-node/client"; -import { ViteNodeServer } from "vite-node/server"; -import { installSourcemapsSupport } from "vite-node/source-map"; -import { normalizeModuleId, toFilePath } from "vite-node/utils"; -import viteTsconfigPathsPlugin from "vite-tsconfig-paths"; -import { - type IndexingFunctions, - type RawIndexingFunctions, - 
safeBuildConfigAndIndexingFunctions, -} from "./configAndIndexingFunctions.js"; -import { vitePluginPonder } from "./plugin.js"; -import { safeBuildSchema } from "./schema.js"; -import { parseViteNodeError } from "./stacktrace.js"; - -const BUILD_ID_VERSION = "1"; - -export type Service = { - // static - common: Common; - indexingRegex: RegExp; - apiRegex: RegExp; - indexingPattern: string; - apiPattern: string; - - // vite - viteDevServer: ViteDevServer; - viteNodeServer: ViteNodeServer; - viteNodeRunner: ViteNodeRunner; -}; - -type BaseBuild = { - // Build ID for caching - buildId: string; - instanceId: string; - // Config - databaseConfig: DatabaseConfig; - sources: Source[]; - networks: Network[]; - // Schema - schema: Schema; - statements: SqlStatements; - namespace: string; - graphqlSchema: GraphQLSchema; -}; - -export type IndexingBuild = BaseBuild & { - indexingFunctions: IndexingFunctions; -}; - -export type ApiBuild = BaseBuild & { - app: Hono; - routes: PonderRoutes; -}; - -export type BuildResult = - | { - status: "success"; - indexingBuild: IndexingBuild; - apiBuild: ApiBuild; - } - | { status: "error"; error: Error }; - -export type BuildResultDev = - | { - status: "success"; - kind: "indexing"; - indexingBuild: IndexingBuild; - apiBuild: ApiBuild; - } - | { - status: "success"; - kind: "api"; - indexingBuild?: never; - apiBuild: ApiBuild; - } - | { status: "error"; kind: "indexing" | "api"; error: Error }; - -type IndexingBuildResult = - | { status: "success"; build: IndexingBuild } - | { status: "error"; error: Error }; - -type ApiBuildResult = - | { status: "success"; build: ApiBuild } - | { status: "error"; error: Error }; - -export const create = async ({ - common, -}: { - common: Common; -}): Promise => { - const escapeRegex = /[.*+?^${}()|[\]\\]/g; - - const escapedIndexingDir = common.options.indexingDir - // If on Windows, use a POSIX path for this regex. - .replace(/\\/g, "/") - // Escape special characters in the path. - .replace(escapeRegex, "\\$&"); - const indexingRegex = new RegExp(`^${escapedIndexingDir}/.*\\.(ts|js)$`); - - const escapedApiDir = common.options.apiDir - // If on Windows, use a POSIX path for this regex. - .replace(/\\/g, "/") - // Escape special characters in the path. 
- .replace(escapeRegex, "\\$&"); - const apiRegex = new RegExp(`^${escapedApiDir}/.*\\.(ts|js)$`); - - const indexingPattern = path - .join(common.options.indexingDir, "**/*.{js,mjs,ts,mts}") - .replace(/\\/g, "/"); - - const apiPattern = path - .join(common.options.apiDir, "**/*.{js,mjs,ts,mts}") - .replace(/\\/g, "/"); - - const viteLogger = { - warnedMessages: new Set(), - loggedErrors: new WeakSet(), - hasWarned: false, - clearScreen() {}, - hasErrorLogged: (error: Error) => viteLogger.loggedErrors.has(error), - info: (msg: string) => { - common.logger.trace({ service: "build(vite)", msg }); - }, - warn: (msg: string) => { - viteLogger.hasWarned = true; - common.logger.trace({ service: "build(vite)", msg }); - }, - warnOnce: (msg: string) => { - if (viteLogger.warnedMessages.has(msg)) return; - viteLogger.hasWarned = true; - common.logger.trace({ service: "build(vite)", msg }); - viteLogger.warnedMessages.add(msg); - }, - error: (msg: string) => { - viteLogger.hasWarned = true; - common.logger.trace({ service: "build(vite)", msg }); - }, - }; - - const viteDevServer = await createServer({ - root: common.options.rootDir, - cacheDir: path.join(common.options.ponderDir, "vite"), - publicDir: false, - customLogger: viteLogger, - server: { hmr: false }, - plugins: [viteTsconfigPathsPlugin(), vitePluginPonder()], - }); - - // This is Vite boilerplate (initializes the Rollup container). - await viteDevServer.pluginContainer.buildStart({}); - - const viteNodeServer = new ViteNodeServer(viteDevServer); - installSourcemapsSupport({ - getSourceMap: (source) => viteNodeServer.getSourceMap(source), - }); - - const viteNodeRunner = new ViteNodeRunner({ - root: viteDevServer.config.root, - fetchModule: (id) => viteNodeServer.fetchModule(id, "ssr"), - resolveId: (id, importer) => viteNodeServer.resolveId(id, importer, "ssr"), - }); - - return { - common, - indexingRegex, - apiRegex, - indexingPattern, - apiPattern, - viteDevServer, - viteNodeServer, - viteNodeRunner, - }; -}; - -/** - * Execute, validate, and build the files the make up a Ponder app. - * If `watch` is true (dev server), then use vite to re-execute changed files, - * and validate and build again. This function only re-executes changes files, - * but doesn't attempt to skip any validation or build steps. - */ -export const start = async ( - buildService: Service, - { - watch, - onBuild, - }: - | { - watch: true; - onBuild: (buildResult: BuildResultDev) => void; - } - | { watch: false; onBuild?: never }, -): Promise => { - const { common } = buildService; - - if (common.options.command !== "serve") { - // @ts-ignore - globalThis.__PONDER_INSTANCE_ID = - process.env.PONDER_EXPERIMENTAL_INSTANCE_ID ?? - crypto.randomBytes(2).toString("hex"); - } - - // Note: Don't run these in parallel. If there are circular imports in user code, - // it's possible for ViteNodeRunner to return exports as undefined (a race condition). 
- const configResult = await executeConfig(buildService); - const schemaResult = await executeSchema(buildService); - const indexingResult = await executeIndexingFunctions(buildService); - const apiResult = await executeApiRoutes(buildService); - - if (configResult.status === "error") { - return { status: "error", error: configResult.error }; - } - if (schemaResult.status === "error") { - return { status: "error", error: schemaResult.error }; - } - if (indexingResult.status === "error") { - return { status: "error", error: indexingResult.error }; - } - if (apiResult.status === "error") { - return { status: "error", error: apiResult.error }; - } - - let cachedConfigResult = configResult; - let cachedSchemaResult = schemaResult; - let cachedIndexingResult = indexingResult; - let cachedApiResult = apiResult; - - // If watch is false (`ponder start` or `ponder serve`), - // don't register any event handlers on the watcher. - if (watch) { - // Define the directories and files to ignore - const ignoredDirs = [common.options.generatedDir, common.options.ponderDir]; - const ignoredFiles = [ - path.join(common.options.rootDir, "ponder-env.d.ts"), - path.join(common.options.rootDir, ".env.local"), - ]; - - const isFileIgnored = (filePath: string) => { - const isInIgnoredDir = ignoredDirs.some((dir) => { - const rel = path.relative(dir, filePath); - return !rel.startsWith("..") && !path.isAbsolute(rel); - }); - - const isIgnoredFile = ignoredFiles.includes(filePath); - return isInIgnoredDir || isIgnoredFile; - }; - - const onFileChange = async (_file: string) => { - if (isFileIgnored(_file)) return; - - // Note that `toFilePath` always returns a POSIX path, even if you pass a Windows path. - const file = toFilePath( - normalizeModuleId(_file), - common.options.rootDir, - ).path; - - // Invalidate all modules that depend on the updated files. - // Note that `invalidateDepTree` accepts and returns POSIX paths, even on Windows. - const invalidated = [ - ...buildService.viteNodeRunner.moduleCache.invalidateDepTree([file]), - ]; - - // If no files were invalidated, no need to reload. - if (invalidated.length === 0) return; - - // Note that the paths in `invalidated` are POSIX, so we need to - // convert the paths in `options` to POSIX for this comparison. - // The `srcDir` regex is already converted to POSIX. - const hasConfigUpdate = invalidated.includes( - common.options.configFile.replace(/\\/g, "/"), - ); - const hasSchemaUpdate = invalidated.includes( - common.options.schemaFile.replace(/\\/g, "/"), - ); - - const hasIndexingUpdate = invalidated.some( - (file) => - buildService.indexingRegex.test(file) && - !buildService.apiRegex.test(file), - ); - const hasApiUpdate = invalidated.some((file) => - buildService.apiRegex.test(file), - ); - - // This branch could trigger if you change a `note.txt` file within `src/`. - // Note: We could probably do a better job filtering out files in `isFileIgnored`. - if ( - !hasConfigUpdate && - !hasSchemaUpdate && - !hasIndexingUpdate && - !hasApiUpdate - ) { - return; - } - - common.logger.info({ - service: "build", - msg: `Hot reload ${invalidated - .map((f) => `'${path.relative(common.options.rootDir, f)}'`) - .join(", ")}`, - }); - - // re-execute anything that would cause the instance id to change - if (hasIndexingUpdate || hasSchemaUpdate || hasConfigUpdate) { - // @ts-ignore - globalThis.__PONDER_INSTANCE_ID = - process.env.PONDER_EXPERIMENTAL_INSTANCE_ID ?? 
- crypto.randomBytes(2).toString("hex"); - - buildService.viteNodeRunner.moduleCache.invalidateDepTree([ - buildService.common.options.configFile, - ]); - buildService.viteNodeRunner.moduleCache.invalidateDepTree([ - buildService.common.options.schemaFile, - ]); - buildService.viteNodeRunner.moduleCache.invalidateDepTree( - glob.sync(buildService.indexingPattern, { - ignore: buildService.apiPattern, - }), - ); - buildService.viteNodeRunner.moduleCache.deleteByModuleId("@/generated"); - - const configResult = await executeConfig(buildService); - const schemaResult = await executeSchema(buildService); - const indexingResult = await executeIndexingFunctions(buildService); - - if (configResult.status === "error") { - onBuild({ - status: "error", - kind: "indexing", - error: configResult.error, - }); - return; - } - if (schemaResult.status === "error") { - onBuild({ - status: "error", - kind: "indexing", - error: schemaResult.error, - }); - return; - } - if (indexingResult.status === "error") { - onBuild({ - status: "error", - kind: "indexing", - error: indexingResult.error, - }); - return; - } - - cachedConfigResult = configResult; - cachedSchemaResult = schemaResult; - cachedIndexingResult = indexingResult; - } - - if (hasApiUpdate) { - const files = glob.sync(buildService.apiPattern); - buildService.viteNodeRunner.moduleCache.invalidateDepTree(files); - buildService.viteNodeRunner.moduleCache.deleteByModuleId("@/generated"); - - const result = await executeApiRoutes(buildService); - if (result.status === "error") { - onBuild({ status: "error", kind: "api", error: result.error }); - return; - } - cachedApiResult = result; - } - - /** - * Build and validate updated indexing and api artifacts - * - * There are a few cases to handle: - * 1) config or schema is updated -> rebuild both api and indexing - * 2) indexing functions are updated -> rebuild indexing - * 3) api routes are updated -> rebuild api - * - * Note: the api build cannot be successful if the indexing - * build fails, this means that any indexing errors are always - * propogated to the api build. - */ - - const indexingBuildResult = await validateAndBuild( - buildService, - cachedConfigResult, - cachedSchemaResult, - cachedIndexingResult, - ); - if (indexingBuildResult.status === "error") { - onBuild({ - status: "error", - kind: "indexing", - error: indexingBuildResult.error, - }); - return; - } - - // If schema or config is updated, rebuild both api and indexing - if (hasConfigUpdate || hasSchemaUpdate || hasIndexingUpdate) { - const apiBuildResult = validateAndBuildApi( - buildService, - indexingBuildResult.build, - cachedApiResult, - ); - - if (apiBuildResult.status === "error") { - onBuild({ - status: "error", - kind: "api", - error: apiBuildResult.error, - }); - return; - } - - onBuild({ - status: "success", - kind: "indexing", - indexingBuild: indexingBuildResult.build, - apiBuild: apiBuildResult.build, - }); - } else { - const apiBuildResult = validateAndBuildApi( - buildService, - indexingBuildResult.build, - cachedApiResult, - ); - - if (apiBuildResult.status === "error") { - onBuild({ - status: "error", - kind: "api", - error: apiBuildResult.error, - }); - return; - } - - onBuild({ - status: "success", - kind: "api", - apiBuild: apiBuildResult.build, - }); - } - }; - - buildService.viteDevServer.watcher.on("change", onFileChange); - } - - // Build and validate initial indexing and server build. 
- // Note: the api build cannot be successful if the indexing - // build fails - - const initialBuildResult = await validateAndBuild( - buildService, - configResult, - schemaResult, - indexingResult, - ); - - if (initialBuildResult.status === "error") { - return { - status: "error", - error: initialBuildResult.error, - }; - } - - const initialApiBuildResult = validateAndBuildApi( - buildService, - initialBuildResult.build, - apiResult, - ); - - if (initialApiBuildResult.status === "error") { - return { - status: "error", - error: initialApiBuildResult.error, - }; - } - - return { - status: "success", - indexingBuild: initialBuildResult.build, - apiBuild: initialApiBuildResult.build, - }; -}; - -export const kill = async (buildService: Service): Promise => { - await buildService.viteDevServer?.close(); - buildService.common.logger.debug({ - service: "build", - msg: "Killed build service", - }); -}; - -const executeConfig = async ( - buildService: Service, -): Promise< - | { status: "success"; config: Config; contentHash: string } - | { status: "error"; error: Error } -> => { - const executeResult = await executeFile(buildService, { - file: buildService.common.options.configFile, - }); - - if (executeResult.status === "error") { - buildService.common.logger.error({ - service: "build", - msg: "Error while executing 'ponder.config.ts':", - error: executeResult.error, - }); - - return executeResult; - } - - const config = executeResult.exports.default as Config; - - const contentHash = createHash("sha256") - .update(serialize(config)) - .digest("hex"); - - return { status: "success", config, contentHash } as const; -}; - -const executeSchema = async ( - buildService: Service, -): Promise< - | { status: "success"; schema: Schema; contentHash: string } - | { status: "error"; error: Error } -> => { - const executeResult = await executeFile(buildService, { - file: buildService.common.options.schemaFile, - }); - - if (executeResult.status === "error") { - buildService.common.logger.error({ - service: "build", - msg: "Error while executing 'ponder.schema.ts':", - error: executeResult.error, - }); - - return executeResult; - } - - const schema = executeResult.exports; - - const contents = fs.readFileSync( - buildService.common.options.schemaFile, - "utf-8", - ); - return { - status: "success", - schema, - contentHash: createHash("sha256").update(contents).digest("hex"), - }; -}; - -const executeIndexingFunctions = async ( - buildService: Service, -): Promise< - | { - status: "success"; - indexingFunctions: RawIndexingFunctions; - contentHash: string; - } - | { status: "error"; error: Error } -> => { - const files = glob.sync(buildService.indexingPattern, { - ignore: buildService.apiPattern, - }); - const executeResults = await Promise.all( - files.map(async (file) => ({ - ...(await executeFile(buildService, { file })), - file, - })), - ); - - for (const executeResult of executeResults) { - if (executeResult.status === "error") { - buildService.common.logger.error({ - service: "build", - msg: `Error while executing '${path.relative( - buildService.common.options.rootDir, - executeResult.file, - )}':`, - error: executeResult.error, - }); - - return executeResult; - } - } - - // Note that we are only hashing the file contents, not the exports. This is - // different from the config/schema, where we include the serializable object itself. 
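Before the hashing loop below, a small, self-contained sketch (not part of the patch) of the two hashing strategies this comment contrasts: serializable exports are hashed as values, while files whose exports are functions are hashed by raw contents. `JSON.stringify` stands in for the project's `serialize` helper, and the source string stands in for a file read:

import { createHash } from "node:crypto";

// Config-style hash: hash the serialized exported value, so only changes that
// affect the exported object alter the hash.
const configHash = createHash("sha256")
  .update(JSON.stringify({ networks: {}, contracts: {} }))
  .digest("hex");

// Indexing-function-style hash: the exports are functions and not
// serializable, so hash the raw file contents instead.
const fileContents = "export const handler = () => {};"; // stand-in for fs.readFileSync(file, "utf-8")
const contentHash = createHash("sha256").update(fileContents).digest("hex");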
- const hash = createHash("sha256"); - for (const file of files) { - try { - const contents = fs.readFileSync(file, "utf-8"); - hash.update(contents); - } catch (e) { - buildService.common.logger.warn({ - service: "build", - msg: `Unable to read contents of file '${file}' while constructin build ID`, - }); - hash.update(file); - } - } - const contentHash = hash.digest("hex"); - - const exports = await buildService.viteNodeRunner.executeId("@/generated"); - - return { - status: "success", - indexingFunctions: exports.ponder.fns, - contentHash, - }; -}; - -const executeApiRoutes = async ( - buildService: Service, -): Promise< - | { - status: "success"; - app: Hono; - routes: PonderRoutes; - } - | { status: "error"; error: Error } -> => { - const files = glob.sync(buildService.apiPattern); - const executeResults = await Promise.all( - files.map(async (file) => ({ - ...(await executeFile(buildService, { file })), - file, - })), - ); - - for (const executeResult of executeResults) { - if (executeResult.status === "error") { - buildService.common.logger.error({ - service: "build", - msg: `Error while executing '${path.relative( - buildService.common.options.rootDir, - executeResult.file, - )}':`, - error: executeResult.error, - }); - - return executeResult; - } - } - - const exports = await buildService.viteNodeRunner.executeId("@/generated"); - - return { - status: "success", - app: exports.ponder.hono, - routes: exports.ponder.routes, - }; -}; - -const validateAndBuild = async ( - { common }: Pick, - config: { config: Config; contentHash: string }, - schema: { schema: Schema; contentHash: string }, - indexingFunctions: { - indexingFunctions: RawIndexingFunctions; - contentHash: string; - }, -): Promise => { - // Validate and build the schema - const buildSchemaResult = safeBuildSchema({ - schema: schema.schema, - instanceId: - process.env.PONDER_EXPERIMENTAL_INSTANCE_ID ?? - // @ts-ignore - globalThis.__PONDER_INSTANCE_ID, - }); - if (buildSchemaResult.status === "error") { - common.logger.error({ - service: "build", - msg: "Error while building schema:", - error: buildSchemaResult.error, - }); - - return buildSchemaResult; - } - - // Validates and build the config - const buildConfigAndIndexingFunctionsResult = - await safeBuildConfigAndIndexingFunctions({ - config: config.config, - rawIndexingFunctions: indexingFunctions.indexingFunctions, - options: common.options, - }); - if (buildConfigAndIndexingFunctionsResult.status === "error") { - common.logger.error({ - service: "build", - msg: "Failed build", - error: buildConfigAndIndexingFunctionsResult.error, - }); - - return buildConfigAndIndexingFunctionsResult; - } - - for (const log of buildConfigAndIndexingFunctionsResult.logs) { - common.logger[log.level]({ service: "build", msg: log.msg }); - } - - const buildId = createHash("sha256") - .update(BUILD_ID_VERSION) - .update(config.contentHash) - .update(schema.contentHash) - .update(indexingFunctions.contentHash) - .digest("hex") - .slice(0, 10); - - common.logger.debug({ - service: "build", - msg: `Completed build with ID '${buildId}' (hash of project file contents)`, - }); - - return { - status: "success", - build: { - buildId, - instanceId: - process.env.PONDER_EXPERIMENTAL_INSTANCE_ID ?? 
- // @ts-ignore - globalThis.__PONDER_INSTANCE_ID, - databaseConfig: buildConfigAndIndexingFunctionsResult.databaseConfig, - networks: buildConfigAndIndexingFunctionsResult.networks, - sources: buildConfigAndIndexingFunctionsResult.sources, - indexingFunctions: - buildConfigAndIndexingFunctionsResult.indexingFunctions, - schema: schema.schema, - statements: buildSchemaResult.statements, - namespace: buildSchemaResult.namespace, - graphqlSchema: buildSchemaResult.graphqlSchema, - }, - }; -}; - -const validateAndBuildApi = ( - { common }: Pick, - baseBuild: BaseBuild, - api: { app: Hono; routes: PonderRoutes }, -): ApiBuildResult => { - for (const { - pathOrHandlers: [maybePathOrHandler], - } of api.routes) { - if (typeof maybePathOrHandler === "string") { - if ( - maybePathOrHandler === "/status" || - maybePathOrHandler === "/metrics" || - maybePathOrHandler === "/health" - ) { - const error = new BuildError( - `Validation failed: API route "${maybePathOrHandler}" is reserved for internal use.`, - ); - error.stack = undefined; - common.logger.error({ service: "build", msg: "Failed build", error }); - return { status: "error", error } as const; - } - } - } - - return { - status: "success", - build: { - ...baseBuild, - app: api.app, - routes: api.routes, - }, - }; -}; - -const executeFile = async ( - { common, viteNodeRunner }: Service, - { file }: { file: string }, -): Promise< - { status: "success"; exports: any } | { status: "error"; error: Error } -> => { - try { - const exports = await viteNodeRunner.executeFile(file); - return { status: "success", exports } as const; - } catch (error_) { - const relativePath = path.relative(common.options.rootDir, file); - const error = parseViteNodeError(relativePath, error_ as Error); - return { status: "error", error } as const; - } -}; diff --git a/packages/core/src/common/codegen.ts b/packages/core/src/common/codegen.ts index 0090ab926..6569e661c 100644 --- a/packages/core/src/common/codegen.ts +++ b/packages/core/src/common/codegen.ts @@ -3,33 +3,21 @@ import path from "node:path"; import type { Common } from "@/common/common.js"; import { type GraphQLSchema, printSchema } from "graphql"; -export const ponderEnv = `// This file enables type checking and editor autocomplete for this Ponder project. +export const ponderEnv = `/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + +// This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. 
- -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts"); - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.ApiContext; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; -} `; export function runCodegen({ diff --git a/packages/core/src/common/options.ts b/packages/core/src/common/options.ts index fad33c173..111e4ed0d 100644 --- a/packages/core/src/common/options.ts +++ b/packages/core/src/common/options.ts @@ -4,7 +4,9 @@ import type { CliOptions } from "@/bin/ponder.js"; import type { LevelWithSilent } from "pino"; export type Options = { - command: "dev" | "start" | "serve" | "codegen"; + command: "dev" | "start" | "serve" | "codegen" | "list"; + + schema?: string; configFile: string; schemaFile: string; @@ -34,7 +36,6 @@ export type Options = { indexingCacheMaxBytes: number; indexingCacheFlushRatio: number; - syncStoreMaxIntervals: number; syncEventsQuerySize: number; syncHandoffStaleSeconds: number; }; @@ -65,6 +66,8 @@ export const buildOptions = ({ cliOptions }: { cliOptions: CliOptions }) => { logLevel = "info"; } + if (["list", "codegen"].includes(cliOptions.command)) logLevel = "error"; + const port = process.env.PORT !== undefined ? Number(process.env.PORT) @@ -77,6 +80,8 @@ export const buildOptions = ({ cliOptions }: { cliOptions: CliOptions }) => { return { command: cliOptions.command, + schema: cliOptions.schema, + rootDir, configFile: path.join(rootDir, cliOptions.config), schemaFile: path.join(rootDir, "ponder.schema.ts"), @@ -121,7 +126,6 @@ export const buildOptions = ({ cliOptions }: { cliOptions: CliOptions }) => { 1_024, indexingCacheFlushRatio: 0.35, - syncStoreMaxIntervals: 5_000, syncEventsQuerySize: 10_000, syncHandoffStaleSeconds: 300, } satisfies Options; diff --git a/packages/core/src/common/telemetry.ts b/packages/core/src/common/telemetry.ts index 164a57cf7..0d06f48e2 100644 --- a/packages/core/src/common/telemetry.ts +++ b/packages/core/src/common/telemetry.ts @@ -4,7 +4,8 @@ import { existsSync, readFileSync } from "node:fs"; import os from "node:os"; import path from "node:path"; import { promisify } from "node:util"; -import type { IndexingBuild } from "@/build/service.js"; +import type { PreBuild, SchemaBuild } from "@/build/index.js"; +import type { IndexingBuild } from "@/build/index.js"; import type { Options } from "@/common/options.js"; import { startClock } from "@/utils/timer.js"; import { wait } from "@/utils/wait.js"; @@ -121,12 +122,11 @@ export function createTelemetry({ // Attempt to find and read the users package.json file. const packageJson = getPackageJson(options.rootDir); - const ponderCoreVersion = - packageJson?.dependencies?.["@ponder/core"] ?? "unknown"; + const ponderVersion = packageJson?.dependencies?.ponder ?? "unknown"; const viemVersion = packageJson?.dependencies?.viem ?? "unknown"; // Make a guess as to whether the project is internal (within the monorepo) or not. 
- const isInternal = ponderCoreVersion === "workspace:*"; + const isInternal = ponderVersion === "workspace:*"; const cpus = os.cpus(); @@ -137,7 +137,7 @@ export function createTelemetry({ is_internal: isInternal, } satisfies CommonProperties, session: { - ponder_core_version: ponderCoreVersion, + ponder_core_version: ponderVersion, viem_version: viemVersion, package_manager: packageManager, package_manager_version: packageManagerVersion, @@ -277,17 +277,27 @@ function getPackageJson(rootDir: string) { } } -export function buildPayload(build: IndexingBuild) { - const table_count = Object.keys(build.schema).length; - const indexing_function_count = Object.values(build.indexingFunctions).reduce( - (acc, f) => acc + Object.keys(f).length, - 0, - ); +export function buildPayload({ + preBuild, + schemaBuild, + indexingBuild, +}: { + preBuild: PreBuild; + schemaBuild?: SchemaBuild; + indexingBuild?: IndexingBuild; +}) { + const table_count = schemaBuild ? Object.keys(schemaBuild.schema).length : 0; + const indexing_function_count = indexingBuild + ? Object.values(indexingBuild.indexingFunctions).reduce( + (acc, f) => acc + Object.keys(f).length, + 0, + ) + : 0; return { - database_kind: build.databaseConfig.kind, - contract_count: build.sources.length, - network_count: build.networks.length, + database_kind: preBuild?.databaseConfig.kind, + contract_count: indexingBuild?.sources.length ?? 0, + network_count: indexingBuild?.networks.length ?? 0, table_count, indexing_function_count, }; diff --git a/packages/core/src/config/address.test-d.ts b/packages/core/src/config/address.test-d.ts index 872bf8c2f..3a97add55 100644 --- a/packages/core/src/config/address.test-d.ts +++ b/packages/core/src/config/address.test-d.ts @@ -1,87 +1,36 @@ import { type AbiEvent, parseAbiItem } from "abitype"; import { test } from "vitest"; -import type { GetAddress } from "./address.js"; - -const address = ( - c: contract extends { - factory?: unknown; - } - ? GetAddress - : contract, -) => c; +import { factory } from "./address.js"; const event0 = parseAbiItem( "event Event0(bytes32 indexed arg, bytes32 indexed arg1)", ); const func = parseAbiItem("function func()"); -test("no address or factory", () => { - address({}); - //^? -}); - -test("address", () => { - address({ address: "0x" }); - // ^? -}); - -test("factory", () => { - address({ - // ^? - factory: { - address: "0x", - event: event0, - parameter: "arg", - }, - }); -}); - test("factory with invalid event", () => { - address({ + factory({ // ^? - factory: { - address: "0x", - // @ts-expect-error - event: func, - parameter: "arg", - }, + address: "0x", + // @ts-expect-error + event: func, + parameter: "arg", }); }); test("factory with weak event", () => { - address({ - // ^? - factory: { - address: "0x", - event: {} as AbiEvent, - parameter: "arg", - }, - }); -}); - -test("factory with extra parameter", () => { - address({ + factory({ // ^? - factory: { - address: "0x", - event: event0, - parameter: "arg", - - // @ts-expect-error - a: 0, - }, + address: "0x", + event: {} as AbiEvent, + parameter: "arg", }); }); -test("address and factory", () => { - address({ +test("factory", () => { + factory({ // ^? 
- // @ts-expect-error address: "0x", - factory: { - address: "0x", - event: event0, - parameter: "arg", - }, + event: event0, + parameter: "arg", }); }); diff --git a/packages/core/src/config/address.ts b/packages/core/src/config/address.ts index 285176101..42e202bba 100644 --- a/packages/core/src/config/address.ts +++ b/packages/core/src/config/address.ts @@ -1,49 +1,17 @@ -import type { AbiEvent } from "abitype"; +import type { AbiEvent } from "viem"; -export type GetAddress = contract extends { - factory: unknown; -} - ? // 1. Contract contains a factory - contract extends { - factory: { - event: infer event extends AbiEvent; - }; - } - ? // 1.a Contract has a valid factory event - { - address?: never; - factory?: { - /** Address of the factory contract that creates this contract. */ - address: `0x${string}` | readonly `0x${string}`[]; - /** ABI event that announces the creation of a new instance of this contract. */ - event: AbiEvent; - /** Name of the factory event parameter that contains the new child contract address. */ - parameter: Exclude; - }; - } - : // 1.b Contract has an invalid factory event - { - address?: never; - factory?: { - /** Address of the factory contract that creates this contract. */ - address: `0x${string}` | readonly `0x${string}`[]; - /** ABI event that announces the creation of a new instance of this contract. */ - event: AbiEvent; - /** Name of the factory event parameter that contains the new child contract address. */ - parameter: string; - }; - } - : // 2. Contract has an address - contract extends { address: `0x${string}` | readonly `0x${string}`[] } - ? { address?: `0x${string}` | readonly `0x${string}`[]; factory?: never } - : { - address?: `0x${string}` | readonly `0x${string}`[]; - factory?: { - /** Address of the factory contract that creates this contract. */ - address: `0x${string}` | readonly `0x${string}`[]; - /** ABI event that announces the creation of a new instance of this contract. */ - event: AbiEvent; - /** Name of the factory event parameter that contains the new child contract address. */ - parameter: string; - }; - }; +export type Factory = { + /** Address of the factory contract that creates this contract. */ + address: `0x${string}` | readonly `0x${string}`[]; + /** ABI event that announces the creation of a new instance of this contract. */ + event: event; + /** Name of the factory event parameter that contains the new child contract address. 
*/ + parameter: Exclude; +}; + +export const factory = (factory: Factory) => + factory; + +export type AddressConfig = { + address?: `0x${string}` | readonly `0x${string}`[] | Factory; +}; diff --git a/packages/core/src/config/config.test-d.ts b/packages/core/src/config/config.test-d.ts index f930aa225..4b08ec702 100644 --- a/packages/core/src/config/config.test-d.ts +++ b/packages/core/src/config/config.test-d.ts @@ -1,5 +1,6 @@ import { http, type Abi, type HttpTransport, parseAbiItem } from "viem"; import { assertType, test } from "vitest"; +import { factory } from "./address.js"; import { createConfig } from "./config.js"; const event0 = parseAbiItem( @@ -94,39 +95,11 @@ test("createConfig factory", () => { c2: { abi: [event1], network: "mainnet", - factory: { + address: factory({ address: "0x", event: event0, parameter: "arg", - }, - }, - }, - }); -}); - -test("createConfig address and factory", () => { - createConfig({ - networks: { - mainnet: { - chainId: 1, - transport: http(), - }, - optimism: { - chainId: 10, - transport: http(), - }, - }, - contracts: { - c2: { - abi: [event1], - network: "mainnet", - factory: { - address: "0x", - event: event0, - parameter: "arg", - }, - // @ts-expect-error - address: "0x", + }), }, }, }); @@ -301,3 +274,24 @@ test("createConfig strict return type", () => { }; }>(config.contracts); }); + +test("createConfig accounts", () => { + createConfig({ + networks: { + mainnet: { + chainId: 1, + transport: http(), + }, + optimism: { + chainId: 10, + transport: http(), + }, + }, + accounts: { + me: { + network: "mainnet", + address: ["0x"], + }, + }, + }); +}); diff --git a/packages/core/src/config/config.ts b/packages/core/src/config/config.ts index 4094fe35f..082384549 100644 --- a/packages/core/src/config/config.ts +++ b/packages/core/src/config/config.ts @@ -1,17 +1,47 @@ import type { Prettify } from "@/types/utils.js"; import type { Abi } from "abitype"; import type { Narrow, Transport } from "viem"; -import type { GetAddress } from "./address.js"; +import type { AddressConfig } from "./address.js"; import type { GetEventFilter } from "./eventFilter.js"; import type { NonStrictPick } from "./utilityTypes.js"; -export type BlockConfig = { - /** Block number at which to start indexing events (inclusive). If `undefined`, events will be processed from block 0. Default: `undefined`. */ - startBlock?: number; - /** Block number at which to stop indexing events (inclusive). If `undefined`, events will be processed in real-time. Default: `undefined`. */ - endBlock?: number; +export type Config = { + networks: { [networkName: string]: NetworkConfig }; + contracts: { [contractName: string]: GetContract }; + accounts: { [accountName: string]: AccountConfig }; + database?: DatabaseConfig; + blocks: { + [sourceName: string]: GetBlockFilter; + }; }; +export type CreateConfigReturnType = { + networks: networks; + contracts: contracts; + accounts: accounts; + database?: DatabaseConfig; + blocks: blocks; +}; + +export const createConfig = < + const networks, + const contracts = {}, + const accounts = {}, + const blocks = {}, +>(config: { + database?: DatabaseConfig; + // TODO: add jsdoc to these properties. 
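A short usage sketch of the new factory() helper defined above; the event, addresses, and variable names are illustrative, and the relative import path mirrors the type-test files rather than a published entry point:

import { parseAbiItem } from "viem";
import { factory } from "./address.js";

// Event emitted by the factory contract when it deploys a child contract.
const poolCreated = parseAbiItem(
  "event PoolCreated(address indexed token0, address indexed token1, address pool)",
);

// Instead of the removed `factory: { ... }` contract field, a contract's
// `address` can now be a factory() result (see the config.test-d.ts change above).
const childAddress = factory({
  address: "0x0000000000000000000000000000000000000001", // address of the factory contract
  event: poolCreated,
  parameter: "pool", // event parameter holding each child contract address
});

Collapsing `address` and `factory` into a single field is what allows the conditional GetAddress type removed in this hunk to be replaced by the much simpler AddressConfig.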
+ networks: NetworksConfig>; + contracts?: ContractsConfig>; + accounts?: AccountsConfig>; + blocks?: BlockFiltersConfig; +}): CreateConfigReturnType => + config as Prettify< + CreateConfigReturnType + >; + +// database + type DatabaseConfig = | { kind: "pglite"; @@ -29,7 +59,31 @@ type DatabaseConfig = }; }; -export type NetworkConfig = { +// base + +type BlockConfig = { + /** Block number at which to start indexing events (inclusive). If `undefined`, events will be processed from block 0. Default: `undefined`. */ + startBlock?: number; + /** Block number at which to stop indexing events (inclusive). If `undefined`, events will be processed in real-time. Default: `undefined`. */ + endBlock?: number; +}; + +type TransactionReceiptConfig = { + includeTransactionReceipts?: boolean; +}; + +type FunctionCallConfig = { + /* + * Enable call trace indexing for this contract. + * + * - Docs: https://ponder.sh/docs/indexing/call-traces + */ + includeCallTraces?: boolean; +}; + +// network + +type NetworkConfig = { /** Chain ID of the network. */ chainId: network extends { chainId: infer chainId extends number } ? chainId | number @@ -57,48 +111,20 @@ export type NetworkConfig = { disableCache?: boolean; }; -export type BlockFilterConfig = { - /** Block number at which to start indexing events (inclusive). If `undefined`, events will be processed from block 0. Default: `undefined`. */ - startBlock?: number; - /** Block number at which to stop indexing events (inclusive). If `undefined`, events will be processed in real-time. Default: `undefined`. */ - endBlock?: number; - interval?: number; -}; +type NetworksConfig = {} extends networks + ? {} + : { + [networkName in keyof networks]: NetworkConfig; + }; -type GetBlockFilter< - networks, - /// - allNetworkNames extends string = [keyof networks] extends [never] - ? string - : keyof networks & string, -> = BlockFilterConfig & { - network: - | allNetworkNames - | { - [name in allNetworkNames]?: BlockFilterConfig; - }; -}; +// contracts type AbiConfig = { /** Contract application byte interface. */ abi: abi; }; -type TransactionReceiptConfig = { - includeTransactionReceipts?: boolean; -}; - -type FunctionCallConfig = { - /* - * Enable call trace indexing for this contract. - * - * - Docs: https://ponder.sh/docs/indexing/call-traces - */ - - includeCallTraces?: boolean; -}; - -type GetNetwork< +type GetContractNetwork< networks, contract, abi extends Abi, @@ -106,48 +132,28 @@ type GetNetwork< allNetworkNames extends string = [keyof networks] extends [never] ? string : keyof networks & string, -> = contract extends { network: infer network } - ? { - /** - * Network that this contract is deployed to. Must match a network name in `networks`. - * Any filter information overrides the values in the higher level "contracts" property. - * Factories cannot override an address and vice versa. - */ - network: - | allNetworkNames - | { - [name in allNetworkNames]?: Prettify< - GetAddress> & - GetEventFilter> & - TransactionReceiptConfig & - FunctionCallConfig & - BlockConfig - >; - }; - } - : { - /** - * Network that this contract is deployed to. Must match a network name in `networks`. - * Any filter information overrides the values in the higher level "contracts" property. - * Factories cannot override an address and vice versa. 
- */ - network: - | allNetworkNames - | { - [name in allNetworkNames]?: Prettify< - GetAddress & - GetEventFilter & - TransactionReceiptConfig & - FunctionCallConfig & - BlockConfig - >; - }; - }; +> = { + /** + * Network that this contract is deployed to. Must match a network name in `networks`. + * Any filter information overrides the values in the higher level "contracts" property. + */ + network: + | allNetworkNames + | { + [name in allNetworkNames]?: Prettify< + AddressConfig & + GetEventFilter> & + TransactionReceiptConfig & + FunctionCallConfig & + BlockConfig + >; + }; +}; type ContractConfig = Prettify< AbiConfig & - GetNetwork, abi> & - GetAddress> & + GetContractNetwork, abi> & + AddressConfig & GetEventFilter> & TransactionReceiptConfig & FunctionCallConfig & @@ -169,12 +175,65 @@ type ContractsConfig = {} extends contracts [name in keyof contracts]: GetContract; }; -type NetworksConfig = {} extends networks +// accounts + +type GetAccountNetwork< + networks, + /// + allNetworkNames extends string = [keyof networks] extends [never] + ? string + : keyof networks & string, +> = { + /** + * Network that this account is deployed to. Must match a network name in `networks`. + * Any filter information overrides the values in the higher level "accounts" property. + */ + network: + | allNetworkNames + | { + [name in allNetworkNames]?: Prettify< + AddressConfig & TransactionReceiptConfig & BlockConfig + >; + }; +}; + +type AccountConfig = Prettify< + GetAccountNetwork & + Required & + TransactionReceiptConfig & + BlockConfig +>; + +type AccountsConfig = {} extends accounts ? {} : { - [networkName in keyof networks]: NetworkConfig; + [name in keyof accounts]: AccountConfig; }; +// blocks + +type BlockFilterConfig = { + /** Block number at which to start indexing events (inclusive). If `undefined`, events will be processed from block 0. Default: `undefined`. */ + startBlock?: number; + /** Block number at which to stop indexing events (inclusive). If `undefined`, events will be processed in real-time. Default: `undefined`. */ + endBlock?: number; + interval?: number; +}; + +type GetBlockFilter< + networks, + /// + allNetworkNames extends string = [keyof networks] extends [never] + ? string + : keyof networks & string, +> = BlockFilterConfig & { + network: + | allNetworkNames + | { + [name in allNetworkNames]?: BlockFilterConfig; + }; +}; + type BlockFiltersConfig< networks = unknown, blocks = unknown, @@ -183,32 +242,3 @@ type BlockFiltersConfig< : { [name in keyof blocks]: GetBlockFilter; }; - -export const createConfig = < - const networks, - const contracts = {}, - const blocks = {}, ->(config: { - // TODO: add jsdoc to these properties. 
- networks: NetworksConfig>; - contracts?: ContractsConfig>; - database?: DatabaseConfig; - blocks?: BlockFiltersConfig; -}): CreateConfigReturnType => - config as Prettify>; - -export type Config = { - networks: { [networkName: string]: NetworkConfig }; - contracts: { [contractName: string]: GetContract }; - database?: DatabaseConfig; - blocks: { - [sourceName: string]: GetBlockFilter; - }; -}; - -export type CreateConfigReturnType = { - networks: networks; - contracts: contracts; - database?: DatabaseConfig; - blocks: blocks; -}; diff --git a/packages/core/src/database/index.test.ts b/packages/core/src/database/index.test.ts index 93c2917b0..93b42515a 100644 --- a/packages/core/src/database/index.test.ts +++ b/packages/core/src/database/index.test.ts @@ -1,11 +1,6 @@ import { setupCommon, setupIsolatedDatabase } from "@/_test/setup.js"; import { buildSchema } from "@/build/schema.js"; -import { - onchainEnum, - onchainSchema, - onchainTable, - primaryKey, -} from "@/drizzle/index.js"; +import { onchainEnum, onchainTable, primaryKey } from "@/drizzle/index.js"; import { createRealtimeIndexingStore } from "@/indexing-store/realtime.js"; import { encodeCheckpoint, @@ -32,26 +27,26 @@ function createCheckpoint(index: number): string { return encodeCheckpoint({ ...zeroCheckpoint, blockTimestamp: index }); } -test("setup() succeeds with a fresh database", async (context) => { +test("setup() succeeds with empty schema", async (context) => { const database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - const { checkpoint } = await database.setup(); + const { checkpoint } = await database.setup({ buildId: "abc" }); expect(checkpoint).toMatchObject(encodeCheckpoint(zeroCheckpoint)); const tableNames = await getUserTableNames(database, "public"); - expect(tableNames).toContain("1234__account"); - expect(tableNames).toContain("1234_reorg__account"); + expect(tableNames).toContain("account"); + expect(tableNames).toContain("_reorg__account"); expect(tableNames).toContain("_ponder_meta"); const metadata = await database.qb.internal @@ -59,210 +54,243 @@ test("setup() succeeds with a fresh database", async (context) => { .selectAll() .execute(); - expect(metadata).toHaveLength(3); + expect(metadata).toHaveLength(2); await database.unlock(); await database.kill(); }); -test("setup() creates tables", async (context) => { - const mood = onchainEnum("mood", ["sad", "happy"]); - - const kyle = onchainTable("kyle", (p) => ({ - age: p.integer().primaryKey(), - mood: mood().notNull(), - })); - - const user = onchainTable( - "table", - (p) => ({ - name: p.text(), - age: p.integer(), - address: p.hex(), - }), - (table) => ({ - primaryKeys: primaryKey({ columns: [table.name, table.address] }), - }), - ); - +test("setup() throws with schema used", async (context) => { const database = createDatabase({ common: context.common, - schema: { account, kyle, mood, user }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ - schema: { account, kyle, mood, user }, - instanceId: "1234", - }), + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { + schema: { account }, + statements: 
buildSchema({ schema: { account } }).statements, + }, }); - - await database.setup(); - - const tableNames = await getUserTableNames(database, "public"); - expect(tableNames).toContain("1234__account"); - expect(tableNames).toContain("1234_reorg__account"); - expect(tableNames).toContain("1234__kyle"); - expect(tableNames).toContain("1234_reorg__kyle"); - expect(tableNames).toContain("1234__kyle"); - expect(tableNames).toContain("1234_reorg__kyle"); - expect(tableNames).toContain("_ponder_meta"); - - await database.unlock(); + await database.setup({ buildId: "abc" }); await database.kill(); -}); - -test("setup() with onchainSchema", async (context) => { - const schema = onchainSchema("multichain"); - const account = schema.table("account", (t) => ({ - address: t.hex().primaryKey(), - balance: t.bigint(), - })); - const database = createDatabase({ + const databaseTwo = createDatabase({ common: context.common, - schema: { schema, account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ - schema: { schema, account }, - instanceId: "1234", - }), + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { + schema: { account }, + statements: buildSchema({ schema: { account } }).statements, + }, }); - await database.setup(); + const error = await databaseTwo.setup({ buildId: "abc" }).catch((err) => err); - const tableNames = await getUserTableNames(database, "multichain"); - expect(tableNames).toContain("1234__account"); - expect(tableNames).toContain("1234_reorg__account"); - expect(tableNames).toContain("_ponder_meta"); + expect(error).toBeDefined(); - await database.unlock(); - await database.kill(); + await databaseTwo.kill(); }); -test("setup() succeeds with a prior app in the same namespace", async (context) => { +test("setup() succeeds with crash recovery", async (context) => { const database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - await database.setup(); - let tableNames = await getUserTableNames(database, "public"); - expect(tableNames).toContain("1234__account"); - expect(tableNames).toContain("1234_reorg__account"); - expect(tableNames).toContain("_ponder_meta"); + await database.setup({ buildId: "abc" }); + + await database.finalize({ + checkpoint: createCheckpoint(10), + }); await database.unlock(); await database.kill(); const databaseTwo = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "5678", - buildId: "def", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "5678", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - await databaseTwo.setup(); - - tableNames = await getUserTableNames(databaseTwo, "public"); + const { checkpoint } = await databaseTwo.setup({ buildId: "abc" }); - expect(tableNames).toContain("1234__account"); - expect(tableNames).toContain("1234_reorg__account"); - expect(tableNames).toContain("5678__account"); - expect(tableNames).toContain("5678_reorg__account"); - expect(tableNames).toContain("_ponder_meta"); + 
expect(checkpoint).toMatchObject(createCheckpoint(10)); const metadata = await databaseTwo.qb.internal .selectFrom("_ponder_meta") .selectAll() .execute(); - expect(metadata).toHaveLength(5); + expect(metadata).toHaveLength(2); + + const tableNames = await getUserTableNames(databaseTwo, "public"); + expect(tableNames).toContain("account"); + expect(tableNames).toContain("_reorg__account"); + expect(tableNames).toContain("_ponder_meta"); await databaseTwo.kill(); }); -test("setup() with the same build ID recovers the finality checkpoint", async (context) => { +test("setup() succeeds with crash recovery after waiting for lock", async (context) => { + context.common.options.databaseHeartbeatInterval = 750; + context.common.options.databaseHeartbeatTimeout = 500; + const database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); + await database.setup({ buildId: "abc" }); + await database.finalize({ checkpoint: createCheckpoint(10) }); - await database.setup(); - - await database.finalize({ - checkpoint: createCheckpoint(10), + const databaseTwo = createDatabase({ + common: context.common, + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { + schema: { account }, + statements: buildSchema({ schema: { account } }).statements, + }, }); + const { checkpoint } = await databaseTwo.setup({ buildId: "abc" }); + + expect(checkpoint).toMatchObject(createCheckpoint(10)); + await database.unlock(); await database.kill(); + await databaseTwo.kill(); +}); + +// PGlite not being able to concurrently connect to the same database from two different clients +// makes this test impossible. 
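The two lock tests adjacent to this comment differ only in whether the first instance's heartbeat goes stale while the second instance waits. A hedged sketch of that staleness check, an assumption inferred from the heartbeat_at metadata and the options used in these tests rather than code taken from the patch:

// Assumption: a lock is treated as abandoned once its last heartbeat is older
// than the configured timeout.
function isLockStale(
  heartbeatAt: number, // `heartbeat_at` value stored in _ponder_meta
  databaseHeartbeatTimeout: number,
  now: number = Date.now(),
): boolean {
  return now - heartbeatAt > databaseHeartbeatTimeout;
}

// With databaseHeartbeatInterval=750 and databaseHeartbeatTimeout=500 (the
// crash-recovery test above), the heartbeat lapses and the waiting instance
// takes over. With interval=250 and timeout=1000 (the test below), the lock
// stays fresh and the second setup() rejects because the schema is still in use.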
+test("setup() throws with schema used after waiting for lock", async (context) => { + if (context.databaseConfig.kind !== "postgres") return; + + context.common.options.databaseHeartbeatInterval = 250; + context.common.options.databaseHeartbeatTimeout = 1000; + + const database = createDatabase({ + common: context.common, + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { + schema: { account }, + statements: buildSchema({ schema: { account } }).statements, + }, + }); + await database.setup({ buildId: "abc" }); + await database.finalize({ checkpoint: createCheckpoint(10) }); const databaseTwo = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "5678", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "5678", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - const { checkpoint } = await databaseTwo.setup(); + const error = await databaseTwo.setup({ buildId: "abc" }).catch((err) => err); - expect(checkpoint).toMatchObject(createCheckpoint(10)); + expect(error).toBeDefined(); - const metadata = await databaseTwo.qb.internal - .selectFrom("_ponder_meta") - .selectAll() - .execute(); + await database.kill(); + await databaseTwo.kill(); +}); - expect(metadata).toHaveLength(3); +// TODO(kyle) this causes an issue on pglite +test.skip("setup() with empty schema creates tables and enums", async (context) => { + const mood = onchainEnum("mood", ["sad", "happy"]); - const tableNames = await getUserTableNames(databaseTwo, "public"); - expect(tableNames).toContain("5678__account"); - expect(tableNames).toContain("5678_reorg__account"); + const kyle = onchainTable("kyle", (p) => ({ + age: p.integer().primaryKey(), + mood: mood().notNull(), + })); + + const user = onchainTable( + "table", + (p) => ({ + name: p.text(), + age: p.integer(), + address: p.hex(), + }), + (table) => ({ + primaryKeys: primaryKey({ columns: [table.name, table.address] }), + }), + ); + + const database = createDatabase({ + common: context.common, + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { + schema: { account, kyle, mood, user }, + statements: buildSchema({ schema: { account, kyle, mood, user } }) + .statements, + }, + }); + + await database.setup({ buildId: "abc" }); + + const tableNames = await getUserTableNames(database, "public"); + expect(tableNames).toContain("account"); + expect(tableNames).toContain("_reorg__account"); + expect(tableNames).toContain("kyle"); + expect(tableNames).toContain("_reorg__kyle"); + expect(tableNames).toContain("kyle"); + expect(tableNames).toContain("_reorg__kyle"); expect(tableNames).toContain("_ponder_meta"); - await databaseTwo.kill(); + await database.unlock(); + await database.kill(); }); -test("setup() with the same build ID reverts rows", async (context) => { +test("setup() with crash recovery reverts rows", async (context) => { const database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - await database.setup(); + await 
database.setup({ buildId: "abc" }); // setup tables, reorg tables, and metadata checkpoint @@ -299,22 +327,22 @@ test("setup() with the same build ID reverts rows", async (context) => { const databaseTwo = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "5678", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "5678", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - const { checkpoint } = await databaseTwo.setup(); + const { checkpoint } = await databaseTwo.setup({ buildId: "abc" }); expect(checkpoint).toMatchObject(createCheckpoint(10)); const rows = await databaseTwo.drizzle - .execute(sql`SELECT * from "5678__account"`) + .execute(sql`SELECT * from "account"`) .then((result) => result.rows); expect(rows).toHaveLength(1); @@ -325,12 +353,12 @@ test("setup() with the same build ID reverts rows", async (context) => { .selectAll() .execute(); - expect(metadata).toHaveLength(3); + expect(metadata).toHaveLength(2); await databaseTwo.kill(); }); -test("setup() with the same build ID drops indexes and triggers", async (context) => { +test("setup() with crash recovery drops indexes and triggers", async (context) => { const account = onchainTable( "account", (p) => ({ @@ -344,17 +372,17 @@ test("setup() with the same build ID drops indexes and triggers", async (context const database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - await database.setup(); + await database.setup({ buildId: "abc" }); await database.finalize({ checkpoint: createCheckpoint(10), @@ -367,304 +395,46 @@ test("setup() with the same build ID drops indexes and triggers", async (context const databaseTwo = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "5678", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "5678", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - await databaseTwo.setup(); + await databaseTwo.setup({ buildId: "abc" }); - const indexNames = await getUserIndexNames( - databaseTwo, - "public", - "5678__account", - ); + const indexNames = await getUserIndexNames(databaseTwo, "public", "account"); expect(indexNames).toHaveLength(1); await databaseTwo.kill(); }); -test("setup() with the same build ID recovers if the lock expires after waiting", async (context) => { - context.common.options.databaseHeartbeatInterval = 750; - context.common.options.databaseHeartbeatTimeout = 500; - - const database = createDatabase({ - common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ - schema: { account }, - instanceId: "1234", - }), - }); - await database.setup(); - await database.finalize({ checkpoint: createCheckpoint(10) }); - - const databaseTwo = createDatabase({ - common: context.common, - schema: { account }, - databaseConfig: 
context.databaseConfig, - instanceId: "5678", - buildId: "abc", - ...buildSchema({ - schema: { account }, - instanceId: "5678", - }), - }); - - const { checkpoint } = await databaseTwo.setup(); - - expect(checkpoint).toMatchObject(createCheckpoint(10)); - - await database.unlock(); - await database.kill(); - await databaseTwo.kill(); -}); - -// PGlite not being able to concurrently connect to the same database from two different clients -// makes this test impossible. -test("setup() with the same build ID succeeds if the lock doesn't expires after waiting", async (context) => { - if (context.databaseConfig.kind !== "postgres") return; - - context.common.options.databaseHeartbeatInterval = 250; - context.common.options.databaseHeartbeatTimeout = 1000; - - const database = createDatabase({ - common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ - schema: { account }, - instanceId: "1234", - }), - }); - await database.setup(); - await database.finalize({ checkpoint: createCheckpoint(10) }); - - const databaseTwo = createDatabase({ - common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "5678", - buildId: "abc", - ...buildSchema({ - schema: { account }, - instanceId: "5678", - }), - }); - - const { checkpoint } = await databaseTwo.setup(); - - expect(checkpoint).toMatchObject(encodeCheckpoint(zeroCheckpoint)); - - await database.kill(); - await databaseTwo.kill(); -}); - -test("setup() with the same instance ID upserts", async (context) => { - context.common.options.databaseHeartbeatInterval = 750; - context.common.options.databaseHeartbeatTimeout = 500; - - const database = createDatabase({ - common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ - schema: { account }, - instanceId: "1234", - }), - }); - await database.setup(); - await database.unlock(); - await database.kill(); - - const databaseTwo = createDatabase({ - common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ - schema: { account }, - instanceId: "1234", - }), - }); - - const { checkpoint } = await databaseTwo.setup(); - - expect(checkpoint).toMatchObject(encodeCheckpoint(zeroCheckpoint)); - - const metadata = await databaseTwo.qb.internal - .selectFrom("_ponder_meta") - .selectAll() - .execute(); - - expect(metadata).toHaveLength(3); - - await databaseTwo.kill(); -}); - -test("setup() drops old tables", async (context) => { - for (let i = 0; i < 5; i++) { - const database = createDatabase({ - common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: `123${i}`, - buildId: `${i}`, - ...buildSchema({ - schema: { account }, - instanceId: `123${i}`, - }), - }); - await database.setup(); - await database.unlock(); - await database.kill(); - } - - const database = createDatabase({ - common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1239", - buildId: "abc", - ...buildSchema({ - schema: { account }, - instanceId: "1239", - }), - }); - await database.setup(); - - const tableNames = await getUserTableNames(database, "public"); - expect(tableNames).toHaveLength(7); - await database.unlock(); - await database.kill(); -}); - -test('setup() with "ponder dev" publishes views', async (context) => { - const database = 
createDatabase({ - common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ - schema: { account }, - instanceId: "1234", - }), - }); - - context.common.options.command = "dev"; - - await database.setup(); - - const viewNames = await getUserViewNames(database, "public"); - expect(viewNames).toContain("account"); - - await database.unlock(); - await database.kill(); -}); - -test("setup() v0.7 migration", async (context) => { - const database = createDatabase({ - common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ - schema: { account }, - instanceId: "1234", - }), - }); - - await database.qb.internal.schema - .createTable("account") - .addColumn("id", "integer", (col) => col.primaryKey()) - .execute(); - - await database.qb.internal.schema - .createTable("_ponder_reorg__account") - .addColumn("id", "integer", (col) => col.primaryKey()) - .execute(); - - await database.qb.internal.schema - .createTable("_ponder_meta") - .addColumn("key", "text", (col) => col.primaryKey()) - .addColumn("value", "jsonb") - .execute(); - - await database.qb.internal - .insertInto("_ponder_meta") - .values({ - // @ts-ignore - key: "app", - value: { - is_locked: 0, - is_dev: 0, - heartbeat_at: 0, - build_id: "build", - checkpoint: encodeCheckpoint(zeroCheckpoint), - table_names: ["account"], - }, - }) - .execute(); - - const { checkpoint } = await database.setup(); - - expect(checkpoint).toMatchObject(encodeCheckpoint(zeroCheckpoint)); - - const tableNames = await getUserTableNames(database, "public"); - expect(tableNames).toContain("1234__account"); - expect(tableNames).toContain("1234_reorg__account"); - expect(tableNames).not.toContain("account"); - expect(tableNames).not.toContain("_ponder_reorg__account"); - expect(tableNames).toContain("_ponder_meta"); - - const metadata = await database.qb.internal - .selectFrom("_ponder_meta") - .selectAll() - .execute(); - - expect(metadata).toHaveLength(3); - - await database.unlock(); - await database.kill(); -}); - test("heartbeat updates the heartbeat_at value", async (context) => { context.common.options.databaseHeartbeatInterval = 250; context.common.options.databaseHeartbeatTimeout = 625; const database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - await database.setup(); + await database.setup({ buildId: "abc" }); const row = await database.qb.internal .selectFrom("_ponder_meta") - .where("key", "like", "app_%") + .where("key", "=", "app") .select("value") .executeTakeFirst(); @@ -672,7 +442,7 @@ test("heartbeat updates the heartbeat_at value", async (context) => { const rowAfterHeartbeat = await database.qb.internal .selectFrom("_ponder_meta") - .where("key", "like", "app_%") + .where("key", "=", "app") .select("value") .executeTakeFirst(); @@ -689,17 +459,17 @@ test("heartbeat updates the heartbeat_at value", async (context) => { test("finalize()", async (context) => { const database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - 
...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - await database.setup(); + await database.setup({ buildId: "abc" }); // setup tables, reorg tables, and metadata checkpoint @@ -738,7 +508,7 @@ test("finalize()", async (context) => { // reorg tables const rows = await database.qb.user - .selectFrom("1234_reorg__account") + .selectFrom("_reorg__account") .selectAll() .execute(); @@ -748,7 +518,7 @@ test("finalize()", async (context) => { const metadata = await database.qb.internal .selectFrom("_ponder_meta") - .where("key", "like", "app_%") + .where("key", "=", "app") .select("value") .executeTakeFirst(); @@ -761,36 +531,36 @@ test("finalize()", async (context) => { test("unlock()", async (context) => { let database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - await database.setup(); + await database.setup({ buildId: "abc" }); await database.unlock(); await database.kill(); database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); const metadata = await database.qb.internal .selectFrom("_ponder_meta") .selectAll() - .where("key", "like", "app_%") + .where("key", "=", "app") .execute(); expect((metadata[0]!.value as PonderApp).is_locked).toBe(0); @@ -813,130 +583,40 @@ test("createIndexes()", async (context) => { const database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - await database.setup(); + await database.setup({ buildId: "abc" }); await database.createIndexes(); - const indexNames = await getUserIndexNames( - database, - "public", - "1234__account", - ); + const indexNames = await getUserIndexNames(database, "public", "account"); expect(indexNames).toContain("balance_index"); await database.unlock(); await database.kill(); }); -test("createLiveViews()", async (context) => { - const database = createDatabase({ - common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ - schema: { account }, - instanceId: "1234", - }), - }); - - await database.setup(); - await database.createLiveViews(); - - const viewNames = await getUserViewNames(database, "public"); - expect(viewNames).toContain("account"); - - const metadata = await database.qb.internal - .selectFrom("_ponder_meta") - .select("value") - .where("key", "=", "live") - .executeTakeFirst(); - - expect(metadata!.value).toStrictEqual({ instance_id: 
"1234" }); - - await database.unlock(); - await database.kill(); -}); - -test("createLiveViews() drops old views", async (context) => { - const database = createDatabase({ - common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ - schema: { account }, - instanceId: "1234", - }), - }); - - await database.setup(); - await database.createLiveViews(); - await database.unlock(); - await database.kill(); - - const transfer = onchainTable("transfer", (p) => ({ - id: p.text().primaryKey(), - from: p.hex().notNull(), - to: p.hex().notNull(), - amount: p.hex().notNull(), - })); - - const databaseTwo = createDatabase({ - common: context.common, - schema: { transfer }, - databaseConfig: context.databaseConfig, - instanceId: "5678", - buildId: "def", - ...buildSchema({ - schema: { transfer }, - instanceId: "5678", - }), - }); - - await databaseTwo.setup(); - await databaseTwo.createLiveViews(); - - const viewNames = await getUserViewNames(databaseTwo, "public"); - expect(viewNames).toHaveLength(1); - expect(viewNames).toContain("transfer"); - - const metadata = await databaseTwo.qb.internal - .selectFrom("_ponder_meta") - .select("value") - .where("key", "=", "live") - .executeTakeFirst(); - - expect(metadata!.value).toStrictEqual({ instance_id: "5678" }); - - await databaseTwo.kill(); -}); - test("createTriggers()", async (context) => { const database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - await database.setup(); + await database.setup({ buildId: "abc" }); await database.createTriggers(); const indexingStore = createRealtimeIndexingStore({ @@ -950,7 +630,7 @@ test("createTriggers()", async (context) => { .values({ address: zeroAddress, balance: 10n }); const rows = await database.qb.user - .selectFrom("1234_reorg__account") + .selectFrom("_reorg__account") .selectAll() .execute(); @@ -971,17 +651,17 @@ test("createTriggers()", async (context) => { test("complete()", async (context) => { const database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: buildSchema({ schema: { account } }).statements, + }, }); - await database.setup(); + await database.setup({ buildId: "abc" }); await database.createTriggers(); const indexingStore = createRealtimeIndexingStore({ @@ -999,7 +679,7 @@ test("complete()", async (context) => { }); const rows = await database.qb.user - .selectFrom("1234_reorg__account") + .selectFrom("_reorg__account") .selectAll() .execute(); @@ -1019,17 +699,17 @@ test("complete()", async (context) => { test("revert()", async (context) => { const database = createDatabase({ common: context.common, - schema: { account }, - databaseConfig: context.databaseConfig, - instanceId: "1234", - buildId: "abc", - ...buildSchema({ + preBuild: { + databaseConfig: context.databaseConfig, + namespace: "public", + }, + schemaBuild: { schema: { account }, - instanceId: "1234", - }), + statements: 
buildSchema({ schema: { account } }).statements, + }, }); - await database.setup(); + await database.setup({ buildId: "abc" }); // setup tables, reorg tables, and metadata checkpoint @@ -1066,7 +746,7 @@ test("revert()", async (context) => { }); const rows = await database.qb.user - .selectFrom("1234__account") + .selectFrom("account") .selectAll() .execute(); @@ -1088,18 +768,6 @@ async function getUserTableNames(database: Database, namespace: string) { return rows.map(({ name }) => name); } -async function getUserViewNames(database: Database, namespace: string) { - const { rows } = await database.qb.internal.executeQuery<{ name: string }>( - ksql` - SELECT table_name as name - FROM information_schema.tables - WHERE table_schema = '${ksql.raw(namespace)}' - AND table_type = 'VIEW' - `.compile(database.qb.internal), - ); - return rows.map(({ name }) => name); -} - async function getUserIndexNames( database: Database, namespace: string, diff --git a/packages/core/src/database/index.ts b/packages/core/src/database/index.ts index cce8ebab8..fe6ad0987 100644 --- a/packages/core/src/database/index.ts +++ b/packages/core/src/database/index.ts @@ -1,16 +1,13 @@ -import crypto from "node:crypto"; +import type { IndexingBuild, PreBuild, SchemaBuild } from "@/build/index.js"; import type { Common } from "@/common/common.js"; import { NonRetryableError } from "@/common/errors.js"; -import type { DatabaseConfig } from "@/config/database.js"; import { type Drizzle, type Schema, getPrimaryKeyColumns, getTableNames, - userToReorgTableName, - userToSqlTableName, } from "@/drizzle/index.js"; -import { type SqlStatements, getColumnCasing } from "@/drizzle/kit/index.js"; +import { getColumnCasing } from "@/drizzle/kit/index.js"; import type { PonderSyncSchema } from "@/sync-store/encoding.js"; import { moveLegacyTables, @@ -66,10 +63,9 @@ export type Database = { * * @returns The progress checkpoint that that app should start from. */ - setup(): Promise<{ + setup(args: Pick): Promise<{ checkpoint: string; }>; - createLiveViews(): Promise; createIndexes(): Promise; createTriggers(): Promise; removeTriggers(): Promise; @@ -84,17 +80,15 @@ export type PonderApp = { is_locked: 0 | 1; is_dev: 0 | 1; heartbeat_at: number; - instance_id: string; build_id: string; checkpoint: string; table_names: string[]; }; -type PonderInternalSchema = { +export type PonderInternalSchema = { _ponder_meta: - | { key: `app_${string}`; value: PonderApp } - | { key: `status_${string}`; value: Status | null } - | { key: `live`; value: { instance_id: string } }; + | { key: "app"; value: PonderApp } + | { key: "status"; value: Status | null }; } & { [_: ReturnType[number]["sql"]]: unknown; } & { @@ -127,14 +121,14 @@ type QueryBuilder = { sync: HeadlessKysely; }; -export const createDatabase = (args: { +export const createDatabase = ({ + common, + preBuild, + schemaBuild, +}: { common: Common; - schema: Schema; - statements: SqlStatements; - namespace: string; - databaseConfig: DatabaseConfig; - instanceId: string; - buildId: string; + preBuild: PreBuild; + schemaBuild: Omit; }): Database => { let heartbeatInterval: NodeJS.Timeout | undefined; @@ -145,14 +139,14 @@ export const createDatabase = (args: { let driver: PGliteDriver | PostgresDriver; let qb: Database["qb"]; - const dialect = args.databaseConfig.kind; + const dialect = preBuild.databaseConfig.kind; if (dialect === "pglite" || dialect === "pglite_test") { driver = { instance: dialect === "pglite" - ? 
createPglite(args.databaseConfig.options) - : args.databaseConfig.instance, + ? createPglite(preBuild.databaseConfig.options) + : preBuild.databaseConfig.instance, }; const kyselyDialect = new KyselyPGlite(driver.instance).dialect; @@ -160,50 +154,50 @@ export const createDatabase = (args: { qb = { internal: new HeadlessKysely({ name: "internal", - common: args.common, + common, dialect: kyselyDialect, log(event) { if (event.level === "query") { - args.common.metrics.ponder_postgres_query_total.inc({ + common.metrics.ponder_postgres_query_total.inc({ pool: "internal", }); } }, - plugins: [new WithSchemaPlugin(args.namespace)], + plugins: [new WithSchemaPlugin(preBuild.namespace)], }), user: new HeadlessKysely({ name: "user", - common: args.common, + common: common, dialect: kyselyDialect, log(event) { if (event.level === "query") { - args.common.metrics.ponder_postgres_query_total.inc({ + common.metrics.ponder_postgres_query_total.inc({ pool: "user", }); } }, - plugins: [new WithSchemaPlugin(args.namespace)], + plugins: [new WithSchemaPlugin(preBuild.namespace)], }), readonly: new HeadlessKysely({ name: "readonly", - common: args.common, + common: common, dialect: kyselyDialect, log(event) { if (event.level === "query") { - args.common.metrics.ponder_postgres_query_total.inc({ + common.metrics.ponder_postgres_query_total.inc({ pool: "readonly", }); } }, - plugins: [new WithSchemaPlugin(args.namespace)], + plugins: [new WithSchemaPlugin(preBuild.namespace)], }), sync: new HeadlessKysely({ name: "sync", - common: args.common, + common: common, dialect: kyselyDialect, log(event) { if (event.level === "query") { - args.common.metrics.ponder_postgres_query_total.inc({ + common.metrics.ponder_postgres_query_total.inc({ pool: "sync", }); } @@ -214,32 +208,32 @@ export const createDatabase = (args: { } else { const internalMax = 2; const equalMax = Math.floor( - (args.databaseConfig.poolConfig.max - internalMax) / 3, + (preBuild.databaseConfig.poolConfig.max - internalMax) / 3, ); const [readonlyMax, userMax, syncMax] = - args.common.options.command === "serve" - ? [args.databaseConfig.poolConfig.max - internalMax, 0, 0] + common.options.command === "serve" + ? [preBuild.databaseConfig.poolConfig.max - internalMax, 0, 0] : [equalMax, equalMax, equalMax]; driver = { internal: createPool({ - ...args.databaseConfig.poolConfig, - application_name: `${args.namespace}_internal`, + ...preBuild.databaseConfig.poolConfig, + application_name: `${preBuild.namespace}_internal`, max: internalMax, statement_timeout: 10 * 60 * 1000, // 10 minutes to accommodate slow sync store migrations. 
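The pool sizing above follows a simple rule: two connections are reserved for the internal pool, the remainder is split evenly between the readonly, user, and sync pools, and `ponder serve` hands everything except the internal reservation to the readonly pool. A standalone sketch of that arithmetic (the function name is illustrative, not part of the codebase):

```ts
// Illustrative only: mirrors the sizing logic used when constructing the pg pools.
function splitPoolMax(max: number, command: "dev" | "start" | "serve") {
  const internalMax = 2;
  const equalMax = Math.floor((max - internalMax) / 3);
  const [readonlyMax, userMax, syncMax] =
    command === "serve"
      ? [max - internalMax, 0, 0]
      : [equalMax, equalMax, equalMax];
  return { internalMax, readonlyMax, userMax, syncMax };
}

// e.g. splitPoolMax(11, "start") => { internalMax: 2, readonlyMax: 3, userMax: 3, syncMax: 3 }
//      splitPoolMax(11, "serve") => { internalMax: 2, readonlyMax: 9, userMax: 0, syncMax: 0 }
```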
}), user: createPool({ - ...args.databaseConfig.poolConfig, - application_name: `${args.namespace}_user`, + ...preBuild.databaseConfig.poolConfig, + application_name: `${preBuild.namespace}_user`, max: userMax, }), readonly: createPool({ - ...args.databaseConfig.poolConfig, - application_name: `${args.namespace}_readonly`, + ...preBuild.databaseConfig.poolConfig, + application_name: `${preBuild.namespace}_readonly`, max: readonlyMax, }), sync: createPool({ - ...args.databaseConfig.poolConfig, + ...preBuild.databaseConfig.poolConfig, application_name: "ponder_sync", max: syncMax, }), @@ -248,50 +242,50 @@ export const createDatabase = (args: { qb = { internal: new HeadlessKysely({ name: "internal", - common: args.common, + common: common, dialect: new PostgresDialect({ pool: driver.internal }), log(event) { if (event.level === "query") { - args.common.metrics.ponder_postgres_query_total.inc({ + common.metrics.ponder_postgres_query_total.inc({ pool: "internal", }); } }, - plugins: [new WithSchemaPlugin(args.namespace)], + plugins: [new WithSchemaPlugin(preBuild.namespace)], }), user: new HeadlessKysely({ name: "user", - common: args.common, + common: common, dialect: new PostgresDialect({ pool: driver.user }), log(event) { if (event.level === "query") { - args.common.metrics.ponder_postgres_query_total.inc({ + common.metrics.ponder_postgres_query_total.inc({ pool: "user", }); } }, - plugins: [new WithSchemaPlugin(args.namespace)], + plugins: [new WithSchemaPlugin(preBuild.namespace)], }), readonly: new HeadlessKysely({ name: "readonly", - common: args.common, + common: common, dialect: new PostgresDialect({ pool: driver.readonly }), log(event) { if (event.level === "query") { - args.common.metrics.ponder_postgres_query_total.inc({ + common.metrics.ponder_postgres_query_total.inc({ pool: "readonly", }); } }, - plugins: [new WithSchemaPlugin(args.namespace)], + plugins: [new WithSchemaPlugin(preBuild.namespace)], }), sync: new HeadlessKysely({ name: "sync", - common: args.common, + common: common, dialect: new PostgresDialect({ pool: driver.sync }), log(event) { if (event.level === "query") { - args.common.metrics.ponder_postgres_query_total.inc({ + common.metrics.ponder_postgres_query_total.inc({ pool: "sync", }); } @@ -302,56 +296,52 @@ export const createDatabase = (args: { // Register Postgres-only metrics const d = driver as PostgresDriver; - args.common.metrics.registry.removeSingleMetric( + common.metrics.registry.removeSingleMetric( "ponder_postgres_pool_connections", ); - args.common.metrics.ponder_postgres_pool_connections = new prometheus.Gauge( - { - name: "ponder_postgres_pool_connections", - help: "Number of connections in the pool", - labelNames: ["pool", "kind"] as const, - registers: [args.common.metrics.registry], - collect() { - this.set({ pool: "internal", kind: "idle" }, d.internal.idleCount); - this.set({ pool: "internal", kind: "total" }, d.internal.totalCount); - this.set({ pool: "sync", kind: "idle" }, d.sync.idleCount); - this.set({ pool: "sync", kind: "total" }, d.sync.totalCount); - this.set({ pool: "user", kind: "idle" }, d.user.idleCount); - this.set({ pool: "user", kind: "total" }, d.user.totalCount); - this.set({ pool: "readonly", kind: "idle" }, d.readonly.idleCount); - this.set({ pool: "readonly", kind: "total" }, d.readonly.totalCount); - }, + common.metrics.ponder_postgres_pool_connections = new prometheus.Gauge({ + name: "ponder_postgres_pool_connections", + help: "Number of connections in the pool", + labelNames: ["pool", "kind"] as const, + registers: 
[common.metrics.registry], + collect() { + this.set({ pool: "internal", kind: "idle" }, d.internal.idleCount); + this.set({ pool: "internal", kind: "total" }, d.internal.totalCount); + this.set({ pool: "sync", kind: "idle" }, d.sync.idleCount); + this.set({ pool: "sync", kind: "total" }, d.sync.totalCount); + this.set({ pool: "user", kind: "idle" }, d.user.idleCount); + this.set({ pool: "user", kind: "total" }, d.user.totalCount); + this.set({ pool: "readonly", kind: "idle" }, d.readonly.idleCount); + this.set({ pool: "readonly", kind: "total" }, d.readonly.totalCount); }, - ); + }); - args.common.metrics.registry.removeSingleMetric( + common.metrics.registry.removeSingleMetric( "ponder_postgres_query_queue_size", ); - args.common.metrics.ponder_postgres_query_queue_size = new prometheus.Gauge( - { - name: "ponder_postgres_query_queue_size", - help: "Number of queries waiting for an available connection", - labelNames: ["pool"] as const, - registers: [args.common.metrics.registry], - collect() { - this.set({ pool: "internal" }, d.internal.waitingCount); - this.set({ pool: "sync" }, d.sync.waitingCount); - this.set({ pool: "user" }, d.user.waitingCount); - this.set({ pool: "readonly" }, d.readonly.waitingCount); - }, + common.metrics.ponder_postgres_query_queue_size = new prometheus.Gauge({ + name: "ponder_postgres_query_queue_size", + help: "Number of queries waiting for an available connection", + labelNames: ["pool"] as const, + registers: [common.metrics.registry], + collect() { + this.set({ pool: "internal" }, d.internal.waitingCount); + this.set({ pool: "sync" }, d.sync.waitingCount); + this.set({ pool: "user" }, d.user.waitingCount); + this.set({ pool: "readonly" }, d.readonly.waitingCount); }, - ); + }); } const drizzle = dialect === "pglite" || dialect === "pglite_test" ? drizzlePglite((driver as PGliteDriver).instance, { casing: "snake_case", - schema: args.schema, + schema: schemaBuild.schema, }) : drizzleNodePg((driver as PostgresDriver).user, { casing: "snake_case", - schema: args.schema, + schema: schemaBuild.schema, }); //////// @@ -372,10 +362,9 @@ export const createDatabase = (args: { tableName: ReturnType[number]; checkpoint: string; tx: Transaction; - instanceId: string; }) => { const primaryKeyColumns = getPrimaryKeyColumns( - args.schema[tableName.js] as PgTable, + schemaBuild.schema[tableName.js] as PgTable, ); const rows = await tx @@ -445,9 +434,9 @@ export const createDatabase = (args: { } } - args.common.logger.info({ + common.logger.info({ service: "database", - msg: `Reverted ${rows.length} unfinalized operations from '${tableName.user}' table`, + msg: `Reverted ${rows.length} unfinalized operations from '${tableName.sql}' table`, }); }; @@ -459,7 +448,7 @@ export const createDatabase = (args: { // TODO: Probably remove this at 1.0 to speed up startup time. // TODO(kevin) is the `WithSchemaPlugin` going to break this? 
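The refactored gauges above rely on prom-client's `collect()` hook, which runs at scrape time and samples the live `pg` pool counters rather than updating on an interval. A self-contained sketch of the same pattern, assuming a single `Pool` instead of Ponder's four:

```ts
import { Pool } from "pg";
import * as prometheus from "prom-client";

const pool = new Pool({ max: 10 });
const registry = new prometheus.Registry();

// Sampled lazily on each /metrics scrape.
new prometheus.Gauge({
  name: "example_pg_pool_connections",
  help: "Number of connections in the pool",
  labelNames: ["kind"] as const,
  registers: [registry],
  collect() {
    this.set({ kind: "idle" }, pool.idleCount);
    this.set({ kind: "total" }, pool.totalCount);
    this.set({ kind: "waiting" }, pool.waitingCount);
  },
});
```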
await moveLegacyTables({ - common: args.common, + common: common, // @ts-expect-error db: qb.internal, newSchemaName: "ponder_sync", @@ -475,7 +464,12 @@ export const createDatabase = (args: { if (error) throw error; }); }, - async setup() { + async setup({ buildId }) { + common.logger.info({ + service: "database", + msg: `Using database schema '${preBuild.namespace}'`, + }); + //////// // Migrate //////// @@ -523,7 +517,7 @@ export const createDatabase = (args: { // @ts-ignore .select("schema") // @ts-ignore - .where("namespace", "=", args.namespace) + .where("namespace", "=", preBuild.namespace) .executeTakeFirst() .then((schema: any | undefined) => schema === undefined @@ -544,7 +538,7 @@ export const createDatabase = (args: { // @ts-ignore .deleteFrom("namespace_lock") // @ts-ignore - .where("namespace", "=", args.namespace) + .where("namespace", "=", preBuild.namespace) .execute(); if (namespaceCount!.count === 1) { @@ -553,7 +547,7 @@ export const createDatabase = (args: { .cascade() .execute(); - args.common.logger.debug({ + common.logger.debug({ service: "database", msg: `Removed 'ponder' schema`, }); @@ -563,7 +557,11 @@ export const createDatabase = (args: { } } - // v0.7 migration + // v0.8 migration + + // If the schema previously ran with a 0.7 app, remove + // all unlocked "dev" apps. Then, copy a _ponder_meta entry + // to the new format if there is one remaining. const hasPonderMetaTable = await qb.internal // @ts-ignore @@ -573,61 +571,65 @@ export const createDatabase = (args: { // @ts-ignore .where("table_name", "=", "_ponder_meta") // @ts-ignore - .where("table_schema", "=", args.namespace) + .where("table_schema", "=", preBuild.namespace) .executeTakeFirst() .then((table) => table !== undefined); if (hasPonderMetaTable) { await qb.internal.wrap({ method: "migrate" }, () => qb.internal.transaction().execute(async (tx) => { - const previousApp: PonderApp | undefined = await tx + const previousApps = await tx .selectFrom("_ponder_meta") // @ts-ignore - .where("key", "=", "app") + .where("key", "like", "app_%") .select("value") - .executeTakeFirst() - .then((row) => - row === undefined ? 
undefined : (row.value as PonderApp), - ); + .execute() + .then((rows) => rows.map(({ value }) => value as PonderApp)); - if (previousApp) { - const instanceId = crypto.randomBytes(2).toString("hex"); + if ( + previousApps.some( + (app) => + app.is_locked === 1 && + app.heartbeat_at + common.options.databaseHeartbeatTimeout > + Date.now(), + ) + ) { + throw new NonRetryableError( + `Migration failed: Schema '${preBuild.namespace}' has an active app`, + ); + } + for (const app of previousApps) { + for (const table of app.table_names) { + await tx.schema + // @ts-ignore + .dropTable(`${app.instance_id}__${table}`) + .cascade() + .ifExists() + .execute(); + await tx.schema + // @ts-ignore + .dropTable(`${app.instance_id}_reorg__${table}`) + .cascade() + .ifExists() + .execute(); + } await tx .deleteFrom("_ponder_meta") // @ts-ignore - .where("key", "=", "app") + .where("key", "=", `status_${app.instance_id}`) .execute(); - await tx .deleteFrom("_ponder_meta") // @ts-ignore - .where("key", "=", "status") - .execute(); - - for (const tableName of previousApp.table_names) { - await tx.schema - .alterTable(tableName) - .renameTo(userToSqlTableName(tableName, instanceId)) - .execute(); - - await tx.schema - .alterTable(`_ponder_reorg__${tableName}`) - .renameTo(userToReorgTableName(tableName, instanceId)) - .execute(); - } - - await tx - .insertInto("_ponder_meta") - .values({ - key: `app_${instanceId}`, - value: { ...previousApp, instance_id: instanceId }, - }) + .where("key", "=", `app_${app.instance_id}`) .execute(); + } - args.common.logger.debug({ + if (previousApps.length > 0) { + common.logger.debug({ service: "database", - msg: "Migrated previous app to v0.7", + msg: "Migrated previous app to v0.8", }); } }), @@ -635,9 +637,10 @@ export const createDatabase = (args: { } await qb.internal.wrap({ method: "setup" }, async () => { - for (const statement of args.statements.schema.sql) { - await sql.raw(statement).execute(qb.internal); - } + await qb.internal.schema + .createSchema(preBuild.namespace) + .ifNotExists() + .execute(); // Create "_ponder_meta" table if it doesn't exist await qb.internal.schema @@ -648,101 +651,78 @@ export const createDatabase = (args: { .execute(); }); - const attempt = async ({ isFirstAttempt }: { isFirstAttempt: boolean }) => + const attempt = () => qb.internal.wrap({ method: "setup" }, () => qb.internal.transaction().execute(async (tx) => { - const previousApps: PonderApp[] = await tx + const previousApp = await tx .selectFrom("_ponder_meta") - .where("key", "like", "app_%") + .where("key", "=", "app") .select("value") - .execute() - .then((rows) => rows.map(({ value }) => value as PonderApp)); - - const previousAppsWithBuildId = previousApps.filter( - (app) => app.build_id === args.buildId && app.is_dev === 0, - ); + .executeTakeFirst() + .then((row) => row?.value as PonderApp | undefined); const newApp = { is_locked: 1, - is_dev: args.common.options.command === "dev" ? 1 : 0, + is_dev: common.options.command === "dev" ? 
1 : 0, heartbeat_at: Date.now(), - instance_id: args.instanceId, - build_id: args.buildId, + build_id: buildId, checkpoint: encodeCheckpoint(zeroCheckpoint), - table_names: getTableNames(args.schema, args.instanceId).map( - (tableName) => tableName.user, + table_names: getTableNames(schemaBuild.schema).map( + (tableName) => tableName.sql, ), } satisfies PonderApp; - /** - * If schema is empty, start - */ - if (previousAppsWithBuildId.length === 0) { + // If schema is empty, create tables + if (previousApp === undefined) { await tx .insertInto("_ponder_meta") - .values({ key: `status_${args.instanceId}`, value: null }) - .onConflict((oc) => - oc - .column("key") - // @ts-ignore - .doUpdateSet({ value: null }), - ) + .values({ key: "status", value: null }) .execute(); await tx .insertInto("_ponder_meta") .values({ - key: `app_${args.instanceId}`, + key: "app", value: newApp, }) - .onConflict((oc) => - oc - .column("key") - // @ts-ignore - .doUpdateSet({ value: newApp }), - ) .execute(); - for (const tableName of getTableNames( - args.schema, - newApp.instance_id, - )) { - await tx.schema - .dropTable(tableName.sql) - .cascade() - .ifExists() - .execute(); - await tx.schema - .dropTable(tableName.reorg) - .cascade() - .ifExists() - .execute(); - } - - for (let i = 0; i < args.statements.enums.sql.length; i++) { + for ( + let i = 0; + i < schemaBuild.statements.enums.sql.length; + i++ + ) { await sql - .raw(args.statements.enums.sql[i]!) + .raw(schemaBuild.statements.enums.sql[i]!) .execute(tx) .catch((_error) => { const error = _error as Error; if (!error.message.includes("already exists")) throw error; - throw new NonRetryableError( - `Unable to create enum '${args.namespace}'.'${args.statements.enums.json[i]!.name}' because an enum with that name already exists.`, + const e = new NonRetryableError( + `Unable to create enum '${preBuild.namespace}'.'${schemaBuild.statements.enums.json[i]!.name}' because an enum with that name already exists.`, ); + e.stack = undefined; + throw e; }); } - for (let i = 0; i < args.statements.tables.sql.length; i++) { + for ( + let i = 0; + i < schemaBuild.statements.tables.sql.length; + i++ + ) { await sql - .raw(args.statements.tables.sql[i]!) + .raw(schemaBuild.statements.tables.sql[i]!) .execute(tx) .catch((_error) => { const error = _error as Error; if (!error.message.includes("already exists")) throw error; - throw new NonRetryableError( - `Unable to create table '${args.namespace}'.'${args.statements.tables.json[i]!.tableName}' because a table with that name already exists.`, + const e = new NonRetryableError( + `Unable to create table '${preBuild.namespace}'.'${schemaBuild.statements.tables.json[i]!.tableName}' because a table with that name already exists.`, ); + e.stack = undefined; + throw e; }); } - args.common.logger.info({ + common.logger.info({ service: "database", msg: `Created tables [${newApp.table_names.join(", ")}]`, }); @@ -753,312 +733,209 @@ export const createDatabase = (args: { } as const; } - // Find the newest, unlocked, non-dev app to recover from - const crashRecoveryApp = - previousAppsWithBuildId - .filter( - (app) => - app.is_locked === 0 || - app.heartbeat_at + - args.common.options.databaseHeartbeatTimeout <= - Date.now(), - ) - .sort((a, b) => (a.checkpoint > b.checkpoint ? -1 : 1))[0] ?? 
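The create path above executes the pre-built enum and table DDL one statement at a time and rewrites Postgres "already exists" failures into a non-retryable error with a user-facing message and a suppressed stack. A reduced sketch of that pattern, assuming a Kysely executor and an error class comparable to `NonRetryableError`:

```ts
import { type Kysely, sql } from "kysely";

class NonRetryableError extends Error {}

// Run a CREATE TABLE / CREATE TYPE statement, translating a name collision
// into a friendlier, non-retryable error instead of surfacing the raw pg error.
async function createOrExplain(
  db: Kysely<any>,
  statement: string,
  objectName: string,
  namespace: string,
) {
  await sql.raw(statement).execute(db).catch((_error) => {
    const error = _error as Error;
    if (!error.message.includes("already exists")) throw error;
    const e = new NonRetryableError(
      `Unable to create '${namespace}'.'${objectName}' because an object with that name already exists.`,
    );
    e.stack = undefined;
    throw e;
  });
}
```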
- undefined; - + // dev fast path if ( - crashRecoveryApp && - crashRecoveryApp.checkpoint > encodeCheckpoint(zeroCheckpoint) && - args.common.options.command !== "dev" + previousApp.is_dev === 1 || + (process.env.PONDER_EXPERIMENTAL_DB === "platform" && + previousApp.build_id !== newApp.build_id) || + (process.env.PONDER_EXPERIMENTAL_DB === "platform" && + previousApp.checkpoint === encodeCheckpoint(zeroCheckpoint)) ) { await tx - .insertInto("_ponder_meta") - .values({ key: `status_${args.instanceId}`, value: null }) + .updateTable("_ponder_meta") + .set({ value: null }) + .where("key", "=", "status") .execute(); await tx - .insertInto("_ponder_meta") - .values({ - key: `app_${args.instanceId}`, - value: { - ...newApp, - checkpoint: crashRecoveryApp.checkpoint, - }, + .updateTable("_ponder_meta") + .set({ + value: newApp, }) + .where("key", "=", "app") .execute(); - args.common.logger.info({ - service: "database", - msg: `Detected cache hit for build '${args.buildId}' in schema '${args.namespace}' last active ${formatEta(Date.now() - crashRecoveryApp.heartbeat_at)} ago`, - }); - - // Remove triggers - - for (const tableName of getTableNames( - args.schema, - crashRecoveryApp.instance_id, - )) { - await sql - .raw( - `DROP TRIGGER IF EXISTS "${tableName.trigger}" ON "${args.namespace}"."${tableName.sql}"`, - ) - .execute(tx); - } - - // Remove indexes - - for (const indexStatement of args.statements.indexes.json) { + for (const tableName of getTableNames(schemaBuild.schema)) { await tx.schema - .dropIndex(indexStatement.data.name) + .dropTable(tableName.sql) + .cascade() .ifExists() .execute(); - - args.common.logger.info({ - service: "database", - msg: `Dropped index '${indexStatement.data.name}' in schema '${args.namespace}'`, - }); - } - - // Rename tables + reorg tables - for (const tableName of crashRecoveryApp.table_names) { await tx.schema - .alterTable( - userToSqlTableName(tableName, crashRecoveryApp.instance_id), - ) - .renameTo(userToSqlTableName(tableName, args.instanceId)) - .execute(); - - await tx.schema - .alterTable( - userToReorgTableName( - tableName, - crashRecoveryApp.instance_id, - ), - ) - .renameTo(userToReorgTableName(tableName, args.instanceId)) + .dropTable(tableName.reorg) + .cascade() + .ifExists() .execute(); } - await tx - .deleteFrom("_ponder_meta") - .where("key", "=", `status_${crashRecoveryApp.instance_id}`) - .execute(); - - // Drop app - await tx - .deleteFrom("_ponder_meta") - .where("key", "=", `app_${crashRecoveryApp.instance_id}`) - .execute(); - - // Revert unfinalized data - - const { blockTimestamp, chainId, blockNumber } = decodeCheckpoint( - crashRecoveryApp.checkpoint, - ); - - args.common.logger.info({ + for ( + let i = 0; + i < schemaBuild.statements.enums.sql.length; + i++ + ) { + await sql + .raw(schemaBuild.statements.enums.sql[i]!) + .execute(tx) + .catch((_error) => { + const error = _error as Error; + if (!error.message.includes("already exists")) throw error; + const e = new NonRetryableError( + `Unable to create enum '${preBuild.namespace}'.'${schemaBuild.statements.enums.json[i]!.name}' because an enum with that name already exists.`, + ); + e.stack = undefined; + throw e; + }); + } + for ( + let i = 0; + i < schemaBuild.statements.tables.sql.length; + i++ + ) { + await sql + .raw(schemaBuild.statements.tables.sql[i]!) 
+ .execute(tx) + .catch((_error) => { + const error = _error as Error; + if (!error.message.includes("already exists")) throw error; + const e = new NonRetryableError( + `Unable to create table '${preBuild.namespace}'.'${schemaBuild.statements.tables.json[i]!.tableName}' because a table with that name already exists.`, + ); + e.stack = undefined; + throw e; + }); + } + common.logger.info({ service: "database", - msg: `Reverting operations after finalized checkpoint (timestamp=${blockTimestamp} chainId=${chainId} block=${blockNumber})`, + msg: `Created tables [${newApp.table_names.join(", ")}]`, }); - for (const tableName of getTableNames( - args.schema, - args.instanceId, - )) { - await revert({ - tableName, - checkpoint: crashRecoveryApp.checkpoint, - tx, - instanceId: args.instanceId, - }); - } - return { status: "success", - checkpoint: crashRecoveryApp.checkpoint, + checkpoint: encodeCheckpoint(zeroCheckpoint), } as const; } - const nextAvailableApp = previousAppsWithBuildId.sort((a, b) => - a.heartbeat_at < b.heartbeat_at ? -1 : 1, - )[0]!; - + // If crash recovery is not possible, error if ( - isFirstAttempt && - args.common.options.command !== "dev" && - (crashRecoveryApp === undefined || - crashRecoveryApp.is_locked === 1) + common.options.command === "dev" || + previousApp.build_id !== newApp.build_id || + previousApp.checkpoint === encodeCheckpoint(zeroCheckpoint) ) { + const error = new NonRetryableError( + `Schema '${preBuild.namespace}' was previously used by a different Ponder app. Drop the schema first, or use a different schema. Read more: https://ponder.sh/docs/getting-started/database#database-schema`, + ); + error.stack = undefined; + throw error; + } + + const isAppUnlocked = + previousApp.is_locked === 0 || + previousApp.heartbeat_at + + common.options.databaseHeartbeatTimeout <= + Date.now(); + + // If app is locked, wait + if (isAppUnlocked === false) { return { status: "locked", expiry: - nextAvailableApp.heartbeat_at + - args.common.options.databaseHeartbeatTimeout, + previousApp.heartbeat_at + + common.options.databaseHeartbeatTimeout, } as const; } - /** - * At this point in the control flow, there is an app with the same build_id, - * but it can't be used as a crash recovery. The new app should startup. 
- */ + // Crash recovery is possible, recover + + const checkpoint = previousApp.checkpoint; + newApp.checkpoint = checkpoint; await tx - .insertInto("_ponder_meta") - .values({ key: `status_${args.instanceId}`, value: null }) - // @ts-ignore - .onConflict((oc) => oc.column("key").doUpdateSet({ value: null })) + .updateTable("_ponder_meta") + .set({ value: null }) + .where("key", "=", "status") .execute(); await tx - .insertInto("_ponder_meta") - .values({ - key: `app_${args.instanceId}`, + .updateTable("_ponder_meta") + .set({ value: newApp, }) - .onConflict((oc) => - oc - .column("key") - // @ts-ignore - .doUpdateSet({ value: newApp }), - ) + .where("key", "=", "app") .execute(); - // drop tables in case of non-unique instance_id + common.logger.info({ + service: "database", + msg: `Detected crash recovery for build '${buildId}' in schema '${preBuild.namespace}' last active ${formatEta(Date.now() - previousApp.heartbeat_at)} ago`, + }); - for (const tableName of getTableNames( - args.schema, - newApp.instance_id, - )) { - await tx.schema - .dropTable(tableName.sql) - .cascade() - .ifExists() - .execute(); + // Remove triggers + + for (const tableName of getTableNames(schemaBuild.schema)) { + await sql + .raw( + `DROP TRIGGER IF EXISTS "${tableName.trigger}" ON "${preBuild.namespace}"."${tableName.sql}"`, + ) + .execute(tx); + } + + // Remove indexes + + for (const indexStatement of schemaBuild.statements.indexes.json) { await tx.schema - .dropTable(tableName.reorg) - .cascade() + .dropIndex(indexStatement.data.name) .ifExists() .execute(); - } - for (let i = 0; i < args.statements.enums.sql.length; i++) { - await sql - .raw(args.statements.enums.sql[i]!) - .execute(tx) - .catch((_error) => { - const error = _error as Error; - if (!error.message.includes("already exists")) throw error; - throw new NonRetryableError( - `Unable to create enum '${args.namespace}'.'${args.statements.enums.json[i]!.name}' because an enum with that name already exists.`, - ); - }); - } - for (let i = 0; i < args.statements.tables.sql.length; i++) { - await sql - .raw(args.statements.tables.sql[i]!) 
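Taken together, the branches above reduce to a small decision: no previous app means create the tables, a dev app (or, in platform mode, a changed build or an empty checkpoint) means drop and recreate, a build-id mismatch or empty checkpoint otherwise means a hard error, a live heartbeat means wait for the lock, and anything else means recover from the stored checkpoint. A pure-function sketch of that decision (function and result names are illustrative, not part of the patch):

```ts
type PonderApp = {
  is_locked: 0 | 1;
  is_dev: 0 | 1;
  heartbeat_at: number;
  build_id: string;
  checkpoint: string;
  table_names: string[];
};

type SetupAction = "create" | "recreate" | "error" | "wait" | "recover";

function decideSetupAction(args: {
  previousApp: PonderApp | undefined;
  buildId: string;
  command: "dev" | "start" | "serve";
  zeroCheckpoint: string;
  heartbeatTimeout: number;
  now: number;
}): SetupAction {
  const { previousApp, buildId, command, zeroCheckpoint, heartbeatTimeout, now } = args;

  if (previousApp === undefined) return "create";

  const platform = process.env.PONDER_EXPERIMENTAL_DB === "platform";
  if (
    previousApp.is_dev === 1 ||
    (platform && previousApp.build_id !== buildId) ||
    (platform && previousApp.checkpoint === zeroCheckpoint)
  ) {
    return "recreate"; // dev fast path: drop and recreate the tables
  }

  if (
    command === "dev" ||
    previousApp.build_id !== buildId ||
    previousApp.checkpoint === zeroCheckpoint
  ) {
    return "error"; // crash recovery is not possible for this schema
  }

  const unlocked =
    previousApp.is_locked === 0 ||
    previousApp.heartbeat_at + heartbeatTimeout <= now;
  if (!unlocked) return "wait"; // another app holds the lock; retry after it expires

  return "recover"; // revert unfinalized rows past previousApp.checkpoint
}
```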
- .execute(tx) - .catch((_error) => { - const error = _error as Error; - if (!error.message.includes("already exists")) throw error; - throw new NonRetryableError( - `Unable to create table '${args.namespace}'.'${args.statements.tables.json[i]!.tableName}' because a table with that name already exists.`, - ); - }); + common.logger.info({ + service: "database", + msg: `Dropped index '${indexStatement.data.name}' in schema '${preBuild.namespace}'`, + }); } - args.common.logger.info({ + + // Revert unfinalized data + + const { blockTimestamp, chainId, blockNumber } = + decodeCheckpoint(checkpoint); + + common.logger.info({ service: "database", - msg: `Created tables [${newApp.table_names.join(", ")}]`, + msg: `Reverting operations after finalized checkpoint (timestamp=${blockTimestamp} chainId=${chainId} block=${blockNumber})`, }); + for (const tableName of getTableNames(schemaBuild.schema)) { + await revert({ + tableName, + checkpoint, + tx, + }); + } + return { status: "success", - checkpoint: encodeCheckpoint(zeroCheckpoint), + checkpoint, } as const; }), ); - let result = await attempt({ isFirstAttempt: true }); + let result = await attempt(); if (result.status === "locked") { const duration = result.expiry - Date.now(); - args.common.logger.warn({ + common.logger.warn({ service: "database", - msg: `Schema '${args.namespace}' is locked by a different Ponder app`, + msg: `Schema '${preBuild.namespace}' is locked by a different Ponder app`, }); - args.common.logger.warn({ + common.logger.warn({ service: "database", - msg: `Waiting ${formatEta(duration)} for lock on schema '${args.namespace} to expire...`, + msg: `Waiting ${formatEta(duration)} for lock on schema '${preBuild.namespace} to expire...`, }); await wait(duration); - result = await attempt({ isFirstAttempt: false }); + result = await attempt(); if (result.status === "locked") { - throw new NonRetryableError( - `Failed to acquire lock on schema '${args.namespace}'. A different Ponder app is actively using this database.`, + const error = new NonRetryableError( + `Failed to acquire lock on schema '${preBuild.namespace}'. A different Ponder app is actively using this schema.`, ); - } - } - - if (process.env.PONDER_EXPERIMENTAL_DB !== "platform") { - const apps: PonderApp[] = await qb.internal - .selectFrom("_ponder_meta") - .where("key", "like", "app_%") - .select("value") - .execute() - .then((rows) => rows.map(({ value }) => value as PonderApp)); - - const removedDevApps = apps.filter( - (app) => - app.is_dev === 1 && - (app.is_locked === 0 || - app.heartbeat_at + args.common.options.databaseHeartbeatTimeout < - Date.now()), - ); - - const removedStartApps = apps - .filter( - (app) => - app.is_dev === 0 && - (app.is_locked === 0 || - app.heartbeat_at + - args.common.options.databaseHeartbeatTimeout < - Date.now()), - ) - .sort((a, b) => (a.heartbeat_at > b.heartbeat_at ? 
-1 : 1)) - .slice(2); - - const removedApps = [...removedDevApps, ...removedStartApps]; - - for (const app of removedApps) { - for (const table of app.table_names) { - await qb.internal.schema - .dropTable(userToSqlTableName(table, app.instance_id)) - .cascade() - .ifExists() - .execute(); - await qb.internal.schema - .dropTable(userToReorgTableName(table, app.instance_id)) - .cascade() - .ifExists() - .execute(); - } - await qb.internal - .deleteFrom("_ponder_meta") - .where("key", "=", `status_${app.instance_id}`) - .execute(); - await qb.internal - .deleteFrom("_ponder_meta") - .where("key", "=", `app_${app.instance_id}`) - .execute(); - } - - if (removedApps.length > 0) { - args.common.logger.debug({ - service: "database", - msg: `Removed tables corresponding to apps [${removedApps.map((app) => app.instance_id)}]`, - }); - } - - if (apps.length === 1 || args.common.options.command === "dev") { - await this.createLiveViews(); + error.stack = undefined; + throw error; } } @@ -1068,100 +945,40 @@ export const createDatabase = (args: { await qb.internal .updateTable("_ponder_meta") - .where("key", "=", `app_${args.instanceId}`) + .where("key", "=", "app") .set({ value: sql`jsonb_set(value, '{heartbeat_at}', ${heartbeat})`, }) .execute(); - args.common.logger.debug({ + common.logger.debug({ service: "database", - msg: `Updated heartbeat timestamp to ${heartbeat} (build_id=${args.buildId})`, + msg: `Updated heartbeat timestamp to ${heartbeat} (build_id=${buildId})`, }); } catch (err) { const error = err as Error; - args.common.logger.error({ + common.logger.error({ service: "database", msg: `Failed to update heartbeat timestamp, retrying in ${formatEta( - args.common.options.databaseHeartbeatInterval, + common.options.databaseHeartbeatInterval, )}`, error, }); } - }, args.common.options.databaseHeartbeatInterval); + }, common.options.databaseHeartbeatInterval); return { checkpoint: result.checkpoint }; }, async createIndexes() { - for (const statement of args.statements.indexes.sql) { + for (const statement of schemaBuild.statements.indexes.sql) { await sql.raw(statement).execute(qb.internal); } }, - async createLiveViews() { - if (process.env.PONDER_EXPERIMENTAL_DB === "platform") return; - - await qb.internal.wrap({ method: "createLiveViews" }, async () => { - // drop old views - - const previousLiveInstanceId: string | undefined = await qb.internal - .selectFrom("_ponder_meta") - .select("value") - .where("key", "=", "live") - .executeTakeFirst() - .then((row) => (row?.value?.instance_id as string) ?? undefined); - - if (previousLiveInstanceId) { - const previousTableNames = await qb.internal - .selectFrom("_ponder_meta") - .select("value") - .where("key", "=", `app_${previousLiveInstanceId}`) - .executeTakeFirst() - .then((row) => (row ? 
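The heartbeat above (and later `finalize()` and `unlock()`) updates a single field of the jsonb `value` column in place with `jsonb_set`, so the rest of the app row never has to be rewritten. A minimal Kysely sketch of the same pattern, assuming a generic database handle:

```ts
import { type Kysely, sql } from "kysely";

// Bump only `heartbeat_at` inside the jsonb value of the "app" row.
async function touchHeartbeat(db: Kysely<any>, heartbeat: number) {
  await db
    .updateTable("_ponder_meta")
    .where("key", "=", "app")
    .set({ value: sql`jsonb_set(value, '{heartbeat_at}', ${heartbeat})` })
    .execute();
}
```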
(row.value as PonderApp).table_names : [])); - - await Promise.all( - previousTableNames.map((name) => - qb.internal.schema.dropView(name).ifExists().execute(), - ), - ); - } - - // update live app - - await qb.internal - .insertInto("_ponder_meta") - .values({ - key: "live", - value: { instance_id: args.instanceId }, - }) - .onConflict((oc) => - oc - .column("key") - // @ts-ignore - .doUpdateSet({ value: { instance_id: args.instanceId } }), - ) - .execute(); - - // create new views - - for (const tableName of getTableNames(args.schema, args.instanceId)) { - await qb.internal.schema - .createView(tableName.user) - .orReplace() - .as(qb.internal.selectFrom(tableName.sql).selectAll()) - .execute(); - - args.common.logger.info({ - service: "database", - msg: `Created view '${args.namespace}'.'${tableName.user}'`, - }); - } - }); - }, async createTriggers() { await qb.internal.wrap({ method: "createTriggers" }, async () => { - for (const tableName of getTableNames(args.schema, args.instanceId)) { + for (const tableName of getTableNames(schemaBuild.schema)) { const columns = getTableColumns( - args.schema[tableName.js]! as PgTable, + schemaBuild.schema[tableName.js]! as PgTable, ); const columnNames = Object.values(columns).map( @@ -1174,13 +991,13 @@ CREATE OR REPLACE FUNCTION ${tableName.triggerFn} RETURNS TRIGGER AS $$ BEGIN IF TG_OP = 'INSERT' THEN - INSERT INTO "${args.namespace}"."${tableName.reorg}" (${columnNames.join(",")}, operation, checkpoint) + INSERT INTO "${preBuild.namespace}"."${tableName.reorg}" (${columnNames.join(",")}, operation, checkpoint) VALUES (${columnNames.map((name) => `NEW.${name}`).join(",")}, 0, '${encodeCheckpoint(maxCheckpoint)}'); ELSIF TG_OP = 'UPDATE' THEN - INSERT INTO "${args.namespace}"."${tableName.reorg}" (${columnNames.join(",")}, operation, checkpoint) + INSERT INTO "${preBuild.namespace}"."${tableName.reorg}" (${columnNames.join(",")}, operation, checkpoint) VALUES (${columnNames.map((name) => `OLD.${name}`).join(",")}, 1, '${encodeCheckpoint(maxCheckpoint)}'); ELSIF TG_OP = 'DELETE' THEN - INSERT INTO "${args.namespace}"."${tableName.reorg}" (${columnNames.join(",")}, operation, checkpoint) + INSERT INTO "${preBuild.namespace}"."${tableName.reorg}" (${columnNames.join(",")}, operation, checkpoint) VALUES (${columnNames.map((name) => `OLD.${name}`).join(",")}, 2, '${encodeCheckpoint(maxCheckpoint)}'); END IF; RETURN NULL; @@ -1192,7 +1009,7 @@ $$ LANGUAGE plpgsql await sql .raw(` CREATE TRIGGER "${tableName.trigger}" - AFTER INSERT OR UPDATE OR DELETE ON "${args.namespace}"."${tableName.sql}" + AFTER INSERT OR UPDATE OR DELETE ON "${preBuild.namespace}"."${tableName.sql}" FOR EACH ROW EXECUTE FUNCTION ${tableName.triggerFn}; `) .execute(qb.internal); @@ -1201,10 +1018,10 @@ $$ LANGUAGE plpgsql }, async removeTriggers() { await qb.internal.wrap({ method: "removeTriggers" }, async () => { - for (const tableName of getTableNames(args.schema, args.instanceId)) { + for (const tableName of getTableNames(schemaBuild.schema)) { await sql .raw( - `DROP TRIGGER IF EXISTS "${tableName.trigger}" ON "${args.namespace}"."${tableName.sql}"`, + `DROP TRIGGER IF EXISTS "${tableName.trigger}" ON "${preBuild.namespace}"."${tableName.sql}"`, ) .execute(qb.internal); } @@ -1213,13 +1030,12 @@ $$ LANGUAGE plpgsql async revert({ checkpoint }) { await qb.internal.wrap({ method: "revert" }, () => Promise.all( - getTableNames(args.schema, args.instanceId).map((tableName) => + getTableNames(schemaBuild.schema).map((tableName) => qb.internal.transaction().execute((tx) => 
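For reference, the trigger installed above records every row mutation into the table's `_reorg__` shadow table: inserts log the new row with operation 0, updates and deletes log the old row with operations 1 and 2, all stamped with the max checkpoint until `complete()` assigns the real one. A hypothetical rendering of the generated SQL for an `account(address, balance)` table in the `public` schema (the `<max checkpoint>` placeholder stands in for `encodeCheckpoint(maxCheckpoint)`):

```ts
// Illustrative only: what the string templates above produce for one table.
const triggerFnSql = `
CREATE OR REPLACE FUNCTION operation_reorg__account()
RETURNS TRIGGER AS $$
BEGIN
  IF TG_OP = 'INSERT' THEN
    INSERT INTO "public"."_reorg__account" (address, balance, operation, checkpoint)
    VALUES (NEW.address, NEW.balance, 0, '<max checkpoint>');
  ELSIF TG_OP = 'UPDATE' THEN
    INSERT INTO "public"."_reorg__account" (address, balance, operation, checkpoint)
    VALUES (OLD.address, OLD.balance, 1, '<max checkpoint>');
  ELSIF TG_OP = 'DELETE' THEN
    INSERT INTO "public"."_reorg__account" (address, balance, operation, checkpoint)
    VALUES (OLD.address, OLD.balance, 2, '<max checkpoint>');
  END IF;
  RETURN NULL;
END;
$$ LANGUAGE plpgsql;
`;

const triggerSql = `
CREATE TRIGGER "_reorg__account"
AFTER INSERT OR UPDATE OR DELETE ON "public"."account"
FOR EACH ROW EXECUTE FUNCTION operation_reorg__account();
`;
```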
revert({ tableName, checkpoint, tx, - instanceId: args.instanceId, }), ), ), @@ -1230,14 +1046,14 @@ $$ LANGUAGE plpgsql await qb.internal.wrap({ method: "finalize" }, async () => { await qb.internal .updateTable("_ponder_meta") - .where("key", "=", `app_${args.instanceId}`) + .where("key", "=", "app") .set({ value: sql`jsonb_set(value, '{checkpoint}', to_jsonb(${checkpoint}::varchar(75)))`, }) .execute(); await Promise.all( - getTableNames(args.schema, args.instanceId).map((tableName) => + getTableNames(schemaBuild.schema).map((tableName) => qb.internal .deleteFrom(tableName.reorg) .where("checkpoint", "<=", checkpoint) @@ -1248,14 +1064,14 @@ $$ LANGUAGE plpgsql const decoded = decodeCheckpoint(checkpoint); - args.common.logger.debug({ + common.logger.debug({ service: "database", msg: `Updated finalized checkpoint to (timestamp=${decoded.blockTimestamp} chainId=${decoded.chainId} block=${decoded.blockNumber})`, }); }, async complete({ checkpoint }) { await Promise.all( - getTableNames(args.schema, args.instanceId).map((tableName) => + getTableNames(schemaBuild.schema).map((tableName) => qb.internal.wrap({ method: "complete" }, async () => { await qb.internal .updateTable(tableName.reorg) @@ -1272,7 +1088,7 @@ $$ LANGUAGE plpgsql await qb.internal.wrap({ method: "unlock" }, async () => { await qb.internal .updateTable("_ponder_meta") - .where("key", "=", `app_${args.instanceId}`) + .where("key", "=", "app") .set({ value: sql`jsonb_set(value, '{is_locked}', to_jsonb(0))`, }) @@ -1302,7 +1118,7 @@ $$ LANGUAGE plpgsql await d.sync.end(); } - args.common.logger.debug({ + common.logger.debug({ service: "database", msg: "Closed connection to database", }); diff --git a/packages/core/src/drizzle/index.test.ts b/packages/core/src/drizzle/index.test.ts index b5c277fa2..222253332 100644 --- a/packages/core/src/drizzle/index.test.ts +++ b/packages/core/src/drizzle/index.test.ts @@ -1,5 +1,4 @@ -import { getTableName } from "drizzle-orm"; -import { expect, test, vi } from "vitest"; +import { expect, test } from "vitest"; import { onchainTable, primaryKey } from "./index.js"; import { getPrimaryKeyColumns } from "./index.js"; @@ -54,16 +53,3 @@ test("getPrimaryKeyColumns() composite", () => { { js: "address", sql: "address" }, ]); }); - -test("PONDER_EXPERIMENTAL_INSTANCE_ID", async () => { - vi.stubEnv("PONDER_EXPERIMENTAL_INSTANCE_ID", "9876"); - - const account = onchainTable("account", (p) => ({ - address: p.hex().primaryKey(), - balance: p.bigint(), - })); - - expect(getTableName(account)).toBe("9876__account"); - - vi.unstubAllEnvs(); -}); diff --git a/packages/core/src/drizzle/index.ts b/packages/core/src/drizzle/index.ts index 785be5e27..f8a0dfd44 100644 --- a/packages/core/src/drizzle/index.ts +++ b/packages/core/src/drizzle/index.ts @@ -17,7 +17,6 @@ import { type PgColumnBuilderBase, PgEnumColumnBuilder, type PgEnumColumnBuilderInitial, - PgSchema, PgTable, type PgTableExtraConfig, type PgTableWithColumns, @@ -60,27 +59,20 @@ export type Drizzle = export type Schema = { [name: string]: unknown }; -export const userToSqlTableName = (tableName: string, instanceId: string) => - `${instanceId}__${tableName}`; +export const sqlToReorgTableName = (tableName: string) => + `_reorg__${tableName}`; -export const sqlToUserTableName = (tableName: string) => tableName.slice(6); - -export const userToReorgTableName = (tableName: string, instanceId: string) => - `${instanceId}_reorg__${tableName}`; - -export const getTableNames = (schema: Schema, instanceId: string) => { +export const getTableNames = 
(schema: Schema) => { const tableNames = Object.entries(schema) .filter(([, table]) => is(table, PgTable)) .map(([js, table]) => { - const tableName = getTableName(table as PgTable); - const user = sqlToUserTableName(tableName); + const sql = getTableName(table as PgTable); return { - user, - sql: userToSqlTableName(user, instanceId), - reorg: userToReorgTableName(user, instanceId), - trigger: userToReorgTableName(user, instanceId), - triggerFn: `operation_${instanceId}_reorg__${user}()`, + sql, + reorg: sqlToReorgTableName(sql), + trigger: sqlToReorgTableName(sql), + triggerFn: `operation_reorg__${sql}()`, js, } as const; }); @@ -170,7 +162,7 @@ type PgColumnsBuilders = Omit< * - Docs: https://ponder.sh/docs/api-reference/schema#onchaintable * * @example - * import { hex, onchainTable } from "@ponder/core"; + * import { hex, onchainTable } from "ponder"; * * export const account = onchainTable("account", (p) => ({ * address: p.hex(), @@ -183,7 +175,7 @@ type PgColumnsBuilders = Omit< * - Docs: https://ponder.sh/docs/api-reference/schema#onchaintable * * @example - * import { hex, onchainTable } from "@ponder/core"; + * import { hex, onchainTable } from "ponder"; * * export const account = onchainTable("account", (p) => ({ * balance: p.bigint(), @@ -197,7 +189,7 @@ type PgColumnsBuilders = Omit< * - Docs: https://ponder.sh/docs/api-reference/schema#onchaintable * * @example - * import { onchainTable } from "@ponder/core"; + * import { onchainTable } from "ponder"; * * export const account = onchainTable("account", (p) => ({ * address: p.hex().primaryKey(), @@ -224,31 +216,8 @@ export const onchainTable = < extra: extra; dialect: "pg"; }> => { - const instanceId: string | undefined = - process.env.PONDER_EXPERIMENTAL_INSTANCE_ID ?? - // @ts-ignore - globalThis.__PONDER_INSTANCE_ID; - if (instanceId === undefined) { - const table = pgTableWithSchema( - name, - columns, - extraConfig as any, - undefined, - ); - - // @ts-ignore - table[onchain] = true; - - // @ts-ignore - return table; - } - - const table = pgTableWithSchema( - userToSqlTableName(name, instanceId), - columns, - extraConfig as any, - undefined, - ); + const schema = process.env.PONDER_DATABASE_SCHEMA; + const table = pgTableWithSchema(name, columns, extraConfig as any, schema); // @ts-ignore table[onchain] = true; @@ -257,107 +226,6 @@ export const onchainTable = < return table; }; -class OnchainSchema extends PgSchema { - override table = < - name extends string, - columns extends Record, - extra extends PgTableExtraConfig | undefined = undefined, - >( - name: name, - columns: columns | ((columnTypes: PgColumnsBuilders) => columns), - extraConfig?: (self: BuildExtraConfigColumns) => extra, - ): OnchainTable<{ - name: name; - schema: schema; - columns: BuildColumns; - extra: extra; - dialect: "pg"; - }> => { - const instanceId: string | undefined = - process.env.PONDER_EXPERIMENTAL_INSTANCE_ID ?? 
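With the instance-id prefixes gone, `getTableNames` derives every name directly from the Drizzle table, and `onchainTable`/`onchainEnum` place objects in the schema named by `PONDER_DATABASE_SCHEMA` (when set) rather than renaming them. A small usage sketch; the derived names shown in comments follow the helpers in this module:

```ts
import { onchainTable } from "ponder";

// If process.env.PONDER_DATABASE_SCHEMA is set, the table is created in that
// Postgres schema; the table name itself is no longer prefixed with an instance id.
export const account = onchainTable("account", (p) => ({
  address: p.hex().primaryKey(),
  balance: p.bigint().notNull(),
}));

// Internally, getTableNames({ account }) now yields (roughly):
//   sql:       "account"
//   reorg:     "_reorg__account"        // sqlToReorgTableName("account")
//   trigger:   "_reorg__account"
//   triggerFn: "operation_reorg__account()"
//   js:        "account"
```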
- // @ts-ignore - globalThis.__PONDER_INSTANCE_ID; - if (instanceId === undefined) { - const table = pgTableWithSchema( - name, - columns, - extraConfig as any, - this.schemaName, - ); - - // @ts-ignore - table[onchain] = true; - - // @ts-ignore - return table; - } - - const table = pgTableWithSchema( - // @ts-ignore - userToSqlTableName(name, instanceId), - columns, - extraConfig as any, - this.schemaName, - ); - - // @ts-ignore - table[onchain] = true; - - // @ts-ignore - return table; - }; - - override enum = >( - enumName: string, - values: T | Writable, - ): OnchainEnum> & { [onchain]: true } => { - const instanceId: string | undefined = - process.env.PONDER_EXPERIMENTAL_INSTANCE_ID ?? - // @ts-ignore - globalThis.__PONDER_INSTANCE_ID; - if (instanceId === undefined) { - const e = pgEnumWithSchema(enumName, values, this.schemaName); - - // @ts-ignore - e[onchain] = true; - - // @ts-ignore - return e; - } - - const e = pgEnumWithSchema( - userToSqlTableName(enumName, instanceId), - values, - this.schemaName, - ); - - // @ts-ignore - e[onchain] = true; - - // @ts-ignore - return e; - }; -} - -/** - * Define the database schema for onchain tables. - * - * @example - * import { onchainSchema } from "@ponder/core"; - * - * export const schema = onchainSchema("ponder"); - * - * export const account = schema.table("account", (p) => ({ - * address: p.hex().primaryKey(), - * balance: p.bigint().notNull(), - * })); - * - * @param name - The schema for onchain tables. - * @returns The onchain schema. - */ -export const onchainSchema = (name: T) => - new OnchainSchema(name); - export const isPgEnumSym = Symbol.for("drizzle:isPgEnum"); export interface OnchainEnum { @@ -380,24 +248,8 @@ export const onchainEnum = >( enumName: string, values: T | Writable, ): OnchainEnum> & { [onchain]: true } => { - // @ts-ignore - const instanceId: string | undefined = globalThis.__PONDER_INSTANCE_ID; - if (instanceId === undefined) { - const e = pgEnumWithSchema(enumName, values, undefined); - - // @ts-ignore - e[onchain] = true; - - // @ts-ignore - return e; - } - - const e = pgEnumWithSchema( - // @ts-ignore - userToSqlTableName(enumName, instanceId), - values, - undefined, - ); + const schema = process.env.PONDER_DATABASE_SCHEMA; + const e = pgEnumWithSchema(enumName, values, schema); // @ts-ignore e[onchain] = true; diff --git a/packages/core/src/drizzle/kit/index.ts b/packages/core/src/drizzle/kit/index.ts index 43631ce73..0b0b9e233 100644 --- a/packages/core/src/drizzle/kit/index.ts +++ b/packages/core/src/drizzle/kit/index.ts @@ -18,20 +18,12 @@ import { serial, varchar, } from "drizzle-orm/pg-core"; -import { - type Schema, - sqlToUserTableName, - userToSqlTableName, -} from "../index.js"; +import { type Schema, sqlToReorgTableName } from "../index.js"; type Dialect = "postgresql"; type CasingType = "snake_case" | "camelCase"; export type SqlStatements = { - schema: { - sql: string[]; - json: JsonCreateSchema[]; - }; tables: { sql: string[]; json: JsonCreateTableStatement[]; @@ -43,7 +35,7 @@ export type SqlStatements = { indexes: { sql: string[]; json: JsonPgCreateIndexStatement[] }; }; -export const getSql = (schema: Schema, instanceId: string): SqlStatements => { +export const getSql = (schema: Schema): SqlStatements => { const { tables, enums, schemas } = prepareFromExports(schema); const json = generatePgSnapshot(tables, enums, schemas, "snake_case"); const squashed = squashPgScheme(json); @@ -61,10 +53,6 @@ export const getSql = (schema: Schema, instanceId: string): SqlStatements => { return 
prepareCreateEnumJson(it.name, it.schema, it.values); }) ?? []; - const jsonCreateSchemas = prepareCreateSchemasJson( - Object.values(squashed.schemas), - ); - const jsonCreateTables = Object.values(squashed.tables).map((it: any) => { return preparePgCreateTableJson(it, json); }); @@ -87,18 +75,11 @@ export const getSql = (schema: Schema, instanceId: string): SqlStatements => { .filter((it) => it !== ""); const combinedTables = jsonCreateTables.flatMap((statement) => [ - { - ...statement, - tableName: userToSqlTableName( - sqlToUserTableName(statement.tableName), - instanceId, - ), - }, - createReorgTableStatement(statement, instanceId), + statement, + createReorgTableStatement(statement), ]); return { - schema: { sql: fromJson(jsonCreateSchemas), json: jsonCreateSchemas }, tables: { sql: fromJson(combinedTables), json: combinedTables, @@ -111,10 +92,7 @@ export const getSql = (schema: Schema, instanceId: string): SqlStatements => { }; }; -const createReorgTableStatement = ( - statement: JsonCreateTableStatement, - instance_id: string, -) => { +const createReorgTableStatement = (statement: JsonCreateTableStatement) => { const reorgStatement: JsonCreateTableStatement = structuredClone(statement); reorgStatement.compositePkName = undefined; @@ -146,7 +124,7 @@ const createReorgTableStatement = ( reorgStatement.columns.push(...Object.values(reorgColumns)); - reorgStatement.tableName = `${instance_id}_reorg__${reorgStatement.tableName.slice(6)}`; + reorgStatement.tableName = sqlToReorgTableName(reorgStatement.tableName); return reorgStatement; }; @@ -638,15 +616,6 @@ const preparePgCreateIndexesJson = ( }); }; -const prepareCreateSchemasJson = (values: string[]): JsonCreateSchema[] => { - return values.map((it) => { - return { - type: "create_schema", - name: it, - } as JsonCreateSchema; - }); -}; - const prepareCreateEnumJson = ( name: string, schema: string, diff --git a/packages/core/src/graphql/middleware.ts b/packages/core/src/graphql/middleware.ts index fe1089844..741a72828 100644 --- a/packages/core/src/graphql/middleware.ts +++ b/packages/core/src/graphql/middleware.ts @@ -12,8 +12,8 @@ import { buildDataLoaderCache } from "./index.js"; * - Docs: https://ponder.sh/docs/query/api-functions#register-graphql-middleware * * @example - * import { ponder } from "@/generated"; - * import { graphql } from "@ponder/core"; + * import { ponder } from "ponder:registry"; + * import { graphql } from "ponder"; * * ponder.use("/graphql", graphql()); * diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index a03a1d54d..79f11a827 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -1,4 +1,5 @@ export { createConfig } from "@/config/config.js"; +export { factory } from "@/config/address.js"; export type { Block, Log, @@ -25,7 +26,6 @@ export type DatabaseConfig = Prettify; export { onchainTable, - onchainSchema, onchainEnum, primaryKey, hex, diff --git a/packages/core/src/indexing-store/historical.test.ts b/packages/core/src/indexing-store/historical.test.ts index d3205a039..1f44a57f3 100644 --- a/packages/core/src/indexing-store/historical.test.ts +++ b/packages/core/src/indexing-store/historical.test.ts @@ -385,7 +385,6 @@ test("sql", async (context) => { const { database, cleanup } = await setupDatabaseServices(context, { schema, - instanceId: "1234", }); const indexingStore = createHistoricalIndexingStore({ diff --git a/packages/core/src/indexing-store/historical.ts b/packages/core/src/indexing-store/historical.ts index 5af4b41e8..676b23b00 100644 --- 
a/packages/core/src/indexing-store/historical.ts +++ b/packages/core/src/indexing-store/historical.ts @@ -28,6 +28,7 @@ import { and, eq, getTableColumns, + getTableName, sql, } from "drizzle-orm"; import { type PgTable, getTableConfig } from "drizzle-orm/pg-core"; @@ -165,18 +166,16 @@ export const createHistoricalIndexingStore = ({ }, }); - const tableNameCache: Map = new Map(); const primaryKeysCache: Map = new Map(); const cache: Cache = new Map(); - for (const tableName of getTableNames(schema, "")) { + for (const tableName of getTableNames(schema)) { primaryKeysCache.set( schema[tableName.js] as Table, getPrimaryKeyColumns(schema[tableName.js] as PgTable), ); cache.set(schema[tableName.js] as Table, new Map()); - tableNameCache.set(schema[tableName.js] as Table, tableName.user); } //////// @@ -265,7 +264,7 @@ export const createHistoricalIndexingStore = ({ hasEmptyValue(column) === false ) { const error = new NotNullConstraintError( - `Column '${tableNameCache.get(table)}.${columnName}' violates not-null constraint.`, + `Column '${getTableName(table)}.${columnName}' violates not-null constraint.`, ); error.meta.push( `db.${type === EntryType.INSERT ? "insert" : "update"} arguments:\n${prettyPrint(row)}`, @@ -346,7 +345,7 @@ export const createHistoricalIndexingStore = ({ find: (table: Table, key) => queue.add(() => database.qb.user.wrap( - { method: `${tableNameCache.get(table) ?? "unknown"}.find()` }, + { method: `${getTableName(table) ?? "unknown"}.find()` }, async () => { checkOnchainTable(table, "find"); @@ -388,7 +387,7 @@ export const createHistoricalIndexingStore = ({ queue.add(() => database.qb.user.wrap( { - method: `${tableNameCache.get(table) ?? "unknown"}.insert()`, + method: `${getTableName(table) ?? "unknown"}.insert()`, }, async () => { checkOnchainTable(table, "insert"); @@ -441,7 +440,7 @@ export const createHistoricalIndexingStore = ({ queue.add(() => database.qb.user.wrap( { - method: `${tableNameCache.get(table) ?? "unknown"}.insert()`, + method: `${getTableName(table) ?? "unknown"}.insert()`, }, async () => { checkOnchainTable(table, "insert"); @@ -538,7 +537,7 @@ export const createHistoricalIndexingStore = ({ .add(() => database.qb.user.wrap( { - method: `${tableNameCache.get(table) ?? "unknown"}.insert()`, + method: `${getTableName(table) ?? 
"unknown"}.insert()`, }, async () => { checkOnchainTable(table, "insert"); @@ -548,7 +547,7 @@ export const createHistoricalIndexingStore = ({ for (const value of values) { if (getCacheEntry(table, value)?.row) { const error = new UniqueConstraintError( - `Unique constraint failed for '${tableNameCache.get(table)}'.`, + `Unique constraint failed for '${getTableName(table)}'.`, ); error.meta.push( `db.insert arguments:\n${prettyPrint(value)}`, @@ -559,7 +558,7 @@ export const createHistoricalIndexingStore = ({ if (findResult) { const error = new UniqueConstraintError( - `Unique constraint failed for '${tableNameCache.get(table)}'.`, + `Unique constraint failed for '${getTableName(table)}'.`, ); error.meta.push( `db.insert arguments:\n${prettyPrint(value)}`, @@ -576,7 +575,7 @@ export const createHistoricalIndexingStore = ({ } else { if (getCacheEntry(table, values)?.row) { const error = new UniqueConstraintError( - `Unique constraint failed for '${tableNameCache.get(table)}'.`, + `Unique constraint failed for '${getTableName(table)}'.`, ); error.meta.push( `db.insert arguments:\n${prettyPrint(values)}`, @@ -587,7 +586,7 @@ export const createHistoricalIndexingStore = ({ if (findResult) { const error = new UniqueConstraintError( - `Unique constraint failed for '${tableNameCache.get(table)}'.`, + `Unique constraint failed for '${getTableName(table)}'.`, ); error.meta.push( `db.insert arguments:\n${prettyPrint(values)}`, @@ -629,7 +628,7 @@ export const createHistoricalIndexingStore = ({ set: (values: any) => queue.add(() => database.qb.user.wrap( - { method: `${tableNameCache.get(table) ?? "unknown"}.update()` }, + { method: `${getTableName(table) ?? "unknown"}.update()` }, async () => { checkOnchainTable(table, "update"); @@ -643,7 +642,7 @@ export const createHistoricalIndexingStore = ({ } else { if (isDatabaseEmpty) { const error = new RecordNotFoundError( - `No existing record found in table '${tableNameCache.get(table)}'`, + `No existing record found in table '${getTableName(table)}'`, ); error.meta.push( `db.update arguments:\n${prettyPrint(key)}`, @@ -657,7 +656,7 @@ export const createHistoricalIndexingStore = ({ row = findResult; } else { const error = new RecordNotFoundError( - `No existing record found in table '${tableNameCache.get(table)}'`, + `No existing record found in table '${getTableName(table)}'`, ); error.meta.push( `db.update arguments:\n${prettyPrint(key)}`, @@ -694,7 +693,7 @@ export const createHistoricalIndexingStore = ({ delete: (table: Table, key) => queue.add(() => database.qb.user.wrap( - { method: `${tableNameCache.get(table) ?? "unknown"}.delete()` }, + { method: `${getTableName(table) ?? 
"unknown"}.delete()` }, async () => { checkOnchainTable(table, "delete"); @@ -792,14 +791,14 @@ export const createHistoricalIndexingStore = ({ if (insertValues.length > 0) { common.logger.debug({ service: "indexing", - msg: `Inserting ${insertValues.length} cached '${tableNameCache.get(table)}' rows into the database`, + msg: `Inserting ${insertValues.length} cached '${getTableName(table)}' rows into the database`, }); while (insertValues.length > 0) { const values = insertValues.splice(0, batchSize); promises.push( database.qb.user.wrap( - { method: `${tableNameCache.get(table)}.flush()` }, + { method: `${getTableName(table)}.flush()` }, async () => { await database.drizzle .insert(table) @@ -821,7 +820,7 @@ export const createHistoricalIndexingStore = ({ if (updateValues.length > 0) { common.logger.debug({ service: "indexing", - msg: `Updating ${updateValues.length} cached '${tableNameCache.get(table)}' rows in the database`, + msg: `Updating ${updateValues.length} cached '${getTableName(table)}' rows in the database`, }); const primaryKeys = primaryKeysCache.get(table)!; @@ -840,7 +839,7 @@ export const createHistoricalIndexingStore = ({ promises.push( database.qb.user.wrap( { - method: `${tableNameCache.get(table)}.flush()`, + method: `${getTableName(table)}.flush()`, }, async () => { await database.drizzle diff --git a/packages/core/src/indexing-store/metadata.test.ts b/packages/core/src/indexing-store/metadata.test.ts index 1d4779f79..1d604272d 100644 --- a/packages/core/src/indexing-store/metadata.test.ts +++ b/packages/core/src/indexing-store/metadata.test.ts @@ -4,53 +4,15 @@ import { setupIsolatedDatabase, } from "@/_test/setup.js"; import { beforeEach, expect, test } from "vitest"; -import { getLiveMetadataStore, getMetadataStore } from "./metadata.js"; +import { getMetadataStore } from "./metadata.js"; beforeEach(setupCommon); beforeEach(setupIsolatedDatabase); -test("getLiveMetadata() empty", async (context) => { - const { database, cleanup } = await setupDatabaseServices(context); - - const metadataStore = getLiveMetadataStore({ - db: database.qb.user, - }); - - const status = await metadataStore.getStatus(); - - expect(status).toBe(null); - - await cleanup(); -}); - -test("getLiveMetadata()", async (context) => { - const { database, cleanup } = await setupDatabaseServices(context); - - await getMetadataStore({ - db: database.qb.user, - instanceId: "1234", - }).setStatus({ - mainnet: { block: { number: 10, timestamp: 10 }, ready: false }, - }); - - const metadataStore = getLiveMetadataStore({ - db: database.qb.user, - }); - - const status = await metadataStore.getStatus(); - - expect(status).toStrictEqual({ - mainnet: { block: { number: 10, timestamp: 10 }, ready: false }, - }); - - await cleanup(); -}); - test("getMetadata() empty", async (context) => { const { database, cleanup } = await setupDatabaseServices(context); const metadataStore = getMetadataStore({ db: database.qb.user, - instanceId: "1234", }); const status = await metadataStore.getStatus(); @@ -64,7 +26,6 @@ test("setMetadata()", async (context) => { const { database, cleanup } = await setupDatabaseServices(context); const metadataStore = getMetadataStore({ db: database.qb.user, - instanceId: "1234", }); await metadataStore.setStatus({ diff --git a/packages/core/src/indexing-store/metadata.ts b/packages/core/src/indexing-store/metadata.ts index 08cd13bd3..bdf8e2fd6 100644 --- a/packages/core/src/indexing-store/metadata.ts +++ b/packages/core/src/indexing-store/metadata.ts @@ -1,50 +1,22 @@ import type { 
HeadlessKysely } from "@/database/kysely.js"; import type { Status } from "@/sync/index.js"; -import { sql } from "kysely"; export type MetadataStore = { setStatus: (status: Status) => Promise; getStatus: () => Promise; }; -export const getLiveMetadataStore = ({ - db, -}: { db: HeadlessKysely }): Pick => ({ - getStatus: async () => { - return db.wrap({ method: "_ponder_meta.getStatus()" }, async () => { - const metadata = await sql - .raw<{ value: Status | null }>(` -WITH live AS ( - SELECT value->>'instance_id' as instance_id FROM _ponder_meta WHERE key = 'live' -) -SELECT value -FROM _ponder_meta -WHERE key = 'status_' || (SELECT instance_id FROM live); - `) - .execute(db); - - if (!metadata.rows[0]?.value === undefined) { - return null; - } - - return metadata.rows[0]!.value; - }); - }, -}); - export const getMetadataStore = ({ db, - instanceId, }: { db: HeadlessKysely; - instanceId: string; }): MetadataStore => ({ getStatus: async () => { return db.wrap({ method: "_ponder_meta.getStatus()" }, async () => { const metadata = await db .selectFrom("_ponder_meta") .select("value") - .where("key", "=", `status_${instanceId}`) + .where("key", "=", "status") .executeTakeFirst(); if (metadata!.value === null) return null; @@ -57,7 +29,7 @@ export const getMetadataStore = ({ await db .insertInto("_ponder_meta") .values({ - key: `status_${instanceId}`, + key: "status", value: status, }) .onConflict((oc) => diff --git a/packages/core/src/indexing-store/realtime.test.ts b/packages/core/src/indexing-store/realtime.test.ts index d35438046..d4212d1c7 100644 --- a/packages/core/src/indexing-store/realtime.test.ts +++ b/packages/core/src/indexing-store/realtime.test.ts @@ -331,7 +331,6 @@ test("sql", async (context) => { const { database, cleanup } = await setupDatabaseServices(context, { schema, - instanceId: "1234", }); const indexingStore = createRealtimeIndexingStore({ diff --git a/packages/core/src/indexing-store/realtime.ts b/packages/core/src/indexing-store/realtime.ts index 4918c4c15..edb265d7e 100644 --- a/packages/core/src/indexing-store/realtime.ts +++ b/packages/core/src/indexing-store/realtime.ts @@ -19,6 +19,7 @@ import { type Table, and, eq, + getTableName, } from "drizzle-orm"; import { type PgTable, getTableConfig } from "drizzle-orm/pg-core"; import { drizzle } from "drizzle-orm/pg-proxy"; @@ -63,16 +64,13 @@ export const createRealtimeIndexingStore = ({ }, }); - const tableNameCache: Map = new Map(); const primaryKeysCache: Map = new Map(); - for (const tableName of getTableNames(schema, "")) { + for (const tableName of getTableNames(schema)) { primaryKeysCache.set( schema[tableName.js] as Table, getPrimaryKeyColumns(schema[tableName.js] as PgTable), ); - - tableNameCache.set(schema[tableName.js] as Table, tableName.user); } //////// @@ -121,7 +119,7 @@ export const createRealtimeIndexingStore = ({ find: (table: Table, key) => queue.add(() => database.qb.user.wrap( - { method: `${tableNameCache.get(table) ?? "unknown"}.find()` }, + { method: `${getTableName(table) ?? "unknown"}.find()` }, async () => { checkOnchainTable(table, "find"); @@ -140,7 +138,7 @@ export const createRealtimeIndexingStore = ({ queue.add(() => database.qb.user.wrap( { - method: `${tableNameCache.get(table) ?? "unknown"}.insert()`, + method: `${getTableName(table) ?? "unknown"}.insert()`, }, async () => { checkOnchainTable(table, "insert"); @@ -184,7 +182,7 @@ export const createRealtimeIndexingStore = ({ queue.add(() => database.qb.user.wrap( { - method: `${tableNameCache.get(table) ?? 
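The metadata hunks above drop the per-instance key: app status now lives under the single `_ponder_meta` key `status`. A minimal sketch of reading and writing it through the store, assuming a configured `HeadlessKysely` database handle named `db` (the import path matches the file being modified; this snippet is illustrative, not part of the patch):

```ts
import { getMetadataStore } from "@/indexing-store/metadata.js";

// `db` is assumed to be a HeadlessKysely instance wired to the app database.
const metadataStore = getMetadataStore({ db });

// Writes the row { key: "status", value: ... } into _ponder_meta,
// replacing the old `status_${instanceId}` keying scheme.
await metadataStore.setStatus({
  mainnet: { block: { number: 10, timestamp: 10 }, ready: false },
});

const status = await metadataStore.getStatus();
```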
"unknown"}.insert()`, + method: `${getTableName(table) ?? "unknown"}.insert()`, }, async () => { checkOnchainTable(table, "insert"); @@ -278,7 +276,7 @@ export const createRealtimeIndexingStore = ({ .add(() => database.qb.user.wrap( { - method: `${tableNameCache.get(table) ?? "unknown"}.insert()`, + method: `${getTableName(table) ?? "unknown"}.insert()`, }, async () => { checkOnchainTable(table, "insert"); @@ -325,7 +323,7 @@ export const createRealtimeIndexingStore = ({ set: (values: any) => queue.add(() => database.qb.user.wrap( - { method: `${tableNameCache.get(table) ?? "unknown"}.update()` }, + { method: `${getTableName(table) ?? "unknown"}.update()` }, async () => { checkOnchainTable(table, "update"); @@ -334,7 +332,7 @@ export const createRealtimeIndexingStore = ({ if (row === null) { const error = new RecordNotFoundError( - `No existing record found in table '${tableNameCache.get(table)}'`, + `No existing record found in table '${getTableName(table)}'`, ); error.meta.push( `db.update arguments:\n${prettyPrint(key)}`, @@ -373,7 +371,7 @@ export const createRealtimeIndexingStore = ({ delete: (table: Table, key) => queue.add(() => database.qb.user.wrap( - { method: `${tableNameCache.get(table) ?? "unknown"}.delete()` }, + { method: `${getTableName(table) ?? "unknown"}.delete()` }, async () => { checkOnchainTable(table, "delete"); diff --git a/packages/core/src/indexing/ponderActions.ts b/packages/core/src/indexing/ponderActions.ts index 9b5b31a63..3dd6be222 100644 --- a/packages/core/src/indexing/ponderActions.ts +++ b/packages/core/src/indexing/ponderActions.ts @@ -1,37 +1,53 @@ import type { Prettify } from "@/types/utils.js"; -import type { - Abi, - Account, - Chain, - Client, - ContractFunctionArgs, - ContractFunctionName, - GetBalanceParameters, - GetBalanceReturnType, - GetCodeParameters, - GetCodeReturnType, - GetEnsNameParameters, - GetEnsNameReturnType, - GetStorageAtParameters, - GetStorageAtReturnType, - MulticallParameters, - MulticallReturnType, - PublicRpcSchema, - ReadContractParameters, - ReadContractReturnType, - Transport, -} from "viem"; import { - getBalance as viemGetBalance, - getCode as viemGetCode, - getEnsName as viemGetEnsName, - getStorageAt as viemGetStorageAt, - multicall as viemMulticall, - readContract as viemReadContract, -} from "viem/actions"; + type Abi, + type Account, + type Address, + type Chain, + type Client, + type ContractFunctionArgs, + type ContractFunctionName, + type GetBlockReturnType, + type GetBlockTransactionCountReturnType, + type GetTransactionCountReturnType, + type Hash, + type MulticallParameters, + type MulticallReturnType, + type PublicActions, + type PublicRpcSchema, + type ReadContractParameters, + type ReadContractReturnType, + type SimulateContractParameters, + type SimulateContractReturnType, + type Transport, + publicActions, +} from "viem"; import type { Service } from "./service.js"; -export type BlockOptions = +/** Viem actions where the `block` property is optional and implicit. */ +const blockDependentActions = [ + "getBalance", + "call", + "estimateGas", + "getFeeHistory", + "getProof", + "getCode", + "getStorageAt", + "getEnsAddress", + "getEnsAvatar", + "getEnsName", + "getEnsResolver", + "getEnsText", +] as const satisfies readonly (keyof ReturnType)[]; + +/** Viem actions where the `block` property is non-existent. 
*/ +const nonBlockDependentActions = [ + "getTransaction", + "getTransactionReceipt", + "getTransactionConfirmations", +] as const satisfies readonly (keyof ReturnType)[]; + +type BlockOptions = | { cache?: undefined; blockNumber?: undefined; @@ -45,17 +61,32 @@ export type BlockOptions = blockNumber: bigint; }; +type RequiredBlockOptions = + | { + /** Hash of the block. */ + blockHash: Hash; + blockNumber?: undefined; + } + | { + blockHash?: undefined; + /** The block number. */ + blockNumber: bigint; + }; + +type BlockDependentAction< + fn extends (client: any, args: any) => unknown, + /// + params = Parameters[0], + returnType = ReturnType, +> = ( + args: Omit & BlockOptions, +) => returnType; + export type PonderActions = { - getBalance: ( - args: Omit & BlockOptions, - ) => Promise; - getCode: ( - args: Omit & BlockOptions, - ) => Promise; - getStorageAt: ( - args: Omit & - BlockOptions, - ) => Promise; + [action in (typeof blockDependentActions)[number]]: BlockDependentAction< + ReturnType[action] + >; +} & { multicall: < const contracts extends readonly unknown[], allowFailure extends boolean = true, @@ -77,19 +108,60 @@ export type PonderActions = { > & BlockOptions, ) => Promise>; - getEnsName: ( - args: Omit & BlockOptions, - ) => Promise; -}; + simulateContract: < + const abi extends Abi | readonly unknown[], + functionName extends ContractFunctionName, + const args extends ContractFunctionArgs< + abi, + "nonpayable" | "payable", + functionName + >, + >( + args: Omit< + SimulateContractParameters, + "blockTag" | "blockNumber" + > & + BlockOptions, + ) => Promise>; + getBlock: ( + args: { + /** Whether or not to include transaction data in the response. */ + includeTransactions?: includeTransactions | undefined; + } & RequiredBlockOptions, + ) => Promise>; + getTransactionCount: ( + args: { + /** The account address. */ + address: Address; + } & RequiredBlockOptions, + ) => Promise; + getBlockTransactionCount: ( + args: RequiredBlockOptions, + ) => Promise; +} & Pick; export type ReadOnlyClient< transport extends Transport = Transport, chain extends Chain | undefined = Chain | undefined, > = Prettify< - Client + Omit< + Client, + | "extend" + | "key" + | "batch" + | "cacheTime" + | "account" + | "type" + | "uid" + | "chain" + | "name" + | "pollingInterval" + | "transport" + | "ccipRead" + > >; -export const buildCachedActions = ( +export const getPonderActions = ( contextState: Pick, ) => { return < @@ -98,96 +170,58 @@ export const buildCachedActions = ( TAccount extends Account | undefined = Account | undefined, >( client: Client, - ): PonderActions => ({ - getBalance: ({ - cache, - blockNumber: userBlockNumber, - ...args - }: Omit & - BlockOptions): Promise => - viemGetBalance(client, { - ...args, - ...(cache === "immutable" - ? { blockTag: "latest" } - : { blockNumber: userBlockNumber ?? contextState.blockNumber }), - }), - getCode: ({ - cache, - blockNumber: userBlockNumber, - ...args - }: Omit & - BlockOptions): Promise => - viemGetCode(client, { - ...args, - ...(cache === "immutable" - ? { blockTag: "latest" } - : { blockNumber: userBlockNumber ?? contextState.blockNumber }), - }), - getStorageAt: ({ - cache, - blockNumber: userBlockNumber, - ...args - }: Omit & - BlockOptions): Promise => - viemGetStorageAt(client, { - ...args, - ...(cache === "immutable" - ? { blockTag: "latest" } - : { blockNumber: userBlockNumber ?? 
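For context, a hypothetical indexing function exercising the rebuilt actions (the handler, event name, and field accesses are illustrative, not part of the patch): block-dependent actions read at the current event's block by default, and passing `cache: "immutable"` switches the request to the latest block.

```ts
import { ponder } from "ponder:registry";

ponder.on(
  "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)",
  async ({ event, context }) => {
    // Defaults to the event's block number via contextState.blockNumber.
    const balance = await context.client.getBalance({ address: event.args.to });

    // `cache: "immutable"` maps to `blockTag: "latest"`, useful for values
    // that never change, such as deployed bytecode.
    const code = await context.client.getCode({
      address: event.log.address,
      cache: "immutable",
    });
  },
);
```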
contextState.blockNumber }), - }), - multicall: < - const contracts extends readonly unknown[], - allowFailure extends boolean = true, - >({ - cache, - blockNumber: userBlockNumber, - ...args - }: Omit< - MulticallParameters, - "blockTag" | "blockNumber" - > & - BlockOptions): Promise> => - viemMulticall(client, { - ...args, - ...(cache === "immutable" - ? { blockTag: "latest" } - : { blockNumber: userBlockNumber ?? contextState.blockNumber }), - }), - // @ts-ignore - readContract: < - const abi extends Abi | readonly unknown[], - functionName extends ContractFunctionName, - const args extends ContractFunctionArgs< - abi, - "pure" | "view", - functionName - >, - >({ - cache, - blockNumber: userBlockNumber, - ...args - }: Omit< - ReadContractParameters, - "blockTag" | "blockNumber" - > & - BlockOptions): Promise> => - viemReadContract(client, { - ...args, - ...(cache === "immutable" - ? { blockTag: "latest" } - : { blockNumber: userBlockNumber ?? contextState.blockNumber }), - } as ReadContractParameters), - getEnsName: ({ - cache, - blockNumber: userBlockNumber, - ...args - }: Omit & - BlockOptions): Promise => - viemGetEnsName(client, { - ...args, - ...(cache === "immutable" - ? { blockTag: "latest" } - : { blockNumber: userBlockNumber ?? contextState.blockNumber }), - }), - }); + ): PonderActions => { + const actions = {} as PonderActions; + const _publicActions = publicActions(client); + + const addAction = < + action extends + | (typeof blockDependentActions)[number] + | "multicall" + | "readContract" + | "simulateContract", + >( + action: action, + ) => { + // @ts-ignore + actions[action] = ({ + cache, + blockNumber: userBlockNumber, + ...args + }: Parameters[0]) => + // @ts-ignore + _publicActions[action]({ + ...args, + ...(cache === "immutable" + ? { blockTag: "latest" } + : { blockNumber: userBlockNumber ?? 
contextState.blockNumber }), + } as Parameters[action]>[0]); + }; + + for (const action of blockDependentActions) { + addAction(action); + } + + addAction("multicall"); + addAction("readContract"); + addAction("simulateContract"); + + for (const action of nonBlockDependentActions) { + // @ts-ignore + actions[action] = _publicActions[action]; + } + + // required block actions + + for (const action of [ + "getBlock", + "getBlockTransactionCount", + "getTransactionCount", + ]) { + // @ts-ignore + actions[action] = _publicActions[action]; + } + + return actions; + }; }; diff --git a/packages/core/src/indexing/service.test.ts b/packages/core/src/indexing/service.test.ts index e46d9c038..de6833970 100644 --- a/packages/core/src/indexing/service.test.ts +++ b/packages/core/src/indexing/service.test.ts @@ -1,4 +1,4 @@ -import { BOB } from "@/_test/constants.js"; +import { ALICE, BOB } from "@/_test/constants.js"; import { erc20ABI } from "@/_test/generated.js"; import { setupAnvil, @@ -6,20 +6,17 @@ import { setupDatabaseServices, setupIsolatedDatabase, } from "@/_test/setup.js"; -import { getEventsBlock, getEventsLog, getEventsTrace } from "@/_test/utils.js"; +import { deployErc20, mintErc20 } from "@/_test/simulate.js"; +import { getErc20ConfigAndIndexingFunctions } from "@/_test/utils.js"; +import { buildConfigAndIndexingFunctions } from "@/build/configAndIndexingFunctions.js"; import { onchainTable } from "@/drizzle/index.js"; import { createSync } from "@/sync/index.js"; import { encodeCheckpoint, zeroCheckpoint } from "@/utils/checkpoint.js"; import { promiseWithResolvers } from "@ponder/common"; -import { - type Address, - checksumAddress, - parseEther, - toHex, - zeroAddress, -} from "viem"; +import { checksumAddress, padHex, parseEther, toHex, zeroAddress } from "viem"; +import { encodeEventTopics } from "viem/utils"; import { beforeEach, expect, test, vi } from "vitest"; -import { decodeEvents } from "../sync/events.js"; +import { type RawEvent, decodeEvents } from "../sync/events.js"; import { type Context, create, @@ -40,8 +37,16 @@ const account = onchainTable("account", (p) => ({ const schema = { account }; +const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address: zeroAddress, +}); +const { sources, networks } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, +}); + test("createIndexing()", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -74,7 +79,7 @@ test("createIndexing()", async (context) => { }); test("processSetupEvents() empty", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -111,7 +116,7 @@ test("processSetupEvents() empty", async (context) => { }); test("processSetupEvents()", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -155,23 +160,10 @@ test("processSetupEvents()", async (context) => { contracts: { Erc20: { abi: expect.any(Object), - address: checksumAddress(sources[0].filter.address as Address), - startBlock: sources[0].filter.fromBlock, - endBlock: sources[0].filter.toBlock, - }, - Pair: { - abi: expect.any(Object), - address: undefined, - startBlock: 
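Continuing the same hypothetical handler: the "required block" actions registered in the loop above are passed through to viem unchanged, so the new `RequiredBlockOptions` type forces callers to name a block explicitly instead of inheriting the event's block.

```ts
// Each of these requires an explicit blockNumber or blockHash.
const block = await context.client.getBlock({
  blockNumber: event.block.number,
});
const transactionCount = await context.client.getBlockTransactionCount({
  blockHash: block.hash,
});
const nonce = await context.client.getTransactionCount({
  address: event.transaction.from,
  blockNumber: event.block.number,
});
```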
sources[1].filter.fromBlock, - endBlock: sources[1].filter.toBlock, - }, - Factory: { - abi: expect.any(Object), - address: checksumAddress( - sources[2].filter.toAddress.address as Address, - ), - startBlock: sources[2].filter.fromBlock, - endBlock: sources[2].filter.toBlock, + // @ts-ignore + address: checksumAddress(sources[0]!.filter.address), + startBlock: sources[0]!.filter.fromBlock, + endBlock: sources[0]!.filter.toBlock, }, }, client: expect.any(Object), @@ -182,8 +174,8 @@ test("processSetupEvents()", async (context) => { await cleanup(); }); -test("processEvent() log events", async (context) => { - const { common, sources, networks } = context; +test("processEvent()", async (context) => { + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -215,8 +207,31 @@ test("processEvent() log events", async (context) => { setIndexingStore(indexingService, indexingStore); - const rawEvents = await getEventsLog(sources); - const events = decodeEvents(common, sources, rawEvents); + const topics = encodeEventTopics({ + abi: erc20ABI, + eventName: "Transfer", + args: { + from: zeroAddress, + to: ALICE, + }, + }); + + const data = padHex(toHex(parseEther("1")), { size: 32 }); + + const rawEvent = { + chainId: 1, + sourceIndex: 0, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: { + id: "test", + data, + topics, + }, + } as RawEvent; + + const events = decodeEvents(common, sources, [rawEvent]); const result = await processEvents(indexingService, { events, }); @@ -226,7 +241,7 @@ test("processEvent() log events", async (context) => { indexingFunctions[ "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)" ], - ).toHaveBeenCalledTimes(2); + ).toHaveBeenCalledTimes(1); expect( indexingFunctions[ "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)" @@ -238,30 +253,17 @@ test("processEvent() log events", async (context) => { log: expect.any(Object), block: expect.any(Object), transaction: expect.any(Object), - transactionReceipt: expect.any(Object), + transactionReceipt: undefined, }, context: { network: { chainId: 1, name: "mainnet" }, contracts: { Erc20: { abi: expect.any(Object), - address: checksumAddress(sources[0].filter.address as Address), - startBlock: sources[0].filter.fromBlock, - endBlock: sources[0].filter.toBlock, - }, - Pair: { - abi: expect.any(Object), - address: undefined, - startBlock: sources[1].filter.fromBlock, - endBlock: sources[1].filter.toBlock, - }, - Factory: { - abi: expect.any(Object), - address: checksumAddress( - sources[2].filter.toAddress.address as Address, - ), - startBlock: sources[2].filter.fromBlock, - endBlock: sources[2].filter.toBlock, + // @ts-ignore + address: checksumAddress(sources[0]!.filter.address), + startBlock: sources[0]!.filter.fromBlock, + endBlock: sources[0]!.filter.toBlock, }, }, client: expect.any(Object), @@ -272,8 +274,8 @@ test("processEvent() log events", async (context) => { await cleanup(); }); -test("processEvents() block events", async (context) => { - const { common, sources, networks } = context; +test("processEvents killed", async (context) => { + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -290,7 +292,9 @@ test("processEvents() block events", async (context) => { }); const indexingFunctions = { - "OddBlocks:block": vi.fn(), + "Erc20:Transfer(address 
indexed from, address indexed to, uint256 amount)": + vi.fn(), + "Pair:Swap": vi.fn(), }; const indexingService = create({ @@ -302,167 +306,33 @@ test("processEvents() block events", async (context) => { }); setIndexingStore(indexingService, indexingStore); + kill(indexingService); - const rawEvents = await getEventsBlock(sources); - const events = decodeEvents(common, sources, rawEvents); - const result = await processEvents(indexingService, { - events, - }); - expect(result).toStrictEqual({ status: "success" }); - - expect(indexingFunctions["OddBlocks:block"]).toHaveBeenCalledTimes(1); - expect(indexingFunctions["OddBlocks:block"]).toHaveBeenCalledWith({ - event: { - block: expect.any(Object), - }, - context: { - network: { chainId: 1, name: "mainnet" }, - contracts: { - Erc20: { - abi: expect.any(Object), - address: checksumAddress(sources[0].filter.address as Address), - startBlock: sources[0].filter.fromBlock, - endBlock: sources[0].filter.toBlock, - }, - Pair: { - abi: expect.any(Object), - address: undefined, - startBlock: sources[1].filter.fromBlock, - endBlock: sources[1].filter.toBlock, - }, - Factory: { - abi: expect.any(Object), - address: checksumAddress( - sources[2].filter.toAddress.address as Address, - ), - startBlock: sources[2].filter.fromBlock, - endBlock: sources[2].filter.toBlock, - }, - }, - client: expect.any(Object), - db: expect.any(Object), + const topics = encodeEventTopics({ + abi: erc20ABI, + eventName: "Transfer", + args: { + from: zeroAddress, + to: ALICE, }, }); - await cleanup(); -}); - -test("processEvents() call trace events", async (context) => { - const { common, sources, networks } = context; - const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( - context, - { schema }, - ); - - const sync = await createSync({ - common, - syncStore, - networks, - sources, - onRealtimeEvent: () => Promise.resolve(), - onFatalError: () => {}, - initialCheckpoint: encodeCheckpoint(zeroCheckpoint), - }); - - const indexingFunctions = { - "Factory.createPair()": vi.fn(), - }; + const data = padHex(toHex(parseEther("1")), { size: 32 }); - const indexingService = create({ - indexingFunctions, - common, - sources, - networks, - sync, - }); - - setIndexingStore(indexingService, indexingStore); - - const rawEvents = await getEventsTrace(sources); - const events = decodeEvents(common, sources, rawEvents); - const result = await processEvents(indexingService, { - events, - }); - expect(result).toStrictEqual({ status: "success" }); - - expect(indexingFunctions["Factory.createPair()"]).toHaveBeenCalledTimes(1); - expect(indexingFunctions["Factory.createPair()"]).toHaveBeenCalledWith({ - event: { - args: undefined, - result: expect.any(String), - block: expect.any(Object), - trace: expect.any(Object), - transaction: expect.any(Object), - transactionReceipt: expect.any(Object), - }, - context: { - network: { chainId: 1, name: "mainnet" }, - contracts: { - Erc20: { - abi: expect.any(Object), - address: checksumAddress(sources[0].filter.address as Address), - startBlock: sources[0].filter.fromBlock, - endBlock: sources[0].filter.toBlock, - }, - Pair: { - abi: expect.any(Object), - address: undefined, - startBlock: sources[1].filter.fromBlock, - endBlock: sources[1].filter.toBlock, - }, - Factory: { - abi: expect.any(Object), - address: checksumAddress( - sources[2].filter.toAddress.address as Address, - ), - startBlock: sources[2].filter.fromBlock, - endBlock: sources[2].filter.toBlock, - }, - }, - client: expect.any(Object), - db: expect.any(Object), + const 
rawEvent = { + chainId: 1, + sourceIndex: 0, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: { + id: "test", + data, + topics, }, - }); - - await cleanup(); -}); - -test("processEvents killed", async (context) => { - const { common, sources, networks } = context; - const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( - context, - { schema }, - ); - - const sync = await createSync({ - common, - syncStore, - networks, - sources, - onRealtimeEvent: () => Promise.resolve(), - onFatalError: () => {}, - initialCheckpoint: encodeCheckpoint(zeroCheckpoint), - }); + } as RawEvent; - const indexingFunctions = { - "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)": - vi.fn(), - "Pair:Swap": vi.fn(), - }; - - const indexingService = create({ - indexingFunctions, - common, - sources, - networks, - sync, - }); - - setIndexingStore(indexingService, indexingStore); - kill(indexingService); - - const rawEvents = await getEventsLog(sources); - const events = decodeEvents(common, sources, rawEvents); + const events = decodeEvents(common, sources, [rawEvent]); const result = await processEvents(indexingService, { events, }); @@ -478,7 +348,7 @@ test("processEvents killed", async (context) => { }); test("processEvents eventCount", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -497,7 +367,6 @@ test("processEvents eventCount", async (context) => { const indexingFunctions = { "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)": vi.fn(), - "Pair:Swap": vi.fn(), }; const indexingService = create({ @@ -510,23 +379,45 @@ test("processEvents eventCount", async (context) => { setIndexingStore(indexingService, indexingStore); - const rawEvents = await getEventsLog(sources); - const events = decodeEvents(common, sources, rawEvents); + const topics = encodeEventTopics({ + abi: erc20ABI, + eventName: "Transfer", + args: { + from: zeroAddress, + to: ALICE, + }, + }); + + const data = padHex(toHex(parseEther("1")), { size: 32 }); + + const rawEvent = { + chainId: 1, + sourceIndex: 0, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: { + id: "test", + data, + topics, + }, + } as RawEvent; + + const events = decodeEvents(common, sources, [rawEvent]); const result = await processEvents(indexingService, { events, }); expect(result).toStrictEqual({ status: "success" }); expect(indexingService.eventCount).toStrictEqual({ - "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)": 2, - "Pair:Swap": 1, + "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)": 1, }); await cleanup(); }); test("executeSetup() context.client", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -581,7 +472,7 @@ test("executeSetup() context.client", async (context) => { }); test("executeSetup() context.db", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -636,7 +527,7 @@ test("executeSetup() context.db", async (context) => { }); 
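The raw ERC-20 Transfer event above is rebuilt by hand in several of these tests. A small helper that factors out that construction, using only names that appear in the hunks (the helper itself is a suggestion, not part of the patch):

```ts
import { padHex, parseEther, toHex, zeroAddress } from "viem";
import { encodeEventTopics } from "viem/utils";
import { ALICE } from "@/_test/constants.js";
import { erc20ABI } from "@/_test/generated.js";
import { encodeCheckpoint, zeroCheckpoint } from "@/utils/checkpoint.js";
import type { RawEvent } from "../sync/events.js";

// Builds the minimal Transfer(zeroAddress -> ALICE, 1 ether) RawEvent used by
// the indexing service tests. Block and transaction are stubbed out, exactly
// as in the tests above.
const buildTransferRawEvent = (): RawEvent =>
  ({
    chainId: 1,
    sourceIndex: 0,
    checkpoint: encodeCheckpoint(zeroCheckpoint),
    block: {} as RawEvent["block"],
    transaction: {} as RawEvent["transaction"],
    log: {
      id: "test",
      data: padHex(toHex(parseEther("1")), { size: 32 }),
      topics: encodeEventTopics({
        abi: erc20ABI,
        eventName: "Transfer",
        args: { from: zeroAddress, to: ALICE },
      }),
    },
  }) as RawEvent;
```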
test("executeSetup() metrics", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -677,7 +568,7 @@ test("executeSetup() metrics", async (context) => { }); test("executeSetup() error", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -721,7 +612,7 @@ test("executeSetup() error", async (context) => { }); test("processEvents() context.client", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -747,9 +638,6 @@ test("processEvents() context.client", async (context) => { indexingFunctions: { "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)": clientCall, - "Pair:Swap": clientCall, - "OddBlocks:block": clientCall, - "Factory.createPair()": clientCall, }, common, sources, @@ -764,18 +652,37 @@ test("processEvents() context.client", async (context) => { "getBalance", ); - const rawEvents = [ - ...(await getEventsLog(sources)), - ...(await getEventsBlock(sources)), - ...(await getEventsTrace(sources)), - ]; - const events = decodeEvents(common, sources, rawEvents); + const topics = encodeEventTopics({ + abi: erc20ABI, + eventName: "Transfer", + args: { + from: zeroAddress, + to: ALICE, + }, + }); + + const data = padHex(toHex(parseEther("1")), { size: 32 }); + + const rawEvent = { + chainId: 1, + sourceIndex: 0, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: { + id: "test", + data, + topics, + }, + } as RawEvent; + + const events = decodeEvents(common, sources, [rawEvent]); const result = await processEvents(indexingService, { events, }); expect(result).toStrictEqual({ status: "success" }); - expect(getBalanceSpy).toHaveBeenCalledTimes(5); + expect(getBalanceSpy).toHaveBeenCalledTimes(1); expect(getBalanceSpy).toHaveBeenCalledWith({ address: BOB, }); @@ -784,10 +691,10 @@ test("processEvents() context.client", async (context) => { }); test("processEvents() context.db", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, - { schema, instanceId: "1234" }, + { schema }, ); const sync = await createSync({ @@ -813,9 +720,6 @@ test("processEvents() context.db", async (context) => { indexingFunctions: { "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)": dbCall, - "Pair:Swap": dbCall, - "OddBlocks:block": dbCall, - "Factory.createPair()": dbCall, }, common, sources, @@ -827,28 +731,47 @@ test("processEvents() context.db", async (context) => { const insertSpy = vi.spyOn(indexingService.currentEvent.context.db, "insert"); - const rawEvents = [ - ...(await getEventsLog(sources)), - ...(await getEventsBlock(sources)), - ...(await getEventsTrace(sources)), - ]; - const events = decodeEvents(common, sources, rawEvents); + const topics = encodeEventTopics({ + abi: erc20ABI, + eventName: "Transfer", + args: { + from: zeroAddress, + to: ALICE, + }, + }); + + const data = padHex(toHex(parseEther("1")), { size: 32 }); + + const rawEvent = { + chainId: 1, + sourceIndex: 0, + checkpoint: 
encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: { + id: "test", + data, + topics, + }, + } as RawEvent; + + const events = decodeEvents(common, sources, [rawEvent]); const result = await processEvents(indexingService, { events, }); expect(result).toStrictEqual({ status: "success" }); - expect(insertSpy).toHaveBeenCalledTimes(5); + expect(insertSpy).toHaveBeenCalledTimes(1); const transferEvents = await indexingStore.sql.select().from(account); - expect(transferEvents).toHaveLength(5); + expect(transferEvents).toHaveLength(1); await cleanup(); }); test("processEvents() metrics", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -868,9 +791,6 @@ test("processEvents() metrics", async (context) => { indexingFunctions: { "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)": vi.fn(), - "Pair:Swap": vi.fn(), - "OddBlocks:block": vi.fn(), - "Factory.createPair()": vi.fn(), }, common, sources, @@ -880,12 +800,31 @@ test("processEvents() metrics", async (context) => { setIndexingStore(indexingService, indexingStore); - const rawEvents = [ - ...(await getEventsLog(sources)), - ...(await getEventsBlock(sources)), - ...(await getEventsTrace(sources)), - ]; - const events = decodeEvents(common, sources, rawEvents); + const topics = encodeEventTopics({ + abi: erc20ABI, + eventName: "Transfer", + args: { + from: zeroAddress, + to: ALICE, + }, + }); + + const data = padHex(toHex(parseEther("1")), { size: 32 }); + + const rawEvent = { + chainId: 1, + sourceIndex: 0, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: { + id: "test", + data, + topics, + }, + } as RawEvent; + + const events = decodeEvents(common, sources, [rawEvent]); await processEvents(indexingService, { events, }); @@ -897,7 +836,7 @@ test("processEvents() metrics", async (context) => { }); test("processEvents() error", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -916,9 +855,6 @@ test("processEvents() error", async (context) => { const indexingFunctions = { "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)": vi.fn(), - "Pair:Swap": vi.fn(), - "OddBlocks:block": vi.fn(), - "Factory.createPair()": vi.fn(), }; const indexingService = create({ @@ -935,12 +871,31 @@ test("processEvents() error", async (context) => { "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)" ].mockRejectedValue(new Error()); - const rawEvents = [ - ...(await getEventsLog(sources)), - ...(await getEventsBlock(sources)), - ...(await getEventsTrace(sources)), - ]; - const events = decodeEvents(common, sources, rawEvents); + const topics = encodeEventTopics({ + abi: erc20ABI, + eventName: "Transfer", + args: { + from: zeroAddress, + to: ALICE, + }, + }); + + const data = padHex(toHex(parseEther("1")), { size: 32 }); + + const rawEvent = { + chainId: 1, + sourceIndex: 0, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: { + id: "test", + data, + topics, + }, + } as RawEvent; + + const events = decodeEvents(common, sources, [rawEvent]); const result = await 
processEvents(indexingService, { events, }); @@ -954,15 +909,12 @@ test("processEvents() error", async (context) => { "Erc20:Transfer(address indexed from, address indexed to, uint256 amount)" ], ).toHaveBeenCalledTimes(1); - expect(indexingFunctions["Pair:Swap"]).toHaveBeenCalledTimes(0); - expect(indexingFunctions["OddBlocks:block"]).toHaveBeenCalledTimes(0); - expect(indexingFunctions["Factory.createPair()"]).toHaveBeenCalledTimes(0); await cleanup(); }); test("execute() error after killed", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -994,8 +946,31 @@ test("execute() error after killed", async (context) => { setIndexingStore(indexingService, indexingStore); - const rawEvents = await getEventsLog(sources); - const events = decodeEvents(common, sources, rawEvents); + const topics = encodeEventTopics({ + abi: erc20ABI, + eventName: "Transfer", + args: { + from: zeroAddress, + to: ALICE, + }, + }); + + const data = padHex(toHex(parseEther("1")), { size: 32 }); + + const rawEvent = { + chainId: 1, + sourceIndex: 0, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: { + id: "test", + data, + topics, + }, + } as RawEvent; + + const events = decodeEvents(common, sources, [rawEvent]); const resultPromise = processEvents(indexingService, { events }); kill(indexingService); @@ -1008,7 +983,7 @@ test("execute() error after killed", async (context) => { }); test("ponderActions getBalance()", async (context) => { - const { common, sources, networks } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, @@ -1044,12 +1019,14 @@ test("ponderActions getBalance()", async (context) => { }); test("ponderActions getCode()", async (context) => { - const { common, sources, networks, erc20 } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, ); + const { address } = await deployErc20({ sender: ALICE }); + const sync = await createSync({ common, syncStore, @@ -1071,7 +1048,7 @@ test("ponderActions getCode()", async (context) => { setIndexingStore(indexingService, indexingStore); const bytecode = await indexingService.clientByChainId[1]!.getCode({ - address: erc20.address, + address, }); expect(bytecode).toBeTruthy(); @@ -1080,12 +1057,20 @@ test("ponderActions getCode()", async (context) => { }); test("ponderActions getStorageAt()", async (context) => { - const { common, sources, networks, erc20 } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, ); + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + const sync = await createSync({ common, syncStore, @@ -1107,7 +1092,7 @@ test("ponderActions getStorageAt()", async (context) => { setIndexingStore(indexingService, indexingStore); const storage = await indexingService.clientByChainId[1]!.getStorageAt({ - address: erc20.address, + address, // totalSupply is in the third storage slot slot: toHex(2), }); @@ -1118,12 +1103,20 @@ test("ponderActions getStorageAt()", async (context) => { }); test("ponderActions readContract()", async (context) => { - const { common, sources, 
networks, erc20 } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, ); + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + const sync = await createSync({ common, syncStore, @@ -1147,7 +1140,7 @@ test("ponderActions readContract()", async (context) => { const totalSupply = await indexingService.clientByChainId[1]!.readContract({ abi: erc20ABI, functionName: "totalSupply", - address: erc20.address, + address, }); expect(totalSupply).toBe(parseEther("1")); @@ -1156,12 +1149,20 @@ test("ponderActions readContract()", async (context) => { }); test("ponderActions readContract() blockNumber", async (context) => { - const { common, sources, networks, erc20 } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, ); + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + const sync = await createSync({ common, syncStore, @@ -1185,7 +1186,7 @@ test("ponderActions readContract() blockNumber", async (context) => { const totalSupply = await indexingService.clientByChainId[1]!.readContract({ abi: erc20ABI, functionName: "totalSupply", - address: erc20.address, + address, blockNumber: 1n, }); @@ -1196,12 +1197,20 @@ test("ponderActions readContract() blockNumber", async (context) => { // Note: Kyle the local chain doesn't have a deployed instance of "multicall3" test.skip("ponderActions multicall()", async (context) => { - const { common, sources, networks, erc20 } = context; + const { common } = context; const { syncStore, indexingStore, cleanup } = await setupDatabaseServices( context, { schema }, ); + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + const sync = await createSync({ common, syncStore, @@ -1228,7 +1237,7 @@ test.skip("ponderActions multicall()", async (context) => { { abi: erc20ABI, functionName: "totalSupply", - address: erc20.address, + address, }, ], }); diff --git a/packages/core/src/indexing/service.ts b/packages/core/src/indexing/service.ts index f427ec376..267bc7713 100644 --- a/packages/core/src/indexing/service.ts +++ b/packages/core/src/indexing/service.ts @@ -16,20 +16,13 @@ import { encodeCheckpoint, zeroCheckpoint, } from "@/utils/checkpoint.js"; -import { never } from "@/utils/never.js"; import { prettyPrint } from "@/utils/print.js"; import { startClock } from "@/utils/timer.js"; import type { Abi, Address } from "viem"; import { checksumAddress, createClient } from "viem"; -import type { - BlockEvent, - CallTraceEvent, - Event, - LogEvent, - SetupEvent, -} from "../sync/events.js"; +import type { Event, SetupEvent } from "../sync/events.js"; import { addStackTrace } from "./addStackTrace.js"; -import { type ReadOnlyClient, buildCachedActions } from "./ponderActions.js"; +import { type ReadOnlyClient, getPonderActions } from "./ponderActions.js"; export type Context = { network: { chainId: number; name: string }; @@ -40,7 +33,7 @@ export type Context = { { abi: Abi; address?: Address | readonly Address[]; - startBlock: number; + startBlock?: number; endBlock?: number; } >; @@ -105,7 +98,7 @@ export const create = ({ // build contractsByChainId for (const source of sources) { - if 
(source.type === "block") continue; + if (source.type === "block" || source.type === "account") continue; let address: Address | undefined; @@ -142,9 +135,6 @@ export const create = ({ }; } - // build cachedActions - const cachedActions = buildCachedActions(contextState); - // build clientByChainId for (const network of networks) { const transport = sync.getCachedTransport(network); @@ -152,7 +142,7 @@ export const create = ({ transport, chain: network.chain, // @ts-ignore - }).extend(cachedActions); + }).extend(getPonderActions(contextState)); } // build eventCount @@ -219,12 +209,12 @@ export const processSetupEvents = async ( checkpoint: encodeCheckpoint({ ...zeroCheckpoint, chainId: BigInt(network.chainId), - blockNumber: BigInt(source.filter.fromBlock), + blockNumber: BigInt(source.filter.fromBlock ?? 0), }), name: eventName, - block: BigInt(source.filter.fromBlock), + block: BigInt(source.filter.fromBlock ?? 0), }, }); @@ -250,74 +240,23 @@ export const processEvents = async ( const event = events[i]!; - switch (event.type) { - case "log": { - indexingService.eventCount[event.name]!++; - - indexingService.common.logger.trace({ - service: "indexing", - msg: `Started indexing function (event="${event.name}", checkpoint=${event.checkpoint})`, - }); - - const result = await executeLog(indexingService, { event }); - if (result.status !== "success") { - return result; - } - - indexingService.common.logger.trace({ - service: "indexing", - msg: `Completed indexing function (event="${event.name}", checkpoint=${event.checkpoint})`, - }); - - break; - } - - case "block": { - indexingService.eventCount[event.name]!++; - - indexingService.common.logger.trace({ - service: "indexing", - msg: `Started indexing function (event="${event.name}", checkpoint=${event.checkpoint})`, - }); - - const result = await executeBlock(indexingService, { event }); - if (result.status !== "success") { - return result; - } + indexingService.eventCount[event.name]!++; - indexingService.common.logger.trace({ - service: "indexing", - msg: `Completed indexing function (event="${event.name}", checkpoint=${event.checkpoint})`, - }); - - break; - } - - case "callTrace": { - indexingService.eventCount[event.name]!++; - - indexingService.common.logger.trace({ - service: "indexing", - msg: `Started indexing function (event="${event.name}", checkpoint=${event.checkpoint})`, - }); - - const result = await executeCallTrace(indexingService, { event }); - if (result.status !== "success") { - return result; - } - - indexingService.common.logger.trace({ - service: "indexing", - msg: `Completed indexing function (event="${event.name}", checkpoint=${event.checkpoint})`, - }); - - break; - } + indexingService.common.logger.trace({ + service: "indexing", + msg: `Started indexing function (event="${event.name}", checkpoint=${event.checkpoint})`, + }); - default: - never(event); + const result = await executeEvent(indexingService, { event }); + if (result.status !== "success") { + return result; } + indexingService.common.logger.trace({ + service: "indexing", + msg: `Completed indexing function (event="${event.name}", checkpoint=${event.checkpoint})`, + }); + // periodically update metrics if (i % 93 === 0) { updateCompletedEvents(indexingService); @@ -458,143 +397,37 @@ const executeSetup = async ( return { status: "success" }; }; -const executeLog = async ( - indexingService: Service, - { event }: { event: LogEvent }, -): Promise< - | { status: "error"; error: Error } - | { status: "success" } - | { status: "killed" } -> => { - const { - 
common, - indexingFunctions, - currentEvent, - networkByChainId, - contractsByChainId, - clientByChainId, - } = indexingService; - const indexingFunction = indexingFunctions[event.name]; - const metricLabel = { event: event.name }; - - try { - // set currentEvent - currentEvent.context.network.chainId = event.chainId; - currentEvent.context.network.name = networkByChainId[event.chainId]!.name; - currentEvent.context.client = clientByChainId[event.chainId]!; - currentEvent.context.contracts = contractsByChainId[event.chainId]!; - currentEvent.contextState.blockNumber = event.event.block.number; - - const endClock = startClock(); - - await indexingFunction!({ - event: event.event, - context: currentEvent.context, - }); - - common.metrics.ponder_indexing_function_duration.observe( - metricLabel, - endClock(), - ); - } catch (_error) { - if (indexingService.isKilled) return { status: "killed" }; - const error = _error as Error & { meta?: string[] }; - - const decodedCheckpoint = decodeCheckpoint(event.checkpoint); - - addStackTrace(error, common.options); - - error.meta = Array.isArray(error.meta) ? error.meta : []; - if (error.meta.length === 0) { - error.meta.push(`Event arguments:\n${prettyPrint(event.event.args)}`); +const toErrorMeta = (event: Event) => { + switch (event.type) { + case "log": + case "trace": { + return `Event arguments:\n${prettyPrint(event.event.args)}`; } - common.logger.error({ - service: "indexing", - msg: `Error while processing '${event.name}' event in '${networkByChainId[event.chainId]!.name}' block ${decodedCheckpoint.blockNumber}`, - error, - }); - - common.metrics.ponder_indexing_has_error.set(1); - - return { status: "error", error }; - } - - return { status: "success" }; -}; - -const executeBlock = async ( - indexingService: Service, - { event }: { event: BlockEvent }, -): Promise< - | { status: "error"; error: Error } - | { status: "success" } - | { status: "killed" } -> => { - const { - common, - indexingFunctions, - currentEvent, - networkByChainId, - contractsByChainId, - clientByChainId, - } = indexingService; - const indexingFunction = indexingFunctions[event.name]; - const metricLabel = { event: event.name }; - - try { - // set currentEvent - currentEvent.context.network.chainId = event.chainId; - currentEvent.context.network.name = networkByChainId[event.chainId]!.name; - currentEvent.context.client = clientByChainId[event.chainId]!; - currentEvent.context.contracts = contractsByChainId[event.chainId]!; - currentEvent.contextState.blockNumber = event.event.block.number; - - const endClock = startClock(); - - await indexingFunction!({ - event: event.event, - context: currentEvent.context, - }); - - common.metrics.ponder_indexing_function_duration.observe( - metricLabel, - endClock(), - ); - } catch (_error) { - if (indexingService.isKilled) return { status: "killed" }; - const error = _error as Error & { meta?: string[] }; - - const decodedCheckpoint = decodeCheckpoint(event.checkpoint); - - addStackTrace(error, common.options); + case "transfer": { + return `Event arguments:\n${prettyPrint(event.event.transfer)}`; + } - error.meta = Array.isArray(error.meta) ? 
error.meta : []; - error.meta.push( - `Block:\n${prettyPrint({ + case "block": { + return `Block:\n${prettyPrint({ hash: event.event.block.hash, number: event.event.block.number, timestamp: event.event.block.timestamp, - })}`, - ); - - common.logger.error({ - service: "indexing", - msg: `Error while processing ${event.name} event at chainId=${decodedCheckpoint.chainId}, block=${decodedCheckpoint.blockNumber}`, - error, - }); - - common.metrics.ponder_indexing_has_error.set(1); + })}`; + } - return { status: "error", error: error }; + case "transaction": { + return `Transaction:\n${prettyPrint({ + hash: event.event.transaction.hash, + block: event.event.block.number, + })}`; + } } - - return { status: "success" }; }; -const executeCallTrace = async ( +const executeEvent = async ( indexingService: Service, - { event }: { event: CallTraceEvent }, + { event }: { event: Event }, ): Promise< | { status: "error"; error: Error } | { status: "success" } @@ -639,7 +472,9 @@ const executeCallTrace = async ( addStackTrace(error, common.options); error.meta = Array.isArray(error.meta) ? error.meta : []; - error.meta.push(`Call trace arguments:\n${prettyPrint(event.event.args)}`); + if (error.meta.length === 0) { + error.meta.push(toErrorMeta(event)); + } common.logger.error({ service: "indexing", @@ -649,7 +484,7 @@ const executeCallTrace = async ( common.metrics.ponder_indexing_has_error.set(1); - return { status: "error", error: error }; + return { status: "error", error }; } return { status: "success" }; diff --git a/packages/core/src/server/index.test.ts b/packages/core/src/server/index.test.ts index 2a1350bea..e38d07700 100644 --- a/packages/core/src/server/index.test.ts +++ b/packages/core/src/server/index.test.ts @@ -18,17 +18,15 @@ test("port", async (context) => { const server1 = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); const server2 = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); @@ -44,9 +42,8 @@ test("listens on ipv4", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); @@ -62,9 +59,8 @@ test("listens on ipv6", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); @@ -80,9 +76,8 @@ test("not ready", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); @@ -99,15 +94,13 @@ test("ready", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); await 
getMetadataStore({ db: database.qb.user, - instanceId: "1234", }).setStatus({}); const response = await server.hono.request("/ready"); @@ -123,9 +116,8 @@ test("health", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); @@ -142,9 +134,8 @@ test("healthy PUT", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); @@ -163,9 +154,8 @@ test("metrics", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); @@ -182,9 +172,8 @@ test("metrics error", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); @@ -204,9 +193,8 @@ test("metrics PUT", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); @@ -225,9 +213,8 @@ test("metrics unmatched route", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); @@ -250,9 +237,8 @@ test("missing route", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); @@ -269,11 +255,16 @@ test("custom api route", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [ - { method: "GET", pathOrHandlers: ["/hi", (c: Context) => c.text("hi")] }, - ], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { + app: new Hono(), + routes: [ + { + method: "GET", + pathOrHandlers: ["/hi", (c: Context) => c.text("hi")], + }, + ], + }, database, }); @@ -293,9 +284,8 @@ test("custom hono route", async (context) => { const server = await createServer({ common: context.common, - app, - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app, routes: [] }, database, }); @@ -315,9 +305,8 @@ test.skip("kill", async (context) => { const server = await createServer({ common: context.common, - app: new Hono(), - routes: [], - graphqlSchema: buildGraphQLSchema({}), + schemaBuild: { graphqlSchema: buildGraphQLSchema({}) }, + apiBuild: { app: new Hono(), routes: [] }, database, }); diff --git a/packages/core/src/server/index.ts b/packages/core/src/server/index.ts index 8b0d7e6eb..849534807 100644 --- 
a/packages/core/src/server/index.ts +++ b/packages/core/src/server/index.ts @@ -1,15 +1,13 @@ import http from "node:http"; +import type { ApiBuild } from "@/build/index.js"; +import type { SchemaBuild } from "@/build/index.js"; import type { Common } from "@/common/common.js"; import type { Database } from "@/database/index.js"; import { graphql } from "@/graphql/middleware.js"; -import { type PonderRoutes, applyHonoRoutes } from "@/hono/index.js"; -import { - getLiveMetadataStore, - getMetadataStore, -} from "@/indexing-store/metadata.js"; +import { applyHonoRoutes } from "@/hono/index.js"; +import { getMetadataStore } from "@/indexing-store/metadata.js"; import { startClock } from "@/utils/timer.js"; import { serve } from "@hono/node-server"; -import type { GraphQLSchema } from "graphql"; import { Hono } from "hono"; import { cors } from "hono/cors"; import { createMiddleware } from "hono/factory"; @@ -23,29 +21,21 @@ type Server = { }; export async function createServer({ - app: userApp, - routes: userRoutes, common, - graphqlSchema, database, - instanceId, + schemaBuild, + apiBuild, }: { - app: Hono; - routes: PonderRoutes; common: Common; - graphqlSchema: GraphQLSchema; database: Database; - instanceId?: string; + schemaBuild: Pick; + apiBuild: ApiBuild; }): Promise { // Create hono app - const metadataStore = - instanceId === undefined - ? getLiveMetadataStore({ db: database.qb.readonly }) - : getMetadataStore({ - db: database.qb.readonly, - instanceId, - }); + const metadataStore = getMetadataStore({ + db: database.qb.readonly, + }); const metricsMiddleware = createMiddleware(async (c, next) => { const matchedPathLabels = c.req.matchedRoutes @@ -95,7 +85,7 @@ export async function createServer({ const contextMiddleware = createMiddleware(async (c, next) => { c.set("db", database.drizzle); c.set("metadataStore", metadataStore); - c.set("graphqlSchema", graphqlSchema); + c.set("graphqlSchema", schemaBuild.graphqlSchema); await next(); }); @@ -132,25 +122,25 @@ export async function createServer({ }) .use(contextMiddleware); - if (userRoutes.length === 0 && userApp.routes.length === 0) { + if (apiBuild.routes.length === 0 && apiBuild.app.routes.length === 0) { // apply graphql middleware if no custom api exists hono.use("/graphql", graphql()); hono.use("/", graphql()); } else { // apply user routes to hono instance, registering a custom error handler - applyHonoRoutes(hono, userRoutes, { db: database.drizzle }).onError( - (error, c) => onError(error, c, common), - ); + applyHonoRoutes(hono, apiBuild.routes, { + db: database.drizzle, + }).onError((error, c) => onError(error, c, common)); common.logger.debug({ service: "server", - msg: `Detected a custom server with routes: [${userRoutes + msg: `Detected a custom server with routes: [${apiBuild.routes .map(({ pathOrHandlers: [maybePathOrHandler] }) => maybePathOrHandler) .filter((maybePathOrHandler) => typeof maybePathOrHandler === "string") .join(", ")}]`, }); - hono.route("/", userApp); + hono.route("/", apiBuild.app); } // Create nodejs server diff --git a/packages/core/src/sync-historical/index.test.ts b/packages/core/src/sync-historical/index.test.ts index 9ddfcfd34..bb4d86453 100644 --- a/packages/core/src/sync-historical/index.test.ts +++ b/packages/core/src/sync-historical/index.test.ts @@ -1,13 +1,32 @@ +import { ALICE, BOB } from "@/_test/constants.js"; +import { erc20ABI } from "@/_test/generated.js"; import { setupAnvil, setupCommon, setupDatabaseServices, setupIsolatedDatabase, } from "@/_test/setup.js"; -import { 
simulateFactoryDeploy, simulatePairSwap } from "@/_test/simulate.js"; -import { getRawRPCData } from "@/_test/utils.js"; -import type { RequestQueue } from "@/utils/requestQueue.js"; -import { hexToNumber } from "viem"; +import { + createPair, + deployErc20, + deployFactory, + mintErc20, + swapPair, + transferErc20, + transferEth, +} from "@/_test/simulate.js"; +import { + getAccountsConfigAndIndexingFunctions, + getBlocksConfigAndIndexingFunctions, + getErc20ConfigAndIndexingFunctions, + getNetwork, + getPairWithFactoryConfigAndIndexingFunctions, + testClient, +} from "@/_test/utils.js"; +import { buildConfigAndIndexingFunctions } from "@/build/configAndIndexingFunctions.js"; +import { createRequestQueue } from "@/utils/requestQueue.js"; +import { encodeFunctionData, encodeFunctionResult, toHex } from "viem"; +import { parseEther } from "viem/utils"; import { beforeEach, expect, test, vi } from "vitest"; import { createHistoricalSync } from "./index.js"; @@ -15,52 +34,29 @@ beforeEach(setupCommon); beforeEach(setupAnvil); beforeEach(setupIsolatedDatabase); -// Helper function used to spoof "trace_filter" requests -// because they aren't supported by foundry. -const getRequestQueue = async (requestQueue: RequestQueue) => { - const rpcData = await getRawRPCData(); - - return { - ...requestQueue, - request: (request: any) => { - if (request.method === "trace_filter") { - let traces = [ - ...rpcData.block2.callTraces, - ...rpcData.block3.callTraces, - ...rpcData.block4.callTraces, - ]; - - if (request.params[0].fromBlock !== undefined) { - traces = traces.filter( - (t) => - hexToNumber(t.blockNumber) >= - hexToNumber(request.params[0].fromBlock), - ); - } - if (request.params[0].toBlock) { - traces = traces.filter( - (t) => - hexToNumber(t.blockNumber) <= - hexToNumber(request.params[0].toBlock), - ); - } - - return Promise.resolve(traces); - } - return requestQueue.request(request); - }, - } as RequestQueue; -}; - test("createHistoricalSync()", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + const historicalSync = await createHistoricalSync({ common: context.common, - network: context.networks[0], - sources: [context.sources[0]], + network, + sources, syncStore, - requestQueue: await getRequestQueue(context.requestQueues[0]), + requestQueue, onFatalError: () => {}, }); @@ -72,23 +68,45 @@ test("createHistoricalSync()", async (context) => { test("sync() with log filter", async (context) => { const { cleanup, syncStore, database } = await setupDatabaseServices(context); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + const historicalSync = await createHistoricalSync({ common: context.common, - network: context.networks[0], - sources: [context.sources[0]], + network, + sources, syncStore, - requestQueue: 
await getRequestQueue(context.requestQueues[0]), + requestQueue, onFatalError: () => {}, }); - await historicalSync.sync([0, 5]); + await historicalSync.sync([1, 2]); const logs = await database.qb.sync.selectFrom("logs").selectAll().execute(); - expect(logs).toHaveLength(2); + expect(logs).toHaveLength(1); const intervals = await database.qb.sync - .selectFrom("logFilterIntervals") + .selectFrom("intervals") .selectAll() .execute(); @@ -100,32 +118,49 @@ test("sync() with log filter", async (context) => { test("sync() with log filter and transaction receipts", async (context) => { const { cleanup, syncStore, database } = await setupDatabaseServices(context); - context.sources[0].filter.includeTransactionReceipts = true; + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + includeTransactionReceipts: true, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); const historicalSync = await createHistoricalSync({ common: context.common, - network: context.networks[0], - sources: [context.sources[0]], + network, + sources, syncStore, - requestQueue: await getRequestQueue(context.requestQueues[0]), + requestQueue, onFatalError: () => {}, }); - await historicalSync.sync([0, 5]); - - const logs = await database.qb.sync.selectFrom("logs").selectAll().execute(); - - expect(logs).toHaveLength(2); + await historicalSync.sync([1, 2]); const transactionReceipts = await database.qb.sync .selectFrom("transactionReceipts") .selectAll() .execute(); - expect(transactionReceipts).toHaveLength(2); + expect(transactionReceipts).toHaveLength(1); const intervals = await database.qb.sync - .selectFrom("logFilterIntervals") + .selectFrom("intervals") .selectAll() .execute(); @@ -137,16 +172,32 @@ test("sync() with log filter and transaction receipts", async (context) => { test("sync() with block filter", async (context) => { const { cleanup, syncStore, database } = await setupDatabaseServices(context); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + await testClient.mine({ blocks: 3 }); + const historicalSync = await createHistoricalSync({ common: context.common, - network: context.networks[0], - sources: [context.sources[4]], + network, + sources, syncStore, - requestQueue: await getRequestQueue(context.requestQueues[0]), + requestQueue, onFatalError: () => {}, }); - await historicalSync.sync([0, 5]); + await historicalSync.sync([1, 3]); const blocks = await database.qb.sync .selectFrom("blocks") @@ -156,7 +207,7 @@ test("sync() with block filter", async (context) => { expect(blocks).toHaveLength(3); const intervals = await database.qb.sync - .selectFrom("blockFilterIntervals") + .selectFrom("intervals") .selectAll() .execute(); @@ -168,23 +219,48 @@ test("sync() with block filter", async (context) => { test("sync() with log factory", async (context) => { const { cleanup, syncStore, database } = await setupDatabaseServices(context); + const network = getNetwork(); + 
const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployFactory({ sender: ALICE }); + const { result } = await createPair({ factory: address, sender: ALICE }); + await swapPair({ + pair: result, + amount0Out: 1n, + amount1Out: 1n, + to: ALICE, + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getPairWithFactoryConfigAndIndexingFunctions({ + address, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + const historicalSync = await createHistoricalSync({ common: context.common, - network: context.networks[0], - sources: [context.sources[1]], + network, + sources, syncStore, - requestQueue: await getRequestQueue(context.requestQueues[0]), + requestQueue, onFatalError: () => {}, }); - await historicalSync.sync([0, 5]); + await historicalSync.sync([1, 3]); const logs = await database.qb.sync.selectFrom("logs").selectAll().execute(); expect(logs).toHaveLength(2); const intervals = await database.qb.sync - .selectFrom("factoryLogFilterIntervals") + .selectFrom("intervals") .selectAll() .execute(); @@ -196,26 +272,93 @@ test("sync() with log factory", async (context) => { test("sync() with trace filter", async (context) => { const { cleanup, syncStore, database } = await setupDatabaseServices(context); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + const { hash } = await transferErc20({ + erc20: address, + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + includeCallTraces: true, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const request = async (request: any) => { + if (request.method === "debug_traceBlockByNumber") { + if (request.params[0] === "0x1") return Promise.resolve([]); + if (request.params[0] === "0x2") return Promise.resolve([]); + if (request.params[0] === "0x3") { + return Promise.resolve([ + { + txHash: hash, + result: { + type: "CALL", + from: ALICE, + to: address, + gas: "0x0", + gasUsed: "0x0", + input: encodeFunctionData({ + abi: erc20ABI, + functionName: "transfer", + args: [BOB, parseEther("1")], + }), + output: encodeFunctionResult({ + abi: erc20ABI, + functionName: "transfer", + result: true, + }), + value: "0x0", + }, + }, + ]); + } + } + + return requestQueue.request(request); + }; + const historicalSync = await createHistoricalSync({ common: context.common, - network: context.networks[0], - sources: [context.sources[3]], + network, + sources: sources.filter(({ filter }) => filter.type === "trace"), syncStore, - requestQueue: await getRequestQueue(context.requestQueues[0]), + requestQueue: { + ...requestQueue, + // @ts-ignore + request, + }, onFatalError: () => {}, }); - await historicalSync.sync([0, 5]); + await historicalSync.sync([1, 3]); - const callTraces = await database.qb.sync - .selectFrom("callTraces") + const traces = await database.qb.sync + .selectFrom("traces") .selectAll() .execute(); - expect(callTraces).toHaveLength(4); + expect(traces).toHaveLength(1); const intervals = await database.qb.sync - .selectFrom("traceFilterIntervals") + .selectFrom("intervals") .selectAll() .execute(); @@ -224,28 +367,203 @@ 
test("sync() with trace filter", async (context) => { await cleanup(); }); +test("sync() with transaction filter", async (context) => { + const { cleanup, syncStore, database } = await setupDatabaseServices(context); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + await transferEth({ + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getAccountsConfigAndIndexingFunctions({ + address: ALICE, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const historicalSync = await createHistoricalSync({ + common: context.common, + network, + sources: sources.filter(({ filter }) => filter.type === "transaction"), + syncStore, + requestQueue, + onFatalError: () => {}, + }); + + await historicalSync.sync([1, 1]); + + const transactions = await database.qb.sync + .selectFrom("transactions") + .selectAll() + .execute(); + + expect(transactions).toHaveLength(1); + + const transactionReceipts = await database.qb.sync + .selectFrom("transactionReceipts") + .selectAll() + .execute(); + + expect(transactionReceipts).toHaveLength(1); + + const intervals = await database.qb.sync + .selectFrom("intervals") + .selectAll() + .execute(); + + // transaction:from and transaction:to + expect(intervals).toHaveLength(2); + + await cleanup(); +}); + +test("sync() with transfer filter", async (context) => { + const { cleanup, syncStore, database } = await setupDatabaseServices(context); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { hash } = await transferEth({ + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getAccountsConfigAndIndexingFunctions({ + address: ALICE, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const request = async (request: any) => { + if (request.method === "debug_traceBlockByNumber") { + if (request.params[0] === "0x1") { + return Promise.resolve([ + { + txHash: hash, + result: { + type: "CALL", + from: ALICE, + to: BOB, + gas: "0x0", + gasUsed: "0x0", + input: "0x0", + output: "0x0", + value: toHex(parseEther("1")), + }, + }, + ]); + } + } + + return requestQueue.request(request); + }; + + const historicalSync = await createHistoricalSync({ + common: context.common, + network, + sources: sources.filter(({ filter }) => filter.type === "transfer"), + syncStore, + requestQueue: { + ...requestQueue, + // @ts-ignore + request, + }, + onFatalError: () => {}, + }); + + await historicalSync.sync([1, 1]); + + const transactions = await database.qb.sync + .selectFrom("transactions") + .selectAll() + .execute(); + + expect(transactions).toHaveLength(1); + + const intervals = await database.qb.sync + .selectFrom("intervals") + .selectAll() + .execute(); + + // transfer:from and transfer:to + expect(intervals).toHaveLength(2); + + await cleanup(); +}); + test("sync() with many filters", async (context) => { const { cleanup, syncStore, database } = await setupDatabaseServices(context); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const { sources: erc20Sources } = await 
buildConfigAndIndexingFunctions({ + ...getErc20ConfigAndIndexingFunctions({ + address, + }), + }); + const { sources: blockSources } = await buildConfigAndIndexingFunctions({ + ...getBlocksConfigAndIndexingFunctions({ + interval: 1, + }), + }); + const historicalSync = await createHistoricalSync({ common: context.common, - network: context.networks[0], - sources: context.sources, + network, + sources: [...erc20Sources, ...blockSources], syncStore, - requestQueue: await getRequestQueue(context.requestQueues[0]), + requestQueue, onFatalError: () => {}, }); - await historicalSync.sync([0, 5]); + await historicalSync.sync([1, 2]); const logs = await database.qb.sync.selectFrom("logs").selectAll().execute(); - expect(logs).toHaveLength(4); + expect(logs).toHaveLength(1); const blocks = await database.qb.sync .selectFrom("blocks") .selectAll() .execute(); - expect(blocks).toHaveLength(5); + expect(blocks).toHaveLength(2); + + const intervals = await database.qb.sync + .selectFrom("intervals") + .selectAll() + .execute(); + + expect(intervals).toHaveLength(2); await cleanup(); }); @@ -253,30 +571,53 @@ test("sync() with many filters", async (context) => { test("sync() with cache hit", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + let historicalSync = await createHistoricalSync({ common: context.common, - network: context.networks[0], - sources: [context.sources[0]], + network, + sources, syncStore, - requestQueue: await getRequestQueue(context.requestQueues[0]), + requestQueue, onFatalError: () => {}, }); - await historicalSync.sync([0, 5]); + + await historicalSync.sync([1, 2]); // re-instantiate `historicalSync` to reset the cached intervals - const spy = vi.spyOn(context.requestQueues[0], "request"); + const spy = vi.spyOn(requestQueue, "request"); historicalSync = await createHistoricalSync({ common: context.common, - network: context.networks[0], - sources: [context.sources[0]], + network, + sources, syncStore, - requestQueue: await getRequestQueue(context.requestQueues[0]), + requestQueue, onFatalError: () => {}, }); - await historicalSync.sync([0, 5]); + await historicalSync.sync([1, 2]); expect(spy).toHaveBeenCalledTimes(0); await cleanup(); @@ -285,29 +626,47 @@ test("sync() with cache hit", async (context) => { test("syncBlock() with cache", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - // block 2 and 4 will be requested - const blockFilter = context.sources[4].filter; - blockFilter.offset = 0; + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const { sources: erc20Sources } = await buildConfigAndIndexingFunctions({ + ...getErc20ConfigAndIndexingFunctions({ + address, + }), + }); + const { sources: blockSources } = await buildConfigAndIndexingFunctions({ + 
...getBlocksConfigAndIndexingFunctions({ + interval: 1, + }), + }); const historicalSync = await createHistoricalSync({ common: context.common, - network: context.networks[0], - sources: [ - context.sources[0], - { ...context.sources[4], filter: blockFilter }, - ], + network, + sources: [...erc20Sources, ...blockSources], syncStore, - requestQueue: await getRequestQueue(context.requestQueues[0]), + requestQueue, onFatalError: () => {}, }); - const spy = vi.spyOn(context.requestQueues[0], "request"); + const spy = vi.spyOn(requestQueue, "request"); - await historicalSync.sync([0, 5]); + await historicalSync.sync([1, 2]); - // 1 call to `syncBlock()` will be cached because - // each source in `sources` matches block 2 - expect(spy).toHaveBeenCalledTimes(4); + // 1 "eth_getLogs" request and only 2 "eth_getBlockByNumber" requests + // because the erc20 and block sources share the block 2 + expect(spy).toHaveBeenCalledTimes(3); await cleanup(); }); @@ -315,28 +674,52 @@ test("syncBlock() with cache", async (context) => { test("syncAddress() handles many addresses", async (context) => { const { cleanup, syncStore, database } = await setupDatabaseServices(context); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + context.common.options.factoryAddressCountThreshold = 10; + const { address } = await deployFactory({ sender: ALICE }); + for (let i = 0; i < 10; i++) { - await simulateFactoryDeploy(context.factory.address); + await createPair({ factory: address, sender: ALICE }); } - const pair = await simulateFactoryDeploy(context.factory.address); - await simulatePairSwap(pair); + const { result } = await createPair({ factory: address, sender: ALICE }); + await swapPair({ + pair: result, + amount0Out: 1n, + amount1Out: 1n, + to: ALICE, + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getPairWithFactoryConfigAndIndexingFunctions({ + address, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); const historicalSync = await createHistoricalSync({ common: context.common, - network: context.networks[0], - sources: [context.sources[1]], + network, + sources, syncStore, - requestQueue: await getRequestQueue(context.requestQueues[0]), + requestQueue, onFatalError: () => {}, }); - await historicalSync.sync([0, 10 + 5 + 2]); + await historicalSync.sync([1, 13]); const logs = await database.qb.sync.selectFrom("logs").selectAll().execute(); - expect(logs).toHaveLength(14); + // 11 pair creations and 1 swap + expect(logs).toHaveLength(12); await cleanup(); }); diff --git a/packages/core/src/sync-historical/index.ts b/packages/core/src/sync-historical/index.ts index a292fc005..9403ab624 100644 --- a/packages/core/src/sync-historical/index.ts +++ b/packages/core/src/sync-historical/index.ts @@ -1,29 +1,37 @@ import type { Common } from "@/common/common.js"; import type { Network } from "@/config/networks.js"; +import { + isTraceFilterMatched, + isTransactionFilterMatched, + isTransferFilterMatched, +} from "@/sync-realtime/filter.js"; import type { SyncStore } from "@/sync-store/index.js"; import { type BlockFilter, - type CallTraceFilter, type Factory, type Filter, type LogFactory, type LogFilter, + type TraceFilter, + type TransferFilter, isAddressFactory, + shouldGetTransactionReceipt, } from "@/sync/source.js"; -import type { Source } from "@/sync/source.js"; -import type { SyncBlock, SyncCallTrace, SyncLog } from "@/types/sync.js"; +import type { Source, 
TransactionFilter } from "@/sync/source.js"; +import type { SyncBlock, SyncLog, SyncTrace } from "@/types/sync.js"; import { type Interval, getChunks, intervalDifference, + intervalRange, } from "@/utils/interval.js"; import { never } from "@/utils/never.js"; import type { RequestQueue } from "@/utils/requestQueue.js"; import { + _debug_traceBlockByNumber, _eth_getBlockByNumber, _eth_getLogs, _eth_getTransactionReceipt, - _trace_filter, } from "@/utils/rpc.js"; import { getLogsRetryHelper } from "@ponder/utils"; import { @@ -63,7 +71,12 @@ export const createHistoricalSync = async ( * Blocks that have already been extracted. * Note: All entries are deleted at the end of each call to `sync()`. */ - const blockCache = new Map>(); + const blockCache = new Map>(); + /** + * Traces that have already been fetched. + * Note: All entries are deleted at the end of each call to `sync()`. + */ + const traceCache = new Map>(); /** * Transactions that should be saved to the sync-store. * Note: All entries are deleted at the end of each call to `sync()`. @@ -87,25 +100,30 @@ export const createHistoricalSync = async ( * * Note: `intervalsCache` is not updated after a new interval is synced. */ - const intervalsCache: Map = new Map(); - - // Populate `intervalsCache` by querying the sync-store. - for (const { filter } of args.sources) { - const intervals = await args.syncStore.getIntervals({ filter }); - intervalsCache.set(filter, intervals); + let intervalsCache: Map; + if (args.network.disableCache) { + intervalsCache = new Map(); + for (const { filter } of args.sources) { + intervalsCache.set(filter, []); + } + } else { + intervalsCache = await args.syncStore.getIntervals({ + filters: args.sources.map(({ filter }) => filter), + }); } // Closest-to-tip block that has been synced. let latestBlock: SyncBlock | undefined; //////// - // Helper functions for specific sync tasks + // Helper functions for sync tasks //////// /** - * Split "eth_getLogs" requests into ranges inferred from errors. + * Split "eth_getLogs" requests into ranges inferred from errors + * and batch requests. */ - const getLogsDynamic = async ({ + const syncLogsDynamic = async ({ filter, address, interval, @@ -126,20 +144,30 @@ export const createHistoricalSync = async ( : [interval]; const topics = - "eventSelector" in filter ? [filter.eventSelector] : filter.topics; + "eventSelector" in filter + ? [filter.eventSelector] + : [ + filter.topic0 ?? null, + filter.topic1 ?? null, + filter.topic2 ?? null, + filter.topic3 ?? 
null, + ]; - // Batch large arrays of addresses, handling arrays that are empty or over the threshold + // Batch large arrays of addresses, handling arrays that are empty let addressBatches: (Address | Address[] | undefined)[]; - if (address === undefined || typeof address === "string") { + + if (address === undefined) { + // no address (match all) + addressBatches = [undefined]; + } else if (typeof address === "string") { + // single address addressBatches = [address]; } else if (address.length === 0) { + // no address (factory with no children) return []; - } else if ( - address.length >= args.common.options.factoryAddressCountThreshold - ) { - addressBatches = [undefined]; } else { + // many addresses addressBatches = []; for (let i = 0; i < address.length; i += 50) { addressBatches.push(address.slice(i, i + 50)); @@ -187,7 +215,7 @@ export const createHistoricalSync = async ( : undefined, }); - return getLogsDynamic({ address, interval, filter }); + return syncLogsDynamic({ address, interval, filter }); }), ), ), @@ -210,20 +238,116 @@ export const createHistoricalSync = async ( return logs; }; + /** + * Extract block, using `blockCache` to avoid fetching + * the same block twice. Also, update `latestBlock`. + * + * @param number Block to be extracted + * + * Note: This function could more accurately skip network requests by taking + * advantage of `syncStore.hasBlock` and `syncStore.hasTransaction`. + */ + const syncBlock = async (number: number): Promise => { + let block: SyncBlock; + + /** + * `blockCache` contains all blocks that have been extracted during the + * current call to `sync()`. If `number` is present in `blockCache` use it, + * otherwise, request the block and add it to `blockCache` and the sync-store. + */ + + if (blockCache.has(number)) { + block = await blockCache.get(number)!; + } else { + const _block = _eth_getBlockByNumber(args.requestQueue, { + blockNumber: toHex(number), + }); + blockCache.set(number, _block); + block = await _block; + + // Update `latestBlock` if `block` is closer to tip. + if ( + hexToBigInt(block.number) >= hexToBigInt(latestBlock?.number ?? "0x0") + ) { + latestBlock = block; + } + } + + return block; + }; + + const syncTrace = async (block: number) => { + if (traceCache.has(block)) { + return await traceCache.get(block)!; + } else { + const traces = _debug_traceBlockByNumber(args.requestQueue, { + blockNumber: block, + }); + traceCache.set(block, traces); + return await traces; + } + }; + + /** Extract and insert the log-based addresses that match `filter` + `interval`. */ + const syncLogFactory = async (filter: LogFactory, interval: Interval) => { + const logs = await syncLogsDynamic({ + filter, + interval, + address: filter.address, + }); + + if (isKilled) return; + + // Insert `logs` into the sync-store + await args.syncStore.insertLogs({ + logs: logs.map((log) => ({ log })), + shouldUpdateCheckpoint: false, + chainId: args.network.chainId, + }); + }; + + /** + * Return all addresses that match `filter` after extracting addresses + * that match `filter` and `interval`. Returns `undefined` if the number of + * child addresses is above the limit. + */ + const syncAddressFactory = async ( + filter: Factory, + interval: Interval, + ): Promise => { + await syncLogFactory(filter, interval); + + // Query the sync-store for all addresses that match `filter`. 
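+ // Note: if the result length equals `factoryAddressCountThreshold`, the factory may have more children than the limit, so `undefined` is returned and callers fall back to an address-less (match-all) request.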
+ const addresses = await args.syncStore.getChildAddresses({ + filter, + limit: args.common.options.factoryAddressCountThreshold, + }); + + if (addresses.length === args.common.options.factoryAddressCountThreshold) { + return undefined; + } + + return addresses; + }; + + //////// + // Helper function for filter types + //////// + const syncLogFilter = async (filter: LogFilter, interval: Interval) => { // Resolve `filter.address` const address = isAddressFactory(filter.address) - ? await syncAddress(filter.address, interval) + ? await syncAddressFactory(filter.address, interval) : filter.address; if (isKilled) return; - const logs = await getLogsDynamic({ filter, interval, address }); + const logs = await syncLogsDynamic({ filter, interval, address }); if (isKilled) return; const blocks = await Promise.all( - logs.map((log) => syncBlock(hexToBigInt(log.blockNumber))), + logs.map((log) => syncBlock(hexToNumber(log.blockNumber))), ); // Validate that logs point to the valid transaction hash in the block @@ -262,7 +386,7 @@ export const createHistoricalSync = async ( if (isKilled) return; - if (filter.includeTransactionReceipts) { + if (shouldGetTransactionReceipt(filter)) { const transactionReceipts = await Promise.all( Array.from(transactionHashes).map((hash) => _eth_getTransactionReceipt(args.requestQueue, { hash }), @@ -288,180 +412,165 @@ export const createHistoricalSync = async ( requiredBlocks.push(b); } - await Promise.all(requiredBlocks.map((b) => syncBlock(BigInt(b)))); + await Promise.all(requiredBlocks.map((number) => syncBlock(number))); }; - const syncTraceFilter = async ( - filter: CallTraceFilter, + const syncTransactionFilter = async ( + filter: TransactionFilter, interval: Interval, ) => { - // Resolve `filter.toAddress` - let toAddress: Address[] | undefined; - if (isAddressFactory(filter.toAddress)) { - const childAddresses = await syncAddress(filter.toAddress, interval); - if ( - childAddresses.length < args.common.options.factoryAddressCountThreshold - ) { - toAddress = childAddresses; - } else { - toAddress = undefined; - } - } else { - toAddress = filter.toAddress; - } - - if (isKilled) return; - - let callTraces = await _trace_filter(args.requestQueue, { - fromAddress: filter.fromAddress, - toAddress, - fromBlock: interval[0], - toBlock: interval[1], - }).then( - (traces) => - traces.flat().filter((t) => t.type === "call") as SyncCallTrace[], - ); + const fromChildAddresses = isAddressFactory(filter.fromAddress) + ? await syncAddressFactory(filter.fromAddress, interval).then( + (addresses) => + addresses === undefined ? undefined : new Set(addresses), + ) + : undefined; + + const toChildAddresses = isAddressFactory(filter.toAddress) + ? await syncAddressFactory(filter.toAddress, interval).then( + (addresses) => + addresses === undefined ? undefined : new Set(addresses), + ) + : undefined; if (isKilled) return; const blocks = await Promise.all( - callTraces.map((trace) => syncBlock(hexToBigInt(trace.blockNumber))), + intervalRange(interval).map((number) => syncBlock(number)), ); - const transactionHashes = new Set(callTraces.map((t) => t.transactionHash)); + if (isKilled) return; - // Validate that traces point to the valid transaction hash in the block - for (let i = 0; i < callTraces.length; i++) { - const callTrace = callTraces[i]!; - const block = blocks[i]!; + const transactionHashes: Set = new Set(); - if (block.hash !== callTrace.blockHash) { - throw new Error( - `Detected inconsistent RPC responses. 
'trace.blockHash' ${callTrace.blockHash} does not match 'block.hash' ${block.hash}`, - ); - } + for (const block of blocks) { + block.transactions.map((transaction) => { + if ( + isTransactionFilterMatched({ + filter, + block, + transaction, + fromChildAddresses, + toChildAddresses, + }) + ) { + transactionHashes.add(transaction.hash); + } + }); + } - if ( - block.transactions.find((t) => t.hash === callTrace.transactionHash) === - undefined - ) { - throw new Error( - `Detected inconsistent RPC responses. 'trace.transactionHash' ${callTrace.transactionHash} not found in 'block.transactions' ${block.hash}`, - ); - } + for (const hash of transactionHashes) { + transactionsCache.add(hash); } - // Request transactionReceipts to check for reverted transactions. + if (isKilled) return; + const transactionReceipts = await Promise.all( Array.from(transactionHashes).map((hash) => - _eth_getTransactionReceipt(args.requestQueue, { - hash, - }), + _eth_getTransactionReceipt(args.requestQueue, { hash }), ), ); - const revertedTransactions = new Set(); - for (const receipt of transactionReceipts) { - if (receipt.status === "0x0") { - revertedTransactions.add(receipt.transactionHash); - } - } - - callTraces = callTraces.filter( - (trace) => revertedTransactions.has(trace.transactionHash) === false, - ); - - if (isKilled) return; - - for (const hash of transactionHashes) { - if (revertedTransactions.has(hash) === false) { - transactionsCache.add(hash); - } - } - if (isKilled) return; - await args.syncStore.insertCallTraces({ - callTraces: callTraces.map((callTrace, i) => ({ - callTrace, - block: blocks[i]!, - })), + await args.syncStore.insertTransactionReceipts({ + transactionReceipts, chainId: args.network.chainId, }); }; - /** Extract and insert the log-based addresses that match `filter` + `interval`. */ - const syncLogFactory = async (filter: LogFactory, interval: Interval) => { - const logs = await getLogsDynamic({ - filter, - interval, - address: filter.address, - }); + const syncTraceOrTransferFilter = async ( + filter: TraceFilter | TransferFilter, + interval: Interval, + ) => { + const fromChildAddresses = isAddressFactory(filter.fromAddress) + ? await syncAddressFactory(filter.fromAddress, interval) + : undefined; + + const toChildAddresses = isAddressFactory(filter.toAddress) + ? await syncAddressFactory(filter.toAddress, interval) + : undefined; + + const traces = await Promise.all( + intervalRange(interval).map(async (number) => { + let traces = await syncTrace(number); + + // remove unmatched traces + traces = traces.filter((trace) => + filter.type === "trace" + ? isTraceFilterMatched({ + filter, + block: { number: toHex(number) }, + trace: trace.trace, + fromChildAddresses: fromChildAddresses + ? new Set(fromChildAddresses) + : undefined, + toChildAddresses: toChildAddresses + ? new Set(toChildAddresses) + : undefined, + }) + : isTransferFilterMatched({ + filter, + block: { number: toHex(number) }, + trace: trace.trace, + fromChildAddresses: fromChildAddresses + ? new Set(fromChildAddresses) + : undefined, + toChildAddresses: toChildAddresses + ? new Set(toChildAddresses) + : undefined, + }), + ); - if (isKilled) return; + if (traces.length === 0) return []; - // Insert `logs` into the sync-store - await args.syncStore.insertLogs({ - logs: logs.map((log) => ({ log })), - shouldUpdateCheckpoint: false, - chainId: args.network.chainId, - }); - }; + const block = await syncBlock(number); - /** - * Extract block, using `blockCache` to avoid fetching - * the same block twice. 
Also, update `latestBlock`. - * - * @param number Block to be extracted - * @param transactionHashes Hashes to be inserted into the sync-store - * - * Note: This function could more accurately skip network requests by taking - * advantage of `syncStore.hasBlock` and `syncStore.hasTransaction`. - */ - const syncBlock = async (number: bigint): Promise => { - let block: SyncBlock; + return traces.map((trace) => { + const transaction = block.transactions.find( + (t) => t.hash === trace.transactionHash, + ); - /** - * `blockCache` contains all blocks that have been extracted during the - * current call to `sync()`. If `number` is present in `blockCache` use it, - * otherwise, request the block and add it to `blockCache` and the sync-store. - */ + if (transaction === undefined) { + throw new Error( + `Detected inconsistent RPC responses. 'trace.transactionHash' ${trace.transactionHash} not found in 'block.transactions' ${block.hash}`, + ); + } - if (blockCache.has(number)) { - block = await blockCache.get(number)!; - } else { - const _block = _eth_getBlockByNumber(args.requestQueue, { - blockNumber: toHex(number), - }); - blockCache.set(number, _block); - block = await _block; + transactionsCache.add(transaction.hash); - // Update `latestBlock` if `block` is closer to tip. - if ( - hexToBigInt(block.number) >= hexToBigInt(latestBlock?.number ?? "0x0") - ) { - latestBlock = block; - } - } + return { trace, transaction, block }; + }); + }), + ).then((traces) => traces.flat()); - return block; - }; + if (isKilled) return; - /** - * Return all addresses that match `filter` after extracting addresses - * that match `filter` and `interval`. - */ - const syncAddress = async ( - filter: Factory, - interval: Interval, - ): Promise => { - await syncLogFactory(filter, interval); + const transactionHashes = new Set( + traces.map(({ transaction }) => transaction.hash), + ); - // Query the sync-store for all addresses that match `filter`. - return await args.syncStore.getChildAddresses({ - filter, - limit: args.common.options.factoryAddressCountThreshold, + await args.syncStore.insertTraces({ + traces, + chainId: args.network.chainId, }); + + if (isKilled) return; + + if (shouldGetTransactionReceipt(filter)) { + const transactionReceipts = await Promise.all( + Array.from(transactionHashes).map((hash) => + _eth_getTransactionReceipt(args.requestQueue, { hash }), + ), + ); + + if (isKilled) return; + + await args.syncStore.insertTransactionReceipts({ + transactionReceipts, + chainId: args.network.chainId, + }); + } }; return { @@ -471,25 +580,25 @@ export const createHistoricalSync = async ( await Promise.all( args.sources.map(async (source) => { + const filter = source.filter; + // Compute the required interval to sync, accounting for cached // intervals and start + end block. // Skip sync if the interval is after the `toBlock` or before // the `fromBlock`. if ( - source.filter.fromBlock > _interval[1] || - (source.filter.toBlock && source.filter.toBlock < _interval[0]) + (filter.fromBlock !== undefined && + filter.fromBlock > _interval[1]) || + (filter.toBlock !== undefined && filter.toBlock < _interval[0]) ) { return; } const interval: Interval = [ - Math.max(source.filter.fromBlock, _interval[0]), - Math.min( - source.filter.toBlock ?? Number.POSITIVE_INFINITY, - _interval[1], - ), + Math.max(filter.fromBlock ?? 0, _interval[0]), + Math.min(filter.toBlock ?? 
Number.POSITIVE_INFINITY, _interval[1]), ]; - const completedIntervals = intervalsCache.get(source.filter)!; + const completedIntervals = intervalsCache.get(filter)!; const requiredIntervals = intervalDifference( [interval], completedIntervals, @@ -499,35 +608,36 @@ export const createHistoricalSync = async ( if (requiredIntervals.length === 0) return; // Request last block of interval - const blockPromise = syncBlock(BigInt(interval[1])); + const blockPromise = syncBlock(interval[1]); try { // sync required intervals, account for chunk sizes await Promise.all( requiredIntervals.map(async (interval) => { - if (source.type === "contract") { - const filter = source.filter; - switch (filter.type) { - case "log": { - await syncLogFilter(filter, interval); - break; - } - - case "callTrace": - await Promise.all( - getChunks({ interval, maxChunkSize: 10 }).map( - async (interval) => { - await syncTraceFilter(filter, interval); - }, - ), - ); - break; - - default: - never(filter); + switch (filter.type) { + case "log": { + await syncLogFilter(filter, interval); + break; + } + + case "block": { + await syncBlockFilter(filter, interval); + break; + } + + case "transaction": { + await syncTransactionFilter(filter, interval); + break; + } + + case "trace": + case "transfer": { + await syncTraceOrTransferFilter(filter, interval); + break; } - } else { - await syncBlockFilter(source.filter, interval); + + default: + never(filter); } }), ); @@ -549,7 +659,7 @@ export const createHistoricalSync = async ( await blockPromise; - syncedIntervals.push({ filter: source.filter, interval }); + syncedIntervals.push({ filter, interval }); }), ); @@ -558,8 +668,13 @@ export const createHistoricalSync = async ( await Promise.all([ args.syncStore.insertBlocks({ blocks, chainId: args.network.chainId }), args.syncStore.insertTransactions({ - transactions: blocks.flatMap(({ transactions }) => - transactions.filter(({ hash }) => transactionsCache.has(hash)), + transactions: blocks.flatMap((block) => + block.transactions + .filter(({ hash }) => transactionsCache.has(hash)) + .map((transaction) => ({ + transaction, + block, + })), ), chainId: args.network.chainId, }), @@ -567,16 +682,14 @@ export const createHistoricalSync = async ( // Add corresponding intervals to the sync-store // Note: this should happen after so the database doesn't become corrupted - await Promise.all( - syncedIntervals.map(({ filter, interval }) => - args.syncStore.insertInterval({ - filter, - interval, - }), - ), - ); + if (args.network.disableCache === false) { + await args.syncStore.insertIntervals({ + intervals: syncedIntervals, + }); + } blockCache.clear(); + traceCache.clear(); transactionsCache.clear(); return latestBlock; diff --git a/packages/core/src/sync-realtime/bloom.ts b/packages/core/src/sync-realtime/bloom.ts index 7cfcf770e..d154e99a3 100644 --- a/packages/core/src/sync-realtime/bloom.ts +++ b/packages/core/src/sync-realtime/bloom.ts @@ -42,28 +42,28 @@ export function isFilterInBloom({ }): boolean { // Return `false` for out of range blocks if ( - hexToNumber(block.number) < filter.fromBlock || + hexToNumber(block.number) < (filter.fromBlock ?? 0) || hexToNumber(block.number) > (filter.toBlock ?? 
Number.POSITIVE_INFINITY) ) { return false; } - let isTopicsInBloom: boolean; - let isAddressInBloom: boolean; + const isTopicsInBloom = [ + filter.topic0, + filter.topic1, + filter.topic2, + filter.topic3, + ].every((topic) => { + if (topic === null || topic === undefined) { + return true; + } else if (Array.isArray(topic)) { + return topic.some((t) => isInBloom(block.logsBloom, t)); + } else { + return isInBloom(block.logsBloom, topic); + } + }); - if (filter.topics === undefined || filter.topics.length === 0) { - isTopicsInBloom = true; - } else { - isTopicsInBloom = filter.topics.some((topic) => { - if (topic === null || topic === undefined) { - return true; - } else if (Array.isArray(topic)) { - return topic.some((t) => isInBloom(block.logsBloom, t)); - } else { - return isInBloom(block.logsBloom, topic); - } - }); - } + let isAddressInBloom: boolean; if (filter.address === undefined) isAddressInBloom = true; else if (isAddressFactory(filter.address)) { diff --git a/packages/core/src/sync-realtime/filter.test.ts b/packages/core/src/sync-realtime/filter.test.ts index 284f4a881..f7e326263 100644 --- a/packages/core/src/sync-realtime/filter.test.ts +++ b/packages/core/src/sync-realtime/filter.test.ts @@ -1,116 +1,399 @@ +import { ALICE, BOB } from "@/_test/constants.js"; +import { erc20ABI } from "@/_test/generated.js"; import { setupAnvil, setupCommon } from "@/_test/setup.js"; -import { getRawRPCData } from "@/_test/utils.js"; -import type { Address } from "viem"; +import { + createPair, + deployErc20, + deployFactory, + mintErc20, + transferErc20, + transferEth, +} from "@/_test/simulate.js"; +import { + getAccountsConfigAndIndexingFunctions, + getBlocksConfigAndIndexingFunctions, + getErc20ConfigAndIndexingFunctions, + getNetwork, + getPairWithFactoryConfigAndIndexingFunctions, +} from "@/_test/utils.js"; +import { buildConfigAndIndexingFunctions } from "@/build/configAndIndexingFunctions.js"; +import type { + BlockFilter, + LogFactory, + LogFilter, + TraceFilter, + TransactionFilter, + TransferFilter, +} from "@/sync/source.js"; +import type { SyncTrace } from "@/types/sync.js"; +import { createRequestQueue } from "@/utils/requestQueue.js"; +import { _eth_getBlockByNumber, _eth_getLogs } from "@/utils/rpc.js"; +import { + type Address, + encodeFunctionData, + encodeFunctionResult, + parseEther, + zeroAddress, + zeroHash, +} from "viem"; import { beforeEach, expect, test } from "vitest"; import { isBlockFilterMatched, - isCallTraceFilterMatched, isLogFactoryMatched, isLogFilterMatched, + isTraceFilterMatched, + isTransactionFilterMatched, + isTransferFilterMatched, } from "./filter.js"; beforeEach(setupCommon); beforeEach(setupAnvil); test("isLogFactoryMatched()", async (context) => { - const rpcData = await getRawRPCData(); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployFactory({ sender: ALICE }); + await createPair({ + factory: address, + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getPairWithFactoryConfigAndIndexingFunctions({ + address, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const filter = sources[0]!.filter as LogFilter; + + const rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); let isMatched = isLogFactoryMatched({ - filter: context.sources[1].filter.address, - log: rpcData.block3.logs[0], + filter: filter.address, + log: rpcLogs[0]!, }); 
expect(isMatched).toBe(true); + filter.address.address = [filter.address.address as Address]; + isMatched = isLogFactoryMatched({ - filter: { - ...context.sources[1].filter.address, - address: [context.sources[1].filter.address.address as Address], - }, - log: rpcData.block3.logs[0], + filter: filter.address, + log: rpcLogs[0]!, }); expect(isMatched).toBe(true); + rpcLogs[0]!.topics[0] = zeroHash; + isMatched = isLogFactoryMatched({ - filter: context.sources[1].filter.address, - log: rpcData.block2.logs[0], + filter: filter.address, + log: rpcLogs[0]!, }); expect(isMatched).toBe(false); +}); - isMatched = isLogFactoryMatched({ - filter: context.sources[2].filter.toAddress, - log: rpcData.block3.logs[0], +test("isLogFilterMatched()", async (context) => { + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, }); - expect(isMatched).toBe(true); - isMatched = isLogFactoryMatched({ - filter: context.sources[2].filter.toAddress, - log: rpcData.block2.logs[0], + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, }); - expect(isMatched).toBe(false); -}); -test("isLogFilterMatched", async (context) => { - const rpcData = await getRawRPCData(); + const filter = sources[0]!.filter as LogFilter; + + const rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); let isMatched = isLogFilterMatched({ - filter: context.sources[0].filter, - block: rpcData.block2.block, - log: rpcData.block2.logs[1], + filter, + block: rpcBlock, + log: rpcLogs[0]!, }); expect(isMatched).toBe(true); + filter.topic0 = null; + isMatched = isLogFilterMatched({ - filter: context.sources[1].filter, - block: rpcData.block4.block, - log: rpcData.block4.logs[0], + filter, + block: rpcBlock, + log: rpcLogs[0]!, }); expect(isMatched).toBe(true); + rpcLogs[0]!.address = zeroAddress; + isMatched = isLogFilterMatched({ - filter: context.sources[0].filter, - block: rpcData.block4.block, - log: rpcData.block4.logs[0], + filter, + block: rpcBlock, + log: rpcLogs[0]!, }); expect(isMatched).toBe(false); }); -test("isCallTraceFilterMatched", async (context) => { - const rpcData = await getRawRPCData(); +test("isBlockFilterMatched", async (context) => { + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const filter = sources[0]!.filter as BlockFilter; - let isMatched = isCallTraceFilterMatched({ - filter: context.sources[3].filter, - block: rpcData.block3.block, - callTrace: rpcData.block3.callTraces[0], + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 0, + }); + + let isMatched = isBlockFilterMatched({ + filter, + block: rpcBlock, }); expect(isMatched).toBe(true); - isMatched = isCallTraceFilterMatched({ - filter: context.sources[2].filter, - block: rpcData.block3.block, - callTrace: rpcData.block3.callTraces[0], + filter.interval = 2; + filter.offset = 1; + + isMatched = 
isBlockFilterMatched({ + filter, + block: rpcBlock, + }); + expect(isMatched).toBe(false); +}); + +test("isTransactionFilterMatched()", async (context) => { + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + await transferEth({ + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getAccountsConfigAndIndexingFunctions({ + address: ALICE, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + // transaction:from + const filter = sources[1]!.filter as TransactionFilter; + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + + let isMatched = isTransactionFilterMatched({ + filter, + block: rpcBlock, + transaction: rpcBlock.transactions[0]!, }); expect(isMatched).toBe(true); - isMatched = isCallTraceFilterMatched({ - filter: context.sources[3].filter, - block: rpcData.block2.block, - callTrace: rpcData.block2.callTraces[0], + rpcBlock.transactions[0]!.from = zeroAddress; + + isMatched = isTransactionFilterMatched({ + filter, + block: rpcBlock, + transaction: rpcBlock.transactions[0]!, }); expect(isMatched).toBe(false); }); -test("isBlockFilterMatched", async (context) => { - const rpcData = await getRawRPCData(); +test("isTransferFilterMatched()", async (context) => { + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); - let isMatched = isBlockFilterMatched({ - filter: context.sources[4].filter, - block: rpcData.block2.block, + const { hash } = await transferEth({ + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getAccountsConfigAndIndexingFunctions({ + address: ALICE, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + // transfer:from + const filter = sources[3]!.filter as TransferFilter; + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + + const rpcTrace = { + trace: { + type: "CALL", + from: ALICE, + to: BOB, + gas: "0x0", + gasUsed: "0x0", + input: "0x0", + output: "0x0", + value: rpcBlock.transactions[0]!.value, + index: 0, + subcalls: 0, + }, + transactionHash: hash, + } satisfies SyncTrace; + + let isMatched = isTransferFilterMatched({ + filter, + block: rpcBlock, + trace: rpcTrace.trace, + }); + expect(isMatched).toBe(true); + + rpcTrace.trace.value = "0x0"; + + isMatched = isTransferFilterMatched({ + filter, + block: rpcBlock, + trace: rpcTrace.trace, }); expect(isMatched).toBe(false); +}); - isMatched = isBlockFilterMatched({ - filter: context.sources[4].filter, - block: rpcData.block3.block, +test("isTraceFilterMatched()", async (context) => { + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + const { hash } = await transferErc20({ + erc20: address, + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + includeCallTraces: true, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const filter = sources[1]!.filter as TraceFilter; + + const rpcTrace = { + 
trace: { + type: "CALL", + from: ALICE, + to: address, + gas: "0x0", + gasUsed: "0x0", + input: encodeFunctionData({ + abi: erc20ABI, + functionName: "transfer", + args: [BOB, parseEther("1")], + }), + output: encodeFunctionResult({ + abi: erc20ABI, + functionName: "transfer", + result: true, + }), + value: "0x0", + index: 0, + subcalls: 0, + }, + transactionHash: hash, + } satisfies SyncTrace; + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 3, + }); + + let isMatched = isTraceFilterMatched({ + filter, + block: rpcBlock, + trace: rpcTrace.trace, + }); + expect(isMatched).toBe(true); + + filter.functionSelector = undefined; + + isMatched = isTraceFilterMatched({ + filter, + block: rpcBlock, + trace: rpcTrace.trace, }); expect(isMatched).toBe(true); + + rpcTrace.trace.to = zeroAddress; + + isMatched = isTraceFilterMatched({ + filter, + block: rpcBlock, + trace: rpcTrace.trace, + }); + expect(isMatched).toBe(false); }); diff --git a/packages/core/src/sync-realtime/filter.ts b/packages/core/src/sync-realtime/filter.ts index a76fc84c0..3c797f89c 100644 --- a/packages/core/src/sync-realtime/filter.ts +++ b/packages/core/src/sync-realtime/filter.ts @@ -1,18 +1,52 @@ -import { - type TraceFilterFragment, - buildLogFilterFragments, - buildTraceFilterFragments, -} from "@/sync/fragments.js"; import { type BlockFilter, - type CallTraceFilter, type LogFactory, type LogFilter, + type TraceFilter, + type TransactionFilter, + type TransferFilter, isAddressFactory, } from "@/sync/source.js"; -import type { SyncBlock, SyncCallTrace, SyncLog } from "@/types/sync.js"; +import type { + SyncBlock, + SyncLog, + SyncTrace, + SyncTransaction, +} from "@/types/sync.js"; import { toLowerCase } from "@/utils/lowercase.js"; -import { hexToNumber } from "viem"; +import { type Address, hexToBigInt, hexToNumber } from "viem"; + +const isValueMatched = ( + filterValue: T | T[] | Set | null | undefined, + eventValue: T | undefined, +): boolean => { + // match all + if (filterValue === null || filterValue === undefined) return true; + + // missing value + if (eventValue === undefined) return false; + + // array + if ( + Array.isArray(filterValue) && + filterValue.some((v) => v === toLowerCase(eventValue)) + ) { + return true; + } + + // set + if ( + filterValue instanceof Set && + filterValue.has(toLowerCase(eventValue) as T) + ) { + return true; + } + + // single + if (filterValue === toLowerCase(eventValue)) return true; + + return false; +}; /** * Returns `true` if `log` matches `filter` @@ -41,91 +75,299 @@ export const isLogFilterMatched = ({ filter, block, log, + childAddresses, }: { filter: LogFilter; block: SyncBlock; log: SyncLog; + childAddresses?: Set
<Address> | Set<Address>
[]; }): boolean => { // Return `false` for out of range blocks if ( - hexToNumber(block.number) < filter.fromBlock || + hexToNumber(block.number) < (filter.fromBlock ?? 0) || hexToNumber(block.number) > (filter.toBlock ?? Number.POSITIVE_INFINITY) ) { return false; } - return buildLogFilterFragments(filter).some((fragment) => { - if ( - fragment.topic0 !== null && - fragment.topic0 !== log.topics[0]?.toLowerCase() - ) + if (isValueMatched(filter.topic0, log.topics[0]) === false) return false; + if (isValueMatched(filter.topic1, log.topics[1]) === false) return false; + if (isValueMatched(filter.topic2, log.topics[2]) === false) return false; + if (isValueMatched(filter.topic3, log.topics[3]) === false) return false; + + if (isAddressFactory(filter.address)) { + if (Array.isArray(childAddresses)) { + if ( + childAddresses.every( + (address) => isValueMatched(address, log.address) === false, + ) + ) { + return false; + } + } else { + if (isValueMatched(childAddresses, log.address) === false) { + return false; + } + } + } else { + if (isValueMatched(filter.address, log.address) === false) { return false; + } + } + + return true; +}; + +/** + * Returns `true` if `transaction` matches `filter` + */ +export const isTransactionFilterMatched = ({ + filter, + block, + transaction, + fromChildAddresses, + toChildAddresses, +}: { + filter: TransactionFilter; + block: Pick; + transaction: SyncTransaction; + fromChildAddresses?: Set
<Address> | Set<Address>
[]; + toChildAddresses?: Set
<Address> | Set<Address>
[]; +}): boolean => { + // Return `false` for out of range blocks + if ( + hexToNumber(block.number) < (filter.fromBlock ?? 0) || + hexToNumber(block.number) > (filter.toBlock ?? Number.POSITIVE_INFINITY) + ) { + return false; + } + + if (isAddressFactory(filter.fromAddress)) { + if (Array.isArray(fromChildAddresses)) { + if ( + fromChildAddresses.every( + (address) => isValueMatched(address, transaction.from) === false, + ) + ) { + return false; + } + } else { + if (isValueMatched(fromChildAddresses, transaction.from) === false) { + return false; + } + } + } else { if ( - fragment.topic1 !== null && - fragment.topic1 !== log.topics[1]?.toLowerCase() - ) + isValueMatched( + filter.fromAddress as Address | Address[] | undefined, + transaction.from, + ) === false + ) { return false; + } + } + + if (isAddressFactory(filter.toAddress)) { + if (Array.isArray(toChildAddresses)) { + if ( + transaction.to !== null && + toChildAddresses.every( + (address) => isValueMatched(address, transaction.to!) === false, + ) + ) { + return false; + } + } else { + if ( + transaction.to !== null && + isValueMatched(toChildAddresses, transaction.to) === false + ) { + return false; + } + } + } else { if ( - fragment.topic2 !== null && - fragment.topic2 !== log.topics[2]?.toLowerCase() - ) + transaction.to !== null && + isValueMatched( + filter.toAddress as Address | Address[] | undefined, + transaction.to, + ) === false + ) { return false; + } + } + + // NOTE: `filter.includeReverted` is intentionally ignored + + return true; +}; + +/** + * Returns `true` if `trace` matches `filter` + */ +export const isTraceFilterMatched = ({ + filter, + block, + trace, + fromChildAddresses, + toChildAddresses, +}: { + filter: TraceFilter; + block: Pick; + trace: Omit; + fromChildAddresses?: Set
<Address> | Set<Address>
[]; + toChildAddresses?: Set
<Address> | Set<Address>
[]; +}): boolean => { + // Return `false` for out of range blocks + if ( + hexToNumber(block.number) < (filter.fromBlock ?? 0) || + hexToNumber(block.number) > (filter.toBlock ?? Number.POSITIVE_INFINITY) + ) { + return false; + } + + if (isAddressFactory(filter.fromAddress)) { + if (Array.isArray(fromChildAddresses)) { + if ( + fromChildAddresses.every( + (address) => isValueMatched(address, trace.from) === false, + ) + ) { + return false; + } + } else { + if (isValueMatched(fromChildAddresses, trace.from) === false) { + return false; + } + } + } else { if ( - fragment.topic3 !== null && - fragment.topic3 !== log.topics[3]?.toLowerCase() - ) + isValueMatched( + filter.fromAddress as Address | Address[] | undefined, + trace.from, + ) === false + ) { return false; + } + } + if (isAddressFactory(filter.toAddress)) { + if (Array.isArray(toChildAddresses)) { + if ( + toChildAddresses.every( + (address) => isValueMatched(address, trace.to) === false, + ) + ) { + return false; + } + } else { + if (isValueMatched(toChildAddresses, trace.to) === false) { + return false; + } + } + } else { if ( - isAddressFactory(filter.address) === false && - fragment.address !== null && - fragment.address !== log.address.toLowerCase() - ) + isValueMatched( + filter.toAddress as Address | Address[] | undefined, + trace.to, + ) === false + ) { return false; + } + } - return true; - }); + if ( + isValueMatched(filter.functionSelector, trace.input.slice(0, 10)) === false + ) { + return false; + } + + // NOTE: `filter.callType` and `filter.includeReverted` is intentionally ignored + + return true; }; /** - * Returns `true` if `callTrace` matches `filter` + * Returns `true` if `trace` matches `filter` */ -export const isCallTraceFilterMatched = ({ +export const isTransferFilterMatched = ({ filter, block, - callTrace, + trace, + fromChildAddresses, + toChildAddresses, }: { - filter: CallTraceFilter; - block: SyncBlock; - callTrace: SyncCallTrace; + filter: TransferFilter; + block: Pick; + trace: Omit; + fromChildAddresses?: Set
<Address> | Set<Address>
[]; + toChildAddresses?: Set
<Address> | Set<Address>
[]; }): boolean => { // Return `false` for out of range blocks if ( - hexToNumber(block.number) < filter.fromBlock || + hexToNumber(block.number) < (filter.fromBlock ?? 0) || hexToNumber(block.number) > (filter.toBlock ?? Number.POSITIVE_INFINITY) ) { return false; } - return buildTraceFilterFragments(filter).some((fragment) => { + if (trace.value === undefined || hexToBigInt(trace.value) === 0n) { + return false; + } + + if (isAddressFactory(filter.fromAddress)) { + if (Array.isArray(fromChildAddresses)) { + if ( + fromChildAddresses.every( + (address) => isValueMatched(address, trace.from) === false, + ) + ) { + return false; + } + } else { + if (isValueMatched(fromChildAddresses, trace.from) === false) { + return false; + } + } + } else { if ( - fragment.fromAddress !== null && - fragment.fromAddress !== callTrace.action.from.toLowerCase() + isValueMatched( + filter.fromAddress as Address | Address[] | undefined, + trace.from, + ) === false ) { return false; } + } + if (isAddressFactory(filter.toAddress)) { + if (Array.isArray(toChildAddresses)) { + if ( + toChildAddresses.every( + (address) => isValueMatched(address, trace.to) === false, + ) + ) { + return false; + } + } else { + if (isValueMatched(toChildAddresses, trace.to) === false) { + return false; + } + } + } else { if ( - isAddressFactory(filter.toAddress) === false && - (fragment as TraceFilterFragment).toAddress !== null && - (fragment as TraceFilterFragment).toAddress !== - callTrace.action.to.toLowerCase() + isValueMatched( + filter.toAddress as Address | Address[] | undefined, + trace.to, + ) === false ) { return false; } + } - return true; - }); + // NOTE: `filter.includeReverted` is intentionally ignored + + return true; }; /** @@ -140,7 +382,7 @@ export const isBlockFilterMatched = ({ }): boolean => { // Return `false` for out of range blocks if ( - hexToNumber(block.number) < filter.fromBlock || + hexToNumber(block.number) < (filter.fromBlock ?? 0) || hexToNumber(block.number) > (filter.toBlock ?? 
Number.POSITIVE_INFINITY) ) { return false; diff --git a/packages/core/src/sync-realtime/index.test.ts b/packages/core/src/sync-realtime/index.test.ts index 03cbc7227..0c609b5d2 100644 --- a/packages/core/src/sync-realtime/index.test.ts +++ b/packages/core/src/sync-realtime/index.test.ts @@ -1,13 +1,38 @@ +import { ALICE, BOB } from "@/_test/constants.js"; +import { erc20ABI } from "@/_test/generated.js"; import { setupAnvil, setupCommon, setupDatabaseServices, setupIsolatedDatabase, } from "@/_test/setup.js"; -import { getRawRPCData, testClient } from "@/_test/utils.js"; -import type { SyncTrace } from "@/types/sync.js"; -import type { RequestQueue } from "@/utils/requestQueue.js"; +import { + createPair, + deployErc20, + deployFactory, + mintErc20, + swapPair, + transferErc20, + transferEth, +} from "@/_test/simulate.js"; +import { + getAccountsConfigAndIndexingFunctions, + getBlocksConfigAndIndexingFunctions, + getErc20ConfigAndIndexingFunctions, + getNetwork, + getPairWithFactoryConfigAndIndexingFunctions, + testClient, +} from "@/_test/utils.js"; +import { buildConfigAndIndexingFunctions } from "@/build/configAndIndexingFunctions.js"; +import type { LogFactory, LogFilter } from "@/sync/source.js"; +import { createRequestQueue } from "@/utils/requestQueue.js"; import { _eth_getBlockByNumber } from "@/utils/rpc.js"; +import { + encodeFunctionData, + encodeFunctionResult, + parseEther, + toHex, +} from "viem"; import { beforeEach, expect, test, vi } from "vitest"; import { type RealtimeSyncEvent, createRealtimeSync } from "./index.js"; @@ -15,41 +40,28 @@ beforeEach(setupCommon); beforeEach(setupAnvil); beforeEach(setupIsolatedDatabase); -// Helper function used to spoof "trace_filter" requests -// because they aren't supported by foundry. -const getRequestQueue = async (requestQueue: RequestQueue) => { - const rpcData = await getRawRPCData(); - - return { - ...requestQueue, - request: (request: any) => { - if (request.method === "trace_block") { - const blockNumber = request.params[0]; - const traces: SyncTrace[] = - blockNumber === rpcData.block1.block.number - ? rpcData.block1.callTraces - : blockNumber === rpcData.block2.block.number - ? rpcData.block2.callTraces - : blockNumber === rpcData.block3.block.number - ? rpcData.block3.callTraces - : blockNumber === rpcData.block4.block.number - ? 
rpcData.block4.callTraces - : rpcData.block5.callTraces; - - return Promise.resolve(traces); - } else return requestQueue.request(request); - }, - } as RequestQueue; -}; - test("createRealtimeSyncService()", async (context) => { - const { common, networks, requestQueues, sources } = context; + const { common } = context; const { cleanup } = await setupDatabaseServices(context); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + const realtimeSync = createRealtimeSync({ common, - network: networks[0], - requestQueue: requestQueues[0], + network, + requestQueue, sources, onEvent: vi.fn(), onFatalError: vi.fn(), @@ -61,17 +73,33 @@ test("createRealtimeSyncService()", async (context) => { }); test("start() handles block", async (context) => { - const { common, networks, requestQueues, sources } = context; + const { common } = context; const { cleanup } = await setupDatabaseServices(context); - const finalizedBlock = await _eth_getBlockByNumber(requestQueues[0], { - blockNumber: 4, + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, }); + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 0, + }); + + await testClient.mine({ blocks: 1 }); + const realtimeSync = createRealtimeSync({ common, - network: networks[0], - requestQueue: await getRequestQueue(requestQueues[0]), + network, + requestQueue, sources, onEvent: vi.fn(), onFatalError: vi.fn(), @@ -91,17 +119,33 @@ test("start() handles block", async (context) => { }); test("start() no-op when receiving same block twice", async (context) => { - const { common, networks, requestQueues, sources } = context; + const { common } = context; const { cleanup } = await setupDatabaseServices(context); - const finalizedBlock = await _eth_getBlockByNumber(requestQueues[0], { - blockNumber: 4, + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, }); + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 0, + }); + + await testClient.mine({ blocks: 1 }); + const realtimeSync = createRealtimeSync({ common, - network: networks[0], - requestQueue: await getRequestQueue(requestQueues[0]), + network, + requestQueue, sources, onEvent: vi.fn(), onFatalError: vi.fn(), @@ -113,7 +157,7 @@ test("start() no-op when receiving same block twice", async (context) => { }); await queue.onIdle(); - await _eth_getBlockByNumber(requestQueues[0], { blockNumber: 5 }).then( + await _eth_getBlockByNumber(requestQueue, { blockNumber: 1 }).then( // @ts-ignore (block) => queue.add({ block }), ); @@ -128,17 +172,33 @@ test("start() no-op when receiving same block twice", async (context) => { }); test("start() gets missing block", async (context) => { - const { common, networks, requestQueues, sources } = context; + const { common } = 
context; const { cleanup } = await setupDatabaseServices(context); - const finalizedBlock = await _eth_getBlockByNumber(requestQueues[0], { + const network = getNetwork({ finalityBlockCount: 2 }); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { blockNumber: 0, }); + await testClient.mine({ blocks: 2 }); + const realtimeSync = createRealtimeSync({ common, - network: networks[0], - requestQueue: await getRequestQueue(requestQueues[0]), + network, + requestQueue, sources, onEvent: vi.fn(), onFatalError: vi.fn(), @@ -146,15 +206,12 @@ test("start() gets missing block", async (context) => { const queue = await realtimeSync.start({ syncProgress: { finalized: finalizedBlock }, - initialChildAddresses: new Map([ - [sources[1].filter.address, new Set()], - [sources[2].filter.toAddress, new Set()], - ]), + initialChildAddresses: new Map(), }); await queue.onIdle(); - expect(realtimeSync.unfinalizedBlocks).toHaveLength(5); + expect(realtimeSync.unfinalizedBlocks).toHaveLength(2); await realtimeSync.kill(); @@ -162,20 +219,34 @@ test("start() gets missing block", async (context) => { }); test("start() retries on error", async (context) => { - const { common, networks, requestQueues, sources } = context; + const { common } = context; const { cleanup } = await setupDatabaseServices(context); - const finalizedBlock = await _eth_getBlockByNumber(requestQueues[0], { + const network = getNetwork({ finalityBlockCount: 2 }); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { blockNumber: 0, }); - const requestQueue = await getRequestQueue(requestQueues[0]); + await testClient.mine({ blocks: 1 }); const requestSpy = vi.spyOn(requestQueue, "request"); const realtimeSync = createRealtimeSync({ common, - network: networks[0], + network, requestQueue, sources, onEvent: vi.fn(), @@ -186,10 +257,7 @@ test("start() retries on error", async (context) => { const queue = await realtimeSync.start({ syncProgress: { finalized: finalizedBlock }, - initialChildAddresses: new Map([ - [sources[1].filter.address, new Set()], - [sources[2].filter.toAddress, new Set()], - ]), + initialChildAddresses: new Map(), }); await queue.onIdle(); @@ -202,17 +270,33 @@ test("start() retries on error", async (context) => { }); test("kill()", async (context) => { - const { common, networks, requestQueues, sources } = context; + const { common } = context; const { cleanup } = await setupDatabaseServices(context); - const finalizedBlock = await _eth_getBlockByNumber(requestQueues[0], { - blockNumber: 3, + const network = getNetwork({ finalityBlockCount: 2 }); + const requestQueue = createRequestQueue({ + network, + common: context.common, }); + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 
0, + }); + + await testClient.mine({ blocks: 2 }); + const realtimeSync = createRealtimeSync({ common, - network: networks[0], - requestQueue: await getRequestQueue(requestQueues[0]), + network, + requestQueue, sources, onEvent: vi.fn(), onFatalError: vi.fn(), @@ -220,10 +304,7 @@ test("kill()", async (context) => { await realtimeSync.start({ syncProgress: { finalized: finalizedBlock }, - initialChildAddresses: new Map([ - [sources[1].filter.address, new Set()], - [sources[2].filter.toAddress, new Set()], - ]), + initialChildAddresses: new Map(), }); await realtimeSync.kill(); @@ -233,41 +314,222 @@ test("kill()", async (context) => { await cleanup(); }); -test("handleBlock() block event", async (context) => { - const { common, networks, requestQueues, sources } = context; +test("handleBlock() block event with log", async (context) => { + const { common } = context; + const { cleanup } = await setupDatabaseServices(context); + + const network = getNetwork({ finalityBlockCount: 2 }); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const data: Extract[] = []; + + const onEvent = vi.fn(async (_data) => { + data.push(_data); + }); + + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + + const realtimeSync = createRealtimeSync({ + common, + network, + requestQueue, + sources, + onEvent, + onFatalError: vi.fn(), + }); + + const queue = await realtimeSync.start({ + syncProgress: { finalized: finalizedBlock }, + initialChildAddresses: new Map(), + }); + await queue.onIdle(); + + expect(realtimeSync.unfinalizedBlocks).toHaveLength(1); + + expect(onEvent).toHaveBeenCalledTimes(1); + expect(onEvent).toHaveBeenCalledWith({ + type: "block", + filters: expect.any(Object), + block: expect.any(Object), + logs: expect.any(Object), + factoryLogs: expect.any(Object), + transactions: expect.any(Object), + traces: expect.any(Object), + transactionReceipts: expect.any(Object), + }); + + expect(data[0]?.block.number).toBe("0x2"); + expect(data[0]?.logs).toHaveLength(1); + expect(data[0]?.traces).toHaveLength(0); + expect(data[0]?.transactions).toHaveLength(1); + + await realtimeSync.kill(); + + await cleanup(); +}); + +test("handleBlock() block event with log factory", async (context) => { + const { common } = context; const { cleanup } = await setupDatabaseServices(context); + const network = getNetwork({ finalityBlockCount: 2 }); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployFactory({ sender: ALICE }); + const { result: pair } = await createPair({ + factory: address, + sender: ALICE, + }); + await swapPair({ + pair, + amount0Out: 1n, + amount1Out: 1n, + to: ALICE, + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getPairWithFactoryConfigAndIndexingFunctions({ + address, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const filter = sources[0]!.filter as LogFilter; + const data: Extract[] = []; const onEvent = vi.fn(async (_data) => { data.push(_data); }); - const finalizedBlock = await 
_eth_getBlockByNumber(requestQueues[0], { + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + + const realtimeSync = createRealtimeSync({ + common, + network, + requestQueue, + sources, + onEvent, + onFatalError: vi.fn(), + }); + + const queue = await realtimeSync.start({ + syncProgress: { finalized: finalizedBlock }, + initialChildAddresses: new Map([[filter.address, new Set()]]), + }); + await queue.onIdle(); + + expect(realtimeSync.unfinalizedBlocks).toHaveLength(2); + + expect(onEvent).toHaveBeenCalledTimes(2); + expect(onEvent).toHaveBeenCalledWith({ + type: "block", + filters: expect.any(Object), + block: expect.any(Object), + logs: expect.any(Object), + factoryLogs: expect.any(Object), + transactions: expect.any(Object), + traces: expect.any(Object), + transactionReceipts: expect.any(Object), + }); + + expect(data[0]?.block.number).toBe("0x2"); + expect(data[1]?.block.number).toBe("0x3"); + + expect(data[0]?.logs).toHaveLength(0); + expect(data[1]?.logs).toHaveLength(1); + + expect(data[0]?.factoryLogs).toHaveLength(1); + expect(data[1]?.factoryLogs).toHaveLength(0); + + expect(data[0]?.traces).toHaveLength(0); + expect(data[1]?.traces).toHaveLength(0); + + expect(data[0]?.transactions).toHaveLength(0); + expect(data[1]?.transactions).toHaveLength(1); + + await realtimeSync.kill(); + + await cleanup(); +}); + +test("handleBlock() block event with block", async (context) => { + const { common } = context; + const { cleanup } = await setupDatabaseServices(context); + + const network = getNetwork({ finalityBlockCount: 2 }); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const data: Extract[] = []; + + const onEvent = vi.fn(async (_data) => { + data.push(_data); + }); + + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { blockNumber: 0, }); const realtimeSync = createRealtimeSync({ common, - network: networks[0], - requestQueue: await getRequestQueue(requestQueues[0]), + network, + requestQueue, sources, onEvent, onFatalError: vi.fn(), }); + await testClient.mine({ blocks: 1 }); + const queue = await realtimeSync.start({ syncProgress: { finalized: finalizedBlock }, - initialChildAddresses: new Map([ - [sources[1].filter.address, new Set()], - [sources[2].filter.toAddress, new Set()], - ]), + initialChildAddresses: new Map(), }); await queue.onIdle(); - expect(realtimeSync.unfinalizedBlocks).toHaveLength(5); + expect(realtimeSync.unfinalizedBlocks).toHaveLength(1); - expect(onEvent).toHaveBeenCalledTimes(5); + expect(onEvent).toHaveBeenCalledTimes(1); expect(onEvent).toHaveBeenCalledWith({ type: "block", filters: expect.any(Object), @@ -275,33 +537,344 @@ test("handleBlock() block event", async (context) => { logs: expect.any(Object), factoryLogs: expect.any(Object), transactions: expect.any(Object), - callTraces: expect.any(Object), + traces: expect.any(Object), transactionReceipts: expect.any(Object), }); expect(data[0]?.block.number).toBe("0x1"); - expect(data[1]?.block.number).toBe("0x2"); - expect(data[2]?.block.number).toBe("0x3"); - expect(data[3]?.block.number).toBe("0x4"); - expect(data[4]?.block.number).toBe("0x5"); + expect(data[0]?.logs).toHaveLength(0); + expect(data[0]?.traces).toHaveLength(0); + expect(data[0]?.transactions).toHaveLength(0); + + await 
realtimeSync.kill(); + + await cleanup(); +}); + +test("handleBlock() block event with transaction", async (context) => { + const { common } = context; + const { cleanup } = await setupDatabaseServices(context); + + const network = getNetwork({ finalityBlockCount: 2 }); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + await transferEth({ + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + const { config, rawIndexingFunctions } = + getAccountsConfigAndIndexingFunctions({ + address: ALICE, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const data: Extract[] = []; + + const onEvent = vi.fn(async (_data) => { + data.push(_data); + }); + + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 0, + }); + + const realtimeSync = createRealtimeSync({ + common, + network, + requestQueue, + sources: sources.filter(({ filter }) => filter.type === "transaction"), + onEvent, + onFatalError: vi.fn(), + }); + + const queue = await realtimeSync.start({ + syncProgress: { finalized: finalizedBlock }, + initialChildAddresses: new Map(), + }); + await queue.onIdle(); + + expect(realtimeSync.unfinalizedBlocks).toHaveLength(1); + + expect(onEvent).toHaveBeenCalledTimes(1); + expect(onEvent).toHaveBeenCalledWith({ + type: "block", + filters: expect.any(Object), + block: expect.any(Object), + logs: expect.any(Object), + factoryLogs: expect.any(Object), + transactions: expect.any(Object), + traces: expect.any(Object), + transactionReceipts: expect.any(Object), + }); + + expect(data[0]?.block.number).toBe("0x1"); expect(data[0]?.logs).toHaveLength(0); - expect(data[1]?.logs).toHaveLength(2); - expect(data[2]?.logs).toHaveLength(0); - expect(data[3]?.logs).toHaveLength(1); - expect(data[4]?.logs).toHaveLength(0); + expect(data[0]?.traces).toHaveLength(0); + expect(data[0]?.transactions).toHaveLength(1); + expect(data[0]?.transactionReceipts).toHaveLength(1); - expect(data[0]?.callTraces).toHaveLength(0); - expect(data[1]?.callTraces).toHaveLength(0); - expect(data[2]?.callTraces).toHaveLength(1); - expect(data[3]?.callTraces).toHaveLength(1); - expect(data[4]?.callTraces).toHaveLength(0); + await realtimeSync.kill(); - expect(data[0]?.transactions).toHaveLength(0); - expect(data[1]?.transactions).toHaveLength(2); - expect(data[2]?.transactions).toHaveLength(1); - expect(data[3]?.transactions).toHaveLength(1); - expect(data[4]?.transactions).toHaveLength(0); + await cleanup(); +}); + +test("handleBlock() block event with transfer", async (context) => { + const { common } = context; + const { cleanup } = await setupDatabaseServices(context); + + const network = getNetwork({ finalityBlockCount: 2 }); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { hash } = await transferEth({ + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getAccountsConfigAndIndexingFunctions({ + address: ALICE, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const request = async (request: any) => { + if (request.method === "debug_traceBlockByHash") { + return Promise.resolve([ + { + txHash: hash, + result: { + type: "CALL", + from: ALICE, + to: BOB, + gas: "0x0", + gasUsed: "0x0", + input: "0x0", + output: "0x0", + value: toHex(parseEther("1")), + }, + }, + ]); + } + + return requestQueue.request(request); + }; + + const data: 
Extract[] = []; + + const onEvent = vi.fn(async (_data) => { + data.push(_data); + }); + + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 0, + }); + + const realtimeSync = createRealtimeSync({ + common, + network, + requestQueue: { + ...requestQueue, + // @ts-ignore + request, + }, + sources, + onEvent, + onFatalError: vi.fn(), + }); + + const queue = await realtimeSync.start({ + syncProgress: { finalized: finalizedBlock }, + initialChildAddresses: new Map(), + }); + await queue.onIdle(); + + expect(realtimeSync.unfinalizedBlocks).toHaveLength(1); + + expect(onEvent).toHaveBeenCalledTimes(1); + expect(onEvent).toHaveBeenCalledWith({ + type: "block", + filters: expect.any(Object), + block: expect.any(Object), + logs: expect.any(Object), + factoryLogs: expect.any(Object), + transactions: expect.any(Object), + traces: expect.any(Object), + transactionReceipts: expect.any(Object), + }); + + expect(data[0]?.block.number).toBe("0x1"); + expect(data[0]?.logs).toHaveLength(0); + expect(data[0]?.traces).toHaveLength(1); + expect(data[0]?.transactions).toHaveLength(1); + expect(data[0]?.transactionReceipts).toHaveLength(1); + + await realtimeSync.kill(); + + await cleanup(); +}); + +test("handleBlock() block event with trace", async (context) => { + const { common } = context; + const { cleanup } = await setupDatabaseServices(context); + + const network = getNetwork({ finalityBlockCount: 2 }); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + await transferErc20({ + erc20: address, + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + + const block2 = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); + + const block3 = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 3, + }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + includeCallTraces: true, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const request = async (request: any) => { + if (request.method === "debug_traceBlockByHash") { + if (request.params[0] === block2.hash) { + return Promise.resolve([ + { + txHash: block2.transactions[0]!.hash, + result: { + type: "CREATE", + from: ALICE, + gas: "0x0", + gasUsed: "0x0", + input: "0x0", + value: "0x0", + }, + }, + ]); + } + + if (request.params[0] === block3.hash) { + return Promise.resolve([ + { + txHash: block3.transactions[0]!.hash, + result: { + type: "CALL", + from: ALICE, + to: address, + gas: "0x0", + gasUsed: "0x0", + input: encodeFunctionData({ + abi: erc20ABI, + functionName: "transfer", + args: [BOB, parseEther("1")], + }), + output: encodeFunctionResult({ + abi: erc20ABI, + functionName: "transfer", + result: true, + }), + value: "0x0", + }, + }, + ]); + } + + return Promise.resolve([]); + } + + return requestQueue.request(request); + }; + + const data: Extract[] = []; + + const onEvent = vi.fn(async (_data) => { + data.push(_data); + }); + + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + + const realtimeSync = createRealtimeSync({ + common, + network, + requestQueue: { + ...requestQueue, + // @ts-ignore + request, + }, + sources, + onEvent, + onFatalError: vi.fn(), + }); + + const queue = await realtimeSync.start({ + syncProgress: { finalized: finalizedBlock 
}, + initialChildAddresses: new Map(), + }); + await queue.onIdle(); + + expect(realtimeSync.unfinalizedBlocks).toHaveLength(2); + + expect(onEvent).toHaveBeenCalledTimes(2); + expect(onEvent).toHaveBeenCalledWith({ + type: "block", + filters: expect.any(Object), + block: expect.any(Object), + logs: expect.any(Object), + factoryLogs: expect.any(Object), + transactions: expect.any(Object), + traces: expect.any(Object), + transactionReceipts: expect.any(Object), + }); + + expect(data[0]?.block.number).toBe("0x2"); + expect(data[1]?.block.number).toBe("0x3"); + + expect(data[0]?.logs).toHaveLength(1); + expect(data[1]?.logs).toHaveLength(1); + + expect(data[0]?.traces).toHaveLength(0); + expect(data[1]?.traces).toHaveLength(1); + + expect(data[0]?.transactions).toHaveLength(1); + expect(data[1]?.transactions).toHaveLength(1); + + expect(data[0]?.transactionReceipts).toHaveLength(0); + expect(data[1]?.transactionReceipts).toHaveLength(0); await realtimeSync.kill(); @@ -309,10 +882,24 @@ test("handleBlock() block event", async (context) => { }); test("handleBlock() finalize event", async (context) => { - const { common, networks, requestQueues, sources } = context; + const { common } = context; const { cleanup } = await setupDatabaseServices(context); - const finalizedBlock = await _eth_getBlockByNumber(requestQueues[0], { + const network = getNetwork({ finalityBlockCount: 2 }); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { blockNumber: 0, }); @@ -324,8 +911,8 @@ test("handleBlock() finalize event", async (context) => { const realtimeSync = createRealtimeSync({ common, - network: networks[0], - requestQueue: await getRequestQueue(requestQueues[0]), + network, + requestQueue, sources, onEvent, onFatalError: vi.fn(), @@ -335,10 +922,7 @@ test("handleBlock() finalize event", async (context) => { const queue = await realtimeSync.start({ syncProgress: { finalized: finalizedBlock }, - initialChildAddresses: new Map([ - [sources[1].filter.address, new Set()], - [sources[2].filter.toAddress, new Set()], - ]), + initialChildAddresses: new Map(), }); await queue.onIdle(); @@ -347,9 +931,9 @@ test("handleBlock() finalize event", async (context) => { block: expect.any(Object), }); - expect(realtimeSync.unfinalizedBlocks).toHaveLength(5); + expect(realtimeSync.unfinalizedBlocks).toHaveLength(2); - expect(data[0]?.block.number).toBe("0x4"); + expect(data[0]?.block.number).toBe("0x2"); await realtimeSync.kill(); @@ -357,33 +941,46 @@ test("handleBlock() finalize event", async (context) => { }); test("handleReorg() finds common ancestor", async (context) => { - const { common, networks, requestQueues, sources } = context; + const { common } = context; const { cleanup } = await setupDatabaseServices(context); - const finalizedBlock = await _eth_getBlockByNumber(requestQueues[0], { - blockNumber: 0, + const network = getNetwork({ finalityBlockCount: 2 }); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, }); const onEvent = vi.fn(); + const finalizedBlock = await 
_eth_getBlockByNumber(requestQueue, { + blockNumber: 0, + }); + const realtimeSync = createRealtimeSync({ common, - network: networks[0], - requestQueue: await getRequestQueue(requestQueues[0]), + network, + requestQueue, sources, onEvent, onFatalError: vi.fn(), }); + await testClient.mine({ blocks: 3 }); + const queue = await realtimeSync.start({ syncProgress: { finalized: finalizedBlock }, - initialChildAddresses: new Map([ - [sources[1].filter.address, new Set()], - [sources[2].filter.toAddress, new Set()], - ]), + initialChildAddresses: new Map(), }); - await _eth_getBlockByNumber(requestQueues[0], { blockNumber: 3 }).then( + await _eth_getBlockByNumber(requestQueue, { blockNumber: 2 }).then( // @ts-ignore (block) => queue.add({ block }), ); @@ -392,10 +989,10 @@ test("handleReorg() finds common ancestor", async (context) => { expect(onEvent).toHaveBeenCalledWith({ type: "reorg", block: expect.any(Object), - reorgedBlocks: [expect.any(Object), expect.any(Object), expect.any(Object)], + reorgedBlocks: [expect.any(Object), expect.any(Object)], }); - expect(realtimeSync.unfinalizedBlocks).toHaveLength(2); + expect(realtimeSync.unfinalizedBlocks).toHaveLength(1); await realtimeSync.kill(); @@ -403,42 +1000,55 @@ test("handleReorg() finds common ancestor", async (context) => { }); test("handleReorg() throws error for deep reorg", async (context) => { - const { common, networks, requestQueues, sources } = context; + const { common } = context; const { cleanup } = await setupDatabaseServices(context); - const finalizedBlock = await _eth_getBlockByNumber(requestQueues[0], { + const network = getNetwork({ finalityBlockCount: 2 }); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const finalizedBlock = await _eth_getBlockByNumber(requestQueue, { blockNumber: 0, }); const realtimeSync = createRealtimeSync({ common, - network: networks[0], - requestQueue: await getRequestQueue(requestQueues[0]), + network, + requestQueue, sources, onEvent: vi.fn(), onFatalError: vi.fn(), }); + await testClient.mine({ blocks: 3 }); + const queue = await realtimeSync.start({ syncProgress: { finalized: finalizedBlock }, - initialChildAddresses: new Map([ - [sources[1].filter.address, new Set()], - [sources[2].filter.toAddress, new Set()], - ]), + initialChildAddresses: new Map(), }); await queue.onIdle(); - const block = await _eth_getBlockByNumber(requestQueues[0], { - blockNumber: 5, + const block = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 3, }); // @ts-ignore await queue.add({ block: { ...block, - number: "0x6", + number: "0x4", hash: "0x0000000000000000000000000000000000000000000000000000000000000000", - parentHash: realtimeSync.unfinalizedBlocks[3]!.hash, + parentHash: realtimeSync.unfinalizedBlocks[1]!.hash, }, }); diff --git a/packages/core/src/sync-realtime/index.ts b/packages/core/src/sync-realtime/index.ts index 5ffbe1b4b..c9ecd2705 100644 --- a/packages/core/src/sync-realtime/index.ts +++ b/packages/core/src/sync-realtime/index.ts @@ -3,30 +3,33 @@ import type { Network } from "@/config/networks.js"; import { type SyncProgress, syncBlockToLightBlock } from "@/sync/index.js"; import { type BlockFilter, - type CallTraceFilter, type Factory, type Filter, type LogFilter, type Source, + type TraceFilter, + type TransactionFilter, + type 
TransferFilter, getChildAddress, isAddressFactory, + shouldGetTransactionReceipt, } from "@/sync/source.js"; import type { LightBlock, SyncBlock, - SyncCallTrace, SyncLog, + SyncTrace, SyncTransaction, SyncTransactionReceipt, } from "@/types/sync.js"; import { range } from "@/utils/range.js"; import type { RequestQueue } from "@/utils/requestQueue.js"; import { + _debug_traceBlockByHash, _eth_getBlockByHash, _eth_getBlockByNumber, _eth_getLogs, _eth_getTransactionReceipt, - _trace_block, } from "@/utils/rpc.js"; import { wait } from "@/utils/wait.js"; import { type Queue, createQueue } from "@ponder/common"; @@ -34,9 +37,11 @@ import { type Address, type Hash, hexToNumber } from "viem"; import { isFilterInBloom, zeroLogsBloom } from "./bloom.js"; import { isBlockFilterMatched, - isCallTraceFilterMatched, isLogFactoryMatched, isLogFilterMatched, + isTraceFilterMatched, + isTransactionFilterMatched, + isTransferFilterMatched, } from "./filter.js"; export type RealtimeSync = { @@ -64,7 +69,7 @@ export type BlockWithEventData = { filters: Set; logs: SyncLog[]; factoryLogs: SyncLog[]; - callTraces: SyncCallTrace[]; + traces: SyncTrace[]; transactions: SyncTransaction[]; transactionReceipts: SyncTransactionReceipt[]; }; @@ -112,27 +117,52 @@ export const createRealtimeSync = ( const factories: Factory[] = []; const logFilters: LogFilter[] = []; - const callTraceFilters: CallTraceFilter[] = []; + const traceFilters: TraceFilter[] = []; + const transactionFilters: TransactionFilter[] = []; + const transferFilters: TransferFilter[] = []; const blockFilters: BlockFilter[] = []; for (const source of args.sources) { + // Collect filters from sources if (source.type === "contract") { if (source.filter.type === "log") { logFilters.push(source.filter); - } else if (source.filter.type === "callTrace") { - callTraceFilters.push(source.filter); + } else if (source.filter.type === "trace") { + traceFilters.push(source.filter); } - - const _address = - source.filter.type === "log" - ? source.filter.address - : source.filter.toAddress; - if (isAddressFactory(_address)) { - factories.push(_address); + } else if (source.type === "account") { + if (source.filter.type === "transaction") { + transactionFilters.push(source.filter); + } else if (source.filter.type === "transfer") { + transferFilters.push(source.filter); } } else if (source.type === "block") { blockFilters.push(source.filter); } + + // Collect factories from sources + switch (source.filter.type) { + case "trace": + case "transaction": + case "transfer": { + const { fromAddress, toAddress } = source.filter; + + if (isAddressFactory(fromAddress)) { + factories.push(fromAddress); + } + if (isAddressFactory(toAddress)) { + factories.push(toAddress); + } + break; + } + case "log": { + const { address } = source.filter; + if (isAddressFactory(address)) { + factories.push(address); + } + break; + } + } } for (const factory of factories) { @@ -153,7 +183,7 @@ export const createRealtimeSync = ( block, logs, factoryLogs, - callTraces, + traces, transactions, transactionReceipts, }: Omit) => { @@ -185,16 +215,20 @@ export const createRealtimeSync = ( let isMatched = false; for (const filter of logFilters) { + const childAddresses = isAddressFactory(filter.address) + ? [ + finalizedChildAddresses.get(filter.address)!, + unfinalizedChildAddresses.get(filter.address)!, + ] + : undefined; + if ( - isLogFilterMatched({ filter, block, log }) && - (isAddressFactory(filter.address) - ? finalizedChildAddresses - .get(filter.address)! 
- .has(log.address.toLowerCase() as Address) || - unfinalizedChildAddresses - .get(filter.address)! - .has(log.address.toLowerCase() as Address) - : true) + isLogFilterMatched({ + filter, + block, + log, + childAddresses, + }) ) { matchedFilters.add(filter); isMatched = true; @@ -204,21 +238,60 @@ export const createRealtimeSync = ( return isMatched; }); - // Remove call traces that don't match a filter, accounting for factory addresses - callTraces = callTraces.filter((callTrace) => { + traces = traces.filter((trace) => { let isMatched = false; + for (const filter of transferFilters) { + const fromChildAddresses = isAddressFactory(filter.fromAddress) + ? [ + finalizedChildAddresses.get(filter.fromAddress)!, + unfinalizedChildAddresses.get(filter.fromAddress)!, + ] + : undefined; + + const toChildAddresses = isAddressFactory(filter.toAddress) + ? [ + finalizedChildAddresses.get(filter.toAddress)!, + unfinalizedChildAddresses.get(filter.toAddress)!, + ] + : undefined; - for (const filter of callTraceFilters) { if ( - isCallTraceFilterMatched({ filter, block, callTrace }) && - (isAddressFactory(filter.toAddress) - ? finalizedChildAddresses - .get(filter.toAddress)! - .has(callTrace.action.to.toLowerCase() as Address) || - unfinalizedChildAddresses - .get(filter.toAddress)! - .has(callTrace.action.to.toLowerCase() as Address) - : true) + isTransferFilterMatched({ + filter, + block: { number: block.number }, + trace: trace.trace, + fromChildAddresses, + toChildAddresses, + }) + ) { + matchedFilters.add(filter); + isMatched = true; + } + } + + for (const filter of traceFilters) { + const fromChildAddresses = isAddressFactory(filter.fromAddress) + ? [ + finalizedChildAddresses.get(filter.fromAddress)!, + unfinalizedChildAddresses.get(filter.fromAddress)!, + ] + : undefined; + + const toChildAddresses = isAddressFactory(filter.toAddress) + ? [ + finalizedChildAddresses.get(filter.toAddress)!, + unfinalizedChildAddresses.get(filter.toAddress)!, + ] + : undefined; + + if ( + isTraceFilterMatched({ + filter, + block: { number: block.number }, + trace: trace.trace, + fromChildAddresses, + toChildAddresses, + }) ) { matchedFilters.add(filter); isMatched = true; @@ -229,15 +302,52 @@ export const createRealtimeSync = ( }); // Remove transactions and transaction receipts that may have been filtered out + const transactionHashes = new Set(); for (const log of logs) { transactionHashes.add(log.transactionHash); } - for (const trace of callTraces) { + for (const trace of traces) { transactionHashes.add(trace.transactionHash); } - transactions = transactions.filter((t) => transactionHashes.has(t.hash)); + transactions = transactions.filter((transaction) => { + let isMatched = transactionHashes.has(transaction.hash); + for (const filter of transactionFilters) { + const fromChildAddresses = isAddressFactory(filter.fromAddress) + ? [ + finalizedChildAddresses.get(filter.fromAddress)!, + unfinalizedChildAddresses.get(filter.fromAddress)!, + ] + : undefined; + + const toChildAddresses = isAddressFactory(filter.toAddress) + ? 
[ + finalizedChildAddresses.get(filter.toAddress)!, + unfinalizedChildAddresses.get(filter.toAddress)!, + ] + : undefined; + + if ( + isTransactionFilterMatched({ + filter, + block, + transaction, + fromChildAddresses, + toChildAddresses, + }) + ) { + matchedFilters.add(filter); + isMatched = true; + } + } + return isMatched; + }); + + for (const transaction of transactions) { + transactionHashes.add(transaction.hash); + } + transactionReceipts = transactionReceipts.filter((t) => transactionHashes.has(t.transactionHash), ); @@ -249,7 +359,7 @@ export const createRealtimeSync = ( } } - if (logs.length > 0 || callTraces.length > 0) { + if (logs.length > 0 || traces.length > 0 || transactions.length > 0) { const _text: string[] = []; if (logs.length === 1) { @@ -258,10 +368,16 @@ export const createRealtimeSync = ( _text.push(`${logs.length} logs`); } - if (callTraces.length === 1) { - _text.push("1 call trace"); - } else if (callTraces.length > 1) { - _text.push(`${callTraces.length} call traces`); + if (traces.length === 1) { + _text.push("1 trace"); + } else if (traces.length > 1) { + _text.push(`${traces.length} traces`); + } + + if (transactions.length === 1) { + _text.push("1 transaction"); + } else if (transactions.length > 1) { + _text.push(`${transactions.length} transactions`); } const text = _text.filter((t) => t !== undefined).join(" and "); @@ -288,7 +404,7 @@ export const createRealtimeSync = ( block, factoryLogs, logs, - callTraces, + traces, transactions, transactionReceipts, }); @@ -513,32 +629,32 @@ export const createRealtimeSync = ( // Traces //////// - const shouldRequestTraces = callTraceFilters.length > 0; + const shouldRequestTraces = + traceFilters.length > 0 || transferFilters.length > 0; - let callTraces: SyncCallTrace[] = []; + let traces: SyncTrace[] = []; if (shouldRequestTraces) { - const traces = await _trace_block(args.requestQueue, { - blockNumber: hexToNumber(block.number), + traces = await _debug_traceBlockByHash(args.requestQueue, { + hash: block.hash, }); // Protect against RPCs returning empty traces. Known to happen near chain tip. // Use the fact that any transaction produces a trace. if (block.transactions.length !== 0 && traces.length === 0) { throw new Error( - "Detected invalid trace_block response. `block.transactions` is not empty but zero traces were returned.", + "Detected invalid debug_traceBlock response. `block.transactions` is not empty but zero traces were returned.", ); } - - callTraces = traces.filter( - (trace) => trace.type === "call", - ) as SyncCallTrace[]; } - // Check that traces refer to the correct block - for (const trace of callTraces) { - if (trace.blockHash !== block.hash) { + // Validate that each trace point to valid transaction in the block + for (const trace of traces) { + if ( + block.transactions.find((t) => t.hash === trace.transactionHash) === + undefined + ) { throw new Error( - `Detected inconsistent RPC responses. 'trace.blockHash' ${trace.blockHash} does not match 'block.hash' ${block.hash}`, + `Detected inconsistent RPC responses. 
'trace.txHash' ${trace.transactionHash} not found in 'block' ${block.hash}`, ); } } @@ -571,44 +687,80 @@ export const createRealtimeSync = ( // Remove logs that don't match a filter, recording required transactions logs = logs.filter((log) => { - let isLogMatched = false; + let isMatched = false; for (const filter of logFilters) { if (isLogFilterMatched({ filter, block, log })) { - isLogMatched = true; requiredTransactions.add(log.transactionHash); - if (filter.includeTransactionReceipts) { + isMatched = true; + if (shouldGetTransactionReceipt(filter)) { requiredTransactionReceipts.add(log.transactionHash); + // skip to next log + break; } } } - return isLogMatched; + return isMatched; }); - // Remove call traces that don't match a filter, recording required transactions - callTraces = callTraces.filter((callTrace) => { - let isCallTraceMatched = false; - for (const filter of callTraceFilters) { - if (isCallTraceFilterMatched({ filter, block, callTrace })) { - isCallTraceMatched = true; - requiredTransactions.add(callTrace.transactionHash); - if (filter.includeTransactionReceipts) { - requiredTransactionReceipts.add(callTrace.transactionHash); + // Initial weak trace filtering before full filtering with factory addresses in handleBlock + traces = traces.filter((trace) => { + let isMatched = false; + for (const filter of transferFilters) { + if ( + isTransferFilterMatched({ + filter, + block: { number: block.number }, + trace: trace.trace, + }) + ) { + requiredTransactions.add(trace.transactionHash); + isMatched = true; + if (shouldGetTransactionReceipt(filter)) { + requiredTransactionReceipts.add(trace.transactionHash); + // skip to next trace + break; + } + } + } + + for (const filter of traceFilters) { + if ( + isTraceFilterMatched({ + filter, + block: { number: block.number }, + trace: trace.trace, + }) + ) { + requiredTransactions.add(trace.transactionHash); + isMatched = true; + if (shouldGetTransactionReceipt(filter)) { + requiredTransactionReceipts.add(trace.transactionHash); + // skip to next trace + break; } } } - return isCallTraceMatched; + return isMatched; }); //////// // Transactions //////// - const transactions = block.transactions.filter(({ hash }) => - requiredTransactions.has(hash), - ); + const transactions = block.transactions.filter((transaction) => { + let isMatched = requiredTransactions.has(transaction.hash); + for (const filter of transactionFilters) { + if (isTransactionFilterMatched({ filter, block, transaction })) { + requiredTransactions.add(transaction.hash); + requiredTransactionReceipts.add(transaction.hash); + isMatched = true; + } + } + return isMatched; + }); // Validate that filtered logs/callTraces point to valid transaction in the block const blockTransactionsHashes = new Set( @@ -634,24 +786,11 @@ export const createRealtimeSync = ( ), ); - // Filter out call traces from reverted transactions - - const revertedTransactions = new Set(); - for (const receipt of transactionReceipts) { - if (receipt.status === "0x0") { - revertedTransactions.add(receipt.transactionHash); - } - } - - callTraces = callTraces.filter( - (trace) => revertedTransactions.has(trace.transactionHash) === false, - ); - return { block, logs, factoryLogs, - callTraces, + traces, transactions, transactionReceipts, }; @@ -717,6 +856,7 @@ export const createRealtimeSync = ( hexToNumber(latestBlock.number) + MAX_QUEUED_BLOCKS, ), ); + const pendingBlocks = await Promise.all( missingBlockRange.map((blockNumber) => _eth_getBlockByNumber(args.requestQueue, { diff --git 
a/packages/core/src/sync-store/encoding.ts b/packages/core/src/sync-store/encoding.ts index 23e665df3..d18e3d03f 100644 --- a/packages/core/src/sync-store/encoding.ts +++ b/packages/core/src/sync-store/encoding.ts @@ -1,7 +1,8 @@ +import type { FragmentId } from "@/sync/fragments.js"; import type { SyncBlock, - SyncCallTrace, SyncLog, + SyncTrace, SyncTransaction, SyncTransactionReceipt, } from "@/types/sync.js"; @@ -12,7 +13,7 @@ import { zeroCheckpoint, } from "@/utils/checkpoint.js"; import { toLowerCase } from "@/utils/lowercase.js"; -import type { ColumnType, Generated, Insertable } from "kysely"; +import type { ColumnType, Insertable } from "kysely"; import type { Address, Hash, Hex } from "viem"; import { hexToBigInt, hexToNumber } from "viem"; @@ -139,6 +140,9 @@ export const encodeLog = ({ type TransactionsTable = { hash: Hash; + chainId: number; + /** `checkpoint` will be null for transactions inserted before 0.8. This is to avoid a very slow migration. */ + checkpoint: string | null; blockHash: Hash; blockNumber: ColumnType; from: Address; @@ -160,19 +164,27 @@ type TransactionsTable = { string | bigint > | null; accessList: string | null; - - chainId: number; }; export const encodeTransaction = ({ transaction, + block, chainId, }: { transaction: SyncTransaction; + block: Pick; chainId: number; }): Insertable => { return { hash: transaction.hash, + checkpoint: encodeCheckpoint({ + blockTimestamp: hexToNumber(block.timestamp), + chainId: BigInt(chainId), + blockNumber: hexToBigInt(transaction.blockNumber), + transactionIndex: hexToBigInt(transaction.transactionIndex), + eventType: EVENT_TYPES.transactions, + eventIndex: zeroCheckpoint.eventIndex, + }), chainId, blockHash: transaction.blockHash, blockNumber: hexToBigInt(transaction.blockNumber), @@ -210,7 +222,6 @@ type TransactionReceiptsTable = { effectiveGasPrice: ColumnType; from: Address; gasUsed: ColumnType; - logs: string; logsBloom: Hex; status: Hex; to: Address | null; @@ -237,7 +248,6 @@ export const encodeTransactionReceipt = ({ effectiveGasPrice: hexToBigInt(transactionReceipt.effectiveGasPrice), from: toLowerCase(transactionReceipt.from), gasUsed: hexToBigInt(transactionReceipt.gasUsed), - logs: JSON.stringify(transactionReceipt.logs), logsBloom: transactionReceipt.logsBloom, status: transactionReceipt.status, to: transactionReceipt.to ? 
toLowerCase(transactionReceipt.to) : null, @@ -246,144 +256,91 @@ export const encodeTransactionReceipt = ({ }; }; -type CallTracesTable = { +type TracesTable = { id: string; chainId: number; checkpoint: string; - callType: string; + type: string; + transactionHash: Hex; + blockHash: Hex; + blockNumber: ColumnType; from: Address; + to: Address | null; gas: ColumnType; + gasUsed: ColumnType; input: Hex; - to: Address; - value: ColumnType; - blockHash: Hex; - blockNumber: ColumnType; - error: string | null; - gasUsed: ColumnType | null; - output: Hex | null; - subtraces: number; - traceAddress: string; - transactionHash: Hex; - transactionPosition: number; functionSelector: Hex; + output: Hex | null; + error: string | null; + revertReason: string | null; + value: ColumnType< + string | null, + string | bigint | null, + string | bigint | null + >; + index: number; + subcalls: number; + isReverted: number; }; -export function encodeCallTrace({ +export function encodeTrace({ trace, + block, + transaction, chainId, }: { - trace: SyncCallTrace; + trace: Omit; + block: Pick; + transaction: Pick; chainId: number; -}): Insertable> { +}): Insertable { return { - id: `${trace.transactionHash}-${JSON.stringify(trace.traceAddress)}`, + id: `${transaction.hash}-${trace.index}`, chainId, - callType: trace.action.callType, - from: toLowerCase(trace.action.from), - gas: hexToBigInt(trace.action.gas), - input: trace.action.input, - to: toLowerCase(trace.action.to), - value: hexToBigInt(trace.action.value), - blockHash: trace.blockHash, - blockNumber: hexToBigInt(trace.blockNumber), + checkpoint: encodeCheckpoint({ + blockTimestamp: hexToNumber(block.timestamp), + chainId: BigInt(chainId), + blockNumber: hexToBigInt(block.number), + transactionIndex: hexToBigInt(transaction.transactionIndex), + eventType: EVENT_TYPES.traces, + eventIndex: BigInt(trace.index), + }), + type: trace.type, + transactionHash: transaction.hash, + blockHash: block.hash, + blockNumber: hexToBigInt(block.number), + from: toLowerCase(trace.from), + to: trace.to ? toLowerCase(trace.to) : null, + gas: hexToBigInt(trace.gas), + gasUsed: hexToBigInt(trace.gasUsed), + input: trace.input, + functionSelector: trace.input.slice(0, 10) as Hex, + output: trace.output ?? null, + revertReason: trace.revertReason ?? null, error: trace.error ?? null, - gasUsed: trace.result ? hexToBigInt(trace.result.gasUsed) : null, - output: trace.result ? trace.result.output : null, - subtraces: trace.subtraces, - traceAddress: JSON.stringify(trace.traceAddress), - transactionHash: trace.transactionHash, - transactionPosition: trace.transactionPosition, - functionSelector: trace.action.input.slice(0, 10).toLowerCase() as Hex, + value: trace.value ? hexToBigInt(trace.value) : null, + index: trace.index, + subcalls: trace.subcalls, + isReverted: trace.error === undefined ? 
0 : 1, }; } type RpcRequestResultsTable = { request: string; - chainId: number; - blockNumber: ColumnType; + request_hash: ColumnType; + chain_id: number; + block_number: ColumnType< + string | undefined, + string | bigint | undefined, + string | bigint | undefined + >; result: string; }; -type LogFiltersTable = { - id: string; - chainId: number; - address: Hex | null; - topic0: Hex | null; - topic1: Hex | null; - topic2: Hex | null; - topic3: Hex | null; - includeTransactionReceipts: 0 | 1; -}; - -type LogFilterIntervalsTable = { - id: Generated; - logFilterId: string; - startBlock: ColumnType; - endBlock: ColumnType; -}; - -type FactoryLogFiltersTable = { - id: string; - chainId: number; - address: Hex; - eventSelector: Hex; - childAddressLocation: `topic${1 | 2 | 3}` | `offset${number}`; - topic0: Hex | null; - topic1: Hex | null; - topic2: Hex | null; - topic3: Hex | null; - includeTransactionReceipts: 0 | 1; -}; - -type FactoryLogFilterIntervalsTable = { - id: Generated; - factoryId: string; - startBlock: ColumnType; - endBlock: ColumnType; -}; - -type TraceFiltersTable = { - id: string; - chainId: number; - fromAddress: Address | null; - toAddress: Address | null; -}; - -type TraceFilterIntervalsTable = { - id: Generated; - traceFilterId: string; - startBlock: ColumnType; - endBlock: ColumnType; -}; - -type FactoryTraceFiltersTable = { - id: string; - chainId: number; - address: Hex; - eventSelector: Hex; - childAddressLocation: `topic${1 | 2 | 3}` | `offset${number}`; - fromAddress: Address | null; -}; - -type FactoryTraceFilterIntervalsTable = { - id: Generated; - factoryId: string; - startBlock: ColumnType; - endBlock: ColumnType; -}; - -type BlockFiltersTable = { - id: string; - chainId: number; - interval: number; - offset: number; -}; - -type BlockFilterIntervalsTable = { - id: Generated; - blockFilterId: string; - startBlock: ColumnType; - endBlock: ColumnType; +type IntervalTable = { + fragment_id: FragmentId; + chain_id: number; + blocks: string; }; export type PonderSyncSchema = { @@ -391,18 +348,9 @@ export type PonderSyncSchema = { logs: LogsTable; transactions: TransactionsTable; transactionReceipts: TransactionReceiptsTable; - callTraces: CallTracesTable; + traces: TracesTable; - rpcRequestResults: RpcRequestResultsTable; + rpc_request_results: RpcRequestResultsTable; - logFilters: LogFiltersTable; - logFilterIntervals: LogFilterIntervalsTable; - factoryLogFilters: FactoryLogFiltersTable; - factoryLogFilterIntervals: FactoryLogFilterIntervalsTable; - traceFilters: TraceFiltersTable; - traceFilterIntervals: TraceFilterIntervalsTable; - factoryTraceFilters: FactoryTraceFiltersTable; - factoryTraceFilterIntervals: FactoryTraceFilterIntervalsTable; - blockFilters: BlockFiltersTable; - blockFilterIntervals: BlockFilterIntervalsTable; + intervals: IntervalTable; }; diff --git a/packages/core/src/sync-store/index.test.ts b/packages/core/src/sync-store/index.test.ts index 1e676c9ad..081a65c70 100644 --- a/packages/core/src/sync-store/index.test.ts +++ b/packages/core/src/sync-store/index.test.ts @@ -1,21 +1,56 @@ +import { ALICE, BOB } from "@/_test/constants.js"; +import { erc20ABI } from "@/_test/generated.js"; import { setupAnvil, setupCommon, setupDatabaseServices, setupIsolatedDatabase, } from "@/_test/setup.js"; -import { getRawRPCData } from "@/_test/utils.js"; -import { NonRetryableError } from "@/common/errors.js"; -import type { Factory, LogFactory, LogFilter } from "@/sync/source.js"; +import { + createPair, + deployErc20, + deployFactory, + mintErc20, + swapPair, + 
transferEth, +} from "@/_test/simulate.js"; +import { + getAccountsConfigAndIndexingFunctions, + getBlocksConfigAndIndexingFunctions, + getErc20ConfigAndIndexingFunctions, + getNetwork, + getPairWithFactoryConfigAndIndexingFunctions, + testClient, +} from "@/_test/utils.js"; +import { buildConfigAndIndexingFunctions } from "@/build/configAndIndexingFunctions.js"; +import type { + BlockFilter, + Factory, + LogFactory, + LogFilter, +} from "@/sync/source.js"; +import type { SyncTrace, SyncTransaction } from "@/types/sync.js"; import { decodeCheckpoint, encodeCheckpoint, maxCheckpoint, zeroCheckpoint, } from "@/utils/checkpoint.js"; -import { range } from "@/utils/range.js"; -import { _eth_getLogs } from "@/utils/rpc.js"; -import { type Address, hexToNumber } from "viem"; +import { createRequestQueue } from "@/utils/requestQueue.js"; +import { + _eth_getBlockByNumber, + _eth_getLogs, + _eth_getTransactionReceipt, +} from "@/utils/rpc.js"; +import { + type Address, + encodeFunctionData, + encodeFunctionResult, + hexToNumber, + parseEther, + zeroAddress, + zeroHash, +} from "viem"; import { beforeEach, expect, test } from "vitest"; beforeEach(setupCommon); @@ -30,21 +65,31 @@ test("setup creates tables", async (context) => { expect(tableNames).toContain("blocks"); expect(tableNames).toContain("logs"); expect(tableNames).toContain("transactions"); - expect(tableNames).toContain("callTraces"); + expect(tableNames).toContain("traces"); expect(tableNames).toContain("transactionReceipts"); - expect(tableNames).toContain("rpcRequestResults"); + expect(tableNames).toContain("rpc_request_results"); await cleanup(); }); test("getIntervals() empty", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const filter = { + type: "block", + chainId: 1, + interval: 1, + offset: 0, + fromBlock: undefined, + toBlock: undefined, + include: [], + } satisfies BlockFilter; + const intervals = await syncStore.getIntervals({ - filter: context.sources[0].filter, + filters: [filter], }); - expect(intervals).toHaveLength(0); + expect(Array.from(intervals.values())[0]).toHaveLength(0); await cleanup(); }); @@ -52,17 +97,31 @@ test("getIntervals() empty", async (context) => { test("getIntervals() returns intervals", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - await syncStore.insertInterval({ - filter: context.sources[0].filter, - interval: [0, 4], + const filter = { + type: "block", + chainId: 1, + interval: 1, + offset: 0, + fromBlock: undefined, + toBlock: undefined, + include: [], + } satisfies BlockFilter; + + await syncStore.insertIntervals({ + intervals: [ + { + filter, + interval: [0, 4], + }, + ], }); const intervals = await syncStore.getIntervals({ - filter: context.sources[0].filter, + filters: [filter], }); - expect(intervals).toHaveLength(1); - expect(intervals[0]).toStrictEqual([0, 4]); + expect(Array.from(intervals.values())[0]).toHaveLength(1); + expect(Array.from(intervals.values())[0]![0]).toStrictEqual([0, 4]); await cleanup(); }); @@ -70,238 +129,126 @@ test("getIntervals() returns intervals", async (context) => { test("getIntervals() merges intervals", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - await syncStore.insertInterval({ - filter: context.sources[0].filter, - interval: [0, 4], + const filter = { + type: "block", + chainId: 1, + interval: 1, + offset: 0, + fromBlock: undefined, + toBlock: undefined, + include: [], + } satisfies BlockFilter; + + await 
syncStore.insertIntervals({ + intervals: [ + { + filter, + interval: [0, 4], + }, + ], }); - await syncStore.insertInterval({ - filter: context.sources[0].filter, - interval: [5, 8], + await syncStore.insertIntervals({ + intervals: [ + { + filter, + interval: [5, 8], + }, + ], }); - const intervals = await syncStore.getIntervals({ - filter: context.sources[0].filter, - }); - - expect(intervals).toHaveLength(1); - expect(intervals[0]).toStrictEqual([0, 8]); - - await cleanup(); -}); - -test("getIntervals() handles log filter logic", async (context) => { - const { cleanup, syncStore } = await setupDatabaseServices(context); - - await syncStore.insertInterval({ - filter: context.sources[0].filter, - interval: [0, 4], - }); - - let intervals = await syncStore.getIntervals({ - filter: { - ...context.sources[0].filter, - includeTransactionReceipts: false, - }, - }); - - expect(intervals).toHaveLength(1); - expect(intervals[0]).toStrictEqual([0, 4]); - - intervals = await syncStore.getIntervals({ - filter: { ...context.sources[0].filter, address: context.factory.address }, + filters: [filter], }); - expect(intervals).toHaveLength(0); + expect(Array.from(intervals.values())[0]).toHaveLength(1); + expect(Array.from(intervals.values())[0]![0]).toStrictEqual([0, 8]); await cleanup(); }); -test("getIntervals() handles factory log filter logic", async (context) => { +test("getIntervals() adjacent intervals", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - await syncStore.insertInterval({ - filter: context.sources[1].filter, - interval: [0, 4], - }); - - let intervals = await syncStore.getIntervals({ - filter: { - ...context.sources[1].filter, - includeTransactionReceipts: false, - }, - }); - - expect(intervals).toHaveLength(1); - expect(intervals[0]).toStrictEqual([0, 4]); + const filter = { + type: "log", + chainId: 1, + topic0: null, + topic1: null, + topic2: null, + topic3: null, + address: [zeroAddress], + fromBlock: undefined, + toBlock: undefined, + include: [], + } satisfies LogFilter; - intervals = await syncStore.getIntervals({ - filter: { - ...context.sources[1].filter, - address: { - ...context.sources[1].filter.address, - childAddressLocation: "topic2", + await syncStore.insertIntervals({ + intervals: [ + { + filter, + interval: [0, 4], }, - }, - }); - - expect(intervals).toHaveLength(0); - - await cleanup(); -}); - -test("getIntervals() handles trace filter logic", async (context) => { - const { cleanup, syncStore } = await setupDatabaseServices(context); - - await syncStore.insertInterval({ - filter: context.sources[3].filter, - interval: [0, 4], - }); - - let intervals = await syncStore.getIntervals({ - filter: context.sources[3].filter, - }); - - expect(intervals).toHaveLength(1); - expect(intervals[0]).toStrictEqual([0, 4]); - - intervals = await syncStore.getIntervals({ - filter: { - ...context.sources[3].filter, - toAddress: [context.erc20.address], - }, - }); - - expect(intervals).toHaveLength(0); - - await cleanup(); -}); - -test("getIntervals() handles factory trace filter logic", async (context) => { - const { cleanup, syncStore } = await setupDatabaseServices(context); - - await syncStore.insertInterval({ - filter: context.sources[2].filter, - interval: [0, 4], - }); - - let intervals = await syncStore.getIntervals({ - filter: context.sources[2].filter, + ], }); - expect(intervals).toHaveLength(1); - expect(intervals[0]).toStrictEqual([0, 4]); - - intervals = await syncStore.getIntervals({ - filter: { - ...context.sources[2].filter, - 
toAddress: { - ...context.sources[2].filter.toAddress, - childAddressLocation: "topic2", + await syncStore.insertIntervals({ + intervals: [ + { + filter: { ...filter, address: undefined }, + interval: [5, 8], }, - }, + ], + }); + const intervals = await syncStore.getIntervals({ + filters: [filter], }); - expect(intervals).toHaveLength(0); + expect(Array.from(intervals.values())[0]).toHaveLength(1); + expect(Array.from(intervals.values())[0]![0]).toStrictEqual([0, 8]); await cleanup(); }); -test("getIntervals() handles block filter logic", async (context) => { +test("getChildAddresses()", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - await syncStore.getIntervals({ - filter: context.sources[4].filter, - }); - - await syncStore.insertInterval({ - filter: context.sources[4].filter, - interval: [0, 4], - }); - - let intervals = await syncStore.getIntervals({ - filter: context.sources[4].filter, + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, }); - expect(intervals).toHaveLength(1); - expect(intervals[0]).toStrictEqual([0, 4]); - - intervals = await syncStore.getIntervals({ - filter: { ...context.sources[4].filter, interval: 69 }, + const { address } = await deployFactory({ sender: ALICE }); + const { result } = await createPair({ factory: address, sender: ALICE }); + const rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, }); - expect(intervals).toHaveLength(0); - - await cleanup(); -}); - -test("getIntervals() handles size over max", async (context) => { - const { syncStore, cleanup } = await setupDatabaseServices(context); - - context.common.options = { - ...context.common.options, - syncStoreMaxIntervals: 20, - }; - - for (const i of range(0, 25)) { - await syncStore.insertInterval({ - filter: context.sources[0].filter, - interval: [i, i], + const { config, rawIndexingFunctions } = + getPairWithFactoryConfigAndIndexingFunctions({ + address, }); - } - - const intervals = await syncStore.getIntervals({ - filter: context.sources[0].filter, + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, }); - expect(intervals).toMatchObject([[0, 24]]); - - await cleanup(); -}); - -test("getIntervals() throws non-retryable error after no merges", async (context) => { - const { syncStore, cleanup } = await setupDatabaseServices(context); - - context.common.options = { - ...context.common.options, - syncStoreMaxIntervals: 20, - }; - - for (let i = 0; i < 50; i += 2) { - await syncStore.insertInterval({ - filter: context.sources[0].filter, - interval: [i, i], - }); - } - - const error = await syncStore - .getIntervals({ - filter: context.sources[0].filter, - }) - .catch((err) => err); - - expect(error).toBeInstanceOf(NonRetryableError); - - await cleanup(); -}); - -test("getChildAddresses()", async (context) => { - const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0] }], + logs: [{ log: rpcLogs[0]! 
}], shouldUpdateCheckpoint: false, chainId: 1, }); + const filter = sources[0]!.filter as LogFilter; + const addresses = await syncStore.getChildAddresses({ - filter: context.sources[1].filter.address as Factory, + filter: filter.address, limit: 10, }); expect(addresses).toHaveLength(1); - expect(addresses[0]).toBe(context.factory.pair); + expect(addresses[0]).toBe(result); await cleanup(); }); @@ -309,8 +256,21 @@ test("getChildAddresses()", async (context) => { test("getChildAddresses() empty", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const { address } = await deployFactory({ sender: ALICE }); + + const { config, rawIndexingFunctions } = + getPairWithFactoryConfigAndIndexingFunctions({ + address, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const filter = sources[0]!.filter as LogFilter; + const addresses = await syncStore.getChildAddresses({ - filter: context.sources[1].filter.address as Factory, + filter: filter.address, limit: 10, }); @@ -321,21 +281,40 @@ test("getChildAddresses() empty", async (context) => { test("filterChildAddresses()", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployFactory({ sender: ALICE }); + const { result } = await createPair({ factory: address, sender: ALICE }); + const rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); + + const { config, rawIndexingFunctions } = + getPairWithFactoryConfigAndIndexingFunctions({ + address, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0] }], + logs: [{ log: rpcLogs[0]! }], shouldUpdateCheckpoint: false, chainId: 1, }); + const filter = sources[0]!.filter as LogFilter; + const addresses = await syncStore.filterChildAddresses({ - filter: context.sources[1].filter.address as Factory, - addresses: [ - context.erc20.address, - context.factory.address, - context.factory.pair, - ], + filter: filter.address, + addresses: [address, result, zeroAddress], }); expect(addresses.size).toBe(1); @@ -345,11 +324,28 @@ test("filterChildAddresses()", async (context) => { test("insertLogs()", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + const rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0], block: rpcData.block3.block }], - shouldUpdateCheckpoint: true, + logs: [{ log: rpcLogs[0]! 
}], + shouldUpdateCheckpoint: false, chainId: 1, }); @@ -361,16 +357,34 @@ test("insertLogs()", async (context) => { test("insertLogs() with duplicates", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + const rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0], block: rpcData.block3.block }], - shouldUpdateCheckpoint: true, + logs: [{ log: rpcLogs[0]! }], + shouldUpdateCheckpoint: false, chainId: 1, }); + await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0], block: rpcData.block3.block }], - shouldUpdateCheckpoint: true, + logs: [{ log: rpcLogs[0]! }], + shouldUpdateCheckpoint: false, chainId: 1, }); @@ -382,10 +396,30 @@ test("insertLogs() with duplicates", async (context) => { test("insertLogs() creates checkpoint", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + const rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0], block: rpcData.block3.block }], + logs: [{ log: rpcLogs[0]!, block: rpcBlock }], shouldUpdateCheckpoint: true, chainId: 1, }); @@ -393,11 +427,9 @@ test("insertLogs() creates checkpoint", async (context) => { const logs = await database.qb.sync.selectFrom("logs").selectAll().execute(); const checkpoint = decodeCheckpoint(logs[0]!.checkpoint!); - expect(checkpoint.blockTimestamp).toBe( - hexToNumber(rpcData.block3.block.timestamp), - ); + expect(checkpoint.blockTimestamp).toBe(hexToNumber(rpcBlock.timestamp)); expect(checkpoint.chainId).toBe(1n); - expect(checkpoint.blockNumber).toBe(3n); + expect(checkpoint.blockNumber).toBe(2n); expect(checkpoint.transactionIndex).toBe(0n); expect(checkpoint.eventType).toBe(5); expect(checkpoint.eventIndex).toBe(0n); @@ -407,10 +439,30 @@ test("insertLogs() creates checkpoint", async (context) => { test("insertLogs() upserts checkpoint", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + const rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0] }], + logs: [{ log: rpcLogs[0]! 
}], shouldUpdateCheckpoint: false, chainId: 1, }); @@ -419,7 +471,7 @@ test("insertLogs() upserts checkpoint", async (context) => { expect(logs[0]!.checkpoint).toBe(null); await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0], block: rpcData.block3.block }], + logs: [{ log: rpcLogs[0]!, block: rpcBlock }], shouldUpdateCheckpoint: true, chainId: 1, }); @@ -428,7 +480,7 @@ test("insertLogs() upserts checkpoint", async (context) => { expect(logs[0]!.checkpoint).not.toBe(null); await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0] }], + logs: [{ log: rpcLogs[0]! }], shouldUpdateCheckpoint: false, chainId: 1, }); @@ -441,9 +493,19 @@ test("insertLogs() upserts checkpoint", async (context) => { test("insertBlocks()", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertBlocks({ blocks: [rpcData.block3.block], chainId: 1 }); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + await testClient.mine({ blocks: 1 }); + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); const blocks = await database.qb.sync .selectFrom("blocks") @@ -456,10 +518,20 @@ test("insertBlocks()", async (context) => { test("insertBlocks() with duplicates", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertBlocks({ blocks: [rpcData.block3.block], chainId: 1 }); - await syncStore.insertBlocks({ blocks: [rpcData.block3.block], chainId: 1 }); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + await testClient.mine({ blocks: 1 }); + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); const blocks = await database.qb.sync .selectFrom("blocks") @@ -472,24 +544,29 @@ test("insertBlocks() with duplicates", async (context) => { test("insertBlocks() creates checkpoint", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertBlocks({ - blocks: [rpcData.block3.block], - chainId: 1, + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + await testClient.mine({ blocks: 1 }); + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + const blocks = await database.qb.sync .selectFrom("blocks") .selectAll() .execute(); const checkpoint = decodeCheckpoint(blocks[0]!.checkpoint!); - expect(checkpoint.blockTimestamp).toBe( - hexToNumber(rpcData.block3.block.timestamp), - ); + expect(checkpoint.blockTimestamp).toBe(hexToNumber(rpcBlock.timestamp)); expect(checkpoint.chainId).toBe(1n); - expect(checkpoint.blockNumber).toBe(3n); + expect(checkpoint.blockNumber).toBe(1n); expect(checkpoint.transactionIndex).toBe(maxCheckpoint.transactionIndex); expect(checkpoint.eventType).toBe(5); expect(checkpoint.eventIndex).toBe(0n); @@ -499,16 +576,27 @@ test("insertBlocks() creates checkpoint", async (context) => { 
test("hasBlock()", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertBlocks({ blocks: [rpcData.block3.block], chainId: 1 }); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + await testClient.mine({ blocks: 1 }); + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + let block = await syncStore.hasBlock({ - hash: rpcData.block3.block.hash, + hash: rpcBlock.hash, }); expect(block).toBe(true); block = await syncStore.hasBlock({ - hash: rpcData.block2.block.hash, + hash: zeroHash, }); expect(block).toBe(false); @@ -517,10 +605,26 @@ test("hasBlock()", async (context) => { test("insertTransactions()", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); await syncStore.insertTransactions({ - transactions: rpcData.block3.transactions, + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); @@ -535,14 +639,30 @@ test("insertTransactions()", async (context) => { test("insertTransactions() with duplicates", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); await syncStore.insertTransactions({ - transactions: rpcData.block3.transactions, + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); await syncStore.insertTransactions({ - transactions: rpcData.block3.transactions, + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); @@ -557,19 +677,36 @@ test("insertTransactions() with duplicates", async (context) => { test("hasTransaction()", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + const { hash } = await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); await syncStore.insertTransactions({ - transactions: rpcData.block3.transactions, + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); + let transaction = await syncStore.hasTransaction({ - hash: rpcData.block3.transactions[0].hash, + hash, }); expect(transaction).toBe(true); transaction = await 
syncStore.hasTransaction({ - hash: rpcData.block2.transactions[0].hash, + hash: zeroHash, }); expect(transaction).toBe(false); @@ -578,10 +715,27 @@ test("hasTransaction()", async (context) => { test("insertTransactionReceipts()", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + const { hash } = await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const rpcTransactionReceipt = await _eth_getTransactionReceipt(requestQueue, { + hash, + }); await syncStore.insertTransactionReceipts({ - transactionReceipts: rpcData.block3.transactionReceipts, + transactionReceipts: [rpcTransactionReceipt], chainId: 1, }); @@ -596,14 +750,31 @@ test("insertTransactionReceipts()", async (context) => { test("insertTransactionReceipts() with duplicates", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + const { hash } = await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const rpcTransactionReceipt = await _eth_getTransactionReceipt(requestQueue, { + hash, + }); await syncStore.insertTransactionReceipts({ - transactionReceipts: rpcData.block3.transactionReceipts, + transactionReceipts: [rpcTransactionReceipt], chainId: 1, }); await syncStore.insertTransactionReceipts({ - transactionReceipts: rpcData.block3.transactionReceipts, + transactionReceipts: [rpcTransactionReceipt], chainId: 1, }); @@ -618,38 +789,101 @@ test("insertTransactionReceipts() with duplicates", async (context) => { test("hasTransactionReceipt()", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + const { hash } = await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const rpcTransactionReceipt = await _eth_getTransactionReceipt(requestQueue, { + hash, + }); await syncStore.insertTransactionReceipts({ - transactionReceipts: rpcData.block3.transactionReceipts, + transactionReceipts: [rpcTransactionReceipt], chainId: 1, }); + let transaction = await syncStore.hasTransactionReceipt({ - hash: rpcData.block3.transactionReceipts[0].transactionHash, + hash: rpcTransactionReceipt.transactionHash, }); expect(transaction).toBe(true); transaction = await syncStore.hasTransactionReceipt({ - hash: rpcData.block2.transactionReceipts[0].transactionHash, + hash: zeroHash, }); expect(transaction).toBe(false); await cleanup(); }); -test("insertCallTraces()", async (context) => { +test("insertTraces()", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertCallTraces({ - callTraces: [ - { callTrace: rpcData.block3.callTraces[0], block: rpcData.block3.block }, + const network = getNetwork(); + const 
requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + const { hash } = await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const trace = { + trace: { + type: "CALL", + from: ALICE, + to: address, + gas: "0x0", + gasUsed: "0x0", + input: encodeFunctionData({ + abi: erc20ABI, + functionName: "transfer", + args: [BOB, parseEther("1")], + }), + output: encodeFunctionResult({ + abi: erc20ABI, + functionName: "transfer", + result: true, + }), + value: "0x0", + index: 0, + subcalls: 0, + }, + transactionHash: hash, + } satisfies SyncTrace; + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + + await syncStore.insertTraces({ + traces: [ + { + trace, + block: rpcBlock, + transaction: rpcBlock.transactions[0] as SyncTransaction, + }, ], chainId: 1, }); const traces = await database.qb.sync - .selectFrom("callTraces") + .selectFrom("traces") .selectAll() .execute(); expect(traces).toHaveLength(1); @@ -657,28 +891,71 @@ test("insertCallTraces()", async (context) => { await cleanup(); }); -test("insertCallTraces() creates checkpoint", async (context) => { +test("insertTraces() creates checkpoint", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertCallTraces({ - callTraces: [ - { callTrace: rpcData.block3.callTraces[0], block: rpcData.block3.block }, + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + const { hash } = await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const trace = { + trace: { + type: "CALL", + from: ALICE, + to: address, + gas: "0x0", + gasUsed: "0x0", + input: encodeFunctionData({ + abi: erc20ABI, + functionName: "transfer", + args: [BOB, parseEther("1")], + }), + output: encodeFunctionResult({ + abi: erc20ABI, + functionName: "transfer", + result: true, + }), + value: "0x0", + index: 0, + subcalls: 0, + }, + transactionHash: hash, + } satisfies SyncTrace; + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + + await syncStore.insertTraces({ + traces: [ + { + trace, + block: rpcBlock, + transaction: rpcBlock.transactions[0] as SyncTransaction, + }, ], chainId: 1, }); const traces = await database.qb.sync - .selectFrom("callTraces") + .selectFrom("traces") .selectAll() .execute(); const checkpoint = decodeCheckpoint(traces[0]!.checkpoint!); - expect(checkpoint.blockTimestamp).toBe( - hexToNumber(rpcData.block3.block.timestamp), - ); + expect(checkpoint.blockTimestamp).toBe(hexToNumber(rpcBlock.timestamp)); expect(checkpoint.chainId).toBe(1n); - expect(checkpoint.blockNumber).toBe(3n); + expect(checkpoint.blockNumber).toBe(1n); expect(checkpoint.transactionIndex).toBe(0n); expect(checkpoint.eventType).toBe(7); expect(checkpoint.eventIndex).toBe(0n); @@ -686,25 +963,74 @@ test("insertCallTraces() creates checkpoint", async (context) => { await cleanup(); }); -test("insertCallTraces() with duplicates", async (context) => { +test("insertTraces() with duplicates", async (context) => { const { cleanup, database, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertCallTraces({ - callTraces: [ - { callTrace: 
rpcData.block3.callTraces[0], block: rpcData.block3.block }, + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + const { hash } = await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const trace = { + trace: { + type: "CALL", + from: ALICE, + to: address, + gas: "0x0", + gasUsed: "0x0", + input: encodeFunctionData({ + abi: erc20ABI, + functionName: "transfer", + args: [BOB, parseEther("1")], + }), + output: encodeFunctionResult({ + abi: erc20ABI, + functionName: "transfer", + result: true, + }), + value: "0x0", + index: 0, + subcalls: 0, + }, + transactionHash: hash, + } satisfies SyncTrace; + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + + await syncStore.insertTraces({ + traces: [ + { + trace, + block: rpcBlock, + transaction: rpcBlock.transactions[0] as SyncTransaction, + }, ], chainId: 1, }); - await syncStore.insertCallTraces({ - callTraces: [ - { callTrace: rpcData.block3.callTraces[0], block: rpcData.block3.block }, + await syncStore.insertTraces({ + traces: [ + { + trace, + block: rpcBlock, + transaction: rpcBlock.transactions[0] as SyncTransaction, + }, ], chainId: 1, }); const traces = await database.qb.sync - .selectFrom("callTraces") + .selectFrom("traces") .selectAll() .execute(); expect(traces).toHaveLength(1); @@ -714,27 +1040,51 @@ test("insertCallTraces() with duplicates", async (context) => { test("getEvents() returns events", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + const rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0], block: rpcData.block3.block }], + logs: [{ log: rpcLogs[0]!, block: rpcBlock }], shouldUpdateCheckpoint: true, chainId: 1, }); - await syncStore.insertBlocks({ blocks: [rpcData.block3.block], chainId: 1 }); await syncStore.insertTransactions({ - transactions: rpcData.block3.transactions, + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); const filter = { type: "log", chainId: 1, address: undefined, - topics: [null], - fromBlock: 0, - toBlock: 5, - includeTransactionReceipts: false, + topic0: null, + topic1: null, + topic2: null, + topic3: null, + fromBlock: undefined, + toBlock: undefined, + include: [], } satisfies LogFilter; const { events } = await syncStore.getEvents({ @@ -751,67 +1101,161 @@ test("getEvents() returns events", async (context) => { test("getEvents() handles log filter logic", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertLogs({ - logs: [ - { log: rpcData.block2.logs[0], block: rpcData.block2.block }, - { log: rpcData.block2.logs[1], block: rpcData.block2.block }, - ], - shouldUpdateCheckpoint: true, - chainId: 1, + const network = getNetwork(); 
+ const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + // noisy data + const { address: factory } = await deployFactory({ sender: ALICE }); + await createPair({ factory, sender: ALICE }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + let rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, }); - await syncStore.insertBlocks({ blocks: [rpcData.block2.block], chainId: 1 }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + await syncStore.insertTransactions({ - transactions: rpcData.block2.transactions, + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); + let rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0], block: rpcData.block3.block }], + logs: [{ log: rpcLogs[0]!, block: rpcBlock }], shouldUpdateCheckpoint: true, chainId: 1, }); - await syncStore.insertBlocks({ blocks: [rpcData.block3.block], chainId: 1 }); + + // noisy data + + rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 4, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + await syncStore.insertTransactions({ - transactions: rpcData.block3.transactions, + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], + chainId: 1, + }); + + rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 4, + toBlock: 4, + }); + syncStore.insertLogs({ + logs: [{ log: rpcLogs[0]!, block: rpcBlock }], + shouldUpdateCheckpoint: true, chainId: 1, }); const { events } = await syncStore.getEvents({ - filters: [context.sources[0].filter], + filters: [sources[0]!.filter], from: encodeCheckpoint(zeroCheckpoint), to: encodeCheckpoint(maxCheckpoint), limit: 10, }); - expect(events).toHaveLength(2); + expect(events).toHaveLength(1); await cleanup(); }); test("getEvents() handles log factory", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0], block: rpcData.block3.block }], - shouldUpdateCheckpoint: true, - chainId: 1, + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, }); - await syncStore.insertLogs({ - logs: [{ log: rpcData.block4.logs[0], block: rpcData.block4.block }], - shouldUpdateCheckpoint: true, - chainId: 1, + + const { address: factory } = await deployFactory({ sender: ALICE }); + const { result: pair } = await createPair({ factory, sender: ALICE }); + await swapPair({ + pair, + sender: ALICE, + amount0Out: 1n, + amount1Out: 1n, + to: ALICE, + }); + + const { config, rawIndexingFunctions } = + getPairWithFactoryConfigAndIndexingFunctions({ + address: factory, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + // factory + + let rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, }); - await syncStore.insertBlocks({ blocks: [rpcData.block4.block], chainId: 1 }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + await 
syncStore.insertTransactions({ - transactions: rpcData.block4.transactions, + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], + chainId: 1, + }); + + let rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); + await syncStore.insertLogs({ + logs: [{ log: rpcLogs[0]! }], + shouldUpdateCheckpoint: false, + chainId: 1, + }); + + // pair + + rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 3, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + + await syncStore.insertTransactions({ + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], + chainId: 1, + }); + + rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 3, + toBlock: 3, + }); + await syncStore.insertLogs({ + logs: [{ log: rpcLogs[0]!, block: rpcBlock }], + shouldUpdateCheckpoint: true, chainId: 1, }); const { events } = await syncStore.getEvents({ - filters: [context.sources[1].filter], + filters: [sources[0]!.filter], from: encodeCheckpoint(zeroCheckpoint), to: encodeCheckpoint(maxCheckpoint), limit: 10, @@ -824,37 +1268,127 @@ test("getEvents() handles log factory", async (context) => { test("getEvents() handles multiple log factories", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0], block: rpcData.block3.block }], - shouldUpdateCheckpoint: true, + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address: factory } = await deployFactory({ sender: ALICE }); + const { result: pair } = await createPair({ factory, sender: ALICE }); + await swapPair({ + pair, + sender: ALICE, + amount0Out: 1n, + amount1Out: 1n, + to: ALICE, + }); + + const { config, rawIndexingFunctions } = + getPairWithFactoryConfigAndIndexingFunctions({ + address: factory, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + // factory + + let rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + + await syncStore.insertTransactions({ + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); + + let rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); await syncStore.insertLogs({ - logs: [{ log: rpcData.block4.logs[0], block: rpcData.block4.block }], - shouldUpdateCheckpoint: true, + logs: [{ log: rpcLogs[0]! 
}], + shouldUpdateCheckpoint: false, chainId: 1, }); - await syncStore.insertBlocks({ blocks: [rpcData.block4.block], chainId: 1 }); + + // pair + + rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 3, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + await syncStore.insertTransactions({ - transactions: rpcData.block4.transactions, + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); - context.sources[1].filter = { - ...context.sources[1].filter, - address: { - ...context.sources[1].filter.address, - address: [ - context.sources[1].filter.address.address as Address, - context.sources[1].filter.address.address as Address, - ], - }, - } satisfies LogFilter; + rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 3, + toBlock: 3, + }); + await syncStore.insertLogs({ + logs: [{ log: rpcLogs[0]!, block: rpcBlock }], + shouldUpdateCheckpoint: true, + chainId: 1, + }); + + const filter = sources[0]!.filter as LogFilter; + + filter.address.address = [ + filter.address.address as Address, + filter.address.address as Address, + zeroAddress, + ]; const { events } = await syncStore.getEvents({ - filters: [context.sources[1].filter], + filters: [filter], + from: encodeCheckpoint(zeroCheckpoint), + to: encodeCheckpoint(maxCheckpoint), + limit: 10, + }); + + expect(events).toHaveLength(1); + + await cleanup(); +}); + +test("getEvents() handles block filter logic", async (context) => { + const { cleanup, syncStore } = await setupDatabaseServices(context); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + await testClient.mine({ blocks: 2 }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 2, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + let rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + + rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + + const { events } = await syncStore.getEvents({ + filters: [sources[0]!.filter], from: encodeCheckpoint(zeroCheckpoint), to: encodeCheckpoint(maxCheckpoint), limit: 10, @@ -867,26 +1401,131 @@ test("getEvents() handles multiple log factories", async (context) => { test("getEvents() handles trace filter logic", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertCallTraces({ - callTraces: [ - { callTrace: rpcData.block3.callTraces[0], block: rpcData.block3.block }, + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + const { hash } = await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + includeCallTraces: true, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + + await syncStore.insertTransactions({ + transactions: [{ 
transaction: rpcBlock.transactions[0]!, block: rpcBlock }], + chainId: 1, + }); + + const rpcTrace = { + trace: { + type: "CALL", + from: ALICE, + to: address, + gas: "0x0", + gasUsed: "0x0", + input: encodeFunctionData({ + abi: erc20ABI, + functionName: "transfer", + args: [BOB, parseEther("1")], + }), + output: encodeFunctionResult({ + abi: erc20ABI, + functionName: "transfer", + result: true, + }), + value: "0x0", + index: 0, + subcalls: 0, + }, + transactionHash: hash, + } satisfies SyncTrace; + + await syncStore.insertTraces({ + traces: [ + { + trace: rpcTrace, + block: rpcBlock, + transaction: rpcBlock.transactions[0] as SyncTransaction, + }, ], chainId: 1, }); - await syncStore.insertBlocks({ blocks: [rpcData.block3.block], chainId: 1 }); + + const { events } = await syncStore.getEvents({ + filters: sources.map((source) => source.filter), + from: encodeCheckpoint(zeroCheckpoint), + to: encodeCheckpoint(maxCheckpoint), + limit: 10, + }); + + expect(events).toHaveLength(1); + + await cleanup(); +}); + +test("getEvents() handles transaction filter logic", async (context) => { + const { cleanup, syncStore } = await setupDatabaseServices(context); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { hash } = await transferEth({ + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getAccountsConfigAndIndexingFunctions({ + address: ALICE, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + await syncStore.insertTransactions({ - transactions: rpcData.block3.transactions, + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); + + const rpcReceipt = await _eth_getTransactionReceipt(requestQueue, { hash }); + await syncStore.insertTransactionReceipts({ - transactionReceipts: rpcData.block3.transactionReceipts, + transactionReceipts: [rpcReceipt], chainId: 1, }); const { events } = await syncStore.getEvents({ - filters: [context.sources[3].filter], + filters: sources.map((source) => source.filter), from: encodeCheckpoint(zeroCheckpoint), to: encodeCheckpoint(maxCheckpoint), limit: 10, @@ -897,22 +1536,83 @@ test("getEvents() handles trace filter logic", async (context) => { await cleanup(); }); -test("getEvents() handles block filter logic", async (context) => { +test("getEvents() handles transfer filter logic", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertBlocks({ blocks: [rpcData.block2.block], chainId: 1 }); - await syncStore.insertBlocks({ blocks: [rpcData.block3.block], chainId: 1 }); - await syncStore.insertBlocks({ blocks: [rpcData.block4.block], chainId: 1 }); - await syncStore.insertBlocks({ blocks: [rpcData.block5.block], chainId: 1 }); + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { hash } = await transferEth({ + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getAccountsConfigAndIndexingFunctions({ + address: ALICE, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const rpcBlock = await 
_eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + + await syncStore.insertTransactions({ + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], + chainId: 1, + }); + + const rpcReceipt = await _eth_getTransactionReceipt(requestQueue, { hash }); + + await syncStore.insertTransactionReceipts({ + transactionReceipts: [rpcReceipt], + chainId: 1, + }); + + const rpcTrace = { + trace: { + type: "CALL", + from: ALICE, + to: BOB, + gas: "0x0", + gasUsed: "0x0", + input: "0x0", + output: "0x0", + value: rpcBlock.transactions[0]!.value, + index: 0, + subcalls: 0, + }, + transactionHash: hash, + } satisfies SyncTrace; + + await syncStore.insertTraces({ + traces: [ + { + trace: rpcTrace, + block: rpcBlock, + transaction: rpcBlock.transactions[0] as SyncTransaction, + }, + ], + chainId: 1, + }); const { events } = await syncStore.getEvents({ - filters: [context.sources[4].filter], + filters: sources.map((source) => source.filter), from: encodeCheckpoint(zeroCheckpoint), to: encodeCheckpoint(maxCheckpoint), limit: 10, }); + // transaction:from and transfer:from expect(events).toHaveLength(2); await cleanup(); @@ -920,42 +1620,54 @@ test("getEvents() handles block filter logic", async (context) => { test("getEvents() handles block bounds", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertLogs({ - logs: [ - { log: rpcData.block2.logs[0], block: rpcData.block2.block }, - { log: rpcData.block2.logs[1], block: rpcData.block2.block }, - ], - shouldUpdateCheckpoint: true, - chainId: 1, + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, }); - await syncStore.insertBlocks({ blocks: [rpcData.block2.block], chainId: 1 }); - await syncStore.insertTransactions({ - transactions: rpcData.block2.transactions, - chainId: 1, + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + await syncStore.insertTransactions({ - transactions: rpcData.block2.transactions, + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); + const rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0], block: rpcData.block3.block }], + logs: [{ log: rpcLogs[0]!, block: rpcBlock }], shouldUpdateCheckpoint: true, chainId: 1, }); - await syncStore.insertBlocks({ blocks: [rpcData.block3.block], chainId: 1 }); - await syncStore.insertTransactions({ - transactions: rpcData.block3.transactions, - chainId: 1, - }); - const filter = context.sources[0].filter; + const filter = sources[0]!.filter as LogFilter; filter.toBlock = 1; const { events } = await syncStore.getEvents({ - filters: [filter], + filters: [sources[0]!.filter], from: encodeCheckpoint(zeroCheckpoint), to: encodeCheckpoint(maxCheckpoint), limit: 10, @@ -968,28 +1680,35 @@ test("getEvents() handles block bounds", async (context) => { 
test("getEvents() pagination", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - await syncStore.insertLogs({ - logs: [ - { log: rpcData.block2.logs[0], block: rpcData.block2.block }, - { log: rpcData.block2.logs[1], block: rpcData.block2.block }, - ], - shouldUpdateCheckpoint: true, - chainId: 1, + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, }); - await syncStore.insertBlocks({ blocks: [rpcData.block2.block], chainId: 1 }); - await syncStore.insertTransactions({ - transactions: rpcData.block2.transactions, - chainId: 1, + + await testClient.mine({ blocks: 2 }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, }); - await syncStore.insertTransactions({ - transactions: rpcData.block2.transactions, - chainId: 1, + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, }); + let rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + + rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + const { events, cursor } = await syncStore.getEvents({ - filters: [context.sources[0].filter], + filters: [sources[0]!.filter], from: encodeCheckpoint(zeroCheckpoint), to: encodeCheckpoint(maxCheckpoint), limit: 1, @@ -998,7 +1717,7 @@ test("getEvents() pagination", async (context) => { expect(events).toHaveLength(1); const { events: events2 } = await syncStore.getEvents({ - filters: [context.sources[0].filter], + filters: [sources[0]!.filter], from: cursor, to: encodeCheckpoint(maxCheckpoint), limit: 1, @@ -1043,7 +1762,7 @@ test("pruneRpcRequestResult", async (context) => { }); const requestResults = await database.qb.sync - .selectFrom("rpcRequestResults") + .selectFrom("rpc_request_results") .selectAll() .execute(); @@ -1052,288 +1771,135 @@ test("pruneRpcRequestResult", async (context) => { await cleanup(); }); -test("pruneByChain deletes filters", async (context) => { - const { sources } = context; +test("pruneByChain deletes blocks, logs, traces, transactions", async (context) => { const { syncStore, database, cleanup } = await setupDatabaseServices(context); - await syncStore.getIntervals({ filter: sources[0].filter }); - await syncStore.getIntervals({ filter: sources[1].filter }); - await syncStore.getIntervals({ filter: sources[2].filter }); - await syncStore.getIntervals({ filter: sources[3].filter }); - - await syncStore.insertInterval({ - filter: sources[0].filter, - interval: [1, 4], - }); - await syncStore.insertInterval({ - filter: sources[1].filter, - interval: [1, 4], + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, }); - await syncStore.insertInterval({ - filter: sources[2].filter, - interval: [1, 4], - }); - await syncStore.insertInterval({ - filter: sources[3].filter, - interval: [1, 4], - }); - - sources[0].filter.chainId = 2; - sources[1].filter.chainId = 2; - sources[2].filter.chainId = 2; - sources[3].filter.chainId = 2; - await syncStore.getIntervals({ filter: sources[0].filter }); - await syncStore.getIntervals({ filter: sources[1].filter }); - await syncStore.getIntervals({ filter: sources[2].filter }); - await syncStore.getIntervals({ filter: sources[3].filter }); - - await 
syncStore.insertInterval({ - filter: sources[0].filter, - interval: [1, 4], - }); - await syncStore.insertInterval({ - filter: sources[1].filter, - interval: [1, 4], + const { address } = await deployErc20({ sender: ALICE }); + const { hash: hash1 } = await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, }); - await syncStore.insertInterval({ - filter: sources[2].filter, - interval: [1, 4], + const { hash: hash2 } = await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, }); - await syncStore.insertInterval({ - filter: sources[3].filter, - interval: [1, 4], - }); - - await syncStore.pruneByChain({ chainId: 1, fromBlock: 0 }); - - const logFilterIntervals = await database.qb.sync - .selectFrom("logFilterIntervals") - .selectAll() - .execute(); - expect(logFilterIntervals).toHaveLength(1); - - const factoryLogFilterIntervals = await database.qb.sync - .selectFrom("factoryLogFilterIntervals") - .selectAll() - .execute(); - expect(factoryLogFilterIntervals).toHaveLength(1); - - const traceFilterIntervals = await database.qb.sync - .selectFrom("traceFilterIntervals") - .selectAll() - .execute(); - expect(traceFilterIntervals).toHaveLength(1); - - const factoryTraceFilterIntervals = await database.qb.sync - .selectFrom("factoryTraceFilterIntervals") - .selectAll() - .execute(); - expect(factoryTraceFilterIntervals).toHaveLength(1); - await cleanup(); -}); - -test("pruneByChain updates filters", async (context) => { - const { sources } = context; - const { syncStore, database, cleanup } = await setupDatabaseServices(context); + // block 2 (first mint) - await syncStore.getIntervals({ filter: sources[0].filter }); - await syncStore.getIntervals({ filter: sources[1].filter }); - await syncStore.getIntervals({ filter: sources[2].filter }); - await syncStore.getIntervals({ filter: sources[3].filter }); - - await syncStore.insertInterval({ - filter: sources[0].filter, - interval: [0, 4], - }); - await syncStore.insertInterval({ - filter: sources[1].filter, - interval: [0, 4], - }); - await syncStore.insertInterval({ - filter: sources[2].filter, - interval: [0, 4], - }); - await syncStore.insertInterval({ - filter: sources[3].filter, - interval: [0, 4], + let rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); - sources[0].filter.chainId = 2; - sources[1].filter.chainId = 2; - sources[2].filter.chainId = 2; - sources[3].filter.chainId = 2; - - await syncStore.getIntervals({ filter: sources[0].filter }); - await syncStore.getIntervals({ filter: sources[1].filter }); - await syncStore.getIntervals({ filter: sources[2].filter }); - await syncStore.getIntervals({ filter: sources[3].filter }); - - await syncStore.insertInterval({ - filter: sources[0].filter, - interval: [0, 4], - }); - await syncStore.insertInterval({ - filter: sources[1].filter, - interval: [0, 4], + await syncStore.insertTransactions({ + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], + chainId: 1, }); - await syncStore.insertInterval({ - filter: sources[2].filter, - interval: [0, 4], + + let rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, }); - await syncStore.insertInterval({ - filter: sources[3].filter, - interval: [0, 4], + await syncStore.insertLogs({ + logs: [{ log: rpcLogs[0]!, block: rpcBlock }], + shouldUpdateCheckpoint: true, + chainId: 1, }); - await syncStore.pruneByChain({ chainId: 1, fromBlock: 1 }); - - 
const logFilterIntervals = await database.qb.sync - .selectFrom("logFilterIntervals") - .selectAll() - .orderBy("endBlock", "asc") - .execute(); - expect(logFilterIntervals).toHaveLength(2); - expect(Number(logFilterIntervals[0]!.endBlock)).toBe(1); - - const factoryLogFilterIntervals = await database.qb.sync - .selectFrom("factoryLogFilterIntervals") - .selectAll() - .orderBy("endBlock", "asc") - .execute(); - expect(factoryLogFilterIntervals).toHaveLength(2); - expect(Number(factoryLogFilterIntervals[0]!.endBlock)).toBe(1); - - const traceFilterIntervals = await database.qb.sync - .selectFrom("traceFilterIntervals") - .selectAll() - .orderBy("endBlock", "asc") - .execute(); - expect(traceFilterIntervals).toHaveLength(2); - expect(Number(traceFilterIntervals[0]!.endBlock)).toBe(1); - - const factoryTraceFilterIntervals = await database.qb.sync - .selectFrom("factoryTraceFilterIntervals") - .selectAll() - .orderBy("endBlock", "asc") - .execute(); - expect(factoryTraceFilterIntervals).toHaveLength(2); - expect(Number(factoryTraceFilterIntervals[0]!.endBlock)).toBe(1); - - await cleanup(); -}); - -test("pruneByChain deletes block filters", async (context) => { - const { sources } = context; - const { syncStore, database, cleanup } = await setupDatabaseServices(context); - - await syncStore.getIntervals({ filter: sources[4].filter }); - - await syncStore.insertInterval({ - filter: sources[4].filter, - interval: [2, 4], + let rpcTransactionReceipt = await _eth_getTransactionReceipt(requestQueue, { + hash: hash1, }); - sources[4].filter.chainId = 2; - - await syncStore.getIntervals({ filter: sources[4].filter }); - - await syncStore.insertInterval({ - filter: sources[4].filter, - interval: [2, 4], + await syncStore.insertTransactionReceipts({ + transactionReceipts: [rpcTransactionReceipt], + chainId: 1, }); - await syncStore.pruneByChain({ chainId: 1, fromBlock: 1 }); - - const blockFilterIntervals = await database.qb.sync - .selectFrom("blockFilterIntervals") - .selectAll() - .execute(); - expect(blockFilterIntervals).toHaveLength(1); - - await cleanup(); -}); - -test("pruneByChain updates block filters", async (context) => { - const { sources } = context; - const { syncStore, database, cleanup } = await setupDatabaseServices(context); - - await syncStore.getIntervals({ filter: sources[4].filter }); - - await syncStore.insertInterval({ - filter: sources[4].filter, - interval: [0, 4], + const rpcTrace = { + trace: { + type: "CALL", + from: ALICE, + to: address, + gas: "0x0", + gasUsed: "0x0", + input: encodeFunctionData({ + abi: erc20ABI, + functionName: "transfer", + args: [BOB, parseEther("1")], + }), + output: encodeFunctionResult({ + abi: erc20ABI, + functionName: "transfer", + result: true, + }), + value: "0x0", + index: 0, + subcalls: 0, + }, + transactionHash: hash1, + } satisfies SyncTrace; + + await syncStore.insertTraces({ + traces: [ + { + trace: rpcTrace, + block: rpcBlock, + transaction: rpcBlock.transactions[0] as SyncTransaction, + }, + ], + chainId: 1, }); - sources[4].filter.chainId = 2; - - await syncStore.getIntervals({ filter: sources[4].filter }); + // block 3 (second mint) - await syncStore.insertInterval({ - filter: sources[4].filter, - interval: [0, 4], + rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 3, }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); - await syncStore.pruneByChain({ chainId: 1, fromBlock: 1 }); - - const blockFilterIntervals = await database.qb.sync - .selectFrom("blockFilterIntervals") - .selectAll() - 
.orderBy("endBlock", "asc") - .execute(); - expect(blockFilterIntervals).toHaveLength(2); - expect(Number(blockFilterIntervals[0]!.endBlock)).toBe(1); - - await cleanup(); -}); - -test("pruneByChain deletes blocks, logs, traces, transactions", async (context) => { - const { syncStore, database, cleanup } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - - await syncStore.insertBlocks({ blocks: [rpcData.block2.block], chainId: 1 }); - await syncStore.insertLogs({ - logs: [ - { log: rpcData.block2.logs[0], block: rpcData.block2.block }, - { log: rpcData.block2.logs[1], block: rpcData.block2.block }, - ], - shouldUpdateCheckpoint: true, - chainId: 1, - }); await syncStore.insertTransactions({ - transactions: rpcData.block2.transactions, - chainId: 1, - }); - await syncStore.insertTransactionReceipts({ - transactionReceipts: rpcData.block2.transactionReceipts, - chainId: 1, - }); - await syncStore.insertCallTraces({ - callTraces: [ - { callTrace: rpcData.block2.callTraces[0], block: rpcData.block2.block }, - { callTrace: rpcData.block2.callTraces[1], block: rpcData.block2.block }, - ], + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); - await syncStore.insertBlocks({ blocks: [rpcData.block3.block], chainId: 1 }); + rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 3, + toBlock: 3, + }); await syncStore.insertLogs({ - logs: [{ log: rpcData.block3.logs[0], block: rpcData.block3.block }], + logs: [{ log: rpcLogs[0]!, block: rpcBlock }], shouldUpdateCheckpoint: true, chainId: 1, }); - await syncStore.insertTransactions({ - transactions: rpcData.block3.transactions, - chainId: 1, + + rpcTransactionReceipt = await _eth_getTransactionReceipt(requestQueue, { + hash: hash1, }); + await syncStore.insertTransactionReceipts({ - transactionReceipts: rpcData.block3.transactionReceipts, + transactionReceipts: [rpcTransactionReceipt], chainId: 1, }); - await syncStore.insertCallTraces({ - callTraces: [ - { callTrace: rpcData.block3.callTraces[0], block: rpcData.block3.block }, + + rpcTrace.transactionHash = hash2; + + await syncStore.insertTraces({ + traces: [ + { + trace: rpcTrace, + block: rpcBlock, + transaction: rpcBlock.transactions[0] as SyncTransaction, + }, ], chainId: 1, }); @@ -1345,8 +1911,8 @@ test("pruneByChain deletes blocks, logs, traces, transactions", async (context) .selectFrom("blocks") .selectAll() .execute(); - const callTraces = await database.qb.sync - .selectFrom("callTraces") + const traces = await database.qb.sync + .selectFrom("traces") .selectAll() .execute(); const transactions = await database.qb.sync @@ -1358,11 +1924,11 @@ test("pruneByChain deletes blocks, logs, traces, transactions", async (context) .selectAll() .execute(); - expect(logs).toHaveLength(2); + expect(logs).toHaveLength(1); expect(blocks).toHaveLength(1); - expect(callTraces).toHaveLength(2); - expect(transactions).toHaveLength(2); - expect(transactionReceipts).toHaveLength(2); + expect(traces).toHaveLength(1); + expect(transactions).toHaveLength(1); + expect(transactionReceipts).toHaveLength(1); await cleanup(); }); diff --git a/packages/core/src/sync-store/index.ts b/packages/core/src/sync-store/index.ts index f86be92bb..bd070a815 100644 --- a/packages/core/src/sync-store/index.ts +++ b/packages/core/src/sync-store/index.ts @@ -1,72 +1,59 @@ import type { Common } from "@/common/common.js"; -import { NonRetryableError } from "@/common/errors.js"; import type { HeadlessKysely } from "@/database/kysely.js"; import type { 
RawEvent } from "@/sync/events.js"; -import { - type BlockFilterFragment, - type LogFilterFragment, - type TraceFilterFragment, - buildBlockFilterFragment, - buildLogFilterFragments, - buildTraceFilterFragments, -} from "@/sync/fragments.js"; +import { getFragmentIds } from "@/sync/fragments.js"; import { type BlockFilter, - type CallTraceFilter, type Factory, type Filter, type LogFactory, type LogFilter, + type TraceFilter, + type TransactionFilter, + type TransferFilter, isAddressFactory, + shouldGetTransactionReceipt, } from "@/sync/source.js"; -import type { CallTrace, Log, TransactionReceipt } from "@/types/eth.js"; +import type { Log, Trace } from "@/types/eth.js"; import type { LightBlock, SyncBlock, - SyncCallTrace, SyncLog, + SyncTrace, SyncTransaction, SyncTransactionReceipt, } from "@/types/sync.js"; import type { NonNull } from "@/types/utils.js"; -import { EVENT_TYPES, encodeCheckpoint } from "@/utils/checkpoint.js"; -import { - type Interval, - intervalIntersectionMany, - intervalUnion, -} from "@/utils/interval.js"; -import { never } from "@/utils/never.js"; -import { - type Insertable, - type Kysely, - type SelectQueryBuilder, - sql as ksql, -} from "kysely"; +import { type Interval, intervalIntersectionMany } from "@/utils/interval.js"; +import { type Kysely, type SelectQueryBuilder, sql as ksql, sql } from "kysely"; +import type { InsertObject } from "kysely"; import { type Address, type Hash, type Hex, + type TransactionReceipt, checksumAddress, hexToBigInt, - hexToNumber, } from "viem"; import { type PonderSyncSchema, encodeBlock, - encodeCallTrace, encodeLog, + encodeTrace, encodeTransaction, encodeTransactionReceipt, } from "./encoding.js"; export type SyncStore = { - insertInterval(args: { - filter: Filter; - interval: Interval; + insertIntervals(args: { + intervals: { + filter: Filter; + interval: Interval; + }[]; }): Promise; getIntervals(args: { - filter: Filter; - }): Promise; + filters: Filter[]; + }): Promise>; getChildAddresses(args: { filter: Factory; limit?: number; @@ -84,7 +71,7 @@ export type SyncStore = { /** Return true if the block receipt is present in the database. */ hasBlock(args: { hash: Hash }): Promise; insertTransactions(args: { - transactions: SyncTransaction[]; + transactions: { transaction: SyncTransaction; block: SyncBlock }[]; chainId: number; }): Promise; /** Return true if the transaction is present in the database. */ @@ -95,8 +82,12 @@ export type SyncStore = { }): Promise; /** Return true if the transaction receipt is present in the database. */ hasTransactionReceipt(args: { hash: Hash }): Promise; - insertCallTraces(args: { - callTraces: { callTrace: SyncCallTrace; block: SyncBlock }[]; + insertTraces(args: { + traces: { + trace: SyncTrace; + block: SyncBlock; + transaction: SyncTransaction; + }[]; chainId: number; }): Promise; /** Returns an ordered list of events based on the `filters` and pagination arguments. 
*/ @@ -108,15 +99,14 @@ export type SyncStore = { }): Promise<{ events: RawEvent[]; cursor: string }>; insertRpcRequestResult(args: { request: string; - blockNumber: bigint; chainId: number; + blockNumber: bigint | undefined; result: string; }): Promise; getRpcRequestResult(args: { request: string; - blockNumber: bigint; chainId: number; - }): Promise; + }): Promise; pruneRpcRequestResult(args: { blocks: Pick[]; chainId: number; @@ -166,329 +156,94 @@ export const createSyncStore = ({ common: Common; db: HeadlessKysely; }): SyncStore => ({ - insertInterval: async ({ filter, interval }) => - db.wrap({ method: "insertInterval" }, async () => { - const startBlock = BigInt(interval[0]); - const endBlock = BigInt(interval[1]); + insertIntervals: async ({ intervals }) => { + if (intervals.length === 0) return; - switch (filter.type) { - case "log": { - for (const fragment of buildLogFilterFragments(filter)) { - if (isAddressFactory(filter.address)) { - await db - .insertInto("factoryLogFilterIntervals") - .values({ - factoryId: fragment.id, - startBlock, - endBlock, - }) - .execute(); - } else { - await db - .insertInto("logFilterIntervals") - .values({ - logFilterId: fragment.id, - startBlock, - endBlock, - }) - .execute(); - } - } - break; - } + await db.wrap({ method: "insertIntervals" }, async () => { + const values: InsertObject[] = []; - case "block": { - const fragment = buildBlockFilterFragment(filter); - await db - .insertInto("blockFilterIntervals") - .values({ - blockFilterId: fragment.id, - startBlock, - endBlock, - }) - .execute(); - break; - } + // NOTE: In order to force proper range union behavior, `interval[1]` must + // be rounded up. - case "callTrace": { - for (const fragment of buildTraceFilterFragments(filter)) { - if (isAddressFactory(filter.toAddress)) { - await db - .insertInto("factoryTraceFilterIntervals") - .values({ - factoryId: fragment.id, - startBlock, - endBlock, - }) - .execute(); - } else { - await db - .insertInto("traceFilterIntervals") - .values({ - traceFilterId: fragment.id, - startBlock, - endBlock, - }) - .execute(); - } - } - break; + for (const { interval, filter } of intervals) { + for (const fragment of getFragmentIds(filter)) { + values.push({ + fragment_id: fragment.id, + chain_id: filter.chainId, + blocks: ksql`nummultirange(numrange(${interval[0]}, ${interval[1] + 1}, '[]'))`, + }); } - - default: - never(filter); } - }), - getIntervals: async ({ filter }) => - db.wrap({ method: "getIntervals" }, async () => { - const topicSQL = ( - qb: SelectQueryBuilder< - PonderSyncSchema, - | "logFilters" - | "logFilterIntervals" - | "factoryLogFilters" - | "factoryLogFilterIntervals", - {} - >, - fragment: LogFilterFragment, - ) => - qb - .where((eb) => - eb.or([ - eb("topic0", "is", null), - eb("topic0", "=", fragment.topic0), - ]), - ) - .where((eb) => - eb.or([ - eb("topic1", "is", null), - eb("topic1", "=", fragment.topic1), - ]), - ) - .where((eb) => - eb.or([ - eb("topic2", "is", null), - eb("topic2", "=", fragment.topic2), - ]), - ) - .where((eb) => - eb.or([ - eb("topic3", "is", null), - eb("topic3", "=", fragment.topic3), - ]), - ); - - let fragments: - | LogFilterFragment[] - | TraceFilterFragment[] - | BlockFilterFragment[]; - let table: - | "logFilter" - | "factoryLogFilter" - | "traceFilter" - | "factoryTraceFilter" - | "blockFilter"; - let idCol: - | "logFilterId" - | "traceFilterId" - | "blockFilterId" - | "factoryId"; - let fragmentSelect: ( - fragment: any, - qb: SelectQueryBuilder, - ) => SelectQueryBuilder; - - switch (filter.type) { - 
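A minimal sketch (not taken from the patch) of the rounding convention noted in `insertIntervals`/`getIntervals` above, assuming Postgres `numrange` semantics: because `numrange` is a continuous range, the block intervals [1, 5] and [6, 10] are not adjacent and would never merge, but writing them with the end rounded up, as numrange(1, 6, '[]') and numrange(6, 11, '[]'), makes them touch at 6, so `range_agg` and `blocks + excluded.blocks` collapse them into one range; subtracting 1 from the upper bound on read recovers the merged interval [1, 10]. Per-fragment ranges are unioned in SQL this way, while the fragments belonging to one filter are intersected afterwards with `intervalIntersectionMany`.

// Illustration only (hypothetical helpers, not part of the patch):
const toStoredRange = (interval: [number, number]) =>
  `numrange(${interval[0]}, ${interval[1] + 1}, '[]')`; // round the end up on write
const fromStoredRange = ([start, end]: [number, number]): [number, number] =>
  [start, end - 1]; // round the end back down after range_agg on read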
case "log": - { - if (isAddressFactory(filter.address)) { - fragments = buildLogFilterFragments(filter); - table = "factoryLogFilter"; - idCol = "factoryId"; - // @ts-ignore - fragmentSelect = (fragment: LogFilterFragment, qb) => - qb - .where("address", "=", fragment.address) - .where("eventSelector", "=", fragment.eventSelector) - .where( - "childAddressLocation", - "=", - fragment.childAddressLocation, - ) - .where( - "includeTransactionReceipts", - ">=", - fragment.includeTransactionReceipts, - ) - .$call((qb) => topicSQL(qb, fragment)); - } else { - fragments = buildLogFilterFragments(filter); - table = "logFilter"; - idCol = "logFilterId"; - // @ts-ignore - fragmentSelect = (fragment: LogFilterFragment, qb) => - qb - .where((eb) => - eb.or([ - eb("address", "is", null), - eb("address", "=", fragment.address), - ]), - ) - .where( - "includeTransactionReceipts", - ">=", - fragment.includeTransactionReceipts, - ) - .$call((qb) => topicSQL(qb, fragment)); - } - } - break; - - case "block": - { - fragments = [buildBlockFilterFragment(filter)]; - table = "blockFilter"; - idCol = "blockFilterId"; - fragmentSelect = (fragment, qb) => - qb.where("blockFilterId", "=", fragment.id); - } - break; - - case "callTrace": - { - if (isAddressFactory(filter.toAddress)) { - fragments = buildTraceFilterFragments(filter); - table = "factoryTraceFilter"; - idCol = "factoryId"; - fragmentSelect = (fragment: TraceFilterFragment, qb) => - qb - .where("address", "=", fragment.address) - .where("eventSelector", "=", fragment.eventSelector) - .where( - "childAddressLocation", - "=", - fragment.childAddressLocation, - ) - .where((eb) => - eb.or([ - eb("fromAddress", "is", null), - eb("fromAddress", "=", fragment.fromAddress), - ]), - ); - } else { - fragments = buildTraceFilterFragments(filter); - table = "traceFilter"; - idCol = "traceFilterId"; - fragmentSelect = (fragment: TraceFilterFragment, qb) => - qb - .where((eb) => - eb.or([ - eb("fromAddress", "is", null), - eb("fromAddress", "=", fragment.fromAddress), - ]), - ) - .where((eb) => - eb.or([ - eb("toAddress", "is", null), - eb("toAddress", "=", fragment.toAddress), - ]), - ); - } - } - break; - default: - never(filter); + await db + .insertInto("intervals") + .values(values) + .onConflict((oc) => + oc.column("fragment_id").doUpdateSet({ + blocks: ksql`intervals.blocks + excluded.blocks`, + }), + ) + .execute(); + }); + }, + getIntervals: async ({ filters }) => + db.wrap({ method: "getIntervals" }, async () => { + let query: + | SelectQueryBuilder< + PonderSyncSchema, + "intervals", + { merged_blocks: string | null; filter: string } + > + | undefined; + + for (let i = 0; i < filters.length; i++) { + const filter = filters[i]!; + const fragments = getFragmentIds(filter); + for (const fragment of fragments) { + const _query = db + .selectFrom( + db + .selectFrom("intervals") + .select(sql`unnest(blocks)`.as("blocks")) + .where("fragment_id", "in", fragment.adjacent) + .as("unnested"), + ) + .select([ + sql`range_agg(unnested.blocks)`.as("merged_blocks"), + sql`${i}`.as("filter"), + ]); + // @ts-ignore + query = query === undefined ? _query : query.unionAll(_query); + } } - // First, attempt to merge overlapping and adjacent intervals. - for (const fragment of fragments!) 
{ - await db - .insertInto(`${table!}s`) - .values(fragment) - .onConflict((oc) => oc.column("id").doNothing()) - .execute(); - - let mergeComplete = false; - while (mergeComplete === false) { - await db.transaction().execute(async (tx) => { - // This is a trick to add a LIMIT to a DELETE statement - const existingIntervals = await tx - .deleteFrom(`${table}Intervals`) - .where( - "id", - "in", - tx - .selectFrom(`${table}Intervals`) - .where(idCol, "=", fragment.id) - .select("id") - .orderBy("startBlock asc") - .limit(common.options.syncStoreMaxIntervals), - ) - .returning(["startBlock", "endBlock"]) - .execute(); - - const mergedIntervals = intervalUnion( - existingIntervals.map((i) => [ - Number(i.startBlock), - Number(i.endBlock), - ]), - ); - - const mergedIntervalRows = mergedIntervals.map( - ([startBlock, endBlock]) => ({ - [idCol as string]: fragment.id, - startBlock: BigInt(startBlock), - endBlock: BigInt(endBlock), - }), - ); - - if (mergedIntervalRows.length > 0) { - await tx - .insertInto(`${table}Intervals`) - .values(mergedIntervalRows) - .execute(); - } + const rows = await query!.execute(); - if ( - mergedIntervalRows.length === common.options.syncStoreMaxIntervals - ) { - // This occurs when there are too many non-mergeable ranges with the same logFilterId. Should be almost impossible. - throw new NonRetryableError( - `'${table}Intervals' table for chain '${fragment.chainId}' has reached an unrecoverable level of fragmentation.`, - ); - } + const result: Map = new Map(); - if ( - existingIntervals.length !== common.options.syncStoreMaxIntervals - ) { - mergeComplete = true; - } - }); - } - } + // intervals use "union" for the same fragment, and + // "intersection" for the same filter - const intervals: Interval[][] = []; - for (const fragment of fragments!) { - const _intervals = await db - .selectFrom(`${table!}Intervals`) - .innerJoin(`${table!}s`, idCol!, `${table!}s.id`) - .$call((qb) => fragmentSelect(fragment, qb as any)) - .where("chainId", "=", fragment.chainId) - .select(["startBlock", "endBlock"]) - .execute(); + // NOTE: `interval[1]` must be rounded down in order to offset the previous + // rounding. - const union = intervalUnion( - _intervals.map(({ startBlock, endBlock }) => [ - Number(startBlock), - Number(endBlock), - ]), - ); + for (let i = 0; i < filters.length; i++) { + const filter = filters[i]!; + const intervals = rows + .filter((row) => row.filter === `${i}`) + .map((row) => + (row.merged_blocks + ? (JSON.parse( + `[${row.merged_blocks.slice(1, -1)}]`, + ) as Interval[]) + : [] + ).map((interval) => [interval[0], interval[1] - 1] as Interval), + ); - intervals.push(union); + result.set(filter, intervalIntersectionMany(intervals)); } - return intervalIntersectionMany(intervals); + return result; }), getChildAddresses: ({ filter, limit }) => db.wrap({ method: "getChildAddresses" }, async () => { @@ -532,13 +287,11 @@ export const createSyncStore = ({ Object.keys(encodeLog({ log: logs[0]!.log, chainId })).length, ); - /** - * As an optimization, logs that are matched by a factory do - * not contain a checkpoint, because not corresponding block is - * fetched (no block.timestamp). However, when a log is matched by - * both a log filter and a factory, the checkpoint must be included - * in the db. - */ + // As an optimization, logs that are matched by a factory do + // not contain a checkpoint, because not corresponding block is + // fetched (no block.timestamp). 
However, when a log is matched by + // both a log filter and a factory, the checkpoint must be included + // in the db. for (let i = 0; i < logs.length; i += batchSize) { await db @@ -602,23 +355,32 @@ export const createSyncStore = ({ common.options.databaseMaxQueryParameters / Object.keys( encodeTransaction({ - transaction: transactions[0]!, + transaction: transactions[0]!.transaction, + block: transactions[0]!.block, chainId, }), ).length, ); + // As an optimization for the migration, transactions inserted before 0.8 do not + // contain a checkpoint. However, for correctness the checkpoint must be inserted + // for new transactions (using onConflictDoUpdate). + for (let i = 0; i < transactions.length; i += batchSize) { await db .insertInto("transactions") .values( transactions .slice(i, i + batchSize) - .map((transaction) => - encodeTransaction({ transaction, chainId }), + .map(({ transaction, block }) => + encodeTransaction({ transaction, block, chainId }), ), ) - .onConflict((oc) => oc.column("hash").doNothing()) + .onConflict((oc) => + oc.column("hash").doUpdateSet((eb) => ({ + checkpoint: eb.ref("excluded.checkpoint"), + })), + ) .execute(); } }); @@ -674,89 +436,48 @@ export const createSyncStore = ({ .executeTakeFirst() .then((result) => result !== undefined); }), - insertCallTraces: async ({ callTraces, chainId }) => { - if (callTraces.length === 0) return; - await db.wrap({ method: "insertCallTrace" }, async () => { - // Delete existing traces with the same `transactionHash`. Then, calculate "callTraces.checkpoint" - // based on the ordering of "callTraces.traceAddress" and add all traces to "callTraces" table. - const traceByTransactionHash: { - [transactionHash: Hex]: { traces: SyncCallTrace[]; block: SyncBlock }; - } = {}; + insertTraces: async ({ traces, chainId }) => { + if (traces.length === 0) return; + await db.wrap({ method: "insertTraces" }, async () => { + // Calculate `batchSize` based on how many parameters the + // input will have + const batchSize = Math.floor( + common.options.databaseMaxQueryParameters / + Object.keys( + encodeTrace({ + trace: traces[0]!.trace.trace, + block: traces[0]!.block, + transaction: traces[0]!.transaction, + chainId, + }), + ).length, + ); - for (const { callTrace, block } of callTraces) { - if (traceByTransactionHash[callTrace.transactionHash] === undefined) { - traceByTransactionHash[callTrace.transactionHash] = { - traces: [], - block, - }; - } - traceByTransactionHash[callTrace.transactionHash]!.traces.push( - callTrace, - ); + for (let i = 0; i < traces.length; i += batchSize) { + await db + .insertInto("traces") + .values( + traces + .slice(i, i + batchSize) + .map(({ trace, block, transaction }) => + encodeTrace({ + trace: trace.trace, + block, + transaction, + chainId, + }), + ), + ) + .onConflict((oc) => oc.column("id").doNothing()) + .execute(); } - - const values: Insertable[] = []; - - await db.transaction().execute(async (tx) => { - for (const transactionHash of Object.keys(traceByTransactionHash)) { - const block = traceByTransactionHash[transactionHash as Hex]!.block; - const traces = await tx - .deleteFrom("callTraces") - .returningAll() - .where("transactionHash", "=", transactionHash as Hex) - .where("chainId", "=", chainId) - .execute(); - - traces.push( - // @ts-ignore - ...traceByTransactionHash[transactionHash as Hex]!.traces.map( - (trace) => encodeCallTrace({ trace, chainId }), - ), - ); - - // Use lexographical sort of stringified `traceAddress`. 
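// Note on the removed block below: the delete-and-reinsert pass and the
// traceAddress sort existed only to derive each trace's checkpoint inside the
// store. In the new `insertTraces` above, the store no longer computes
// checkpoints at all (the `EVENT_TYPES`/`encodeCheckpoint` import is removed);
// each `SyncTrace` now carries `index` and `subcalls`, so the checkpoint can
// presumably be derived in `encodeTrace` instead (an assumption based on the
// fields visible in this diff, not confirmed by this hunk).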
- traces.sort((a, b) => { - return a.traceAddress < b.traceAddress ? -1 : 1; - }); - - for (let i = 0; i < traces.length; i++) { - const trace = traces[i]!; - - const checkpoint = encodeCheckpoint({ - blockTimestamp: hexToNumber(block.timestamp), - chainId: BigInt(chainId), - blockNumber: hexToBigInt(block.number), - transactionIndex: BigInt(trace.transactionPosition), - eventType: EVENT_TYPES.callTraces, - eventIndex: BigInt(i), - }); - trace.checkpoint = checkpoint; - values.push(trace); - } - } - - // Calculate `batchSize` based on how many parameters the - // input will have - const batchSize = Math.floor( - common.options.databaseMaxQueryParameters / - Object.keys(values[0]!).length, - ); - - for (let i = 0; i < values.length; i += batchSize) { - await tx - .insertInto("callTraces") - .values(values.slice(i, i + batchSize)) - .onConflict((oc) => oc.column("id").doNothing()) - .execute(); - } - }); }); }, getEvents: async ({ filters, from, to, limit }) => { const addressSQL = ( qb: SelectQueryBuilder< PonderSyncSchema, - "logs" | "blocks" | "callTraces", + "logs" | "blocks" | "traces", {} >, address: LogFilter["address"], @@ -789,14 +510,13 @@ export const createSyncStore = ({ "blockHash", "transactionHash", "id as logId", - ksql`null`.as("callTraceId"), + ksql`null`.as("traceId"), ]) .where("chainId", "=", filter.chainId) - .$if(filter.topics !== undefined, (qb) => { - for (const idx_ of [0, 1, 2, 3]) { - const idx = idx_ as 0 | 1 | 2 | 3; + .$call((qb) => { + for (const idx of [0, 1, 2, 3] as const) { // If it's an array of length 1, collapse it. - const raw = filter.topics![idx] ?? null; + const raw = filter[`topic${idx}`] ?? null; if (raw === null) continue; const topic = Array.isArray(raw) && raw.length === 1 ? raw[0]! : raw; @@ -811,66 +531,147 @@ export const createSyncStore = ({ return qb; }) .$call((qb) => addressSQL(qb as any, filter.address, "address")) - .where("blockNumber", ">=", filter.fromBlock.toString()) + .$if(filter.fromBlock !== undefined, (qb) => + qb.where("blockNumber", ">=", filter.fromBlock!.toString()), + ) .$if(filter.toBlock !== undefined, (qb) => qb.where("blockNumber", "<=", filter.toBlock!.toString()), ); - const callTraceSQL = ( - filter: CallTraceFilter, + const blockSQL = ( + filter: BlockFilter, db: Kysely, index: number, ) => db - .selectFrom("callTraces") + .selectFrom("blocks") + .select([ + ksql.raw(`'${index}'`).as("filterIndex"), + "checkpoint", + "chainId", + "hash as blockHash", + ksql`null`.as("transactionHash"), + ksql`null`.as("logId"), + ksql`null`.as("traceId"), + ]) + .where("chainId", "=", filter.chainId) + .$if(filter !== undefined && filter.interval !== undefined, (qb) => + qb.where(ksql`(number - ${filter.offset}) % ${filter.interval} = 0`), + ) + .$if(filter.fromBlock !== undefined, (qb) => + qb.where("number", ">=", filter.fromBlock!.toString()), + ) + .$if(filter.toBlock !== undefined, (qb) => + qb.where("number", "<=", filter.toBlock!.toString()), + ); + + const transactionSQL = ( + filter: TransactionFilter, + db: Kysely, + index: number, + ) => + db + .selectFrom("transactions") .select([ ksql.raw(`'${index}'`).as("filterIndex"), "checkpoint", "chainId", "blockHash", - "transactionHash", + "hash as transactionHash", ksql`null`.as("logId"), - "id as callTraceId", + ksql`null`.as("traceId"), ]) .where("chainId", "=", filter.chainId) - .where((eb) => - eb.or( - filter.functionSelectors.map((fs) => - eb("callTraces.functionSelector", "=", fs), - ), + .$call((qb) => addressSQL(qb as any, filter.fromAddress, "from")) + .$call((qb) 
=> addressSQL(qb as any, filter.toAddress, "to")) + .$if(filter.includeReverted === false, (qb) => + qb.where( + db + .selectFrom("transactionReceipts") + .select("status") + .where( + "transactionReceipts.transactionHash", + "=", + sql.ref("transactions.hash"), + ), + "=", + "0x1", ), ) - .where(ksql`${ksql.ref("callTraces.error")} IS NULL`) + .$if(filter.fromBlock !== undefined, (qb) => + qb.where("blockNumber", ">=", filter.fromBlock!.toString()), + ) + .$if(filter.toBlock !== undefined, (qb) => + qb.where("blockNumber", "<=", filter.toBlock!.toString()), + ); + + const transferSQL = ( + filter: TransferFilter, + db: Kysely, + index: number, + ) => + db + .selectFrom("traces") + .select([ + ksql.raw(`'${index}'`).as("filterIndex"), + "checkpoint", + "chainId", + "blockHash", + "transactionHash", + ksql`null`.as("logId"), + "id as traceId", + ]) + .where("chainId", "=", filter.chainId) .$call((qb) => addressSQL(qb as any, filter.fromAddress, "from")) - .$call((qb) => addressSQL(qb, filter.toAddress, "to")) - .where("blockNumber", ">=", filter.fromBlock.toString()) + .$call((qb) => addressSQL(qb as any, filter.toAddress, "to")) + .where("value", ">", "0") + .$if(filter.includeReverted === false, (qb) => + qb.where("isReverted", "=", 0), + ) + .$if(filter.fromBlock !== undefined, (qb) => + qb.where("blockNumber", ">=", filter.fromBlock!.toString()), + ) .$if(filter.toBlock !== undefined, (qb) => qb.where("blockNumber", "<=", filter.toBlock!.toString()), ); - const blockSQL = ( - filter: BlockFilter, + const traceSQL = ( + filter: TraceFilter, db: Kysely, index: number, ) => db - .selectFrom("blocks") + .selectFrom("traces") .select([ ksql.raw(`'${index}'`).as("filterIndex"), "checkpoint", "chainId", - "hash as blockHash", - ksql`null`.as("transactionHash"), + "blockHash", + "transactionHash", ksql`null`.as("logId"), - ksql`null`.as("callTraceId"), + "id as traceId", ]) .where("chainId", "=", filter.chainId) - .$if(filter !== undefined && filter.interval !== undefined, (qb) => - qb.where(ksql`(number - ${filter.offset}) % ${filter.interval} = 0`), + .$call((qb) => addressSQL(qb as any, filter.fromAddress, "from")) + .$call((qb) => addressSQL(qb as any, filter.toAddress, "to")) + .$if(filter.includeReverted === false, (qb) => + qb.where("isReverted", "=", 0), + ) + .$if(filter.callType !== undefined, (qb) => + qb.where("type", "=", filter.callType!), + ) + .$if(filter.functionSelector !== undefined, (qb) => { + if (Array.isArray(filter.functionSelector)) { + return qb.where("functionSelector", "in", filter.functionSelector!); + } else { + return qb.where("functionSelector", "=", filter.functionSelector!); + } + }) + .$if(filter.fromBlock !== undefined, (qb) => + qb.where("blockNumber", ">=", filter.fromBlock!.toString()), ) - .where("number", ">=", filter.fromBlock.toString()) .$if(filter.toBlock !== undefined, (qb) => - qb.where("number", "<=", filter.toBlock!.toString()), + qb.where("blockNumber", "<=", filter.toBlock!.toString()), ); const rows = await db.wrap( @@ -884,15 +685,15 @@ export const createSyncStore = ({ let query: | SelectQueryBuilder< PonderSyncSchema, - "logs" | "callTraces" | "blocks", + "logs" | "blocks" | "traces" | "transactions", { filterIndex: number; checkpoint: string; chainId: number; + logId: string; blockHash: string; transactionHash: string; - logId: string; - callTraceId: string; + traceId: string; } > | undefined; @@ -903,9 +704,13 @@ export const createSyncStore = ({ const _query = filter.type === "log" ? 
logSQL(filter, db, i) - : filter.type === "callTrace" - ? callTraceSQL(filter, db, i) - : blockSQL(filter, db, i); + : filter.type === "block" + ? blockSQL(filter, db, i) + : filter.type === "transaction" + ? transactionSQL(filter, db, i) + : filter.type === "transfer" + ? transferSQL(filter, db, i) + : traceSQL(filter, db, i); // @ts-ignore query = query === undefined ? _query : query.unionAll(_query); @@ -943,8 +748,6 @@ export const createSyncStore = ({ .leftJoin("logs", "logs.id", "event.logId") .select([ "logs.address as log_address", - "logs.blockHash as log_blockHash", - "logs.blockNumber as log_blockNumber", "logs.chainId as log_chainId", "logs.data as log_data", "logs.id as log_id", @@ -953,8 +756,6 @@ export const createSyncStore = ({ "logs.topic1 as log_topic1", "logs.topic2 as log_topic2", "logs.topic3 as log_topic3", - "logs.transactionHash as log_transactionHash", - "logs.transactionIndex as log_transactionIndex", ]) .leftJoin( "transactions", @@ -963,8 +764,6 @@ export const createSyncStore = ({ ) .select([ "transactions.accessList as tx_accessList", - "transactions.blockHash as tx_blockHash", - "transactions.blockNumber as tx_blockNumber", "transactions.from as tx_from", "transactions.gas as tx_gas", "transactions.gasPrice as tx_gasPrice", @@ -981,23 +780,21 @@ export const createSyncStore = ({ "transactions.value as tx_value", "transactions.v as tx_v", ]) - .leftJoin("callTraces", "callTraces.id", "event.callTraceId") + .leftJoin("traces", "traces.id", "event.traceId") .select([ - "callTraces.id as callTrace_id", - "callTraces.callType as callTrace_callType", - "callTraces.from as callTrace_from", - "callTraces.gas as callTrace_gas", - "callTraces.input as callTrace_input", - "callTraces.to as callTrace_to", - "callTraces.value as callTrace_value", - "callTraces.blockHash as callTrace_blockHash", - "callTraces.blockNumber as callTrace_blockNumber", - "callTraces.gasUsed as callTrace_gasUsed", - "callTraces.output as callTrace_output", - "callTraces.subtraces as callTrace_subtraces", - "callTraces.traceAddress as callTrace_traceAddress", - "callTraces.transactionHash as callTrace_transactionHash", - "callTraces.transactionPosition as callTrace_transactionPosition", + "traces.id as trace_id", + "traces.type as trace_callType", + "traces.from as trace_from", + "traces.to as trace_to", + "traces.gas as trace_gas", + "traces.gasUsed as trace_gasUsed", + "traces.input as trace_input", + "traces.output as trace_output", + "traces.error as trace_error", + "traces.revertReason as trace_revertReason", + "traces.value as trace_value", + "traces.index as trace_index", + "traces.subcalls as trace_subcalls", ]) .leftJoin( "transactionReceipts", @@ -1005,19 +802,14 @@ export const createSyncStore = ({ "event.transactionHash", ) .select([ - "transactionReceipts.blockHash as txr_blockHash", - "transactionReceipts.blockNumber as txr_blockNumber", "transactionReceipts.contractAddress as txr_contractAddress", "transactionReceipts.cumulativeGasUsed as txr_cumulativeGasUsed", "transactionReceipts.effectiveGasPrice as txr_effectiveGasPrice", "transactionReceipts.from as txr_from", "transactionReceipts.gasUsed as txr_gasUsed", - "transactionReceipts.logs as txr_logs", "transactionReceipts.logsBloom as txr_logsBloom", "transactionReceipts.status as txr_status", "transactionReceipts.to as txr_to", - "transactionReceipts.transactionHash as txr_transactionHash", - "transactionReceipts.transactionIndex as txr_transactionIndex", "transactionReceipts.type as txr_type", ]) .where("event.checkpoint", ">", 
from) @@ -1039,10 +831,8 @@ export const createSyncStore = ({ const hasLog = row.log_id !== null; const hasTransaction = row.tx_hash !== null; - const hasCallTrace = row.callTrace_id !== null; - const hasTransactionReceipt = - (filter.type === "log" || filter.type === "callTrace") && - filter.includeTransactionReceipts; + const hasTrace = row.trace_id !== null; + const hasTransactionReceipt = shouldGetTransactionReceipt(filter); return { chainId: filter.chainId, @@ -1078,8 +868,6 @@ export const createSyncStore = ({ log: hasLog ? { address: checksumAddress(row.log_address!), - blockHash: row.log_blockHash, - blockNumber: BigInt(row.log_blockNumber), data: row.log_data, id: row.log_id as Log["id"], logIndex: Number(row.log_logIndex), @@ -1090,14 +878,10 @@ export const createSyncStore = ({ row.log_topic2, row.log_topic3, ].filter((t): t is Hex => t !== null) as [Hex, ...Hex[]] | [], - transactionHash: row.log_transactionHash, - transactionIndex: Number(row.log_transactionIndex), } : undefined, transaction: hasTransaction ? { - blockHash: row.tx_blockHash, - blockNumber: BigInt(row.tx_blockNumber), from: checksumAddress(row.tx_from), gas: BigInt(row.tx_gas), hash: row.tx_hash, @@ -1145,29 +929,23 @@ export const createSyncStore = ({ }), } : undefined, - trace: hasCallTrace + trace: hasTrace ? { - id: row.callTrace_id, - from: checksumAddress(row.callTrace_from), - to: checksumAddress(row.callTrace_to), - gas: BigInt(row.callTrace_gas), - value: BigInt(row.callTrace_value), - input: row.callTrace_input, - output: row.callTrace_output, - gasUsed: BigInt(row.callTrace_gasUsed), - subtraces: row.callTrace_subtraces, - traceAddress: JSON.parse(row.callTrace_traceAddress), - blockHash: row.callTrace_blockHash, - blockNumber: BigInt(row.callTrace_blockNumber), - transactionHash: row.callTrace_transactionHash, - transactionIndex: row.callTrace_transactionPosition, - callType: row.callTrace_callType as CallTrace["callType"], + id: row.trace_id, + type: row.trace_callType as Trace["type"], + from: checksumAddress(row.trace_from), + to: checksumAddress(row.trace_to), + gas: BigInt(row.trace_gas), + gasUsed: BigInt(row.trace_gasUsed), + input: row.trace_input, + output: row.trace_output, + value: BigInt(row.trace_value), + traceIndex: Number(row.trace_index), + subcalls: Number(row.trace_subcalls), } : undefined, transactionReceipt: hasTransactionReceipt ? { - blockHash: row.txr_blockHash, - blockNumber: BigInt(row.txr_blockNumber), contractAddress: row.txr_contractAddress ? checksumAddress(row.txr_contractAddress) : null, @@ -1175,23 +953,6 @@ export const createSyncStore = ({ effectiveGasPrice: BigInt(row.txr_effectiveGasPrice), from: checksumAddress(row.txr_from), gasUsed: BigInt(row.txr_gasUsed), - logs: JSON.parse(row.txr_logs).map((log: SyncLog) => ({ - id: `${log.blockHash}-${log.logIndex}`, - address: checksumAddress(log.address), - blockHash: log.blockHash, - blockNumber: hexToBigInt(log.blockNumber), - data: log.data, - logIndex: hexToNumber(log.logIndex), - removed: false, - topics: [ - log.topics[0] ?? null, - log.topics[1] ?? null, - log.topics[2] ?? null, - log.topics[3] ?? null, - ].filter((t): t is Hex => t !== null) as [Hex, ...Hex[]] | [], - transactionHash: log.transactionHash, - transactionIndex: hexToNumber(log.transactionIndex), - })), logsBloom: row.txr_logsBloom, status: row.txr_status === "0x1" @@ -1200,8 +961,6 @@ export const createSyncStore = ({ ? "reverted" : (row.txr_status as TransactionReceipt["status"]), to: row.txr_to ? 
checksumAddress(row.txr_to) : null, - transactionHash: row.txr_transactionHash, - transactionIndex: Number(row.txr_transactionIndex), type: row.txr_type === "0x0" ? "legacy" @@ -1229,31 +988,29 @@ export const createSyncStore = ({ insertRpcRequestResult: async ({ request, blockNumber, chainId, result }) => db.wrap({ method: "insertRpcRequestResult" }, async () => { await db - .insertInto("rpcRequestResults") + .insertInto("rpc_request_results") .values({ request, - blockNumber, - chainId, + block_number: blockNumber, + chain_id: chainId, result, }) .onConflict((oc) => - oc - .columns(["request", "chainId", "blockNumber"]) - .doUpdateSet({ result }), + oc.columns(["request_hash", "chain_id"]).doUpdateSet({ result }), ) .execute(); }), - getRpcRequestResult: async ({ request, blockNumber, chainId }) => + getRpcRequestResult: async ({ request, chainId }) => db.wrap({ method: "getRpcRequestResult" }, async () => { const result = await db - .selectFrom("rpcRequestResults") + .selectFrom("rpc_request_results") .select("result") - .where("request", "=", request) - .where("chainId", "=", chainId) - .where("blockNumber", "=", blockNumber.toString()) + + .where("request_hash", "=", sql`MD5(${request})`) + .where("chain_id", "=", chainId) .executeTakeFirst(); - return result?.result ?? null; + return result?.result; }), pruneRpcRequestResult: async ({ blocks, chainId }) => db.wrap({ method: "pruneRpcRequestResult" }, async () => { @@ -1264,223 +1021,14 @@ export const createSyncStore = ({ ); await db - .deleteFrom("rpcRequestResults") - .where("chainId", "=", chainId) - .where("blockNumber", "in", numbers) + .deleteFrom("rpc_request_results") + .where("chain_id", "=", chainId) + .where("block_number", "in", numbers) .execute(); }), pruneByChain: async ({ fromBlock, chainId }) => db.wrap({ method: "pruneByChain" }, () => db.transaction().execute(async (tx) => { - await tx - .with("deleteLogFilter(logFilterId)", (qb) => - qb - .selectFrom("logFilterIntervals") - .innerJoin("logFilters", "logFilterId", "logFilters.id") - .select("logFilterId") - .where("chainId", "=", chainId) - .where("startBlock", ">=", fromBlock.toString()), - ) - .deleteFrom("logFilterIntervals") - .where( - "logFilterId", - "in", - ksql`(SELECT "logFilterId" FROM ${ksql.table("deleteLogFilter")})`, - ) - .execute(); - - await tx - .with("updateLogFilter(logFilterId)", (qb) => - qb - .selectFrom("logFilterIntervals") - .innerJoin("logFilters", "logFilterId", "logFilters.id") - .select("logFilterId") - .where("chainId", "=", chainId) - .where("startBlock", "<", fromBlock.toString()) - .where("endBlock", ">", fromBlock.toString()), - ) - .updateTable("logFilterIntervals") - .set({ - endBlock: fromBlock.toString(), - }) - .where( - "logFilterId", - "in", - ksql`(SELECT "logFilterId" FROM ${ksql.table("updateLogFilter")})`, - ) - .execute(); - - await tx - .with("deleteFactoryLogFilter(factoryId)", (qb) => - qb - .selectFrom("factoryLogFilterIntervals") - .innerJoin( - "factoryLogFilters", - "factoryId", - "factoryLogFilters.id", - ) - - .select("factoryId") - .where("chainId", "=", chainId) - .where("startBlock", ">=", fromBlock.toString()), - ) - .deleteFrom("factoryLogFilterIntervals") - .where( - "factoryId", - "in", - ksql`(SELECT "factoryId" FROM ${ksql.table("deleteFactoryLogFilter")})`, - ) - .execute(); - - await tx - .with("updateFactoryLogFilter(factoryId)", (qb) => - qb - .selectFrom("factoryLogFilterIntervals") - .innerJoin( - "factoryLogFilters", - "factoryId", - "factoryLogFilters.id", - ) - - .select("factoryId") - 
.where("chainId", "=", chainId) - .where("startBlock", "<", fromBlock.toString()) - .where("endBlock", ">", fromBlock.toString()), - ) - .updateTable("factoryLogFilterIntervals") - .set({ - endBlock: BigInt(fromBlock), - }) - .where( - "factoryId", - "in", - ksql`(SELECT "factoryId" FROM ${ksql.table("updateFactoryLogFilter")})`, - ) - .execute(); - - await tx - .with("deleteTraceFilter(traceFilterId)", (qb) => - qb - .selectFrom("traceFilterIntervals") - .innerJoin("traceFilters", "traceFilterId", "traceFilters.id") - .select("traceFilterId") - .where("chainId", "=", chainId) - .where("startBlock", ">=", fromBlock.toString()), - ) - .deleteFrom("traceFilterIntervals") - .where( - "traceFilterId", - "in", - ksql`(SELECT "traceFilterId" FROM ${ksql.table("deleteTraceFilter")})`, - ) - .execute(); - - await tx - .with("updateTraceFilter(traceFilterId)", (qb) => - qb - .selectFrom("traceFilterIntervals") - .innerJoin("traceFilters", "traceFilterId", "traceFilters.id") - .select("traceFilterId") - .where("chainId", "=", chainId) - .where("startBlock", "<", fromBlock.toString()) - .where("endBlock", ">", fromBlock.toString()), - ) - .updateTable("traceFilterIntervals") - .set({ - endBlock: BigInt(fromBlock), - }) - .where( - "traceFilterId", - "in", - ksql`(SELECT "traceFilterId" FROM ${ksql.table("updateTraceFilter")})`, - ) - .execute(); - - await tx - .with("deleteFactoryTraceFilter(factoryId)", (qb) => - qb - .selectFrom("factoryTraceFilterIntervals") - .innerJoin( - "factoryTraceFilters", - "factoryId", - "factoryTraceFilters.id", - ) - .select("factoryId") - .where("chainId", "=", chainId) - .where("startBlock", ">=", fromBlock.toString()), - ) - .deleteFrom("factoryTraceFilterIntervals") - .where( - "factoryId", - "in", - ksql`(SELECT "factoryId" FROM ${ksql.table("deleteFactoryTraceFilter")})`, - ) - .execute(); - - await tx - .with("updateFactoryTraceFilter(factoryId)", (qb) => - qb - .selectFrom("factoryTraceFilterIntervals") - .innerJoin( - "factoryTraceFilters", - "factoryId", - "factoryTraceFilters.id", - ) - - .select("factoryId") - .where("chainId", "=", chainId) - .where("startBlock", "<", fromBlock.toString()) - .where("endBlock", ">", fromBlock.toString()), - ) - .updateTable("factoryTraceFilterIntervals") - .set({ - endBlock: BigInt(fromBlock), - }) - .where( - "factoryId", - "in", - ksql`(SELECT "factoryId" FROM ${ksql.table("updateFactoryTraceFilter")})`, - ) - .execute(); - - await tx - .with("deleteBlockFilter(blockFilterId)", (qb) => - qb - .selectFrom("blockFilterIntervals") - .innerJoin("blockFilters", "blockFilterId", "blockFilters.id") - .select("blockFilterId") - .where("chainId", "=", chainId) - .where("startBlock", ">=", fromBlock.toString()), - ) - .deleteFrom("blockFilterIntervals") - .where( - "blockFilterId", - "in", - ksql`(SELECT "blockFilterId" FROM ${ksql.table("deleteBlockFilter")})`, - ) - .execute(); - - await tx - .with("updateBlockFilter(blockFilterId)", (qb) => - qb - .selectFrom("blockFilterIntervals") - .innerJoin("blockFilters", "blockFilterId", "blockFilters.id") - .select("blockFilterId") - .where("chainId", "=", chainId) - .where("startBlock", "<", fromBlock.toString()) - .where("endBlock", ">", fromBlock.toString()), - ) - .updateTable("blockFilterIntervals") - .set({ - endBlock: BigInt(fromBlock), - }) - .where( - "blockFilterId", - "in", - ksql`(SELECT "blockFilterId" FROM ${ksql.table("updateBlockFilter")})`, - ) - .execute(); - await tx .deleteFrom("logs") .where("chainId", "=", chainId) @@ -1492,12 +1040,12 @@ export const createSyncStore 
= ({ .where("number", ">=", fromBlock.toString()) .execute(); await tx - .deleteFrom("rpcRequestResults") - .where("chainId", "=", chainId) - .where("blockNumber", ">=", fromBlock.toString()) + .deleteFrom("rpc_request_results") + .where("chain_id", "=", chainId) + .where("block_number", ">=", fromBlock.toString()) .execute(); await tx - .deleteFrom("callTraces") + .deleteFrom("traces") .where("chainId", "=", chainId) .where("blockNumber", ">=", fromBlock.toString()) .execute(); diff --git a/packages/core/src/sync-store/migrations.ts b/packages/core/src/sync-store/migrations.ts index 163175756..cb338f774 100644 --- a/packages/core/src/sync-store/migrations.ts +++ b/packages/core/src/sync-store/migrations.ts @@ -840,6 +840,440 @@ const migrations: Record = { .execute(); }, }, + "2024_11_04_0_request_cache": { + async up(db: Kysely) { + await db.schema + .createTable("rpc_request_results") + .addColumn("request", "text", (col) => col.notNull()) + .addColumn("block_number", "numeric(78, 0)") + .addColumn("chain_id", "integer", (col) => col.notNull()) + .addColumn("result", "text", (col) => col.notNull()) + .addPrimaryKeyConstraint("rpc_request_result_primary_key", [ + "request", + "chain_id", + ]) + .execute(); + + await db.executeQuery( + sql` +INSERT INTO ponder_sync.rpc_request_results (request, block_number, chain_id, result) +SELECT + CONCAT ( + '{"method":"eth_getbalance","params":["', + LOWER(SUBSTRING(request, 16)), + '","0x', + to_hex("blockNumber"::bigint), + '"]}' + ) as request, + "blockNumber" as block_number, + "chainId" as chain_id, + result +FROM ponder_sync."rpcRequestResults" +WHERE ponder_sync."rpcRequestResults".request LIKE 'eth_getBalance_%' +AND ponder_sync."rpcRequestResults"."blockNumber" <= 9223372036854775807; +`.compile(db), + ); + + await db.executeQuery( + sql` +INSERT INTO ponder_sync.rpc_request_results (request, block_number, chain_id, result) +SELECT + CONCAT ( + '{"method":"eth_call","params":[{"data":"', + LOWER(SUBSTRING(request, 53)), + '","to":"', + LOWER(SUBSTRING(request, 10, 42)), + '"},"0x', + to_hex("blockNumber"::bigint), + '"]}' + ) as request, + "blockNumber" as block_number, + "chainId" as chain_id, + result +FROM ponder_sync."rpcRequestResults" +WHERE ponder_sync."rpcRequestResults".request LIKE 'eth_call_%' +AND ponder_sync."rpcRequestResults"."blockNumber" <= 9223372036854775807; +`.compile(db), + ); + + await db.schema + .dropTable("rpcRequestResults") + .ifExists() + .cascade() + .execute(); + }, + }, + "2024_11_09_0_adjacent_interval": { + async up(db: Kysely) { + await db.schema + .createTable("intervals") + .addColumn("fragment_id", "text", (col) => col.notNull().primaryKey()) + .addColumn("chain_id", "integer", (col) => col.notNull()) + .addColumn("blocks", sql`nummultirange`, (col) => col.notNull()) + .execute(); + + await db + .with("range(fragment_id, chain_id, blocks)", (db) => + db + .selectFrom("logFilters as lf") + .innerJoin("logFilterIntervals as lfi", "lf.id", "lfi.logFilterId") + .select([ + sql`concat('log', '_', lf.id)`.as("fragment_id"), + "lf.chainId as chain_id", + sql`numrange(lfi."startBlock", lfi."endBlock" + 1, '[]')`.as( + "blocks", + ), + ]), + ) + .insertInto("intervals") + .columns(["fragment_id", "chain_id", "blocks"]) + .expression( + sql.raw(` +SELECT + fragment_id, + chain_id, + range_agg(range.blocks) as blocks +FROM range +GROUP BY fragment_id, chain_id +`), + ) + .execute(); + + await db.schema.dropTable("logFilters").ifExists().cascade().execute(); + await db.schema + .dropTable("logFilterIntervals") + 
.ifExists() + .cascade() + .execute(); + + await db + .with("range(fragment_id, chain_id, blocks)", (db) => + db + .selectFrom("factoryLogFilters as flf") + .innerJoin( + "factoryLogFilterIntervals as flfi", + "flf.id", + "flfi.factoryId", + ) + .select([ + sql`concat('log', '_', flf.id)`.as("fragment_id"), + "flf.chainId as chain_id", + sql`numrange(flfi."startBlock", flfi."endBlock" + 1, '[]')`.as( + "blocks", + ), + ]), + ) + .insertInto("intervals") + .columns(["fragment_id", "chain_id", "blocks"]) + .expression( + sql.raw(` + SELECT + fragment_id, + chain_id, + range_agg(range.blocks) as blocks + FROM range + GROUP BY fragment_id, chain_id + `), + ) + .onConflict((oc) => + oc.column("fragment_id").doUpdateSet({ + blocks: sql`intervals.blocks + excluded.blocks`, + }), + ) + .execute(); + + await db.schema + .dropTable("factoryLogFilters") + .ifExists() + .cascade() + .execute(); + await db.schema + .dropTable("factoryLogFilterIntervals") + .ifExists() + .cascade() + .execute(); + + await db + .with("range(fragment_id, chain_id, blocks)", (db) => + db + .selectFrom("traceFilters as tf") + .innerJoin( + "traceFilterIntervals as tfi", + "tf.id", + "tfi.traceFilterId", + ) + .select([ + sql`concat('trace', '_', tf.id)`.as("fragment_id"), + "tf.chainId as chain_id", + sql`numrange(tfi."startBlock", tfi."endBlock" + 1, '[]')`.as( + "blocks", + ), + ]), + ) + .insertInto("intervals") + .columns(["fragment_id", "chain_id", "blocks"]) + .expression( + sql.raw(` + SELECT + fragment_id, + chain_id, + range_agg(range.blocks) as blocks + FROM range + GROUP BY fragment_id, chain_id + `), + ) + .onConflict((oc) => + oc.column("fragment_id").doUpdateSet({ + blocks: sql`intervals.blocks + excluded.blocks`, + }), + ) + .execute(); + + await db.schema.dropTable("traceFilters").ifExists().cascade().execute(); + await db.schema + .dropTable("traceFilterIntervals") + .ifExists() + .cascade() + .execute(); + + await db + .with("range(fragment_id, chain_id, blocks)", (db) => + db + .selectFrom("factoryTraceFilters as ftf") + .innerJoin( + "factoryTraceFilterIntervals as ftfi", + "ftf.id", + "ftfi.factoryId", + ) + .select([ + sql`concat('trace', '_', ftf.id)`.as("fragment_id"), + "ftf.chainId as chain_id", + sql`numrange(ftfi."startBlock", ftfi."endBlock" + 1, '[]')`.as( + "blocks", + ), + ]), + ) + .insertInto("intervals") + .columns(["fragment_id", "chain_id", "blocks"]) + .expression( + sql.raw(` + SELECT + fragment_id, + chain_id, + range_agg(range.blocks) as blocks + FROM range + GROUP BY fragment_id, chain_id + `), + ) + .onConflict((oc) => + oc.column("fragment_id").doUpdateSet({ + blocks: sql`intervals.blocks + excluded.blocks`, + }), + ) + .execute(); + + await db.schema + .dropTable("factoryTraceFilters") + .ifExists() + .cascade() + .execute(); + await db.schema + .dropTable("factoryTraceFilterIntervals") + .ifExists() + .cascade() + .execute(); + + await db + .with("range(fragment_id, chain_id, blocks)", (db) => + db + .selectFrom("blockFilters as bf") + .innerJoin( + "blockFilterIntervals as bfi", + "bf.id", + "bfi.blockFilterId", + ) + .select([ + sql`concat('block', '_', bf.id)`.as("fragment_id"), + "bf.chainId as chain_id", + sql`numrange(bfi."startBlock", bfi."endBlock" + 1, '[]')`.as( + "blocks", + ), + ]), + ) + .insertInto("intervals") + .columns(["fragment_id", "chain_id", "blocks"]) + .expression( + sql.raw(` + SELECT + fragment_id, + chain_id, + range_agg(range.blocks) as blocks + FROM range + GROUP BY fragment_id, chain_id + `), + ) + .onConflict((oc) => + 
oc.column("fragment_id").doUpdateSet({ + blocks: sql`intervals.blocks + excluded.blocks`, + }), + ) + .execute(); + + await db.schema.dropTable("blockFilters").ifExists().cascade().execute(); + await db.schema + .dropTable("blockFilterIntervals") + .ifExists() + .cascade() + .execute(); + }, + }, + "2024_11_12_0_debug": { + async up(db) { + await db.schema.dropTable("callTraces").ifExists().cascade().execute(); + + await db + .deleteFrom("intervals") + .where("fragment_id", "like", "trace_%") + .execute(); + + await db.schema + .createTable("traces") + .addColumn("id", "text", (col) => col.notNull().primaryKey()) + .addColumn("chainId", "integer", (col) => col.notNull()) + .addColumn("checkpoint", "varchar(75)", (col) => col.notNull()) + .addColumn("type", "text", (col) => col.notNull()) + .addColumn("transactionHash", "varchar(66)", (col) => col.notNull()) + .addColumn("blockNumber", "numeric(78, 0)", (col) => col.notNull()) + .addColumn("blockHash", "varchar(66)", (col) => col.notNull()) + .addColumn("from", "varchar(42)", (col) => col.notNull()) + .addColumn("to", "varchar(42)") + .addColumn("gas", "numeric(78, 0)", (col) => col.notNull()) + .addColumn("gasUsed", "numeric(78, 0)", (col) => col.notNull()) + .addColumn("input", "text", (col) => col.notNull()) + .addColumn("functionSelector", "text", (col) => col.notNull()) + .addColumn("output", "text") + .addColumn("error", "text") + .addColumn("revertReason", "text") + .addColumn("value", "numeric(78, 0)") + .addColumn("index", "integer", (col) => col.notNull()) + .addColumn("subcalls", "integer", (col) => col.notNull()) + .addColumn("isReverted", "integer", (col) => col.notNull()) + .execute(); + + // `getEvents` benefits from an index on + // "blockNumber", "functionSelector", "blockHash" + // "transactionHash", "checkpoint", "chainId", "from", "to", + // "value", "type", and "isReverted" + + await db.schema + .createIndex("trace_block_number_index") + .on("traces") + .column("blockNumber") + .execute(); + + await db.schema + .createIndex("trace_function_selector_index") + .on("traces") + .column("functionSelector") + .execute(); + + await db.schema + .createIndex("trace_is_reverted_index") + .on("traces") + .column("isReverted") + .execute(); + + await db.schema + .createIndex("trace_block_hash_index") + .on("traces") + .column("blockHash") + .execute(); + + await db.schema + .createIndex("trace_transaction_hash_index") + .on("traces") + .column("transactionHash") + .execute(); + + await db.schema + .createIndex("trace_checkpoint_index") + .on("traces") + .column("checkpoint") + .execute(); + + await db.schema + .createIndex("trace_chain_id_index") + .on("traces") + .column("chainId") + .execute(); + + await db.schema + .createIndex("trace_value_index") + .on("traces") + .column("value") + .execute(); + + await db.schema + .createIndex("trace_from_index") + .on("traces") + .column("from") + .execute(); + + await db.schema + .createIndex("trace_to_index") + .on("traces") + .column("to") + .execute(); + + await db.schema + .createIndex("trace_type_index") + .on("traces") + .column("type") + .execute(); + + // add `checkpoint` to `transactions` + await db.schema + .alterTable("transactions") + .addColumn("checkpoint", "varchar(75)") + .execute(); + + await db.schema + .createIndex("transactions_checkpoint_index") + .on("transactions") + .column("checkpoint") + .execute(); + + await db.schema + .alterTable("transactionReceipts") + .dropColumn("logs") + .execute(); + }, + }, + "2024_12_02_0_request_cache": { + async up(db) { + await 
db.schema + .alterTable("rpc_request_results") + .addColumn("request_hash", "text", (col) => + col.generatedAlwaysAs(sql`MD5(request)`).stored().notNull(), + ) + .execute(); + + // Drop previous primary key constraint, on columns "request" and "chain_id" + + await db.schema + .alterTable("rpc_request_results") + .dropConstraint("rpc_request_result_primary_key") + .execute(); + + await db.schema + .alterTable("rpc_request_results") + .addPrimaryKeyConstraint("rpc_request_result_primary_key", [ + "request_hash", + "chain_id", + ]) + .execute(); + }, + }, }; class StaticMigrationProvider implements MigrationProvider { diff --git a/packages/core/src/sync/abi.ts b/packages/core/src/sync/abi.ts index e60507d09..d23f5eb12 100644 --- a/packages/core/src/sync/abi.ts +++ b/packages/core/src/sync/abi.ts @@ -8,12 +8,11 @@ import { import { type GetEventArgs, type Hex, - type LogTopic, encodeEventTopics, getAbiItem, - getEventSelector, - getFunctionSelector, parseAbiItem, + toEventSelector, + toFunctionSelector, } from "viem"; import type { Config } from "../config/config.js"; @@ -76,7 +75,7 @@ export const buildAbiEvents = ({ abi }: { abi: Abi }) => { const safeName = overloadedEventNames.has(item.name) ? signature.split("event ")[1]! : item.name; - const selector = getEventSelector(item); + const selector = toEventSelector(item); const abiEventMeta = { safeName, signature, selector, item }; @@ -92,18 +91,35 @@ export const buildAbiEvents = ({ abi }: { abi: Abi }) => { export function buildTopics( abi: Abi, filter: NonNullable, -): LogTopic[] { +): { + topic0: Hex | Hex[]; + topic1: Hex | Hex[] | null; + topic2: Hex | Hex[] | null; + topic3: Hex | Hex[] | null; +} { if (Array.isArray(filter.event)) { // List of event signatures - return [ - filter.event.map((event) => getEventSelector(findAbiEvent(abi, event))), - ]; + return { + topic0: filter.event.map((event) => + toEventSelector(findAbiEvent(abi, event)), + ), + topic1: null, + topic2: null, + topic3: null, + }; } else { // Single event with args - return encodeEventTopics({ + const topics = encodeEventTopics({ abi: [findAbiEvent(abi, filter.event)], args: filter.args as GetEventArgs, }); + + return { + topic0: topics[0], + topic1: topics[1] ?? null, + topic2: topics[2] ?? null, + topic3: topics[3] ?? null, + }; } } @@ -136,7 +152,7 @@ export const buildAbiFunctions = ({ abi }: { abi: Abi }) => { const safeName = overloadedFunctionNames.has(item.name) ? signature.split("function ")[1]! 
: `${item.name}()`; - const selector = getFunctionSelector(item); + const selector = toFunctionSelector(item); const abiEventMeta = { safeName, signature, selector, item }; diff --git a/packages/core/src/sync/events.test.ts b/packages/core/src/sync/events.test.ts index 8c17943c9..0d1f116f7 100644 --- a/packages/core/src/sync/events.test.ts +++ b/packages/core/src/sync/events.test.ts @@ -1,4 +1,5 @@ import { ALICE, BOB } from "@/_test/constants.js"; +import { erc20ABI } from "@/_test/generated.js"; import { setupAnvil, setupCommon, @@ -6,42 +7,96 @@ import { setupIsolatedDatabase, } from "@/_test/setup.js"; import { - getEventsBlock, - getEventsLog, - getEventsTrace, - getRawRPCData, + createPair, + deployErc20, + deployFactory, + mintErc20, + swapPair, + transferEth, +} from "@/_test/simulate.js"; +import { + getAccountsConfigAndIndexingFunctions, + getBlocksConfigAndIndexingFunctions, + getErc20ConfigAndIndexingFunctions, + getNetwork, + getPairWithFactoryConfigAndIndexingFunctions, } from "@/_test/utils.js"; +import { buildConfigAndIndexingFunctions } from "@/build/configAndIndexingFunctions.js"; +import type { SyncTrace, SyncTransaction } from "@/types/sync.js"; import { encodeCheckpoint, maxCheckpoint, zeroCheckpoint, } from "@/utils/checkpoint.js"; -import { checksumAddress, parseEther, zeroAddress } from "viem"; +import { createRequestQueue } from "@/utils/requestQueue.js"; +import { + _eth_getBlockByNumber, + _eth_getLogs, + _eth_getTransactionReceipt, +} from "@/utils/rpc.js"; +import { + type Hex, + encodeEventTopics, + padHex, + parseEther, + toHex, + zeroAddress, +} from "viem"; +import { encodeFunctionData, encodeFunctionResult } from "viem/utils"; import { beforeEach, expect, test } from "vitest"; import { type BlockEvent, - type CallTraceEvent, type LogEvent, + type RawEvent, + type TraceEvent, + type TransferEvent, buildEvents, decodeEvents, } from "./events.js"; +import type { LogFactory, LogFilter } from "./source.js"; beforeEach(setupCommon); beforeEach(setupAnvil); beforeEach(setupIsolatedDatabase); test("decodeEvents() log", async (context) => { - const { common, sources } = context; + const { common } = context; + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address: zeroAddress, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const topics = encodeEventTopics({ + abi: erc20ABI, + eventName: "Transfer", + args: { + from: zeroAddress, + to: ALICE, + }, + }); + + const data = padHex(toHex(parseEther("1")), { size: 32 }); - const rawEvents = await getEventsLog(sources); + const rawEvent = { + chainId: 1, + sourceIndex: 0, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: { + id: "test", + data, + topics, + }, + } as RawEvent; - const events = decodeEvents(common, sources, rawEvents) as [ - LogEvent, - LogEvent, - LogEvent, - ]; + const events = decodeEvents(common, sources, [rawEvent]) as [LogEvent]; - expect(events).toHaveLength(3); + expect(events).toHaveLength(1); expect(events[0].event.args).toMatchObject({ from: zeroAddress, to: ALICE, @@ -50,223 +105,728 @@ test("decodeEvents() log", async (context) => { expect(events[0].event.name).toBe( "Transfer(address indexed from, address indexed to, uint256 amount)", ); - expect(events[1].event.args).toMatchObject({ - from: ALICE, - to: BOB, - amount: parseEther("1"), +}); + +test("decodeEvents() log error", async (context) => { + const { 
common } = context; + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address: zeroAddress, }); - expect(events[1].event.name).toBe( - "Transfer(address indexed from, address indexed to, uint256 amount)", - ); - expect(events[2].event.args).toMatchObject({ - sender: ALICE, - to: ALICE, - amount0Out: 1n, - amount1Out: 2n, + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, }); - expect(events[2].event.name).toBe("Swap"); + + const topics = encodeEventTopics({ + abi: erc20ABI, + eventName: "Transfer", + args: { + from: zeroAddress, + to: ALICE, + }, + }); + + // invalid log.data, causing an error when decoding + const rawEvent = { + chainId: 1, + sourceIndex: 0, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: { + id: "test", + data: "0x0" as Hex, + topics, + }, + } as RawEvent; + + const events = decodeEvents(common, sources, [rawEvent]) as [LogEvent]; + + expect(events).toHaveLength(0); }); -test("decodeEvents() log error", async (context) => { - const { common, sources } = context; +test("decodeEvents() block", async (context) => { + const { common } = context; - const rawEvents = await getEventsLog(sources); + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); - // remove data from log, causing an error when decoding - rawEvents[0]!.log!.data = "0x0"; - const events = decodeEvents(common, sources, rawEvents) as [ - LogEvent, - LogEvent, - ]; + const rawEvent = { + chainId: 1, + sourceIndex: 0, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: { + number: 1n, + } as RawEvent["block"], + transaction: undefined, + log: undefined, + } as RawEvent; - expect(events).toHaveLength(2); + const events = decodeEvents(common, sources, [rawEvent]) as [BlockEvent]; - expect(events[0].event.args).toMatchObject({ + expect(events).toHaveLength(1); + expect(events[0].event.block).toMatchObject({ + number: 1n, + }); +}); + +test("decodeEvents() transfer", async (context) => { + const { common } = context; + + const { config, rawIndexingFunctions } = + getAccountsConfigAndIndexingFunctions({ + address: ALICE, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const rawEvent = { + chainId: 1, + sourceIndex: 3, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: undefined, + trace: { + id: "test", + type: "CALL", + from: ALICE, + to: BOB, + gas: 0n, + gasUsed: 0n, + input: "0x0", + output: "0x0", + value: parseEther("1"), + traceIndex: 0, + subcalls: 0, + }, + } as RawEvent; + + const events = decodeEvents(common, sources, [rawEvent]) as [TransferEvent]; + + expect(events).toHaveLength(1); + expect(events[0].event.transfer).toMatchObject({ from: ALICE, to: BOB, - amount: parseEther("1"), - }); - expect(events[1].event.args).toMatchObject({ - sender: ALICE, - to: ALICE, - amount0Out: 1n, - amount1Out: 2n, + value: parseEther("1"), }); + expect(events[0].name).toBe("Accounts:transfer:from"); }); -test("decodeEvents() block", async (context) => { - const { common, sources } = context; +test("decodeEvents() transaction", async (context) => { + const { common } = context; + + const { config, rawIndexingFunctions } = + 
getAccountsConfigAndIndexingFunctions({ + address: ALICE, + }); - const rawEvents = await getEventsBlock(sources); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const rawEvent = { + chainId: 1, + sourceIndex: 0, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: undefined, + trace: undefined, + } as RawEvent; - const events = decodeEvents(common, sources, rawEvents) as [BlockEvent]; + const events = decodeEvents(common, sources, [rawEvent]) as [TransferEvent]; expect(events).toHaveLength(1); - expect(events[0].event.block).toMatchObject({ - number: 3n, - }); + + expect(events[0].name).toBe("Accounts:transaction:to"); }); test("decodeEvents() trace", async (context) => { - const { common, sources } = context; + const { common } = context; - const rawEvents = await getEventsTrace(sources); + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address: zeroAddress, + includeCallTraces: true, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const rawEvent = { + chainId: 1, + sourceIndex: 1, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: undefined, + trace: { + id: "test", + type: "CALL", + from: ALICE, + to: BOB, + input: encodeFunctionData({ + abi: erc20ABI, + functionName: "transfer", + args: [BOB, parseEther("1")], + }), + output: encodeFunctionResult({ + abi: erc20ABI, + functionName: "transfer", + result: true, + }), + gas: 0n, + gasUsed: 0n, + value: 0n, + traceIndex: 0, + subcalls: 0, + }, + } as RawEvent; - const events = decodeEvents(common, sources, rawEvents) as [CallTraceEvent]; + const events = decodeEvents(common, sources, [rawEvent]) as [TraceEvent]; expect(events).toHaveLength(1); - expect(events[0].event.args).toBeUndefined(); - expect(events[0].event.result).toBe(checksumAddress(context.factory.pair)); - expect(events[0].name).toBe("Factory.createPair()"); + expect(events[0].event.args).toStrictEqual([BOB, parseEther("1")]); + expect(events[0].event.result).toBe(true); + expect(events[0].name).toBe("Erc20.transfer()"); }); test("decodeEvents() trace error", async (context) => { - const { common, sources } = context; + const { common } = context; + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address: zeroAddress, + includeCallTraces: true, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); - const rawEvents = await getEventsTrace(sources); + const rawEvent = { + chainId: 1, + sourceIndex: 1, + checkpoint: encodeCheckpoint(zeroCheckpoint), + block: {} as RawEvent["block"], + transaction: {} as RawEvent["transaction"], + log: undefined, + trace: { + id: "test", + type: "CALL", + from: ALICE, + to: BOB, + input: "0x", + output: encodeFunctionResult({ + abi: erc20ABI, + functionName: "transfer", + result: true, + }), + gas: 0n, + gasUsed: 0n, + value: 0n, + traceIndex: 0, + subcalls: 0, + }, + } as RawEvent; - // change function selector, causing an error when decoding - rawEvents[0]!.trace!.input = "0x0"; - const events = decodeEvents(common, sources, rawEvents) as [CallTraceEvent]; + const events = decodeEvents(common, sources, [rawEvent]) as [TraceEvent]; expect(events).toHaveLength(0); }); -test("buildEvents() matches getEvents()", async (context) => { 
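+// NOTE: Each of the rewritten `buildEvents() matches getEvents()` tests below
+// follows the same pattern: simulate activity on anvil, insert the raw RPC
+// data for the relevant block(s) into the sync store, then assert that
+// `buildEvents()` produces exactly the events returned by
+// `syncStore.getEvents()` for the same filters.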
+test("buildEvents() matches getEvents() log", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const rpcData = await getRawRPCData(); - - await syncStore.insertBlocks({ - blocks: [ - rpcData.block1.block, - rpcData.block2.block, - rpcData.block3.block, - rpcData.block4.block, - rpcData.block5.block, - ], + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + // insert block 2 + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + + await syncStore.insertTransactions({ + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); + + const rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); await syncStore.insertLogs({ - logs: [ - { log: rpcData.block2.logs[0], block: rpcData.block2.block }, - { log: rpcData.block2.logs[1], block: rpcData.block2.block }, - { log: rpcData.block3.logs[0], block: rpcData.block3.block }, - { log: rpcData.block4.logs[0], block: rpcData.block4.block }, - ], + logs: [{ log: rpcLogs[0]!, block: rpcBlock }], shouldUpdateCheckpoint: true, chainId: 1, }); + + const { events: events1 } = await syncStore.getEvents({ + filters: sources.map((s) => s.filter), + from: encodeCheckpoint(zeroCheckpoint), + to: encodeCheckpoint(maxCheckpoint), + limit: 10, + }); + + const events2 = buildEvents({ + sources, + chainId: 1, + blockWithEventData: { + block: rpcBlock, + logs: rpcLogs, + transactions: rpcBlock.transactions, + traces: [], + transactionReceipts: [], + }, + finalizedChildAddresses: new Map(), + unfinalizedChildAddresses: new Map(), + }); + + expect(events1).toHaveLength(1); + + expect(events2).toStrictEqual(events1); + + await cleanup(); +}); + +test("buildEvents() matches getEvents() log factory", async (context) => { + const { cleanup, syncStore } = await setupDatabaseServices(context); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployFactory({ sender: ALICE }); + const { result: pair } = await createPair({ + factory: address, + sender: ALICE, + }); + await swapPair({ + pair, + amount0Out: 1n, + amount1Out: 1n, + to: ALICE, + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getPairWithFactoryConfigAndIndexingFunctions({ + address, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + // insert block 2 + + let rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 2, + toBlock: 2, + }); + await syncStore.insertLogs({ + logs: [{ log: rpcLogs[0]! 
}], + shouldUpdateCheckpoint: false, + chainId: 1, + }); + + // insert block 3 + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 3, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + await syncStore.insertTransactions({ - transactions: [ - ...rpcData.block2.transactions, - ...rpcData.block3.transactions, - ...rpcData.block4.transactions, - ], + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], chainId: 1, }); + + rpcLogs = await _eth_getLogs(requestQueue, { + fromBlock: 3, + toBlock: 3, + }); + await syncStore.insertLogs({ + logs: [{ log: rpcLogs[0]!, block: rpcBlock }], + shouldUpdateCheckpoint: true, + chainId: 1, + }); + + const { events: events1 } = await syncStore.getEvents({ + filters: sources.map((s) => s.filter), + from: encodeCheckpoint(zeroCheckpoint), + to: encodeCheckpoint(maxCheckpoint), + limit: 10, + }); + + const filter = sources[0]!.filter as LogFilter; + + const events2 = buildEvents({ + sources, + chainId: 1, + blockWithEventData: { + block: rpcBlock, + logs: rpcLogs, + transactions: rpcBlock.transactions, + traces: [], + transactionReceipts: [], + }, + finalizedChildAddresses: new Map([[filter.address, new Set()]]), + unfinalizedChildAddresses: new Map([[filter.address, new Set([pair])]]), + }); + + expect(events1).toHaveLength(1); + + expect(events2).toStrictEqual(events1); + + await cleanup(); +}); + +test("buildEvents() matches getEvents() block", async (context) => { + const { cleanup, syncStore } = await setupDatabaseServices(context); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + // insert block 0 + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 0, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + + const { events: events1 } = await syncStore.getEvents({ + filters: sources.map((s) => s.filter), + from: encodeCheckpoint(zeroCheckpoint), + to: encodeCheckpoint(maxCheckpoint), + limit: 10, + }); + + const events2 = buildEvents({ + sources, + chainId: 1, + blockWithEventData: { + block: rpcBlock, + logs: [], + transactions: [], + traces: [], + transactionReceipts: [], + }, + finalizedChildAddresses: new Map(), + unfinalizedChildAddresses: new Map(), + }); + + expect(events1).toHaveLength(1); + + expect(events2).toStrictEqual(events1); + + await cleanup(); +}); + +test("buildEvents() matches getEvents() transfer", async (context) => { + const { cleanup, syncStore } = await setupDatabaseServices(context); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { hash } = await transferEth({ + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getAccountsConfigAndIndexingFunctions({ + address: ALICE, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + + await syncStore.insertTransactions({ + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], + chainId: 1, + }); + + const rpcReceipt = await 
_eth_getTransactionReceipt(requestQueue, { hash }); + await syncStore.insertTransactionReceipts({ - transactionReceipts: [ - ...rpcData.block2.transactionReceipts, - ...rpcData.block3.transactionReceipts, - ...rpcData.block4.transactionReceipts, - ], + transactionReceipts: [rpcReceipt], chainId: 1, }); - await syncStore.insertCallTraces({ - callTraces: [ - { callTrace: rpcData.block2.callTraces[0], block: rpcData.block2.block }, - { callTrace: rpcData.block2.callTraces[1], block: rpcData.block2.block }, - { callTrace: rpcData.block3.callTraces[0], block: rpcData.block3.block }, - { callTrace: rpcData.block4.callTraces[0], block: rpcData.block4.block }, + + const rpcTrace = { + trace: { + type: "CALL", + from: ALICE, + to: BOB, + gas: "0x0", + gasUsed: "0x0", + input: "0x0", + output: "0x0", + value: rpcBlock.transactions[0]!.value, + index: 0, + subcalls: 0, + }, + transactionHash: hash, + } satisfies SyncTrace; + + await syncStore.insertTraces({ + traces: [ + { + trace: rpcTrace, + block: rpcBlock, + transaction: rpcBlock.transactions[0] as SyncTransaction, + }, ], chainId: 1, }); const { events: events1 } = await syncStore.getEvents({ - filters: context.sources.map((s) => s.filter), + filters: sources.map((s) => s.filter), from: encodeCheckpoint(zeroCheckpoint), to: encodeCheckpoint(maxCheckpoint), limit: 10, }); - const events2 = [ - ...buildEvents({ - sources: context.sources, - chainId: 1, - blockWithEventData: { - ...rpcData.block1, - callTraces: [], - }, - finalizedChildAddresses: new Map([ - [context.sources[1].filter.address, new Set()], - [context.sources[2].filter.toAddress, new Set()], - ]), - unfinalizedChildAddresses: new Map([ - [context.sources[1].filter.address, new Set()], - [context.sources[2].filter.toAddress, new Set()], - ]), - }), - ...buildEvents({ - sources: context.sources, - chainId: 1, - blockWithEventData: { - ...rpcData.block2, - }, - finalizedChildAddresses: new Map([ - [context.sources[1].filter.address, new Set()], - [context.sources[2].filter.toAddress, new Set()], - ]), - unfinalizedChildAddresses: new Map([ - [context.sources[1].filter.address, new Set()], - [context.sources[2].filter.toAddress, new Set()], - ]), - }), - ...buildEvents({ - sources: context.sources, - chainId: 1, - blockWithEventData: { - ...rpcData.block3, - }, - finalizedChildAddresses: new Map([ - [context.sources[1].filter.address, new Set()], - [context.sources[2].filter.toAddress, new Set()], - ]), - unfinalizedChildAddresses: new Map([ - [context.sources[1].filter.address, new Set()], - [context.sources[2].filter.toAddress, new Set()], - ]), - }), - ...buildEvents({ - sources: context.sources, - chainId: 1, - blockWithEventData: { - ...rpcData.block4, - }, - finalizedChildAddresses: new Map([ - [context.sources[1].filter.address, new Set()], - [context.sources[2].filter.toAddress, new Set()], - ]), - unfinalizedChildAddresses: new Map([ - [context.sources[1].filter.address, new Set([context.factory.pair])], - [context.sources[2].filter.toAddress, new Set([context.factory.pair])], - ]), - }), - ...buildEvents({ - sources: context.sources, - chainId: 1, - blockWithEventData: { - ...rpcData.block5, + const events2 = buildEvents({ + sources, + chainId: 1, + blockWithEventData: { + block: rpcBlock, + logs: [], + transactions: rpcBlock.transactions, + traces: [rpcTrace], + transactionReceipts: [rpcReceipt], + }, + finalizedChildAddresses: new Map(), + unfinalizedChildAddresses: new Map(), + }); + + // transaction:from and transfer:from + expect(events1).toHaveLength(2); + + 
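+  // buildEvents() must reproduce the same two events, in the same order,
+  // from the raw block data alone.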
expect(events2).toStrictEqual(events1); + + await cleanup(); +}); + +test("buildEvents() matches getEvents() transaction", async (context) => { + const { cleanup, syncStore } = await setupDatabaseServices(context); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { hash } = await transferEth({ + to: BOB, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = + getAccountsConfigAndIndexingFunctions({ + address: ALICE, + }); + + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 1, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + + await syncStore.insertTransactions({ + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], + chainId: 1, + }); + + const rpcReceipt = await _eth_getTransactionReceipt(requestQueue, { hash }); + + await syncStore.insertTransactionReceipts({ + transactionReceipts: [rpcReceipt], + chainId: 1, + }); + + const { events: events1 } = await syncStore.getEvents({ + filters: sources.map((s) => s.filter), + from: encodeCheckpoint(zeroCheckpoint), + to: encodeCheckpoint(maxCheckpoint), + limit: 10, + }); + + const events2 = buildEvents({ + sources, + chainId: 1, + blockWithEventData: { + block: rpcBlock, + logs: [], + transactions: rpcBlock.transactions, + traces: [], + transactionReceipts: [rpcReceipt], + }, + finalizedChildAddresses: new Map(), + unfinalizedChildAddresses: new Map(), + }); + + expect(events1).toHaveLength(1); + + expect(events2).toStrictEqual(events1); + + await cleanup(); +}); + +test("buildEvents() matches getEvents() trace", async (context) => { + const { cleanup, syncStore } = await setupDatabaseServices(context); + + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + + const { address } = await deployErc20({ sender: ALICE }); + const { hash } = await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, + }); + + const { config, rawIndexingFunctions } = getErc20ConfigAndIndexingFunctions({ + address, + includeCallTraces: true, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const rpcBlock = await _eth_getBlockByNumber(requestQueue, { + blockNumber: 2, + }); + await syncStore.insertBlocks({ blocks: [rpcBlock], chainId: 1 }); + + await syncStore.insertTransactions({ + transactions: [{ transaction: rpcBlock.transactions[0]!, block: rpcBlock }], + chainId: 1, + }); + + const rpcTrace = { + trace: { + type: "CALL", + from: ALICE, + to: address, + gas: "0x0", + gasUsed: "0x0", + input: encodeFunctionData({ + abi: erc20ABI, + functionName: "transfer", + args: [BOB, parseEther("1")], + }), + output: encodeFunctionResult({ + abi: erc20ABI, + functionName: "transfer", + result: true, + }), + value: "0x0", + index: 0, + subcalls: 0, + }, + transactionHash: hash, + } satisfies SyncTrace; + + await syncStore.insertTraces({ + traces: [ + { + trace: rpcTrace, + block: rpcBlock, + transaction: rpcBlock.transactions[0] as SyncTransaction, }, - finalizedChildAddresses: new Map([ - [context.sources[1].filter.address, new Set()], - [context.sources[2].filter.toAddress, new Set()], - ]), - unfinalizedChildAddresses: new Map([ - [context.sources[1].filter.address, new Set([context.factory.pair])], - 
[context.sources[2].filter.toAddress, new Set([context.factory.pair])], - ]), - }), - ]; + ], + chainId: 1, + }); + + const { events: events1 } = await syncStore.getEvents({ + filters: sources.map((s) => s.filter), + from: encodeCheckpoint(zeroCheckpoint), + to: encodeCheckpoint(maxCheckpoint), + limit: 10, + }); + + const events2 = buildEvents({ + sources, + chainId: 1, + blockWithEventData: { + block: rpcBlock, + logs: [], + transactions: rpcBlock.transactions, + traces: [rpcTrace], + transactionReceipts: [], + }, + finalizedChildAddresses: new Map(), + unfinalizedChildAddresses: new Map(), + }); + + expect(events1).toHaveLength(1); expect(events2).toStrictEqual(events1); diff --git a/packages/core/src/sync/events.ts b/packages/core/src/sync/events.ts index fd1023d60..6543aa4bc 100644 --- a/packages/core/src/sync/events.ts +++ b/packages/core/src/sync/events.ts @@ -1,21 +1,24 @@ import type { Common } from "@/common/common.js"; import { isBlockFilterMatched, - isCallTraceFilterMatched, isLogFilterMatched, + isTraceFilterMatched, + isTransactionFilterMatched, + isTransferFilterMatched, } from "@/sync-realtime/filter.js"; import type { BlockWithEventData } from "@/sync-realtime/index.js"; import type { Block, - CallTrace, Log, + Trace, Transaction, TransactionReceipt, + Transfer, } from "@/types/eth.js"; import type { SyncBlock, - SyncCallTrace, SyncLog, + SyncTrace, SyncTransaction, SyncTransactionReceipt, } from "@/types/sync.js"; @@ -41,7 +44,13 @@ import { hexToBigInt, hexToNumber, } from "viem"; -import { type Factory, type Source, isAddressFactory } from "./source.js"; +import { + type BlockFilter, + type Factory, + type Source, + isAddressFactory, + shouldGetTransactionReceipt, +} from "./source.js"; export type RawEvent = { chainId: number; @@ -51,10 +60,15 @@ export type RawEvent = { block: Block; transaction?: Transaction; transactionReceipt?: TransactionReceipt; - trace?: CallTrace; + trace?: Trace; }; -export type Event = LogEvent | BlockEvent | CallTraceEvent; +export type Event = + | LogEvent + | BlockEvent + | TransactionEvent + | TransferEvent + | TraceEvent; export type SetupEvent = { type: "setup"; @@ -98,18 +112,50 @@ export type BlockEvent = { }; }; -export type CallTraceEvent = { - type: "callTrace"; +export type TransactionEvent = { + type: "transaction"; chainId: number; checkpoint: string; - /** `${source.name}.${safeName}()` */ + /** `${source.name}.{safeName}()` */ + name: string; + + event: { + block: Block; + transaction: Transaction; + transactionReceipt?: TransactionReceipt; + }; +}; + +export type TransferEvent = { + type: "transfer"; + chainId: number; + checkpoint: string; + + /** `${source.name}:transfer:from` | `${source.name}:transfer:to` */ + name: string; + + event: { + transfer: Transfer; + block: Block; + transaction: Transaction; + transactionReceipt?: TransactionReceipt; + trace: Trace; + }; +}; + +export type TraceEvent = { + type: "trace"; + chainId: number; + checkpoint: string; + + /** `${source.name}:transfer:from` | `${source.name}:transfer:to` */ name: string; event: { args: any; result: any; - trace: CallTrace; + trace: Trace; block: Block; transaction: Transaction; transactionReceipt?: TransactionReceipt; @@ -126,7 +172,7 @@ export const buildEvents = ({ logs, transactions, transactionReceipts, - callTraces, + traces, }, finalizedChildAddresses, unfinalizedChildAddresses, @@ -142,7 +188,6 @@ export const buildEvents = ({ const transactionCache = new Map(); const transactionReceiptCache = new Map(); - const traceByTransactionHash = new 
Map(); for (const transaction of transactions) { transactionCache.set(transaction.hash, transaction); } @@ -152,56 +197,237 @@ export const buildEvents = ({ transactionReceipt, ); } - for (const callTrace of callTraces) { - if (traceByTransactionHash.has(callTrace.transactionHash) === false) { - traceByTransactionHash.set(callTrace.transactionHash, []); - } - traceByTransactionHash.get(callTrace.transactionHash)!.push(callTrace); - } for (let i = 0; i < sources.length; i++) { - const filter = sources[i]!.filter; + const source = sources[i]!; + const filter = source.filter; if (chainId !== filter.chainId) continue; - switch (filter.type) { - case "log": { - for (const log of logs) { - if ( - isLogFilterMatched({ filter, block, log }) && - (isAddressFactory(filter.address) - ? finalizedChildAddresses.get(filter.address)!.has(log.address) || - unfinalizedChildAddresses.get(filter.address)!.has(log.address) - : true) - ) { - events.push({ - chainId: filter.chainId, - sourceIndex: i, - checkpoint: encodeCheckpoint({ - blockTimestamp: hexToNumber(block.timestamp), - chainId: BigInt(filter.chainId), - blockNumber: hexToBigInt(log.blockNumber), - transactionIndex: hexToBigInt(log.transactionIndex), - eventType: EVENT_TYPES.logs, - eventIndex: hexToBigInt(log.logIndex), - }), - log: convertLog(log), - block: convertBlock(block), - transaction: convertTransaction( - transactionCache.get(log.transactionHash)!, - ), - transactionReceipt: filter.includeTransactionReceipts - ? convertTransactionReceipt( - transactionReceiptCache.get(log.transactionHash)!, - ) - : undefined, - trace: undefined, - }); + switch (source.type) { + case "contract": { + switch (filter.type) { + case "log": { + for (const log of logs) { + if ( + isLogFilterMatched({ filter, block, log }) && + (isAddressFactory(filter.address) + ? finalizedChildAddresses + .get(filter.address)! + .has(log.address) || + unfinalizedChildAddresses + .get(filter.address)! + .has(log.address) + : true) + ) { + events.push({ + chainId: filter.chainId, + sourceIndex: i, + checkpoint: encodeCheckpoint({ + blockTimestamp: hexToNumber(block.timestamp), + chainId: BigInt(filter.chainId), + blockNumber: hexToBigInt(log.blockNumber), + transactionIndex: hexToBigInt(log.transactionIndex), + eventType: EVENT_TYPES.logs, + eventIndex: hexToBigInt(log.logIndex), + }), + log: convertLog(log), + block: convertBlock(block), + transaction: convertTransaction( + transactionCache.get(log.transactionHash)!, + ), + transactionReceipt: shouldGetTransactionReceipt(filter) + ? convertTransactionReceipt( + transactionReceiptCache.get(log.transactionHash)!, + ) + : undefined, + trace: undefined, + }); + } + } + break; + } + + case "trace": { + for (const trace of traces) { + const fromChildAddresses = isAddressFactory(filter.fromAddress) + ? [ + finalizedChildAddresses.get(filter.fromAddress)!, + unfinalizedChildAddresses.get(filter.fromAddress)!, + ] + : undefined; + + const toChildAddresses = isAddressFactory(filter.toAddress) + ? [ + finalizedChildAddresses.get(filter.toAddress)!, + unfinalizedChildAddresses.get(filter.toAddress)!, + ] + : undefined; + + if ( + isTraceFilterMatched({ + filter, + block, + trace: trace.trace, + fromChildAddresses, + toChildAddresses, + }) && + (filter.callType === undefined + ? true + : filter.callType === trace.trace.type) && + (filter.includeReverted + ? 
true + : trace.trace.error === undefined) + ) { + const transaction = transactionCache.get( + trace.transactionHash, + )!; + const transactionReceipt = transactionReceiptCache.get( + trace.transactionHash, + )!; + events.push({ + chainId: filter.chainId, + sourceIndex: i, + checkpoint: encodeCheckpoint({ + blockTimestamp: hexToNumber(block.timestamp), + chainId: BigInt(filter.chainId), + blockNumber: hexToBigInt(block.number), + transactionIndex: BigInt(transaction.transactionIndex), + eventType: EVENT_TYPES.traces, + eventIndex: BigInt(trace.trace.index), + }), + log: undefined, + trace: convertTrace(trace), + block: convertBlock(block), + transaction: convertTransaction(transaction), + transactionReceipt: shouldGetTransactionReceipt(filter) + ? convertTransactionReceipt(transactionReceipt) + : undefined, + }); + } + } + break; + } + } + break; + } + + case "account": { + switch (filter.type) { + case "transaction": { + for (const transaction of transactions) { + const fromChildAddresses = isAddressFactory(filter.fromAddress) + ? [ + finalizedChildAddresses.get(filter.fromAddress)!, + unfinalizedChildAddresses.get(filter.fromAddress)!, + ] + : undefined; + + const toChildAddresses = isAddressFactory(filter.toAddress) + ? [ + finalizedChildAddresses.get(filter.toAddress)!, + unfinalizedChildAddresses.get(filter.toAddress)!, + ] + : undefined; + + if ( + isTransactionFilterMatched({ + filter, + block, + transaction, + fromChildAddresses, + toChildAddresses, + }) && + (filter.includeReverted + ? true + : transactionReceiptCache.get(transaction.hash)!.status === + "0x1") + ) { + events.push({ + chainId: filter.chainId, + sourceIndex: i, + checkpoint: encodeCheckpoint({ + blockTimestamp: hexToNumber(block.timestamp), + chainId: BigInt(filter.chainId), + blockNumber: hexToBigInt(block.number), + transactionIndex: BigInt(transaction.transactionIndex), + eventType: EVENT_TYPES.transactions, + eventIndex: 0n, + }), + log: undefined, + trace: undefined, + block: convertBlock(block), + transaction: convertTransaction(transaction), + transactionReceipt: convertTransactionReceipt( + transactionReceiptCache.get(transaction.hash)!, + ), + }); + } + } + break; + } + + case "transfer": { + for (const trace of traces) { + const fromChildAddresses = isAddressFactory(filter.fromAddress) + ? [ + finalizedChildAddresses.get(filter.fromAddress)!, + unfinalizedChildAddresses.get(filter.fromAddress)!, + ] + : undefined; + + const toChildAddresses = isAddressFactory(filter.toAddress) + ? [ + finalizedChildAddresses.get(filter.toAddress)!, + unfinalizedChildAddresses.get(filter.toAddress)!, + ] + : undefined; + + if ( + isTransferFilterMatched({ + filter, + block, + trace: trace.trace, + fromChildAddresses, + toChildAddresses, + }) && + (filter.includeReverted + ? true + : trace.trace.error === undefined) + ) { + const transaction = transactionCache.get( + trace.transactionHash, + )!; + const transactionReceipt = transactionReceiptCache.get( + trace.transactionHash, + )!; + events.push({ + chainId: filter.chainId, + sourceIndex: i, + checkpoint: encodeCheckpoint({ + blockTimestamp: hexToNumber(block.timestamp), + chainId: BigInt(filter.chainId), + blockNumber: hexToBigInt(block.number), + transactionIndex: BigInt(transaction.transactionIndex), + eventType: EVENT_TYPES.traces, + eventIndex: BigInt(trace.trace.index), + }), + log: undefined, + trace: convertTrace(trace), + block: convertBlock(block), + transaction: convertTransaction(transaction), + transactionReceipt: shouldGetTransactionReceipt(filter) + ? 
convertTransactionReceipt(transactionReceipt) + : undefined, + }); + } + } + break; } } break; } case "block": { - if (isBlockFilterMatched({ filter, block })) { + if (isBlockFilterMatched({ filter: filter as BlockFilter, block })) { events.push({ chainId: filter.chainId, sourceIndex: i, @@ -222,62 +448,8 @@ export const buildEvents = ({ } break; } - - case "callTrace": { - for (const callTraces of Array.from(traceByTransactionHash.values())) { - // Use lexographical sort of stringified `traceAddress`. - callTraces.sort((a, b) => { - return a.traceAddress < b.traceAddress ? -1 : 1; - }); - - let eventIndex = 0n; - for (const callTrace of callTraces) { - if ( - isCallTraceFilterMatched({ filter, block, callTrace }) && - (isAddressFactory(filter.toAddress) - ? finalizedChildAddresses - .get(filter.toAddress)! - .has(callTrace.action.to) || - unfinalizedChildAddresses - .get(filter.toAddress)! - .has(callTrace.action.to) - : true) && - callTrace.result !== null && - filter.functionSelectors.includes( - callTrace.action.input.slice(0, 10).toLowerCase() as Hex, - ) - ) { - events.push({ - chainId: filter.chainId, - sourceIndex: i, - checkpoint: encodeCheckpoint({ - blockTimestamp: hexToNumber(block.timestamp), - chainId: BigInt(filter.chainId), - blockNumber: hexToBigInt(callTrace.blockNumber), - transactionIndex: BigInt(callTrace.transactionPosition), - eventType: EVENT_TYPES.callTraces, - eventIndex: eventIndex++, - }), - log: undefined, - trace: convertCallTrace(callTrace), - block: convertBlock(block), - transaction: convertTransaction( - transactionCache.get(callTrace.transactionHash)!, - ), - transactionReceipt: filter.includeTransactionReceipts - ? convertTransactionReceipt( - transactionReceiptCache.get(callTrace.transactionHash)!, - ) - : undefined, - }); - } - } - } - - break; - } default: - never(filter); + never(source); } } @@ -297,18 +469,6 @@ export const decodeEvents = ( const source = sources[event.sourceIndex]!; switch (source.type) { - case "block": { - events.push({ - type: "block", - chainId: event.chainId, - checkpoint: event.checkpoint, - name: `${source.name}:block`, - event: { - block: event.block, - }, - }); - break; - } case "contract": { switch (source.filter.type) { case "log": { @@ -361,7 +521,7 @@ export const decodeEvents = ( break; } - case "callTrace": { + case "trace": { try { const selector = event .trace!.input.slice(0, 10) @@ -371,7 +531,7 @@ export const decodeEvents = ( throw new Error(); } - const { safeName, item } = + const { item, safeName } = source.abiFunctions.bySelector[selector]!; const { args, functionName } = decodeFunctionData({ @@ -381,15 +541,16 @@ export const decodeEvents = ( const result = decodeFunctionResult({ abi: [item], - data: event.trace!.output, + data: event.trace!.output!, functionName, }); events.push({ - type: "callTrace", + type: "trace", chainId: event.chainId, checkpoint: event.checkpoint, + // NOTE: `safename` includes () name: `${source.name}.${safeName}`, event: { @@ -423,6 +584,70 @@ export const decodeEvents = ( break; } + case "account": { + switch (source.filter.type) { + case "transaction": { + const isFrom = source.filter.toAddress === undefined; + + events.push({ + type: "transaction", + chainId: event.chainId, + checkpoint: event.checkpoint, + + name: `${source.name}:transaction:${isFrom ? 
"from" : "to"}`, + + event: { + block: event.block, + transaction: event.transaction!, + transactionReceipt: event.transactionReceipt, + }, + }); + + break; + } + + case "transfer": { + const isFrom = source.filter.toAddress === undefined; + + events.push({ + type: "transfer", + chainId: event.chainId, + checkpoint: event.checkpoint, + + name: `${source.name}:transfer:${isFrom ? "from" : "to"}`, + + event: { + transfer: { + from: event.trace!.from, + to: event.trace!.to!, + value: event.trace!.value!, + }, + block: event.block, + transaction: event.transaction!, + transactionReceipt: event.transactionReceipt, + trace: event.trace!, + }, + }); + + break; + } + } + break; + } + + case "block": { + events.push({ + type: "block", + chainId: event.chainId, + checkpoint: event.checkpoint, + name: `${source.name}:block`, + event: { + block: event.block, + }, + }); + break; + } + default: never(source); } @@ -531,19 +756,13 @@ const convertBlock = (block: SyncBlock): Block => ({ const convertLog = (log: SyncLog): Log => ({ id: `${log.blockHash}-${log.logIndex}`, address: checksumAddress(log.address!), - blockHash: log.blockHash, - blockNumber: hexToBigInt(log.blockNumber), data: log.data, logIndex: Number(log.logIndex), removed: false, topics: log.topics, - transactionHash: log.transactionHash, - transactionIndex: Number(log.transactionIndex), }); const convertTransaction = (transaction: SyncTransaction): Transaction => ({ - blockHash: transaction.blockHash, - blockNumber: hexToBigInt(transaction.blockNumber), from: checksumAddress(transaction.from), gas: hexToBigInt(transaction.gas), hash: transaction.hash, @@ -596,8 +815,6 @@ const convertTransaction = (transaction: SyncTransaction): Transaction => ({ const convertTransactionReceipt = ( transactionReceipt: SyncTransactionReceipt, ): TransactionReceipt => ({ - blockHash: transactionReceipt.blockHash, - blockNumber: hexToBigInt(transactionReceipt.blockNumber), contractAddress: transactionReceipt.contractAddress ? checksumAddress(transactionReceipt.contractAddress) : null, @@ -605,23 +822,6 @@ const convertTransactionReceipt = ( effectiveGasPrice: hexToBigInt(transactionReceipt.effectiveGasPrice), from: checksumAddress(transactionReceipt.from), gasUsed: hexToBigInt(transactionReceipt.gasUsed), - logs: transactionReceipt.logs.map((log) => ({ - id: `${log.blockHash}-${log.logIndex}`, - address: checksumAddress(log.address), - blockHash: log.blockHash!, - blockNumber: hexToBigInt(log.blockNumber!), - data: log.data, - logIndex: hexToNumber(log.logIndex!), - removed: false, - topics: [ - log.topics[0] ?? null, - log.topics[1] ?? null, - log.topics[2] ?? null, - log.topics[3] ?? null, - ].filter((t): t is Hex => t !== null) as [Hex, ...Hex[]] | [], - transactionHash: log.transactionHash!, - transactionIndex: hexToNumber(log.transactionIndex!), - })), logsBloom: transactionReceipt.logsBloom, status: transactionReceipt.status === "0x1" @@ -630,8 +830,6 @@ const convertTransactionReceipt = ( ? "reverted" : (transactionReceipt.status as TransactionReceipt["status"]), to: transactionReceipt.to ? checksumAddress(transactionReceipt.to) : null, - transactionHash: transactionReceipt.transactionHash, - transactionIndex: Number(transactionReceipt.transactionIndex), type: transactionReceipt.type === "0x0" ? 
"legacy" @@ -644,20 +842,16 @@ const convertTransactionReceipt = ( : transactionReceipt.type, }); -const convertCallTrace = (callTrace: SyncCallTrace): CallTrace => ({ - id: `${callTrace.transactionHash}-${JSON.stringify(callTrace.traceAddress)}`, - from: checksumAddress(callTrace.action.from), - to: checksumAddress(callTrace.action.to), - gas: hexToBigInt(callTrace.action.gas), - value: hexToBigInt(callTrace.action.value), - input: callTrace.action.input, - output: callTrace.result!.output, - gasUsed: hexToBigInt(callTrace.result!.gasUsed), - subtraces: callTrace.subtraces, - traceAddress: callTrace.traceAddress, - blockHash: callTrace.blockHash, - blockNumber: hexToBigInt(callTrace.blockNumber), - transactionHash: callTrace.transactionHash, - transactionIndex: callTrace.transactionPosition, - callType: callTrace.action.callType as CallTrace["callType"], +const convertTrace = (trace: SyncTrace): Trace => ({ + id: `${trace.transactionHash}-${trace.trace.index}`, + type: trace.trace.type, + from: checksumAddress(trace.trace.from), + to: trace.trace.to ? checksumAddress(trace.trace.to) : null, + input: trace.trace.input, + output: trace.trace.output, + gas: hexToBigInt(trace.trace.gas), + gasUsed: hexToBigInt(trace.trace.gasUsed), + value: trace.trace.value ? hexToBigInt(trace.trace.value) : null, + traceIndex: trace.trace.index, + subcalls: trace.trace.subcalls, }); diff --git a/packages/core/src/sync/fragments.test.ts b/packages/core/src/sync/fragments.test.ts index d28757c80..da910817d 100644 --- a/packages/core/src/sync/fragments.test.ts +++ b/packages/core/src/sync/fragments.test.ts @@ -1,140 +1,91 @@ import { buildLogFactory } from "@/build/factory.js"; import { parseAbiItem } from "viem"; import { expect, test } from "vitest"; -import { buildLogFilterFragments } from "./fragments.js"; +import { getLogFilterFragmentIds } from "./fragments.js"; const llamaFactoryEventAbiItem = parseAbiItem( "event LlamaInstanceCreated(address indexed deployer, string indexed name, address llamaCore, address llamaExecutor, address llamaPolicy, uint256 chainId)", ); -test("buildLogFilterFragments generates 1 log filter fragment for null filter", () => { - const logFilterFragments = buildLogFilterFragments({ +test("getLogFilterFragmentIds generates 1 log filter fragment for null filter", () => { + const logFilterFragments = getLogFilterFragmentIds({ type: "log", chainId: 1, address: undefined, - topics: [null, null, null, null], - includeTransactionReceipts: false, + topic0: null, + topic1: null, + topic2: null, + topic3: null, + include: [], }); - expect(logFilterFragments).toMatchObject([ - { - id: "1_null_null_null_null_null_0", - address: null, - topic0: null, - topic1: null, - topic2: null, - topic3: null, - includeTransactionReceipts: 0, - }, - ]); + expect(logFilterFragments[0]!.id).toBe("log_1_null_null_null_null_null_0"); }); -test("buildLogFilterFragments generates 1 log filter fragment for simple filter", () => { - const logFilterFragments = buildLogFilterFragments({ +test("getLogFilterFragmentIds generates 1 log filter fragment for simple filter", () => { + const logFilterFragments = getLogFilterFragmentIds({ type: "log", chainId: 1, address: "0xa", - topics: [null, null, null, null], - includeTransactionReceipts: false, + topic0: null, + topic1: null, + topic2: null, + topic3: null, + include: [], }); - expect(logFilterFragments).toMatchObject([ - { - id: "1_0xa_null_null_null_null_0", - address: "0xa", - topic0: null, - topic1: null, - topic2: null, - topic3: null, - 
includeTransactionReceipts: 0, - }, - ]); + expect(logFilterFragments[0]!.id).toBe("log_1_0xa_null_null_null_null_0"); }); -test("buildLogFilterFragments generates 4 log filter fragment for 2x2 filter", () => { - const logFilterFragments = buildLogFilterFragments({ +test("getLogFilterFragmentIds generates 4 log filter fragment for 2x2 filter", () => { + const logFilterFragments = getLogFilterFragmentIds({ type: "log", chainId: 115511, address: ["0xa", "0xb"], - topics: [["0xc", "0xd"], null, "0xe", null], - includeTransactionReceipts: false, + topic0: ["0xc", "0xd"], + topic1: null, + topic2: "0xe", + topic3: null, + include: [], }); - expect(logFilterFragments).toMatchObject([ - { - id: "115511_0xa_0xc_null_0xe_null_0", - address: "0xa", - topic0: "0xc", - topic1: null, - topic2: "0xe", - topic3: null, - includeTransactionReceipts: 0, - }, - { - id: "115511_0xa_0xd_null_0xe_null_0", - address: "0xa", - topic0: "0xd", - topic1: null, - topic2: "0xe", - topic3: null, - includeTransactionReceipts: 0, - }, - { - id: "115511_0xb_0xc_null_0xe_null_0", - address: "0xb", - topic0: "0xc", - topic1: null, - topic2: "0xe", - topic3: null, - includeTransactionReceipts: 0, - }, - { - id: "115511_0xb_0xd_null_0xe_null_0", - address: "0xb", - topic0: "0xd", - topic1: null, - topic2: "0xe", - topic3: null, - includeTransactionReceipts: 0, - }, - ]); + expect(logFilterFragments[0]!.id).toBe("log_115511_0xa_0xc_null_0xe_null_0"); + expect(logFilterFragments[1]!.id).toBe("log_115511_0xa_0xd_null_0xe_null_0"); + expect(logFilterFragments[2]!.id).toBe("log_115511_0xb_0xc_null_0xe_null_0"); + expect(logFilterFragments[3]!.id).toBe("log_115511_0xb_0xd_null_0xe_null_0"); }); -test("buildLogFilterFragments generates 12 log filter fragment for 2x2x3 filter", () => { - const logFilterFragments = buildLogFilterFragments({ +test("getLogFilterFragmentIds generates 12 log filter fragment for 2x2x3 filter", () => { + const logFilterFragments = getLogFilterFragmentIds({ type: "log", chainId: 1, address: ["0xa", "0xb"], - topics: [["0xc", "0xd"], null, ["0xe", "0xf", "0x1"], null], - includeTransactionReceipts: false, + topic0: ["0xc", "0xd"], + topic1: null, + topic2: ["0xe", "0xf", "0x1"], + topic3: null, + include: [], }); expect(logFilterFragments.length).toBe(12); }); -test("buildLogFilterFragments includeTransactionReceipts", () => { - const logFilterFragments = buildLogFilterFragments({ +test("getLogFilterFragmentIds includeTransactionReceipts", () => { + const logFilterFragments = getLogFilterFragmentIds({ type: "log", chainId: 1, address: undefined, - topics: [null, null, null, null], - includeTransactionReceipts: true, + topic0: null, + topic1: null, + topic2: null, + topic3: null, + include: ["transactionReceipt.status"], }); - expect(logFilterFragments).toMatchObject([ - { - id: "1_null_null_null_null_null_1", - address: null, - topic0: null, - topic1: null, - topic2: null, - topic3: null, - includeTransactionReceipts: 1, - }, - ]); + expect(logFilterFragments[0]!.id).toBe("log_1_null_null_null_null_null_1"); }); -test("buildLogFilterFragments builds id containing factory topic", () => { +test("getLogFilterFragmentIds builds id containing factory topic", () => { const factory = buildLogFactory({ address: "0xa", event: llamaFactoryEventAbiItem, @@ -142,22 +93,25 @@ test("buildLogFilterFragments builds id containing factory topic", () => { chainId: 1, }); - const fragments = buildLogFilterFragments({ + const fragments = getLogFilterFragmentIds({ type: "log", chainId: 1, - topics: [null, null, null, null], + 
topic0: null, + topic1: null, + topic2: null, + topic3: null, address: factory, - includeTransactionReceipts: false, + include: [], }); expect(fragments).toHaveLength(1); expect(fragments[0]!.id).toBe( - "1_0xa_0x00fef2d461a2fabbb523f9f42752c61336f03b17a602af52cc6c83cb8b110599_topic1_null_null_null_null_0", + "log_1_0xa_0x00fef2d461a2fabbb523f9f42752c61336f03b17a602af52cc6c83cb8b110599_topic1_null_null_null_null_0", ); }); -test("buildLogFilterFragments builds id containing factory offset", () => { +test("getLogFilterFragmentIds builds id containing factory offset", () => { const factory = buildLogFactory({ address: "0xa", event: llamaFactoryEventAbiItem, @@ -165,22 +119,25 @@ test("buildLogFilterFragments builds id containing factory offset", () => { chainId: 1, }); - const fragments = buildLogFilterFragments({ + const fragments = getLogFilterFragmentIds({ type: "log", chainId: 115511, - topics: [null, null, null, null], + topic0: null, + topic1: null, + topic2: null, + topic3: null, address: factory, - includeTransactionReceipts: false, + include: [], }); expect(fragments).toHaveLength(1); expect(fragments[0]!.id).toBe( - "115511_0xa_0x00fef2d461a2fabbb523f9f42752c61336f03b17a602af52cc6c83cb8b110599_offset64_null_null_null_null_0", + "log_115511_0xa_0x00fef2d461a2fabbb523f9f42752c61336f03b17a602af52cc6c83cb8b110599_offset64_null_null_null_null_0", ); }); -test("buildLogFilterFragments builds id with multiple factories", () => { +test("getLogFilterFragmentIds builds id with multiple factories", () => { const factory = buildLogFactory({ address: ["0xa", "0xb"], event: llamaFactoryEventAbiItem, @@ -188,20 +145,23 @@ test("buildLogFilterFragments builds id with multiple factories", () => { chainId: 1, }); - const fragments = buildLogFilterFragments({ + const fragments = getLogFilterFragmentIds({ type: "log", chainId: 1, - topics: [null, null, null, null], + topic0: null, + topic1: null, + topic2: null, + topic3: null, address: factory, - includeTransactionReceipts: false, + include: [], }); expect(fragments).toHaveLength(2); expect(fragments[0]!.id).toBe( - "1_0xa_0x00fef2d461a2fabbb523f9f42752c61336f03b17a602af52cc6c83cb8b110599_offset64_null_null_null_null_0", + "log_1_0xa_0x00fef2d461a2fabbb523f9f42752c61336f03b17a602af52cc6c83cb8b110599_offset64_null_null_null_null_0", ); expect(fragments[1]!.id).toBe( - "1_0xb_0x00fef2d461a2fabbb523f9f42752c61336f03b17a602af52cc6c83cb8b110599_offset64_null_null_null_null_0", + "log_1_0xb_0x00fef2d461a2fabbb523f9f42752c61336f03b17a602af52cc6c83cb8b110599_offset64_null_null_null_null_0", ); }); diff --git a/packages/core/src/sync/fragments.ts b/packages/core/src/sync/fragments.ts index 342a02e53..953a6ac98 100644 --- a/packages/core/src/sync/fragments.ts +++ b/packages/core/src/sync/fragments.ts @@ -1,258 +1,275 @@ -import type { PonderSyncSchema } from "@/sync-store/encoding.js"; import type { Address, Hex } from "viem"; import { type BlockFilter, - type CallTraceFilter, type Factory, - type LogFactory, + type Filter, type LogFilter, + type TraceFilter, + type TransactionFilter, + type TransferFilter, isAddressFactory, + shouldGetTransactionReceipt, } from "./source.js"; -export type LogFilterFragment< - factory extends Factory | undefined = Factory | undefined, -> = factory extends Factory - ? 
PonderSyncSchema["factoryLogFilters"] - : PonderSyncSchema["logFilters"]; - -export type BlockFilterFragment = PonderSyncSchema["blockFilters"]; - -export type TraceFilterFragment< - factory extends Factory | undefined = Factory | undefined, -> = factory extends Factory - ? PonderSyncSchema["factoryTraceFilters"] - : PonderSyncSchema["traceFilters"]; - -/** - * Generates log filter fragments from a log filter. - * - * @param logFilter Log filter to be decomposed into fragments. - * @returns A list of log filter fragments. - */ -export const buildLogFilterFragments = ({ - chainId, - address, - topics, - includeTransactionReceipts, -}: Omit< - LogFilter, - "fromBlock" | "toBlock" ->): LogFilterFragment[] => { - const fragments: LogFilterFragment[] = []; - const { topic0, topic1, topic2, topic3 } = parseTopics(topics); - - const idCallback = ({ - chainId, - address: address_, - topic0: topic0_, - topic1: topic1_, - topic2: topic2_, - topic3: topic3_, - includeTransactionReceipts, - }: Omit & { - address: Address | null; - }) => { - return `${chainId}_${address_}_${topic0_}_${topic1_}_${topic2_}_${topic3_}_${ - includeTransactionReceipts - }`; - }; - - const factoryIdCallback = ({ - chainId, - address: address_, - eventSelector: eventSelector_, - childAddressLocation: childAddressLocation_, - topic0: topic0_, - topic1: topic1_, - topic2: topic2_, - topic3: topic3_, - includeTransactionReceipts, - }: Omit & { - address: Address; - eventSelector: LogFactory["eventSelector"]; - childAddressLocation: LogFactory["childAddressLocation"]; - }) => { - return `${chainId}_${address_}_${eventSelector_}_${childAddressLocation_}_${topic0_}_${topic1_}_${topic2_}_${topic3_}_${ - includeTransactionReceipts - }`; - }; +type FragmentAddress = + | Address + | `${Address}_${Factory["eventSelector"]}_${Factory["childAddressLocation"]}` + | null; +type FragmentTopic = Hex | null; + +export type FragmentId = + /** block_{chainId}_{interval}_{offset} */ + | `block_${number}_${number}_${number}` + /** transaction_{chainId}_{fromAddress}_{toAddress} */ + | `transaction_${number}_${FragmentAddress}_${FragmentAddress}` + /** trace_{chainId}_{fromAddress}_{toAddress}_{functionSelector}_{includeReceipts} */ + | `trace_${number}_${FragmentAddress}_${FragmentAddress}_${Hex | null}_${0 | 1}` + /** log_{chainId}_{address}_{topic0}_{topic1}_{topic2}_{topic3}_{includeReceipts} */ + | `log_${number}_${FragmentAddress}_${FragmentTopic}_${FragmentTopic}_${FragmentTopic}_${FragmentTopic}_${0 | 1}` + /** transfer_{chainId}_{fromAddress}_{toAddress}_{includeReceipts} */ + | `transfer_${number}_${FragmentAddress}_${FragmentAddress}_${0 | 1}`; + +export const getFragmentIds = ( + filter: Omit, +): FragmentReturnType => { + switch (filter.type) { + case "block": + return getBlockFilterFragmentId(filter as BlockFilter); + case "transaction": + return getTransactionFilterFragmentIds(filter as TransactionFilter); + case "trace": + return getTraceFilterFragmentIds(filter as TraceFilter); + case "log": + return getLogFilterFragmentIds(filter as LogFilter); + case "transfer": + return getTransferFilterFragmentIds(filter as TransferFilter); + } +}; + +type FragmentReturnType = { + id: FragmentId; + adjacent: FragmentId[]; +}[]; + +const getAddressFragmentIds = ( + address: Address | Address[] | Factory | undefined, +) => { + const fragments: { id: FragmentAddress; adjacent: FragmentAddress[] }[] = []; if (isAddressFactory(address)) { - for (const factoryAddress_ of Array.isArray(address.address) + for (const fragmentAddress of 
Array.isArray(address.address) ? address.address : [address.address]) { - for (const topic0_ of Array.isArray(topic0) ? topic0 : [topic0]) { - for (const topic1_ of Array.isArray(topic1) ? topic1 : [topic1]) { - for (const topic2_ of Array.isArray(topic2) ? topic2 : [topic2]) { - for (const topic3_ of Array.isArray(topic3) ? topic3 : [topic3]) { - fragments.push({ - id: factoryIdCallback({ - chainId, - address: factoryAddress_, - eventSelector: address.eventSelector, - childAddressLocation: address.childAddressLocation, - topic0: topic0_, - topic1: topic1_, - topic2: topic2_, - topic3: topic3_, - includeTransactionReceipts: includeTransactionReceipts - ? 1 - : 0, - }), - chainId, - address: factoryAddress_, - eventSelector: address.eventSelector, - childAddressLocation: address.childAddressLocation, - topic0: topic0_, - topic1: topic1_, - topic2: topic2_, - topic3: topic3_, - includeTransactionReceipts: includeTransactionReceipts ? 1 : 0, - }); - } - } - } - } + const id = + `${fragmentAddress}_${address.eventSelector}_${address.childAddressLocation}` as const; + + fragments.push({ id, adjacent: [id] }); } } else { - for (const address_ of Array.isArray(address) + for (const fragmentAddress of Array.isArray(address) ? address : [address ?? null]) { - for (const topic0_ of Array.isArray(topic0) ? topic0 : [topic0]) { - for (const topic1_ of Array.isArray(topic1) ? topic1 : [topic1]) { - for (const topic2_ of Array.isArray(topic2) ? topic2 : [topic2]) { - for (const topic3_ of Array.isArray(topic3) ? topic3 : [topic3]) { - fragments.push({ - id: idCallback({ - chainId, - address: address_, - topic0: topic0_, - topic1: topic1_, - topic2: topic2_, - topic3: topic3_, - includeTransactionReceipts: includeTransactionReceipts - ? 1 - : 0, - }), - chainId, - address: address_, - topic0: topic0_, - topic1: topic1_, - topic2: topic2_, - topic3: topic3_, - includeTransactionReceipts: includeTransactionReceipts ? 1 : 0, - }); - } - } - } - } + fragments.push({ + id: fragmentAddress, + adjacent: fragmentAddress ? [fragmentAddress, null] : [fragmentAddress], + }); } } - return fragments as LogFilterFragment[]; + return fragments; }; -function parseTopics(topics: (Hex | Hex[] | null)[] | undefined) { - return { - topic0: topics?.[0] ?? null, - topic1: topics?.[1] ?? null, - topic2: topics?.[2] ?? null, - topic3: topics?.[3] ?? 
null, - } as { - topic0: Hex | Hex[] | null; - topic1: Hex | Hex[] | null; - topic2: Hex | Hex[] | null; - topic3: Hex | Hex[] | null; - }; -} - -export const buildBlockFilterFragment = ({ +export const getBlockFilterFragmentId = ({ chainId, interval, offset, -}: Omit): BlockFilterFragment => { - return { - id: `${chainId}_${interval}_${offset}`, - chainId, - interval, - offset, - }; +}: Omit): FragmentReturnType => { + return [ + { + id: `block_${chainId}_${interval}_${offset}`, + adjacent: [`block_${chainId}_${interval}_${offset}`], + }, + ]; }; -export const buildTraceFilterFragments = ({ +export const getTransactionFilterFragmentIds = ({ chainId, fromAddress, toAddress, -}: Omit, "fromBlock" | "toBlock"> & { +}: Omit & { chainId: number; -}): TraceFilterFragment[] => { - const fragments: TraceFilterFragment[] = []; - - const idCallback = ({ - chainId, - fromAddress, - toAddress, - }: Omit & { - toAddress: Address | null; - }) => { - return `${chainId}_${fromAddress}_${toAddress}`; - }; - - const factoryIdCallback = ({ - chainId, - fromAddress, - address, - eventSelector, - childAddressLocation, - }: Omit & { - address: Address; - eventSelector: LogFactory["eventSelector"]; - childAddressLocation: LogFactory["childAddressLocation"]; - }) => { - return `${chainId}_${address}_${eventSelector}_${childAddressLocation}_${fromAddress}`; - }; - - if (isAddressFactory(toAddress)) { - for (const _fromAddress of Array.isArray(fromAddress) - ? fromAddress - : [null]) { - for (const _factoryAddress of Array.isArray(toAddress.address) - ? toAddress.address - : [toAddress.address]) { - fragments.push({ - id: factoryIdCallback({ - chainId, - fromAddress: _fromAddress, - address: _factoryAddress, - eventSelector: toAddress.eventSelector, - childAddressLocation: toAddress.childAddressLocation, - }), - chainId, - address: _factoryAddress, - eventSelector: toAddress.eventSelector, - childAddressLocation: toAddress.childAddressLocation, - fromAddress: _fromAddress, - }); +}): FragmentReturnType => { + const fragments: FragmentReturnType = []; + const fromAddressFragmentIds = getAddressFragmentIds(fromAddress); + const toAddressFragmentIds = getAddressFragmentIds(toAddress); + + for (const fragmentFromAddress of fromAddressFragmentIds) { + for (const fragmentToAddress of toAddressFragmentIds) { + const id = + `transaction_${chainId}_${fragmentFromAddress.id}_${fragmentToAddress.id}` as const; + + const adjacent: FragmentId[] = []; + + for (const adjacentFromAddress of fragmentFromAddress.adjacent) { + for (const adjacentToAddress of fragmentToAddress.adjacent) { + adjacent.push( + `transaction_${chainId}_${adjacentFromAddress}_${adjacentToAddress}`, + ); + } } + + fragments.push({ id, adjacent }); } - } else { - for (const _fromAddress of Array.isArray(fromAddress) - ? fromAddress - : [null]) { - for (const _toAddress of Array.isArray(toAddress) ? 
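Each fragment also carries an `adjacent` list: the set of fragment IDs whose cached data can satisfy this fragment, including broader wildcard variants. A small sketch for a transaction filter that watches a single sender and any recipient (values and import path assumed):

// Sketch only: mirrors the getTransactionFilterFragmentIds logic above.
import { getFragmentIds } from "./fragments.js";

const [fragment] = getFragmentIds({
  type: "transaction",
  chainId: 1,
  fromAddress: "0xa",
  toAddress: undefined,
  includeReverted: false,
  include: [],
});

// The fragment's own ID, plus the wildcard-sender variant that also covers it:
// id       => "transaction_1_0xa_null"
// adjacent => ["transaction_1_0xa_null", "transaction_1_null_null"]
console.log(fragment!.id, fragment!.adjacent);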
toAddress : [null]) { - fragments.push({ - id: idCallback({ - chainId, - fromAddress: _fromAddress, - toAddress: _toAddress, - }), - chainId, - toAddress: _toAddress, - fromAddress: _fromAddress, - }); + } + + return fragments; +}; + +export const getTraceFilterFragmentIds = ({ + chainId, + fromAddress, + toAddress, + callType, + functionSelector, + ...filter +}: Omit & { + chainId: number; +}): FragmentReturnType => { + const fragments: FragmentReturnType = []; + const fromAddressFragmentIds = getAddressFragmentIds(fromAddress); + const toAddressFragmentIds = getAddressFragmentIds(toAddress); + const includeTransactionReceipts = shouldGetTransactionReceipt(filter); + + for (const fragmentFromAddress of fromAddressFragmentIds) { + for (const fragmentToAddress of toAddressFragmentIds) { + for (const fragmentFunctionSelector of Array.isArray(functionSelector) + ? functionSelector + : [functionSelector]) { + const id = + `trace_${chainId}_${fragmentFromAddress.id}_${fragmentToAddress.id}_${fragmentFunctionSelector ?? null}_${includeTransactionReceipts ? 1 : 0}` as const; + + const adjacent: FragmentId[] = []; + + for (const adjacentFromAddress of fragmentFromAddress.adjacent) { + for (const adjacentToAddress of fragmentToAddress.adjacent) { + for (const adjacentFunctionSelector of fragmentFunctionSelector + ? [fragmentFunctionSelector, null] + : [null]) { + for (const adjacentTxr of includeTransactionReceipts + ? [1] + : [0, 1]) { + adjacent.push( + `trace_${chainId}_${adjacentFromAddress}_${adjacentToAddress}_${adjacentFunctionSelector}_${adjacentTxr as 0 | 1}`, + ); + } + } + } + } + + fragments.push({ id, adjacent }); + } + } + } + + return fragments; +}; + +export const getLogFilterFragmentIds = ({ + chainId, + address, + topic0, + topic1, + topic2, + topic3, + ...filter +}: Omit): FragmentReturnType => { + const fragments: FragmentReturnType = []; + const addressFragmentIds = getAddressFragmentIds(address); + const includeTransactionReceipts = shouldGetTransactionReceipt(filter); + + for (const fragmentAddress of addressFragmentIds) { + for (const fragmentTopic0 of Array.isArray(topic0) ? topic0 : [topic0]) { + for (const fragmentTopic1 of Array.isArray(topic1) ? topic1 : [topic1]) { + for (const fragmentTopic2 of Array.isArray(topic2) + ? topic2 + : [topic2]) { + for (const fragmentTopic3 of Array.isArray(topic3) + ? topic3 + : [topic3]) { + const id = + `log_${chainId}_${fragmentAddress.id}_${fragmentTopic0 ?? null}_${fragmentTopic1 ?? null}_${fragmentTopic2 ?? null}_${fragmentTopic3 ?? null}_${includeTransactionReceipts ? 1 : 0}` as const; + + const adjacent: FragmentId[] = []; + + for (const adjacentAddress of fragmentAddress.adjacent) { + for (const adjacentTopic0 of fragmentTopic0 + ? [fragmentTopic0, null] + : [null]) { + for (const adjacentTopic1 of fragmentTopic1 + ? [fragmentTopic1, null] + : [null]) { + for (const adjacentTopic2 of fragmentTopic2 + ? [fragmentTopic2, null] + : [null]) { + for (const adjacentTopic3 of fragmentTopic3 + ? [fragmentTopic3, null] + : [null]) { + for (const adjacentTxr of includeTransactionReceipts + ? 
[1] + : [0, 1]) { + adjacent.push( + `log_${chainId}_${adjacentAddress}_${adjacentTopic0}_${adjacentTopic1}_${adjacentTopic2}_${adjacentTopic3}_${adjacentTxr as 0 | 1}`, + ); + } + } + } + } + } + } + + fragments.push({ id, adjacent }); + } + } } } } - return fragments as TraceFilterFragment[]; + return fragments; +}; + +export const getTransferFilterFragmentIds = ({ + chainId, + fromAddress, + toAddress, + ...filter +}: Omit & { + chainId: number; +}): FragmentReturnType => { + const fragments: FragmentReturnType = []; + const fromAddressFragmentIds = getAddressFragmentIds(fromAddress); + const toAddressFragmentIds = getAddressFragmentIds(toAddress); + const includeTransactionReceipts = shouldGetTransactionReceipt(filter); + + for (const fragmentFromAddress of fromAddressFragmentIds) { + for (const fragmentToAddress of toAddressFragmentIds) { + const id = + `transfer_${chainId}_${fragmentFromAddress.id}_${fragmentToAddress.id}_${includeTransactionReceipts ? 1 : 0}` as const; + + const adjacent: FragmentId[] = []; + + for (const adjacentFromAddress of fragmentFromAddress.adjacent) { + for (const adjacentToAddress of fragmentToAddress.adjacent) { + for (const adjacentTxr of includeTransactionReceipts ? [1] : [0, 1]) { + adjacent.push( + `transfer_${chainId}_${adjacentFromAddress}_${adjacentToAddress}_${adjacentTxr as 0 | 1}`, + ); + } + } + } + + fragments.push({ id, adjacent }); + } + } + + return fragments; }; diff --git a/packages/core/src/sync/index.test.ts b/packages/core/src/sync/index.test.ts index 5fb43b357..5370c99c3 100644 --- a/packages/core/src/sync/index.test.ts +++ b/packages/core/src/sync/index.test.ts @@ -4,7 +4,12 @@ import { setupDatabaseServices, setupIsolatedDatabase, } from "@/_test/setup.js"; -import { testClient } from "@/_test/utils.js"; +import { + getBlocksConfigAndIndexingFunctions, + getNetwork, + testClient, +} from "@/_test/utils.js"; +import { buildConfigAndIndexingFunctions } from "@/build/configAndIndexingFunctions.js"; import { decodeCheckpoint, encodeCheckpoint, @@ -13,10 +18,9 @@ import { } from "@/utils/checkpoint.js"; import { wait } from "@/utils/wait.js"; import { promiseWithResolvers } from "@ponder/common"; -import { type TestContext, beforeEach, expect, test, vi } from "vitest"; +import { beforeEach, expect, test, vi } from "vitest"; import type { RawEvent } from "./events.js"; import { type Sync, createSync } from "./index.js"; -import type { BlockSource } from "./source.js"; beforeEach(setupCommon); beforeEach(setupAnvil); @@ -34,33 +38,24 @@ async function drainAsyncGenerator( return result; } -function getMultichainNetworksAndSources(context: TestContext) { - const mainnet = context.networks[0]; - const optimism = { ...mainnet, name: "optimism", chainId: 10 }; - - const sources = [ - context.sources[4], - { - ...context.sources[4], - networkName: optimism.name, - filter: { - ...context.sources[4].filter, - chainId: 10, - }, - }, - ] as [BlockSource, BlockSource]; - - return { networks: [mainnet, optimism], sources }; -} - test("createSync()", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + const sync = await createSync({ syncStore, - sources: [context.sources[0]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: 
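Trace fragments widen along two extra dimensions: a cached fragment with a wildcard function selector, or one that also fetched receipts, can serve a narrower filter. A compact sketch (selector and address invented for illustration):

// Sketch only: mirrors getTraceFilterFragmentIds above.
import { getTraceFilterFragmentIds } from "./fragments.js";

const [fragment] = getTraceFilterFragmentIds({
  type: "trace",
  chainId: 1,
  fromAddress: undefined,
  toAddress: "0xc",
  functionSelector: "0xaabbccdd",
  callType: undefined,
  includeReverted: false,
  include: [],
});

// id => "trace_1_null_0xc_0xaabbccdd_0"
// adjacent (8 entries) crosses {to: "0xc" | null} x {selector | null} x {receipts 0 | 1},
// e.g. "trace_1_null_null_null_1" is the broadest cached fragment that still covers it.
console.log(fragment!.id, fragment!.adjacent.length);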
async () => {}, onFatalError: () => {}, initialCheckpoint: encodeCheckpoint(zeroCheckpoint), @@ -76,11 +71,26 @@ test("createSync()", async (context) => { test("getEvents() returns events", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + await testClient.mine({ blocks: 1 }); + + // finalized block: 1 + network.finalityBlockCount = 0; + const sync = await createSync({ syncStore, - sources: [context.sources[4]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: async () => {}, onFatalError: () => {}, initialCheckpoint: encodeCheckpoint(zeroCheckpoint), @@ -89,7 +99,7 @@ test("getEvents() returns events", async (context) => { const events = await drainAsyncGenerator(sync.getEvents()); expect(events).toBeDefined(); - expect(events).toHaveLength(1); + expect(events).toHaveLength(2); await sync.kill(); @@ -99,11 +109,26 @@ test("getEvents() returns events", async (context) => { test("getEvents() with cache", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + await testClient.mine({ blocks: 1 }); + + // finalized block: 1 + network.finalityBlockCount = 0; + let sync = await createSync({ syncStore, - sources: [context.sources[4]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: async () => {}, onFatalError: () => {}, initialCheckpoint: encodeCheckpoint(zeroCheckpoint), @@ -111,13 +136,13 @@ test("getEvents() with cache", async (context) => { await drainAsyncGenerator(sync.getEvents()); - const spy = vi.spyOn(syncStore, "insertInterval"); + const spy = vi.spyOn(syncStore, "insertIntervals"); sync = await createSync({ syncStore, - sources: [context.sources[4]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: async () => {}, onFatalError: () => {}, initialCheckpoint: encodeCheckpoint(zeroCheckpoint), @@ -128,7 +153,7 @@ test("getEvents() with cache", async (context) => { expect(spy).toHaveBeenCalledTimes(0); expect(events).toBeDefined(); - expect(events).toHaveLength(1); + expect(events).toHaveLength(2); await sync.kill(); @@ -138,14 +163,28 @@ test("getEvents() with cache", async (context) => { test("getEvents() end block", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - context.networks[0].finalityBlockCount = 1; - context.sources[4].filter.toBlock = 4; + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + await testClient.mine({ blocks: 2 }); + + // finalized block: 2 + network.finalityBlockCount = 0; + + sources[0]!.filter.toBlock = 1; const sync = await createSync({ syncStore, - sources: [context.sources[4]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: async () => {}, onFatalError: () => 
{}, initialCheckpoint: encodeCheckpoint(zeroCheckpoint), @@ -171,15 +210,38 @@ test("getEvents() end block", async (context) => { // multiple blocks with the same hash and different chain IDs. test.skip("getEvents() multichain", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const { networks, sources } = getMultichainNetworksAndSources(context); - sources[1].filter.toBlock = 1; + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + + const { sources: sources1, networks: networks1 } = + await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const { sources: sources2, networks: networks2 } = + await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + await testClient.mine({ blocks: 2 }); + + // finalized block: 2 + networks1[0]!.finalityBlockCount = 0; + networks2[0]!.finalityBlockCount = 0; + + sources2[0]!.filter.chainId = 2; + sources2[0]!.filter.toBlock = 1; + networks2[0]!.chainId = 2; const sync = await createSync({ syncStore, - sources: [sources[0], sources[1]], + sources: [...sources1, ...sources2], common: context.common, - networks, + networks: [...networks1, ...networks2], onRealtimeEvent: async () => {}, onFatalError: () => {}, initialCheckpoint: encodeCheckpoint(zeroCheckpoint), @@ -198,11 +260,26 @@ test.skip("getEvents() multichain", async (context) => { test("getEvents() updates status", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + await testClient.mine({ blocks: 2 }); + + // finalized block: 2 + network.finalityBlockCount = 0; + const sync = await createSync({ syncStore, - sources: [context.sources[4]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: async () => {}, onFatalError: () => {}, initialCheckpoint: encodeCheckpoint(zeroCheckpoint), @@ -212,8 +289,8 @@ test("getEvents() updates status", async (context) => { const status = sync.getStatus(); - expect(status[context.networks[0].name]?.ready).toBe(false); - expect(status[context.networks[0].name]?.block?.number).toBe(1); + expect(status[network.name]?.ready).toBe(false); + expect(status[network.name]?.block?.number).toBe(2); await sync.kill(); @@ -223,17 +300,26 @@ test("getEvents() updates status", async (context) => { test("getEvents() pagination", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const network = context.networks[0]; + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + await testClient.mine({ blocks: 2 }); + + // finalized block: 2 network.finalityBlockCount = 0; - context.common.options = { - ...context.common.options, - syncEventsQuerySize: 1, - }; + context.common.options.syncEventsQuerySize = 1; const sync = await createSync({ syncStore, - sources: [context.sources[4]], + sources, common: context.common, networks: [network], onRealtimeEvent: async () => {}, @@ -252,11 +338,26 @@ test("getEvents() pagination", async (context) => { test("getEvents() initialCheckpoint", async (context) => { 
const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + await testClient.mine({ blocks: 2 }); + + // finalized block: 2 + network.finalityBlockCount = 0; + const sync = await createSync({ syncStore, - sources: [context.sources[4]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: async () => {}, onFatalError: () => {}, initialCheckpoint: encodeCheckpoint(maxCheckpoint), @@ -275,21 +376,39 @@ test("getEvents() initialCheckpoint", async (context) => { test("getEvents() refetches finalized block", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + await testClient.mine({ blocks: 2 }); + + // finalized block: 2 + network.finalityBlockCount = 0; + context.common.options.syncHandoffStaleSeconds = 0.5; const sync = await createSync({ syncStore, - sources: [context.sources[4]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: async () => {}, onFatalError: () => {}, - initialCheckpoint: encodeCheckpoint(zeroCheckpoint), + initialCheckpoint: encodeCheckpoint(maxCheckpoint), }); + // cause `latestFinalizedFetch` to be updated + const gen = sync.getEvents(); + await wait(1000); - await drainAsyncGenerator(sync.getEvents()); + await drainAsyncGenerator(gen); await sync.kill(); @@ -299,11 +418,23 @@ test("getEvents() refetches finalized block", async (context) => { test("startRealtime()", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + await testClient.mine({ blocks: 2 }); + const sync = await createSync({ syncStore, - sources: [context.sources[4]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: async () => {}, onFatalError: () => {}, initialCheckpoint: encodeCheckpoint(zeroCheckpoint), @@ -315,8 +446,8 @@ test("startRealtime()", async (context) => { const status = sync.getStatus(); - expect(status[context.networks[0].name]?.ready).toBe(true); - expect(status[context.networks[0].name]?.block?.number).toBe(1); + expect(status[network.name]?.ready).toBe(true); + expect(status[network.name]?.block?.number).toBe(1); await sync.kill(); @@ -326,14 +457,26 @@ test("startRealtime()", async (context) => { test("onEvent() handles block", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + const promise = promiseWithResolvers(); const events: RawEvent[] = []; + await testClient.mine({ blocks: 1 }); + const sync = await createSync({ syncStore, - sources: 
[context.sources[0]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: async (event) => { if (event.type === "block") { events.push(...event.events); @@ -350,7 +493,7 @@ test("onEvent() handles block", async (context) => { await promise.promise; - expect(events).toHaveLength(2); + expect(events).toHaveLength(1); await sync.kill(); @@ -360,14 +503,28 @@ test("onEvent() handles block", async (context) => { test("onEvent() handles finalize", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + const promise = promiseWithResolvers(); let checkpoint: string; + // finalized block: 0 + + network.finalityBlockCount = 2; + const sync = await createSync({ syncStore, - sources: [context.sources[0]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: async (event) => { if (event.type === "finalize") { checkpoint = event.checkpoint; @@ -386,7 +543,7 @@ test("onEvent() handles finalize", async (context) => { await promise.promise; - expect(decodeCheckpoint(checkpoint!).blockNumber).toBe(5n); + expect(decodeCheckpoint(checkpoint!).blockNumber).toBe(2n); await sync.kill(); @@ -395,30 +552,47 @@ test("onEvent() handles finalize", async (context) => { test.todo("onEvent() handles reorg"); -test("onEvent() multichain end block", async (context) => { +test("onEvent() multichain gets all events", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const { networks, sources } = getMultichainNetworksAndSources(context); - sources[1].filter.toBlock = 1; + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources: sources1, networks: networks1 } = + await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const { sources: sources2, networks: networks2 } = + await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + // finalized block: 0 + + sources2[0]!.filter.chainId = 2; + networks2[0]!.chainId = 2; const promise = promiseWithResolvers(); const sync = await createSync({ syncStore, - sources: [sources[0], sources[1]], + sources: [...sources1, ...sources2], common: context.common, - networks, + networks: [...networks1, ...networks2], onRealtimeEvent: async (event) => { if (event.type === "block") { - if (event.events.length > 0) { - promise.resolve(); - } + promise.resolve(); } }, onFatalError: () => {}, initialCheckpoint: encodeCheckpoint(zeroCheckpoint), }); + await testClient.mine({ blocks: 1 }); + await drainAsyncGenerator(sync.getEvents()); await sync.startRealtime(); @@ -429,28 +603,49 @@ test("onEvent() multichain end block", async (context) => { await cleanup(); }); -test("onEvent() multichain gets all events", async (context) => { + +test("onEvent() multichain end block", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); - const { networks, sources } = getMultichainNetworksAndSources(context); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources: sources1, networks: networks1 } = + await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + const 
{ sources: sources2, networks: networks2 } = + await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + + // finalized block: 0 + + sources2[0]!.filter.chainId = 2; + sources2[0]!.filter.toBlock = 0; + networks2[0]!.chainId = 2; const promise = promiseWithResolvers(); const sync = await createSync({ syncStore, - sources: [sources[0], sources[1]], + sources: [...sources1, ...sources2], common: context.common, - networks, + networks: [...networks1, ...networks2], onRealtimeEvent: async (event) => { if (event.type === "block") { - if (event.events.length > 0) { - promise.resolve(); - } + promise.resolve(); } }, onFatalError: () => {}, initialCheckpoint: encodeCheckpoint(zeroCheckpoint), }); + await testClient.mine({ blocks: 1 }); + await drainAsyncGenerator(sync.getEvents()); await sync.startRealtime(); @@ -465,15 +660,31 @@ test("onEvent() multichain gets all events", async (context) => { test("onEvent() handles endBlock finalization", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + const promise = promiseWithResolvers(); - context.sources[0].filter.toBlock = 4; + // finalized block: 0 + + await testClient.mine({ blocks: 2 }); + + network.finalityBlockCount = 2; + + sources[0]!.filter.toBlock = 1; const sync = await createSync({ syncStore, - sources: [context.sources[0]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: async (event) => { if (event.type === "finalize") { promise.resolve(); @@ -483,7 +694,7 @@ test("onEvent() handles endBlock finalization", async (context) => { initialCheckpoint: encodeCheckpoint(zeroCheckpoint), }); - await testClient.mine({ blocks: 4 }); + await testClient.mine({ blocks: 2 }); await drainAsyncGenerator(sync.getEvents()); @@ -499,13 +710,25 @@ test("onEvent() handles endBlock finalization", async (context) => { test("onEvent() handles errors", async (context) => { const { cleanup, syncStore } = await setupDatabaseServices(context); + const network = getNetwork(); + + const { config, rawIndexingFunctions } = getBlocksConfigAndIndexingFunctions({ + interval: 1, + }); + const { sources } = await buildConfigAndIndexingFunctions({ + config, + rawIndexingFunctions, + }); + const promise = promiseWithResolvers(); + // finalized block: 0 + const sync = await createSync({ syncStore, - sources: [context.sources[0]], + sources, common: context.common, - networks: context.networks, + networks: [network], onRealtimeEvent: async () => {}, onFatalError: () => { promise.resolve(); diff --git a/packages/core/src/sync/index.ts b/packages/core/src/sync/index.ts index 9a94660b0..38a22df7c 100644 --- a/packages/core/src/sync/index.ts +++ b/packages/core/src/sync/index.ts @@ -635,7 +635,7 @@ export const createSync = async (args: CreateSyncParameters): Promise => { filters: event.filters, logs: event.logs, factoryLogs: event.factoryLogs, - callTraces: event.callTraces, + traces: event.traces, transactions: event.transactions, transactionReceipts: event.transactionReceipts, }; @@ -750,7 +750,11 @@ export const createSync = async (args: CreateSyncParameters): Promise => { }), args.syncStore.insertTransactions({ transactions: finalizedEventData.flatMap( - ({ transactions }) => transactions, + ({ transactions, block }) => + 
transactions.map((transaction) => ({ + transaction, + block, + })), ), chainId: network.chainId, }), @@ -760,9 +764,16 @@ export const createSync = async (args: CreateSyncParameters): Promise => { ), chainId: network.chainId, }), - args.syncStore.insertCallTraces({ - callTraces: finalizedEventData.flatMap(({ callTraces, block }) => - callTraces.map((callTrace) => ({ callTrace, block })), + args.syncStore.insertTraces({ + traces: finalizedEventData.flatMap( + ({ traces, block, transactions }) => + traces.map((trace) => ({ + trace, + block, + transaction: transactions.find( + (t) => t.hash === trace.transactionHash, + )!, + })), ), chainId: network.chainId, }), @@ -770,13 +781,14 @@ export const createSync = async (args: CreateSyncParameters): Promise => { // Add corresponding intervals to the sync-store // Note: this should happen after so the database doesn't become corrupted - await Promise.all( - args.sources - .filter(({ filter }) => filter.chainId === network.chainId) - .map(({ filter }) => - args.syncStore.insertInterval({ filter, interval }), - ), - ); + + if (network.disableCache === false) { + await args.syncStore.insertIntervals({ + intervals: args.sources + .filter(({ filter }) => filter.chainId === network.chainId) + .map(({ filter }) => ({ filter, interval })), + }); + } /** * The realtime service can be killed if `endBlock` is @@ -978,7 +990,7 @@ export const getCachedBlock = ({ }): Promise | undefined => { const latestCompletedBlocks = sources.map(({ filter }) => { const requiredInterval = [ - filter.fromBlock, + filter.fromBlock ?? 0, filter.toBlock ?? Number.POSITIVE_INFINITY, ] satisfies Interval; const cachedIntervals = historicalSync.intervalsCache.get(filter)!; @@ -990,7 +1002,9 @@ export const getCachedBlock = ({ if (completedIntervals.length === 0) return undefined; const earliestCompletedInterval = completedIntervals[0]!; - if (earliestCompletedInterval[0] !== filter.fromBlock) return undefined; + if (earliestCompletedInterval[0] !== (filter.fromBlock ?? 0)) { + return undefined; + } return earliestCompletedInterval[1]; }); @@ -1007,7 +1021,8 @@ export const getCachedBlock = ({ if ( latestCompletedBlocks.every( (block, i) => - block !== undefined || sources[i]!.filter.fromBlock > minCompletedBlock, + block !== undefined || + (sources[i]!.filter.fromBlock ?? 0) > minCompletedBlock, ) ) { return _eth_getBlockByNumber(requestQueue, { @@ -1076,7 +1091,7 @@ export async function* localHistoricalSyncGenerator({ intervalDifference( [ [ - filter.fromBlock, + filter.fromBlock ?? 0, Math.min( filter.toBlock ?? 
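One behavioral note from the hunk above: per-filter intervals are now written with a single batched insertIntervals call per network, and the write is skipped entirely when caching is disabled for that network. A minimal sketch of the call shape, with types narrowed for illustration:

// Sketch only: mirrors the finalize handler above; `Filter` and `Interval` are stubbed here.
type Interval = [number, number];
type Filter = { chainId: number };

declare const syncStore: {
  insertIntervals(args: {
    intervals: { filter: Filter; interval: Interval }[];
  }): Promise<void>;
};
declare const sources: { filter: Filter }[];
declare const network: { chainId: number; disableCache: boolean };
declare const interval: Interval;

if (network.disableCache === false) {
  // One round trip per finalized range, covering every matching source.
  await syncStore.insertIntervals({
    intervals: sources
      .filter(({ filter }) => filter.chainId === network.chainId)
      .map(({ filter }) => ({ filter, interval })),
  });
}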
Number.POSITIVE_INFINITY, totalInterval[1], diff --git a/packages/core/src/sync/source.ts b/packages/core/src/sync/source.ts index 185b9b3e6..ed067d4bd 100644 --- a/packages/core/src/sync/source.ts +++ b/packages/core/src/sync/source.ts @@ -1,17 +1,45 @@ import type { AbiEvents, AbiFunctions } from "@/sync/abi.js"; +import type { + Block, + Log, + Transaction, + TransactionReceipt, + Trace as UserTrace, +} from "@/types/eth.js"; import type { SyncLog } from "@/types/sync.js"; +import type { Trace } from "@/utils/debug.js"; import type { Abi, Address, Hex, LogTopic } from "viem"; -export type Source = ContractSource | BlockSource; +export type Source = ContractSource | AccountSource | BlockSource; export type ContractSource< filter extends "log" | "trace" = "log" | "trace", factory extends Factory | undefined = Factory | undefined, + fromFactory extends Factory | undefined = Factory | undefined, + toFactory extends Factory | undefined = Factory | undefined, > = { - filter: filter extends "log" ? LogFilter : CallTraceFilter; + filter: filter extends "log" + ? LogFilter + : TraceFilter; } & ContractMetadata; + +export type AccountSource< + filter extends "transaction" | "transfer" = "transaction" | "transfer", + fromFactory extends Factory | undefined = Factory | undefined, + toFactory extends Factory | undefined = Factory | undefined, +> = { + filter: filter extends "transaction" + ? TransactionFilter + : TransferFilter; +} & AccountMetadata; + export type BlockSource = { filter: BlockFilter } & BlockMetadata; -export type Filter = LogFilter | BlockFilter | CallTraceFilter; +export type Filter = + | LogFilter + | BlockFilter + | TransferFilter + | TransactionFilter + | TraceFilter; export type Factory = LogFactory; export type ContractMetadata = { @@ -22,6 +50,11 @@ export type ContractMetadata = { name: string; networkName: string; }; +export type AccountMetadata = { + type: "account"; + name: string; + networkName: string; +}; export type BlockMetadata = { type: "block"; name: string; @@ -34,10 +67,20 @@ export type LogFilter< type: "log"; chainId: number; address: factory extends Factory ? factory : Address | Address[] | undefined; - topics: LogTopic[]; - includeTransactionReceipts: boolean; - fromBlock: number; + topic0: LogTopic | undefined; + topic1: LogTopic | undefined; + topic2: LogTopic | undefined; + topic3: LogTopic | undefined; + fromBlock: number | undefined; toBlock: number | undefined; + include: + | ( + | `block.${keyof Block}` + | `transaction.${keyof Transaction}` + | `transactionReceipt.${keyof TransactionReceipt}` + | `log.${keyof Log}` + )[] + | undefined; }; export type BlockFilter = { @@ -45,21 +88,85 @@ export type BlockFilter = { chainId: number; interval: number; offset: number; - fromBlock: number; + fromBlock: number | undefined; toBlock: number | undefined; + include: `block.${keyof Block}`[] | undefined; }; -export type CallTraceFilter< - factory extends Factory | undefined = Factory | undefined, +export type TransferFilter< + fromFactory extends Factory | undefined = Factory | undefined, + toFactory extends Factory | undefined = Factory | undefined, +> = { + type: "transfer"; + chainId: number; + fromAddress: fromFactory extends Factory + ? fromFactory + : Address | Address[] | undefined; + toAddress: toFactory extends Factory + ? 
toFactory
+    : Address | Address[] | undefined;
+  includeReverted: boolean;
+  fromBlock: number | undefined;
+  toBlock: number | undefined;
+  include:
+    | (
+        | `block.${keyof Block}`
+        | `transaction.${keyof Transaction}`
+        | `transactionReceipt.${keyof TransactionReceipt}`
+        | `trace.${keyof UserTrace}`
+      )[]
+    | undefined;
+};
+
+export type TransactionFilter<
+  fromFactory extends Factory | undefined = Factory | undefined,
+  toFactory extends Factory | undefined = Factory | undefined,
+> = {
+  type: "transaction";
+  chainId: number;
+  fromAddress: fromFactory extends Factory
+    ? fromFactory
+    : Address | Address[] | undefined;
+  toAddress: toFactory extends Factory
+    ? toFactory
+    : Address | Address[] | undefined;
+  includeReverted: boolean;
+  fromBlock: number | undefined;
+  toBlock: number | undefined;
+  include:
+    | (
+        | `block.${keyof Block}`
+        | `transaction.${keyof Transaction}`
+        | `transactionReceipt.${keyof TransactionReceipt}`
+      )[]
+    | undefined;
+};
+
+export type TraceFilter<
+  fromFactory extends Factory | undefined = Factory | undefined,
+  toFactory extends Factory | undefined = Factory | undefined,
 > = {
-  type: "callTrace";
+  type: "trace";
   chainId: number;
-  fromAddress: Address[] | undefined;
-  toAddress: factory extends Factory ? factory : Address[] | undefined;
-  functionSelectors: Hex[];
-  includeTransactionReceipts: boolean;
-  fromBlock: number;
+  fromAddress: fromFactory extends Factory
+    ? fromFactory
+    : Address | Address[] | undefined;
+  toAddress: toFactory extends Factory
+    ? toFactory
+    : Address | Address[] | undefined;
+  functionSelector: Hex | Hex[] | undefined;
+  callType: Trace["result"]["type"] | undefined;
+  includeReverted: boolean;
+  fromBlock: number | undefined;
   toBlock: number | undefined;
+  include:
+    | (
+        | `block.${keyof Block}`
+        | `transaction.${keyof Transaction}`
+        | `transactionReceipt.${keyof TransactionReceipt}`
+        | `trace.${keyof UserTrace}`
+      )[]
+    | undefined;
 };
 
 export type LogFactory = {
@@ -103,3 +210,125 @@ export const getChildAddress = ({
     return `0x${log.topics[topicIndex]!.substring(start, start + length)}`;
   }
 };
+
+export const defaultBlockFilterInclude: Exclude<
+  BlockFilter["include"],
+  undefined
+> = [
+  "block.baseFeePerGas",
+  "block.difficulty",
+  "block.extraData",
+  "block.gasLimit",
+  "block.gasUsed",
+  "block.hash",
+  "block.logsBloom",
+  "block.miner",
+  "block.nonce",
+  "block.number",
+  "block.parentHash",
+  "block.receiptsRoot",
+  "block.sha3Uncles",
+  "block.size",
+  "block.stateRoot",
+  "block.timestamp",
+  "block.transactionsRoot",
+];
+
+const defaultTransactionInclude: `transaction.${keyof Transaction}`[] = [
+  "transaction.from",
+  "transaction.gas",
+  "transaction.hash",
+  "transaction.input",
+  "transaction.nonce",
+  "transaction.r",
+  "transaction.s",
+  "transaction.to",
+  "transaction.transactionIndex",
+  "transaction.v",
+  "transaction.value",
+  // NOTE: type specific properties are not included
+];
+
+export const defaultTransactionReceiptInclude: `transactionReceipt.${keyof TransactionReceipt}`[] =
+  [
+    "transactionReceipt.contractAddress",
+    "transactionReceipt.cumulativeGasUsed",
+    "transactionReceipt.effectiveGasPrice",
+    "transactionReceipt.from",
+    "transactionReceipt.gasUsed",
+    "transactionReceipt.logsBloom",
+    "transactionReceipt.status",
+    "transactionReceipt.to",
+    "transactionReceipt.type",
+  ];
+
+const defaultTraceInclude: `trace.${keyof UserTrace}`[] = [
+  "trace.id",
+  "trace.type",
+  "trace.from",
+  "trace.to",
+  "trace.gas",
+  "trace.gasUsed",
+  "trace.input",
"trace.output", + "trace.error", + "trace.revertReason", + "trace.value", +]; + +export const defaultLogFilterInclude: Exclude = + [ + "log.id", + "log.address", + "log.data", + "log.logIndex", + "log.removed", + "log.topics", + ...defaultTransactionInclude, + ...defaultBlockFilterInclude, + ]; + +export const defaultTransactionFilterInclude: Exclude< + TransactionFilter["include"], + undefined +> = [ + ...defaultTransactionInclude, + ...defaultTransactionReceiptInclude, + ...defaultBlockFilterInclude, +]; + +export const defaultTraceFilterInclude: Exclude< + TraceFilter["include"], + undefined +> = [ + ...defaultBlockFilterInclude, + ...defaultTransactionInclude, + ...defaultTraceInclude, +]; + +export const defaultTransferFilterInclude: Exclude< + TransferFilter["include"], + undefined +> = [ + ...defaultBlockFilterInclude, + ...defaultTransactionInclude, + ...defaultTraceInclude, +]; + +export const shouldGetTransactionReceipt = ( + filter: Pick, +): boolean => { + // transactions must request receipts for "reverted" information + if (filter.type === "transaction") return true; + + if (filter.type === "block") return false; + + // TODO(kyle) should include be a required property? + if (filter.include === undefined) return true; + + if (filter.include.some((prop) => prop.startsWith("transactionReceipt."))) { + return true; + } + + return false; +}; diff --git a/packages/core/src/sync/transport.test.ts b/packages/core/src/sync/transport.test.ts index 9a48d4395..aa7a7eb53 100644 --- a/packages/core/src/sync/transport.test.ts +++ b/packages/core/src/sync/transport.test.ts @@ -1,12 +1,15 @@ +import { ALICE } from "@/_test/constants.js"; import { setupAnvil, setupCommon, setupDatabaseServices, setupIsolatedDatabase, } from "@/_test/setup.js"; -import { anvil, publicClient } from "@/_test/utils.js"; -import type { Transport } from "viem"; -import { getFunctionSelector, toHex } from "viem"; +import { deployErc20, mintErc20 } from "@/_test/simulate.js"; +import { anvil, getNetwork, publicClient } from "@/_test/utils.js"; +import { createRequestQueue } from "@/utils/requestQueue.js"; +import { type Transport, parseEther } from "viem"; +import { toHex } from "viem"; import { assertType, beforeEach, expect, test, vi } from "vitest"; import { cachedTransport } from "./transport.js"; @@ -15,11 +18,16 @@ beforeEach(setupAnvil); beforeEach(setupIsolatedDatabase); test("default", async (context) => { - const { requestQueues } = context; + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + const { syncStore, cleanup } = await setupDatabaseServices(context); const transport = cachedTransport({ - requestQueue: requestQueues[0], + requestQueue, syncStore, }); @@ -44,211 +52,117 @@ test("default", async (context) => { await cleanup(); }); -test("eth_call", async (context) => { - const { erc20, requestQueues } = context; +test("request() block dependent method", async (context) => { + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + const { syncStore, cleanup } = await setupDatabaseServices(context); const blockNumber = await publicClient.getBlockNumber(); const transport = cachedTransport({ - requestQueue: requestQueues[0], + requestQueue, syncStore, })({ chain: anvil, }); const response1 = await transport.request({ - method: "eth_call", - params: [ - { - data: getFunctionSelector("totalSupply()"), - to: erc20.address, - }, - toHex(blockNumber), - ], + method: 
"eth_getBlockByNumber", + params: [toHex(blockNumber), false], }); expect(response1).toBeDefined(); const insertSpy = vi.spyOn(syncStore, "insertRpcRequestResult"); + const getSpy = vi.spyOn(syncStore, "getRpcRequestResult"); const response2 = await transport.request({ - method: "eth_call", - params: [ - { - data: getFunctionSelector("totalSupply()"), - to: erc20.address, - }, - toHex(blockNumber), - ], + method: "eth_getBlockByNumber", + params: [toHex(blockNumber), false], }); - expect(response1).toBe(response2); + expect(response1).toStrictEqual(response2); expect(insertSpy).toHaveBeenCalledTimes(0); - - const getSpy = vi.spyOn(syncStore, "getRpcRequestResult"); - - const response3 = await transport.request({ - method: "eth_call", - params: [ - { - data: getFunctionSelector("totalSupply()"), - to: erc20.address, - }, - "latest", - ], - }); - - expect(response3).toBeDefined(); - expect(getSpy).toHaveBeenCalledTimes(1); await cleanup(); }); -test("eth_getBalance", async (context) => { - const { erc20, requestQueues } = context; - const { syncStore, cleanup } = await setupDatabaseServices(context); - const blockNumber = await publicClient.getBlockNumber(); - - const transport = cachedTransport({ - requestQueue: requestQueues[0], - syncStore, - })({ - chain: anvil, - }); - - const response1 = await transport.request({ - method: "eth_getBalance", - params: [erc20.address, toHex(blockNumber)], +test("request() non-block dependent method", async (context) => { + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, }); - expect(response1).toBeDefined(); - - const insertSpy = vi.spyOn(syncStore, "insertRpcRequestResult"); - - const response2 = await transport.request({ - method: "eth_getBalance", - params: [erc20.address, toHex(blockNumber)], + const { address } = await deployErc20({ sender: ALICE }); + await mintErc20({ + erc20: address, + to: ALICE, + amount: parseEther("1"), + sender: ALICE, }); - expect(response1).toBe(response2); - - expect(insertSpy).toHaveBeenCalledTimes(0); - - const getSpy = vi.spyOn(syncStore, "getRpcRequestResult"); - - const response3 = await transport.request({ - method: "eth_getBalance", - params: [erc20.address, "latest"], - }); - - expect(response3).toBeDefined(); - - expect(getSpy).toHaveBeenCalledTimes(1); - - await cleanup(); -}); - -test("eth_getStorageAt", async (context) => { - const { erc20, requestQueues } = context; const { syncStore, cleanup } = await setupDatabaseServices(context); const blockNumber = await publicClient.getBlockNumber(); + const block = await publicClient.getBlock({ blockNumber: blockNumber }); const transport = cachedTransport({ - requestQueue: requestQueues[0], + requestQueue, syncStore, })({ chain: anvil, }); const response1 = await transport.request({ - method: "eth_getStorageAt", - params: [erc20.address, toHex(3), toHex(blockNumber)], + method: "eth_getTransactionByHash", + params: [block.transactions[0]!], }); - expect(response1).toBeDefined(); + expect(response1).toBeDefined; const insertSpy = vi.spyOn(syncStore, "insertRpcRequestResult"); + const getSpy = vi.spyOn(syncStore, "getRpcRequestResult"); const response2 = await transport.request({ - method: "eth_getStorageAt", - params: [erc20.address, toHex(3), toHex(blockNumber)], + method: "eth_getTransactionByHash", + params: [block.transactions[0]!], }); - expect(response1).toBe(response2); + expect(response1).toStrictEqual(response2); expect(insertSpy).toHaveBeenCalledTimes(0); - - const getSpy = vi.spyOn(syncStore, 
"getRpcRequestResult"); - - const response3 = await transport.request({ - method: "eth_getStorageAt", - params: [erc20.address, toHex(3), "latest"], - }); - - expect(response3).toBeDefined(); - expect(getSpy).toHaveBeenCalledTimes(1); await cleanup(); }); -test("eth_getCode", async (context) => { - const { erc20, requestQueues } = context; - const { syncStore, cleanup } = await setupDatabaseServices(context); - const blockNumber = await publicClient.getBlockNumber(); +test("request() non-cached method", async (context) => { + const network = getNetwork(); + const requestQueue = createRequestQueue({ + network, + common: context.common, + }); + const { syncStore, cleanup } = await setupDatabaseServices(context); const transport = cachedTransport({ - requestQueue: requestQueues[0], + requestQueue, syncStore, })({ chain: anvil, }); - const response1 = await transport.request({ - method: "eth_getCode", - params: [erc20.address, toHex(blockNumber)], - }); - - expect(response1).toBeDefined(); - const insertSpy = vi.spyOn(syncStore, "insertRpcRequestResult"); - - const response2 = await transport.request({ - method: "eth_getCode", - params: [erc20.address, toHex(blockNumber)], - }); - - expect(response1).toBe(response2); - - expect(insertSpy).toHaveBeenCalledTimes(0); - const getSpy = vi.spyOn(syncStore, "getRpcRequestResult"); - const response3 = await transport.request({ - method: "eth_getCode", - params: [erc20.address, "latest"], - }); - - expect(response3).toBeDefined(); - - expect(getSpy).toHaveBeenCalledTimes(1); - - await cleanup(); -}); - -test("fallback method", async (context) => { - const { requestQueues } = context; - const { syncStore, cleanup } = await setupDatabaseServices(context); - const transport = cachedTransport({ - requestQueue: requestQueues[0], - syncStore, - })({ - chain: anvil, - }); - expect(await transport.request({ method: "eth_blockNumber" })).toBeDefined(); + expect(insertSpy).toHaveBeenCalledTimes(0); + expect(getSpy).toHaveBeenCalledTimes(0); + await cleanup(); }); diff --git a/packages/core/src/sync/transport.ts b/packages/core/src/sync/transport.ts index a993a912a..ae903fb42 100644 --- a/packages/core/src/sync/transport.ts +++ b/packages/core/src/sync/transport.ts @@ -1,15 +1,37 @@ import type { SyncStore } from "@/sync-store/index.js"; import { toLowerCase } from "@/utils/lowercase.js"; +import { orderObject } from "@/utils/order.js"; import type { RequestQueue } from "@/utils/requestQueue.js"; -import type { Address, Hex, Transport } from "viem"; +import type { Hex, Transport } from "viem"; import { custom, hexToBigInt, maxUint256 } from "viem"; -const cachedMethods = [ - "eth_call", +/** RPC methods that reference a block. */ +const blockDependentMethods = new Set([ "eth_getBalance", + "eth_getTransactionCount", + "eth_getBlockByNumber", + "eth_getBlockTransactionCountByNumber", + "eth_getTransactionByBlockNumberAndIndex", + "eth_call", + "eth_estimateGas", + "eth_feeHistory", + "eth_getProof", "eth_getCode", "eth_getStorageAt", -] as const; + "eth_getUncleByBlockNumberAndIndex", +]); + +/** RPC methods that don't reference a block. 
*/ +const nonBlockDependentMethods = new Set([ + "eth_getBlockByHash", + "eth_getTransactionByHash", + "eth_getBlockTransactionCountByHash", + "eth_getTransactionByBlockHashAndIndex", + "eth_getTransactionConfirmations", + "eth_getTransactionReceipt", + "eth_getUncleByBlockHashAndIndex", + "eth_getUncleCountByBlockHash", +]); export const cachedTransport = ({ requestQueue, @@ -23,56 +45,59 @@ export const cachedTransport = ({ async request({ method, params }) { const body = { method, params }; - if (cachedMethods.includes(method)) { - let request: string = undefined!; - let blockNumber: Hex | "latest" = undefined!; - - if (method === "eth_call") { - const [{ data, to }, _blockNumber] = params as [ - { data: Hex; to: Hex }, - Hex | "latest", - ]; + if ( + blockDependentMethods.has(method) || + nonBlockDependentMethods.has(method) + ) { + const request = toLowerCase(JSON.stringify(orderObject(body))); + let blockNumber: Hex | "latest" | undefined = undefined; - request = `${method as string}_${toLowerCase(to)}_${toLowerCase(data)}`; - blockNumber = _blockNumber; - } else if (method === "eth_getBalance") { - const [address, _blockNumber] = params as [Address, Hex | "latest"]; + switch (method) { + case "eth_getBlockByNumber": + case "eth_getBlockTransactionCountByNumber": + case "eth_getTransactionByBlockNumberAndIndex": + case "eth_getUncleByBlockNumberAndIndex": + [blockNumber] = params; + break; + case "eth_getBalance": + case "eth_call": + case "eth_getCode": + case "eth_estimateGas": + case "eth_feeHistory": + case "eth_getTransactionCount": + [, blockNumber] = params; + break; - request = `${method as string}_${toLowerCase(address)}`; - blockNumber = _blockNumber; - } else if (method === "eth_getCode") { - const [address, _blockNumber] = params as [Address, Hex | "latest"]; - - request = `${method as string}_${toLowerCase(address)}`; - blockNumber = _blockNumber; - } else if (method === "eth_getStorageAt") { - const [address, slot, _blockNumber] = params as [ - Address, - Hex, - Hex | "latest", - ]; - - request = `${method as string}_${toLowerCase(address)}_${toLowerCase(slot)}`; - blockNumber = _blockNumber; + case "eth_getProof": + case "eth_getStorageAt": + [, , blockNumber] = params; + break; } - const blockNumberBigInt = - blockNumber === "latest" ? maxUint256 : hexToBigInt(blockNumber); - - const cachedResult = await syncStore.getRpcRequestResult({ - blockNumber: blockNumberBigInt, + const cacheKey = { chainId: chain!.id, request, - }); + blockNumber: + blockNumber === undefined + ? undefined + : blockNumber === "latest" + ? 
maxUint256 + : hexToBigInt(blockNumber), + }; + + const cachedResult = await syncStore.getRpcRequestResult(cacheKey); - if (cachedResult !== null) return cachedResult; - else { + if (cachedResult !== undefined) { + try { + return JSON.parse(cachedResult); + } catch { + return cachedResult; + } + } else { const response = await requestQueue.request(body); await syncStore.insertRpcRequestResult({ - blockNumber: blockNumberBigInt, - chainId: chain!.id, - request, - result: response as string, + ...cacheKey, + result: JSON.stringify(response), }); return response; } diff --git a/packages/core/src/types.d.ts b/packages/core/src/types.d.ts new file mode 100644 index 000000000..bd23f4111 --- /dev/null +++ b/packages/core/src/types.d.ts @@ -0,0 +1,26 @@ +declare module "ponder:registry" { + import type { Virtual } from "ponder"; + type config = typeof import("ponder:internal").config; + type schema = typeof import("ponder:internal").schema; + + export const ponder: Virtual.Registry; + + export type EventNames = Virtual.EventNames; + export type Event = Virtual.Event< + config["default"], + name + >; + export type Context = Virtual.Context< + config["default"], + schema, + name + >; + export type ApiContext = Virtual.ApiContext; + export type IndexingFunctionArgs = + Virtual.IndexingFunctionArgs; +} + +declare module "ponder:schema" { + const schema: typeof import("ponder:internal").schema; + export { schema as default }; +} diff --git a/packages/core/src/types/eth.ts b/packages/core/src/types/eth.ts index e64f09323..5060987aa 100644 --- a/packages/core/src/types/eth.ts +++ b/packages/core/src/types/eth.ts @@ -54,10 +54,6 @@ export type Block = { */ export type Transaction = Prettify< { - /** Hash of block containing this transaction */ - blockHash: Hash; - /** Number of block containing this transaction */ - blockNumber: bigint; /** Transaction sender */ from: Address; /** Gas provided for transaction execution */ @@ -141,10 +137,6 @@ export type Log = { id: string; /** The address from which this log originated */ address: Address; - /** Hash of block containing this log */ - blockHash: Hash; - /** Number of block containing this log */ - blockNumber: bigint; /** Contains the non-indexed arguments of the log */ data: Hex; /** Index of this log within its block */ @@ -153,20 +145,10 @@ export type Log = { removed: boolean; /** List of order-dependent topics */ topics: [Hex, ...Hex[]] | []; - /** Hash of the transaction that created this log */ - transactionHash: Hash; - /** Index of the transaction that created this log */ - transactionIndex: number; }; -/** - * A confirmed Ethereum transaction receipt. - */ +/** A confirmed Ethereum transaction receipt. 
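For the cached transport above, the cache key is no longer a hand-built per-method string: it is the lowercased, key-ordered JSON of the full request body, plus the referenced block number when the method has one ("latest" is keyed as maxUint256). A short sketch of why the ordering step matters (request values are invented, and it assumes orderObject recursively sorts object keys, as it is used above):

// Sketch only.
import { orderObject } from "@/utils/order.js";
import { toLowerCase } from "@/utils/lowercase.js";

const a = { method: "eth_call", params: [{ to: "0xA", data: "0x18160ddd" }, "0x10"] };
const b = { method: "eth_call", params: [{ data: "0x18160ddd", to: "0xA" }, "0x10"] };

const keyA = toLowerCase(JSON.stringify(orderObject(a)));
const keyB = toLowerCase(JSON.stringify(orderObject(b)));

// Field order no longer produces distinct cache rows.
console.log(keyA === keyB); // true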
*/ export type TransactionReceipt = { - /** Hash of block containing this transaction */ - blockHash: Hash; - /** Number of block containing this transaction */ - blockNumber: bigint; /** Address of new contract or `null` if no contract was created */ contractAddress: Address | null; /** Gas used by this and all preceding transactions in this block */ @@ -177,57 +159,58 @@ export type TransactionReceipt = { from: Address; /** Gas used by this transaction */ gasUsed: bigint; - /** List of log objects generated by this transaction */ - logs: Log[]; /** Logs bloom filter */ logsBloom: Hex; /** `success` if this transaction was successful or `reverted` if it failed */ status: "success" | "reverted"; /** Transaction recipient or `null` if deploying a contract */ to: Address | null; - /** Hash of this transaction */ - transactionHash: Hash; - /** Index of this transaction in the block */ - transactionIndex: number; /** Transaction type */ type: TransactionType; }; -type _TraceAddress = number | _TraceAddress[]; -type TraceAddress = _TraceAddress[]; - -/** - * An Ethereum call trace. - */ -export type CallTrace = { - /** Globally unique identifier for this trace (`${transactionHash}-${traceAddress}`) */ +export type Trace = { + /** Globally unique identifier for this trace (`${transactionHash}-${tracePosition}`) */ id: string; - /** Message sender */ + /** The type of the call. */ + type: + | "CALL" + | "CALLCODE" + | "DELEGATECALL" + | "STATICCALL" + | "CREATE" + | "CREATE2" + | "SELFDESTRUCT"; + /** The address of that initiated the call. */ from: Address; - /** Message receipient */ - to: Address; - /** Amount of gas allocated to this call */ + /** The address of the contract that was called. */ + to: Address | null; + /** How much gas was left before the call. */ gas: bigint; - /** Value in wei sent with this call */ - value: bigint; - /** Calldata sent with this call */ - input: Hex; - /** Contains return data */ - output: Hex; - /** Total used gas by this trace */ + /** How much gas was used by the call. */ gasUsed: bigint; - /** Number of traces created by this trace */ - subtraces: number; - /** Description of this traces position within all traces in the transaction */ - traceAddress: TraceAddress; - /** Hash of block containing this trace */ - blockHash: Hash; - /** Number of block containing this trace */ - blockNumber: bigint; - /** Hash of the transaction that created this trace */ - transactionHash: Hash; - /** Index of the transaction that created this trace */ - transactionIndex: number; - /** EVM opcode used to make this call */ - callType: "call" | "staticcall" | "delegatecall" | "callcode"; + /** Calldata input. */ + input: Hex; + /** Output of the call, if any. */ + output?: Hex; + /** Error message, if any. */ + error?: string; + /** Why this call reverted, if it reverted. */ + revertReason?: string; + /** Value transferred. */ + value: bigint | null; + /** Index of this trace in the transaction. */ + traceIndex: number; + /** Number of subcalls. */ + subcalls: number; +}; + +/** A native token transfer. 
*/ +export type Transfer = { + /** The address that sent the transfer */ + from: Address; + /** The address that received the transfer */ + to: Address; + /** The amount of tokens transferred */ + value: bigint; }; diff --git a/packages/core/src/types/sync.ts b/packages/core/src/types/sync.ts index 608cf5183..a190a136f 100644 --- a/packages/core/src/types/sync.ts +++ b/packages/core/src/types/sync.ts @@ -1,5 +1,5 @@ +import type { Trace } from "@/utils/debug.js"; import type { - Address, BlockTag, Hex, Log, @@ -12,87 +12,12 @@ export type SyncBlock = RpcBlock, true>; export type SyncLog = Log; export type SyncTransaction = RpcTransaction; export type SyncTransactionReceipt = RpcTransactionReceipt; -export type SyncTrace = - | SyncCallTrace - | SyncCreateTrace - | SyncRewardTrace - | SyncSuicideTrace; +export type SyncTrace = { + trace: Trace["result"] & { index: number; subcalls: number }; + transactionHash: Trace["txHash"]; +}; export type LightBlock = Pick< SyncBlock, "hash" | "parentHash" | "number" | "timestamp" >; - -export type SyncCallTrace = { - action: { - callType: "call" | "delegatecall" | "staticcall"; - from: Address; - gas: Hex; - input: Hex; - to: Address; - value: Hex; - }; - blockHash: Hex; - blockNumber: Hex; - error?: string; - result: { - gasUsed: Hex; - output: Hex; - } | null; - subtraces: number; - traceAddress: number[]; - transactionHash: Hex; - transactionPosition: number; - type: "call"; -}; - -export type SyncCreateTrace = { - action: { - from: Address; - gas: Hex; - init: Hex; - value: Hex; - }; - blockHash: Hex; - blockNumber: Hex; - result: { - address: Address; - code: Hex; - gasUsed: Hex; - } | null; - subtraces: number; - traceAddress: number[]; - transactionHash: Hex; - transactionPosition: number; - type: "create"; -}; - -export type SyncSuicideTrace = { - action: { - address: Address; - refundAddress: Address; - balance: Hex; - }; - blockHash: Hex; - blockNumber: Hex; - result: null; - subtraces: number; - traceAddress: number[]; - transactionHash: Hex; - transactionPosition: number; - type: "suicide"; -}; - -export type SyncRewardTrace = { - action: { - author: Address; - rewardType: "block" | "uncle"; - value: Hex; - }; - blockHash: Hex; - blockNumber: Hex; - result: null; - subtraces: number; - traceAddress: number[]; - type: "reward"; -}; diff --git a/packages/core/src/types/virtual.test-d.ts b/packages/core/src/types/virtual.test-d.ts index 4aae4300d..07f490cca 100644 --- a/packages/core/src/types/virtual.test-d.ts +++ b/packages/core/src/types/virtual.test-d.ts @@ -5,8 +5,8 @@ import { assertType, test } from "vitest"; import type { Db } from "./db.js"; import type { Block, - CallTrace, Log, + Trace, Transaction, TransactionReceipt, } from "./eth.js"; @@ -72,6 +72,12 @@ const config = createConfig({ }, }, }, + accounts: { + a1: { + address: "0x", + network: "mainnet", + }, + }, blocks: { b1: { interval: 2, @@ -94,6 +100,7 @@ test("FormatEventNames without filter", () => { { contract: { abi: abi; network: "" }; }, + {}, {} >; @@ -113,6 +120,7 @@ test("FormatEvent names with filter", () => { { contract: { abi: abi; network: ""; filter: { event: "Event1()" } }; }, + {}, {} >; @@ -132,6 +140,7 @@ test("FormatEvent names with filter array", () => { filter: { event: readonly ["Event1()"] }; }; }, + {}, {} >; @@ -147,6 +156,7 @@ test("FormatEventNames with semi-weak abi", () => { { contract: { abi: abi[number][]; network: "" }; }, + {}, {} >; @@ -166,6 +176,7 @@ test("FormatEventNames with weak abi", () => { { contract: { abi: Abi; network: "" }; }, + 
{}, {} >; @@ -179,6 +190,7 @@ test("FormatEventNames with functions", () => { { contract: { abi: abi; network: ""; includeCallTraces: true }; }, + {}, {} >; @@ -195,10 +207,34 @@ test("FormatEventNames with functions", () => { assertType({} as any as a); }); +test("FormatEventName with accounts", () => { + type a = Virtual.FormatEventNames< + // ^? + {}, + { account: { address: "0x"; network: "mainnet" } }, + {} + >; + + assertType( + {} as any as + | "account:transfer:from" + | "account:transfer:to" + | "account:transaction:from" + | "account:transaction:to", + ); + assertType< + | "account:transfer:from" + | "account:transfer:to" + | "account:transaction:from" + | "account:transaction:to" + >({} as any as a); +}); + test("FormatEventName with blocks", () => { type a = Virtual.FormatEventNames< // ^? {}, + {}, { block: { interval: 2; startBlock: 1; network: "mainnet" } } >; @@ -272,7 +308,6 @@ test("Context client", () => { | "getEnsName"; assertType({} as any as expectedFunctions); - assertType({} as any as keyof a); }); test("Context contracts", () => { @@ -382,7 +417,7 @@ test("Event with functions", () => { type expectedEvent = { args: readonly [Address]; result: bigint; - trace: CallTrace; + trace: Trace; block: Block; transaction: Transaction; }; @@ -398,9 +433,42 @@ test("Event with functions and no inputs or outputs", () => { type expectedEvent = { args: never; result: never; - trace: CallTrace; + trace: Trace; + block: Block; + transaction: Transaction; + }; + + assertType({} as any as expectedEvent); + assertType({} as any as a); +}); + +test("Event with account transaction", () => { + type a = Virtual.Event; + // ^? + + type expectedEvent = { + block: Block; + transaction: Transaction; + transactionReceipt: TransactionReceipt; + }; + + assertType({} as any as expectedEvent); + assertType({} as any as a); +}); + +test("Event with account transfer", () => { + type a = Virtual.Event; + // ^? + + type expectedEvent = { + transfer: { + from: Address; + to: Address; + value: bigint; + }; block: Block; transaction: Transaction; + trace: Trace; }; assertType({} as any as expectedEvent); diff --git a/packages/core/src/types/virtual.ts b/packages/core/src/types/virtual.ts index 8dfc0194e..5d5081016 100644 --- a/packages/core/src/types/virtual.ts +++ b/packages/core/src/types/virtual.ts @@ -10,10 +10,11 @@ import type { Drizzle, Schema } from "@/drizzle/index.js"; import type { ReadOnlyClient } from "@/indexing/ponderActions.js"; import type { Block, - CallTrace, Log, + Trace, Transaction, TransactionReceipt, + Transfer, } from "@/types/eth.js"; import type { ApiRegistry } from "./api.js"; import type { Db } from "./db.js"; @@ -44,14 +45,18 @@ export namespace Virtual { safeFunctionNames = SafeFunctionNames, > = string extends safeFunctionNames ? never : safeFunctionNames; - /** "{ContractName}:{EventName}" | "{ContractName}.{FunctionName}()" | "{SourceName}:block" . */ + /** "{ContractName}:{EventName}" | "{ContractName}.{FunctionName}()" | "{SourceName}:block" | "{SourceName}:transaction:from" . 
*/ export type FormatEventNames< contracts extends Config["contracts"], + accounts extends Config["accounts"], blocks extends Config["blocks"], > = | { [name in keyof contracts]: `${name & string}:${_FormatEventNames | Setup}`; }[keyof contracts] + | { + [name in keyof accounts]: `${name & string}:${"transaction" | "transfer"}:${"from" | "to"}`; + }[keyof accounts] | { [name in keyof blocks]: `${name & string}:block`; }[keyof blocks] @@ -65,12 +70,9 @@ export namespace Virtual { }[keyof contracts]; type FormatTransactionReceipts< - contract extends Config["contracts"][string], + source extends Config["contracts" | "accounts"][string], /// - includeTxr = ExtractOverridenProperty< - contract, - "includeTransactionReceipts" - >, + includeTxr = ExtractOverridenProperty, > = includeTxr extends includeTxr ? includeTxr extends true ? { @@ -97,6 +99,7 @@ export namespace Virtual { export type EventNames = FormatEventNames< config["contracts"], + config["accounts"], config["blocks"] >; @@ -104,40 +107,59 @@ export namespace Virtual { config extends Config, name extends EventNames, /// - contractName extends ExtractSourceName = ExtractSourceName, + sourceName extends ExtractSourceName = ExtractSourceName, eventName extends ExtractEventName = ExtractEventName, > = name extends `${string}:block` - ? { block: Prettify } - : name extends `${string}.${string}` - ? Prettify< + ? // 1. block event + { block: Prettify } + : name extends `${string}:transaction:${"from" | "to"}` + ? // 2. transaction event + { + block: Prettify; + transaction: Prettify; + transactionReceipt: Prettify; + } + : name extends `${string}:transfer:${"from" | "to"}` + ? // 3. transfer event { - args: FormatFunctionArgs< - config["contracts"][contractName]["abi"], - eventName - >; - result: FormatFunctionResult< - config["contracts"][contractName]["abi"], - eventName - >; - trace: Prettify; + transfer: Prettify; block: Prettify; transaction: Prettify; - } & FormatTransactionReceipts - > - : eventName extends Setup - ? never - : Prettify< - { - name: eventName; - args: FormatEventArgs< - config["contracts"][contractName]["abi"], - eventName + trace: Prettify; + } & FormatTransactionReceipts + : name extends `${string}.${string}` + ? // 4. call trace event + Prettify< + { + args: FormatFunctionArgs< + config["contracts"][sourceName]["abi"], + eventName + >; + result: FormatFunctionResult< + config["contracts"][sourceName]["abi"], + eventName + >; + trace: Prettify; + block: Prettify; + transaction: Prettify; + } & FormatTransactionReceipts + > + : eventName extends Setup + ? // 5. setup event + never + : // 6. 
log event + Prettify< + { + name: eventName; + args: FormatEventArgs< + config["contracts"][sourceName]["abi"], + eventName + >; + log: Prettify; + block: Prettify; + transaction: Prettify; + } & FormatTransactionReceipts >; - log: Prettify; - block: Prettify; - transaction: Prettify; - } & FormatTransactionReceipts - >; type ContextContractProperty = Exclude< keyof Config["contracts"][string], @@ -145,7 +167,7 @@ export namespace Virtual { >; type ExtractOverridenProperty< - contract extends Config["contracts"][string], + contract extends Config["contracts" | "accounts"][string], property extends ContextContractProperty, /// base = Extract[property], @@ -202,23 +224,7 @@ export namespace Virtual { keyof config["networks"]]["chainId"]; }; }[keyof sourceNetwork]; - client: Prettify< - Omit< - ReadOnlyClient, - | "extend" - | "key" - | "batch" - | "cacheTime" - | "account" - | "type" - | "uid" - | "chain" - | "name" - | "pollingInterval" - | "transport" - | "ccipRead" - > - >; + client: Prettify; db: Db; }; diff --git a/packages/core/src/ui/Table.tsx b/packages/core/src/ui/Table.tsx index 7fc8a7ced..f7f506f3f 100644 --- a/packages/core/src/ui/Table.tsx +++ b/packages/core/src/ui/Table.tsx @@ -1,4 +1,4 @@ -import { Box, Text } from "ink"; // Assuming you're using ink for CLI UI components +import { Box, Text, render } from "ink"; // Assuming you're using ink for CLI UI components import React from "react"; const MAX_COLUMN_WIDTH = 24; @@ -121,4 +121,22 @@ export function Table(props: { ); } -export default Table; +export function printTable(props: { + columns: { + title: string; + key: keyof TRow; + align: "left" | "right"; + format?: (value: any, row: TRow) => string | number | React.JSX.Element; + }[]; + rows: TRow[]; +}) { + const table = ( + <> + + + + + ); + const instance = render(table); + instance.cleanup(); +} diff --git a/packages/core/src/ui/app.tsx b/packages/core/src/ui/app.tsx index 8e286768f..b48d4b65b 100644 --- a/packages/core/src/ui/app.tsx +++ b/packages/core/src/ui/app.tsx @@ -7,7 +7,7 @@ import { formatEta, formatPercentage } from "@/utils/format.js"; import { Box, Text, render as inkRender } from "ink"; import React from "react"; import { ProgressBar } from "./ProgressBar.js"; -import Table from "./Table.js"; +import { Table } from "./Table.js"; export type UiState = { port: number; diff --git a/packages/core/src/utils/checkpoint.ts b/packages/core/src/utils/checkpoint.ts index 4f99900ab..3040e092e 100644 --- a/packages/core/src/utils/checkpoint.ts +++ b/packages/core/src/utils/checkpoint.ts @@ -30,9 +30,10 @@ const CHECKPOINT_LENGTH = EVENT_INDEX_DIGITS; export const EVENT_TYPES = { + transactions: 2, blocks: 5, logs: 5, - callTraces: 7, + traces: 7, } as const; export const encodeCheckpoint = (checkpoint: Checkpoint) => { diff --git a/packages/core/src/utils/debug.ts b/packages/core/src/utils/debug.ts new file mode 100644 index 000000000..f0a57977c --- /dev/null +++ b/packages/core/src/utils/debug.ts @@ -0,0 +1,110 @@ +import type { Address, Hash, Hex, LogTopic } from "viem"; + +/** @see https://github.com/alloy-rs/alloy/blob/main/crates/rpc-types-trace/src/geth/call.rs */ +/** @see https://github.com/alloy-rs/alloy/blob/main/crates/rpc-types-trace/src/common.rs */ +/** @see https://github.com/paradigmxyz/reth/blob/main/crates/rpc/rpc/src/debug.rs */ + +/** Result type for geth style transaction trace. */ +export type Trace = { + /** Transaction hash. */ + txHash: Hex; + /** Trace results produced by the tracer. 
*/ + result: CallFrame; +}; + +/** + * The response object for `debug_traceBlockByNumber` and `debug_traceBlockByHash` + * with `"tracer": "callTracer"`. + */ +type CallFrame = { + /** The type of the call. */ + type: + | "CALL" + | "CALLCODE" + | "DELEGATECALL" + | "STATICCALL" + | "CREATE" + | "CREATE2" + | "SELFDESTRUCT"; + /** The address that initiated the call. */ + from: Address; + /** The address of the contract that was called. */ + to?: Address; + /** How much gas was left before the call. */ + gas: Hex; + /** How much gas was used by the call. */ + gasUsed: Hex; + /** Calldata input. */ + input: Hex; + /** Output of the call, if any. */ + output?: Hex; + /** Error message, if any. */ + error?: string; + /** Why this call reverted, if it reverted. */ + revertReason?: string; + /** Recorded child calls. */ + calls?: CallFrame[]; + /** Logs emitted by this call. */ + logs?: CallLogFrame[]; + /** Value transferred. */ + value?: Hex; +}; + +/** Represents a recorded log that is emitted during a trace call. */ +type CallLogFrame = { + /** The address of the contract that was called. */ + address: Address; + /** The topics of the log. */ + topics: LogTopic[]; + /** The data of the log. */ + data: Hex; + /** The position of the log relative to subcalls within the same trace. */ + position: number; +}; + +/** The configuration for the call tracer. */ +type CallConfig = { + /** When set to true, this will only trace the primary (top-level) call and not any sub-calls. */ + onlyTopCall?: boolean; + /** When set to true, this will include the logs emitted by the call. */ + withLog?: boolean; +}; + +export type DebugRpcSchema = [ + /** + * @description Returns tracing results by executing all transactions in the block specified by the block hash + * + * @example + * provider.request({ method: 'debug_traceBlockByHash', params: ['0x...', { tracer: "callTracer" }] }) + * // => { + * // txHash: '0x5a42...', + * // result: [...], + * // } + */ + { + Method: "debug_traceBlockByHash"; + Parameters: [ + hash: Hash, + tracingOptions: { tracer: "callTracer"; tracerConfig?: CallConfig }, + ]; + ReturnType: Trace[]; + }, + /** + * @description Returns tracing results by executing all transactions in the block specified by the block number + * + * @example + * provider.request({ method: 'debug_traceBlockByNumber', params: ['0x1b4', { tracer: "callTracer" }] }) + * // => { + * // txHash: '0x5a42...', + * // result: [...], + * // } + */ + { + Method: "debug_traceBlockByNumber"; + Parameters: [ + block: Hex, + tracingOptions: { tracer: "callTracer"; tracerConfig?: CallConfig }, + ]; + ReturnType: Trace[]; + }, +]; diff --git a/packages/core/src/utils/interval.ts b/packages/core/src/utils/interval.ts index 6c409007f..62508a507 100644 --- a/packages/core/src/utils/interval.ts +++ b/packages/core/src/utils/interval.ts @@ -1,3 +1,5 @@ +import { range } from "./range.js"; + export type Interval = [number, number]; /** @@ -191,3 +193,7 @@ export function getChunks({ return _chunks; } + +export function intervalRange(interval: Interval) { + return range(interval[0], interval[1] + 1); +} diff --git a/packages/core/src/utils/order.ts b/packages/core/src/utils/order.ts new file mode 100644 index 000000000..dfe2ecbc3 --- /dev/null +++ b/packages/core/src/utils/order.ts @@ -0,0 +1,16 @@ +export const orderObject = (obj: any): any => { + if (Array.isArray(obj)) return obj.map((x) => orderObject(x)); + if (typeof obj !== "object") return obj; + + const newObj = {} as any; + for (const key of Object.keys(obj).sort()) { +
const val = obj[key]; + if (typeof val === "object") { + newObj[key] = orderObject(obj[key]); + } else { + newObj[key] = obj[key]; + } + } + + return newObj; +}; diff --git a/packages/core/src/utils/requestQueue.test.ts b/packages/core/src/utils/requestQueue.test.ts index 71f92eb50..2c29071ef 100644 --- a/packages/core/src/utils/requestQueue.test.ts +++ b/packages/core/src/utils/requestQueue.test.ts @@ -1,4 +1,5 @@ import { setupAnvil, setupCommon } from "@/_test/setup.js"; +import { getNetwork } from "@/_test/utils.js"; import type { Common } from "@/common/common.js"; import type { Network } from "@/config/networks.js"; import { beforeEach, expect, test } from "vitest"; @@ -15,8 +16,10 @@ const getQueue = (network: Network, common: Common) => { }); }; -test("requests", async ({ networks, common }) => { - const queue = getQueue(networks[0], common); +test("requests", async ({ common }) => { + const network = getNetwork(); + + const queue = getQueue(network, common); queue.start(); const chainId = await queue.request({ method: "eth_chainId" }); diff --git a/packages/core/src/utils/requestQueue.ts b/packages/core/src/utils/requestQueue.ts index 7d89750c4..72643a344 100644 --- a/packages/core/src/utils/requestQueue.ts +++ b/packages/core/src/utils/requestQueue.ts @@ -16,21 +16,23 @@ import { type RpcError, isHex, } from "viem"; +import type { DebugRpcSchema } from "./debug.js"; import { startClock } from "./timer.js"; import { wait } from "./wait.js"; -type RequestReturnType< - method extends EIP1193Parameters["method"], -> = Extract["ReturnType"]; +type Schema = [...PublicRpcSchema, ...DebugRpcSchema]; + +type RequestReturnType["method"]> = + Extract["ReturnType"]; export type RequestQueue = Omit< Queue< - RequestReturnType["method"]>, - EIP1193Parameters + RequestReturnType["method"]>, + EIP1193Parameters >, "add" > & { - request: >( + request: >( parameters: TParameters, ) => Promise>; }; diff --git a/packages/core/src/utils/result.ts b/packages/core/src/utils/result.ts new file mode 100644 index 000000000..943c0bee3 --- /dev/null +++ b/packages/core/src/utils/result.ts @@ -0,0 +1,24 @@ +export type Result = + | { status: "success"; result: T } + | { status: "error"; error: Error }; + +export type MergeResults[]> = + T extends readonly [ + infer Head extends Result, + ...infer Tail extends Result[], + ] + ? [Extract["result"], ...MergeResults] + : []; + +export const mergeResults = []>( + results: T, +): Result> => { + for (const result of results) { + if (result.status === "error") { + return result; + } + } + + // @ts-ignore + return { status: "success", result: results.map((result) => result.result) }; +}; diff --git a/packages/core/src/utils/rpc.ts b/packages/core/src/utils/rpc.ts index 57bf575c2..6150d30a7 100644 --- a/packages/core/src/utils/rpc.ts +++ b/packages/core/src/utils/rpc.ts @@ -147,59 +147,144 @@ export const _eth_getTransactionReceipt = ( }); /** - * Helper function for "trace_filter" request. - * - * Note: No strict typing is available. + * Helper function for "debug_traceBlockByNumber" request. */ -export const _trace_filter = ( +export const _debug_traceBlockByNumber = ( requestQueue: RequestQueue, - params: { - fromBlock: Hex | number; - toBlock: Hex | number; - fromAddress?: Address[]; - toAddress?: Address[]; + { + blockNumber, + }: { + blockNumber: Hex | number; }, ): Promise => requestQueue .request({ - method: "trace_filter", + method: "debug_traceBlockByNumber", params: [ - { - fromBlock: - typeof params.fromBlock === "number" - ? 
numberToHex(params.fromBlock) - : params.fromBlock, - toBlock: - typeof params.toBlock === "number" - ? numberToHex(params.toBlock) - : params.toBlock, - fromAddress: params.fromAddress - ? params.fromAddress.map((a) => toLowerCase(a)) - : undefined, - toAddress: params.toAddress - ? params.toAddress.map((a) => toLowerCase(a)) - : undefined, - }, + typeof blockNumber === "number" + ? numberToHex(blockNumber) + : blockNumber, + { tracer: "callTracer" }, ], - } as any) - .then((traces) => traces as unknown as SyncTrace[]); + }) + .then((traces) => { + const result: SyncTrace[] = []; + let index = 0; + // all traces that weren't included because the trace has an error + // or the trace's parent has an error, mapped to the error string + const failedTraces = new Map< + (typeof traces)[number]["result"], + { error?: string; revertReason?: string } + >(); + + const dfs = ( + frames: (typeof traces)[number]["result"][], + transactionHash: Hex, + parentFrame: (typeof traces)[number]["result"] | undefined, + ) => { + for (const frame of frames) { + if (frame.error !== undefined) { + failedTraces.set(frame, { + error: frame.error, + revertReason: frame.revertReason, + }); + } else if (parentFrame && failedTraces.has(parentFrame)) { + const error = failedTraces.get(parentFrame)!; + + frame.error = error.error; + frame.revertReason = error.revertReason; + + failedTraces.set(frame, error); + } + + // @ts-ignore + frame.index = index; + // @ts-ignore + frame.subcalls = frame.calls?.length ?? 0; + + result.push({ trace: frame as SyncTrace["trace"], transactionHash }); + + index++; + + if (frame.calls) { + dfs(frame.calls, transactionHash, frame); + } + } + }; + + for (const trace of traces) { + index = 0; + dfs([trace.result], trace.txHash, undefined); + } + + return result; + }); /** - * Helper function for "trace_block" request. + * Helper function for "debug_traceBlockByHash" request. */ -export const _trace_block = ( +export const _debug_traceBlockByHash = ( requestQueue: RequestQueue, - params: { - blockNumber: Hex | number; + { + hash, + }: { + hash: Hash; }, ): Promise => requestQueue .request({ - method: "trace_block", - params: [ - typeof params.blockNumber === "number" - ? numberToHex(params.blockNumber) - : params.blockNumber, - ], - } as any) - .then((traces) => traces as unknown as SyncTrace[]); + method: "debug_traceBlockByHash", + params: [hash, { tracer: "callTracer" }], + }) + .then((traces) => { + const result: SyncTrace[] = []; + let index = 0; + // all traces that weren't included because the trace has an error + // or the trace's parent has an error, mapped to the error string + const failedTraces = new Map< + (typeof traces)[number]["result"], + { error?: string; revertReason?: string } + >(); + + const dfs = ( + frames: (typeof traces)[number]["result"][], + transactionHash: Hex, + parentFrame: (typeof traces)[number]["result"] | undefined, + ) => { + for (const frame of frames) { + if (frame.error !== undefined) { + failedTraces.set(frame, { + error: frame.error, + revertReason: frame.revertReason, + }); + } else if (parentFrame && failedTraces.has(parentFrame)) { + const error = failedTraces.get(parentFrame)!; + + frame.error = error.error; + frame.revertReason = error.revertReason; + + failedTraces.set(frame, error); + } + + // @ts-ignore + frame.index = index; + // @ts-ignore + frame.subcalls = frame.calls?.length ?? 
0; + + result.push({ trace: frame as SyncTrace["trace"], transactionHash }); + + index++; + + if (frame.calls) { + dfs(frame.calls, transactionHash, frame); + } + } + }; + + for (const trace of traces) { + index = 0; + dfs([trace.result], trace.txHash, undefined); + } + + return result; + }); diff --git a/packages/core/tsup.config.ts b/packages/core/tsup.config.ts index f261292a0..0a712777c 100644 --- a/packages/core/tsup.config.ts +++ b/packages/core/tsup.config.ts @@ -2,7 +2,7 @@ import { execa } from "execa"; import { defineConfig } from "tsup"; export default defineConfig({ - name: "@ponder/core", + name: "ponder", entry: ["src/index.ts", "src/bin/ponder.ts"], outDir: "dist", format: ["esm"], diff --git a/packages/create-ponder/CHANGELOG.md b/packages/create-ponder/CHANGELOG.md index 0459ff1e2..0ed88ef3f 100644 --- a/packages/create-ponder/CHANGELOG.md +++ b/packages/create-ponder/CHANGELOG.md @@ -548,7 +548,7 @@ ### Patch Changes -- [#123](https://github.com/0xOlias/ponder/pull/123) [`9d6f820`](https://github.com/0xOlias/ponder/commit/9d6f820e9d0d1815aa6ebf7b001c0a3139c58f7c) Thanks [@0xOlias](https://github.com/0xOlias)! - Migrated to viem and added `@/generated` alias in generated handler code +- [#123](https://github.com/0xOlias/ponder/pull/123) [`9d6f820`](https://github.com/0xOlias/ponder/commit/9d6f820e9d0d1815aa6ebf7b001c0a3139c58f7c) Thanks [@0xOlias](https://github.com/0xOlias)! - Migrated to viem and added `ponder:registry` alias in generated handler code ## 0.0.39 diff --git a/packages/create-ponder/README.md b/packages/create-ponder/README.md index fa5fca37d..34be6e0c6 100644 --- a/packages/create-ponder/README.md +++ b/packages/create-ponder/README.md @@ -62,7 +62,7 @@ Ponder fetches event logs for the contracts added to `ponder.config.ts`, and pas ```ts // ponder.config.ts -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { BaseRegistrarAbi } from "./abis/BaseRegistrar"; @@ -92,7 +92,7 @@ The `ponder.schema.ts` file contains the database schema, and defines the shape ```ts // ponder.schema.ts -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const ensName = onchainTable("ens_name", (t) => ({ name: p.text().primaryKey(), @@ -108,8 +108,8 @@ Files in the `src/` directory contain **indexing functions**, which are TypeScri ```ts // src/BaseRegistrar.ts -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("BaseRegistrar:NameRegistered", async ({ event, context }) => { const { name, owner } = event.params; @@ -167,7 +167,7 @@ If you're interested in contributing to Ponder, please read the [contribution gu ## Packages -- `@ponder/core` +- `ponder` - `@ponder/utils` - `create-ponder` - `eslint-config-ponder` @@ -180,7 +180,7 @@ Ponder is MIT-licensed open-source software. 
[ci-url]: https://github.com/ponder-sh/ponder/actions/workflows/main.yml [tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=Chat&url=https%3A%2F%2Fmogyo.ro%2Fquart-apis%2Ftgmembercount%3Fchat_id%3Dponder_sh [tg-url]: https://t.me/ponder_sh -[license-badge]: https://img.shields.io/npm/l/@ponder/core?label=License +[license-badge]: https://img.shields.io/npm/l/ponder?label=License [license-url]: https://github.com/ponder-sh/ponder/blob/main/LICENSE -[version-badge]: https://img.shields.io/npm/v/@ponder/core +[version-badge]: https://img.shields.io/npm/v/ponder [version-url]: https://github.com/ponder-sh/ponder/releases diff --git a/packages/create-ponder/src/index.ts b/packages/create-ponder/src/index.ts index 3e4474bb7..9f2f8d301 100644 --- a/packages/create-ponder/src/index.ts +++ b/packages/create-ponder/src/index.ts @@ -79,6 +79,11 @@ const templates = [ title: "Feature - Factory contract", description: "A Ponder app using a factory contract", }, + { + id: "feature-accounts", + title: "Feature - Accounts", + description: "A Ponder app using accounts", + }, { id: "feature-filter", title: "Feature - Custom event filter", @@ -344,7 +349,7 @@ export async function run({ Object.values(config.contracts).some((c) => Array.isArray(c.abi)) ? ", mergeAbis" : "" - } } from "@ponder/core"; + } } from "ponder"; import { http } from "viem"; ${Object.values(config.contracts) @@ -404,7 +409,7 @@ export async function run({ .slice(0, 4); const indexingFunctionFileContents = ` - import { ponder } from '@/generated' + import { ponder } from 'ponder:registry' ${eventNamesToWrite .map( @@ -428,7 +433,7 @@ export async function run({ // Create package.json for project const packageJson = await fs.readJSON(path.join(projectPath, "package.json")); packageJson.name = projectName; - packageJson.dependencies["@ponder/core"] = `^${rootPackageJson.version}`; + packageJson.dependencies.ponder = `^${rootPackageJson.version}`; packageJson.devDependencies["eslint-config-ponder"] = `^${rootPackageJson.version}`; await fs.writeFile( diff --git a/packages/create-ponder/templates/empty/package.json b/packages/create-ponder/templates/empty/package.json index c40e1dd82..458076c84 100644 --- a/packages/create-ponder/templates/empty/package.json +++ b/packages/create-ponder/templates/empty/package.json @@ -6,12 +6,13 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "^0.0.95", + "ponder": "^0.0.95", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/packages/create-ponder/templates/empty/ponder-env.d.ts b/packages/create-ponder/templates/empty/ponder-env.d.ts index 03126bf92..b8c6a630d 100644 --- a/packages/create-ponder/templates/empty/ponder-env.d.ts +++ b/packages/create-ponder/templates/empty/ponder-env.d.ts @@ -1,28 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. 
- -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts").default; - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.Drizzle; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; - export type Schema = Virtual.Schema; -} diff --git a/packages/create-ponder/templates/empty/ponder.config.ts b/packages/create-ponder/templates/empty/ponder.config.ts index 1ed23d627..47357abac 100644 --- a/packages/create-ponder/templates/empty/ponder.config.ts +++ b/packages/create-ponder/templates/empty/ponder.config.ts @@ -1,4 +1,4 @@ -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { ExampleContractAbi } from "./abis/ExampleContractAbi"; diff --git a/packages/create-ponder/templates/empty/ponder.schema.ts b/packages/create-ponder/templates/empty/ponder.schema.ts index 520be9928..6ddd536ad 100644 --- a/packages/create-ponder/templates/empty/ponder.schema.ts +++ b/packages/create-ponder/templates/empty/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const example = onchainTable("example", (t) => ({ id: t.text().primaryKey(), diff --git a/packages/create-ponder/templates/empty/src/api/index.ts b/packages/create-ponder/templates/empty/src/api/index.ts index 8f8eec442..c1af160a2 100644 --- a/packages/create-ponder/templates/empty/src/api/index.ts +++ b/packages/create-ponder/templates/empty/src/api/index.ts @@ -1,5 +1,5 @@ -import { ponder } from "@/generated"; -import { graphql } from "@ponder/core"; +import { ponder } from "ponder:registry"; +import { graphql } from "ponder"; ponder.use("/graphql", graphql()); ponder.use("/", graphql()); diff --git a/packages/create-ponder/templates/empty/src/index.ts b/packages/create-ponder/templates/empty/src/index.ts index 65343173f..d252c7521 100644 --- a/packages/create-ponder/templates/empty/src/index.ts +++ b/packages/create-ponder/templates/empty/src/index.ts @@ -1 +1 @@ -import { ponder } from "@/generated"; +import { ponder } from "ponder:registry"; diff --git a/packages/create-ponder/templates/etherscan/package.json b/packages/create-ponder/templates/etherscan/package.json index f5a5bb29d..a6a6982f8 100644 --- a/packages/create-ponder/templates/etherscan/package.json +++ b/packages/create-ponder/templates/etherscan/package.json @@ -6,12 +6,13 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "^0.0.95", + "ponder": "^0.0.95", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/packages/create-ponder/templates/etherscan/ponder-env.d.ts b/packages/create-ponder/templates/etherscan/ponder-env.d.ts index 03126bf92..b8c6a630d 100644 --- a/packages/create-ponder/templates/etherscan/ponder-env.d.ts +++ b/packages/create-ponder/templates/etherscan/ponder-env.d.ts @@ -1,28 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type 
checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. - -declare module "@/generated" { - import type { Virtual } from "@ponder/core"; - - type config = typeof import("./ponder.config.ts").default; - type schema = typeof import("./ponder.schema.ts").default; - - export const ponder: Virtual.Registry; - - export type EventNames = Virtual.EventNames; - export type Event = Virtual.Event< - config, - name - >; - export type Context = Virtual.Context< - config, - schema, - name - >; - export type ApiContext = Virtual.Drizzle; - export type IndexingFunctionArgs = - Virtual.IndexingFunctionArgs; - export type Schema = Virtual.Schema; -} diff --git a/packages/create-ponder/templates/etherscan/ponder.schema.ts b/packages/create-ponder/templates/etherscan/ponder.schema.ts index 520be9928..6ddd536ad 100644 --- a/packages/create-ponder/templates/etherscan/ponder.schema.ts +++ b/packages/create-ponder/templates/etherscan/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const example = onchainTable("example", (t) => ({ id: t.text().primaryKey(), diff --git a/packages/create-ponder/templates/subgraph/package.json b/packages/create-ponder/templates/subgraph/package.json index f5a5bb29d..a6a6982f8 100644 --- a/packages/create-ponder/templates/subgraph/package.json +++ b/packages/create-ponder/templates/subgraph/package.json @@ -6,12 +6,13 @@ "scripts": { "dev": "ponder dev", "start": "ponder start", + "db": "ponder db", "codegen": "ponder codegen", "lint": "eslint .", "typecheck": "tsc" }, "dependencies": { - "@ponder/core": "^0.0.95", + "ponder": "^0.0.95", "hono": "^4.5.0", "viem": "^2.21.3" }, diff --git a/packages/create-ponder/templates/subgraph/ponder-env.d.ts b/packages/create-ponder/templates/subgraph/ponder-env.d.ts index 1169bd3f1..b8c6a630d 100644 --- a/packages/create-ponder/templates/subgraph/ponder-env.d.ts +++ b/packages/create-ponder/templates/subgraph/ponder-env.d.ts @@ -1,33 +1,15 @@ +/// + +declare module "ponder:internal" { + const config: typeof import("./ponder.config.ts"); + const schema: typeof import("./ponder.schema.ts"); +} + +declare module "ponder:schema" { + export * from "./ponder.schema.ts"; +} + // This file enables type checking and editor autocomplete for this Ponder project. // After upgrading, you may find that changes have been made to this file. // If this happens, please commit the changes. Do not manually edit this file. // See https://ponder.sh/docs/getting-started/installation#typescript for more information. 
- -declare module "@/generated" { - import type { - PonderContext, - PonderEvent, - PonderEventNames, - PonderApp, - } from "@ponder/core"; - - type Config = typeof import("./ponder.config.ts").default; - type Schema = typeof import("./ponder.schema.ts").default; - - export const ponder: PonderApp; - export type EventNames = PonderEventNames; - export type Event = PonderEvent< - Config, - name - >; - export type Context = PonderContext< - Config, - Schema, - name - >; - export type ApiContext = Virtual.Drizzle; - export type IndexingFunctionArgs = { - event: Event; - context: Context; - }; -} diff --git a/packages/create-ponder/templates/subgraph/ponder.schema.ts b/packages/create-ponder/templates/subgraph/ponder.schema.ts index 520be9928..6ddd536ad 100644 --- a/packages/create-ponder/templates/subgraph/ponder.schema.ts +++ b/packages/create-ponder/templates/subgraph/ponder.schema.ts @@ -1,4 +1,4 @@ -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const example = onchainTable("example", (t) => ({ id: t.text().primaryKey(), diff --git a/packages/eslint-config-ponder/README.md b/packages/eslint-config-ponder/README.md index b98260a9d..ffb26b1b9 100644 --- a/packages/eslint-config-ponder/README.md +++ b/packages/eslint-config-ponder/README.md @@ -62,7 +62,7 @@ Ponder fetches event logs for the contracts added to `ponder.config.ts`, and pas ```ts // ponder.config.ts -import { createConfig } from "@ponder/core"; +import { createConfig } from "ponder"; import { http } from "viem"; import { BaseRegistrarAbi } from "./abis/BaseRegistrar"; @@ -92,7 +92,7 @@ The `ponder.schema.ts` file contains the database schema, and defines the shape ```ts // ponder.schema.ts -import { onchainTable } from "@ponder/core"; +import { onchainTable } from "ponder"; export const ensName = onchainTable("ens_name", (t) => ({ name: p.text().primaryKey(), @@ -108,8 +108,8 @@ Files in the `src/` directory contain **indexing functions**, which are TypeScri ```ts // src/BaseRegistrar.ts -import { ponder } from "@/generated"; -import * as schema from "../ponder.schema"; +import { ponder } from "ponder:registry"; +import schema from "ponder:schema"; ponder.on("BaseRegistrar:NameRegistered", async ({ event, context }) => { const { name, owner } = event.params; @@ -167,7 +167,7 @@ If you're interested in contributing to Ponder, please read the [contribution gu ## Packages -- `@ponder/core` +- `ponder` - `@ponder/utils` - `create-ponder` - `eslint-config-ponder` @@ -180,7 +180,7 @@ Ponder is MIT-licensed open-source software. 
[ci-url]: https://github.com/ponder-sh/ponder/actions/workflows/main.yml [tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=Chat&url=https%3A%2F%2Fmogyo.ro%2Fquart-apis%2Ftgmembercount%3Fchat_id%3Dponder_sh [tg-url]: https://t.me/ponder_sh -[license-badge]: https://img.shields.io/npm/l/@ponder/core?label=License +[license-badge]: https://img.shields.io/npm/l/ponder?label=License [license-url]: https://github.com/ponder-sh/ponder/blob/main/LICENSE -[version-badge]: https://img.shields.io/npm/v/@ponder/core +[version-badge]: https://img.shields.io/npm/v/ponder [version-url]: https://github.com/ponder-sh/ponder/releases diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 2e253b4e6..e8d64a367 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -41,15 +41,15 @@ importers: '@graphprotocol/graph-ts': specifier: ^0.31.0 version: 0.31.0 - '@ponder/core': - specifier: workspace:* - version: link:../packages/core '@types/node': specifier: ^20.10.0 version: 20.11.24 execa: specifier: ^8.0.1 version: 8.0.1 + ponder: + specifier: workspace:* + version: link:../packages/core tsup: specifier: ^8.0.1 version: 8.0.1(postcss@8.4.32)(ts-node@10.9.2(@types/node@20.11.24)(typescript@5.3.3))(typescript@5.3.3) @@ -136,17 +136,39 @@ importers: specifier: ^5.2.2 version: 5.3.3 - examples/feature-api-functions: + examples/feature-accounts: dependencies: - '@ponder/core': + hono: + specifier: ^4.5.0 + version: 4.5.0 + ponder: specifier: workspace:* version: link:../../packages/core - drizzle-kit: - specifier: 0.25.0 - version: 0.25.0 + viem: + specifier: ^2.21.3 + version: 2.21.3(typescript@5.3.3)(zod@3.23.8) + devDependencies: + '@types/node': + specifier: ^20.10.0 + version: 20.11.24 + eslint: + specifier: ^8.54.0 + version: 8.56.0 + eslint-config-ponder: + specifier: workspace:* + version: link:../../packages/eslint-config-ponder + typescript: + specifier: ^5.3.2 + version: 5.3.3 + + examples/feature-api-functions: + dependencies: hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -166,12 +188,12 @@ importers: examples/feature-blocks: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -191,12 +213,12 @@ importers: examples/feature-call-traces: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -216,15 +238,15 @@ importers: examples/feature-factory: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core abitype: specifier: ^0.10.2 version: 0.10.3(typescript@5.3.3)(zod@3.23.8) hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -244,12 +266,12 @@ importers: examples/feature-filter: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ 
-269,12 +291,12 @@ importers: examples/feature-multichain: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -294,12 +316,12 @@ importers: examples/feature-proxy: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -319,12 +341,12 @@ importers: examples/feature-read-contract: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -344,12 +366,12 @@ importers: examples/project-friendtech: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -369,12 +391,12 @@ importers: examples/project-uniswap-v3-flash: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -394,12 +416,12 @@ importers: examples/reference-erc1155: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -419,15 +441,15 @@ importers: examples/reference-erc20: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core drizzle-kit: specifier: 0.22.8 version: 0.22.8 hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -447,12 +469,12 @@ importers: examples/reference-erc4626: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -472,12 +494,12 @@ importers: examples/reference-erc721: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -518,15 +540,15 @@ importers: '@hono/trpc-server': specifier: ^0.3.2 version: 0.3.2(@trpc/server@10.45.2)(hono@4.5.0) - '@ponder/core': - specifier: workspace:* - version: link:../../../packages/core '@trpc/server': specifier: ^10.45.2 version: 10.45.2 hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -561,12 +583,12 @@ importers: 
examples/with-foundry/ponder: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -643,12 +665,12 @@ importers: examples/with-nextjs/ponder: dependencies: - '@ponder/core': - specifier: workspace:* - version: link:../../../packages/core hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -679,15 +701,15 @@ importers: '@hono/trpc-server': specifier: ^0.3.2 version: 0.3.2(@trpc/server@10.45.2)(hono@4.5.0) - '@ponder/core': - specifier: workspace:* - version: link:../../../packages/core '@trpc/server': specifier: ^10.45.2 version: 10.45.2 hono: specifier: ^4.5.0 version: 4.5.0 + ponder: + specifier: workspace:* + version: link:../../../packages/core viem: specifier: ^2.21.3 version: 2.21.3(typescript@5.3.3)(zod@3.23.8) @@ -1723,9 +1745,6 @@ packages: resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} engines: {node: '>=12'} - '@drizzle-team/brocli@0.10.1': - resolution: {integrity: sha512-AHy0vjc+n/4w/8Mif+w86qpppHuF3AyXbcWW+R/W7GNA3F5/p2nuhlkCJaTXSLZheB4l1rtHzOfr9A7NwoR/Zg==} - '@electric-sql/pglite@0.2.10': resolution: {integrity: sha512-0TJF/1ouBweCtyZC4oHwx+dHGn/lP16KfEO/3q22RDuZUsV2saTuYAwb6eK3gBLzVdXG4dj4xZilvmBYEM/WQg==} @@ -4452,10 +4471,6 @@ packages: resolution: {integrity: sha512-VjI4wsJjk3hSqHSa3TwBf+uvH6M6pRHyxyoVbt935GUzP9tUR/BRZ+MhEJNgryqbzN2Za1KP0eJMTgKEPsalYQ==} hasBin: true - drizzle-kit@0.25.0: - resolution: {integrity: sha512-Rcf0nYCAKizwjWQCY+d3zytyuTbDb81NcaPor+8NebESlUz1+9W3uGl0+r9FhU4Qal5Zv9j/7neXCSCe7DHzjA==} - hasBin: true - drizzle-orm@0.34.1: resolution: {integrity: sha512-t+zCwyWWt8xTqtYV4doE/xYmT7hpv1L8pQ66zddEz+3VWyedBBtctjMAp22mAJPfyWurRQXUJ1nrTtqLq+DqNA==} peerDependencies: @@ -9809,8 +9824,6 @@ snapshots: dependencies: '@jridgewell/trace-mapping': 0.3.9 - '@drizzle-team/brocli@0.10.1': {} - '@electric-sql/pglite@0.2.10': {} '@envelop/core@5.0.2': @@ -12600,15 +12613,6 @@ snapshots: transitivePeerDependencies: - supports-color - drizzle-kit@0.25.0: - dependencies: - '@drizzle-team/brocli': 0.10.1 - '@esbuild-kit/esm-loader': 2.6.5 - esbuild: 0.19.11 - esbuild-register: 3.6.0(esbuild@0.19.11) - transitivePeerDependencies: - - supports-color - drizzle-orm@0.34.1(@electric-sql/pglite@0.2.10)(@opentelemetry/api@1.7.0)(@types/pg@8.10.9)(@types/react@18.2.46)(kysely@0.26.3)(pg@8.11.3)(react@18.2.0): optionalDependencies: '@electric-sql/pglite': 0.2.10