diff --git a/.github/dependabot.yml b/.github/dependabot.yml index e48d4303f1..577198d399 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -15,4 +15,4 @@ updates: labels: - "dependencies" commit-message: - prefix: "bot" \ No newline at end of file + prefix: "bot" diff --git a/.github/workflows/build-dependencies.yml b/.github/workflows/build-dependencies.yml index 112f847192..0bed67de09 100644 --- a/.github/workflows/build-dependencies.yml +++ b/.github/workflows/build-dependencies.yml @@ -37,7 +37,7 @@ jobs: - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: - go-version: "1.20" + go-version: "1.21" check-latest: true - name: Build all dependencies diff --git a/.github/workflows/build-then-deploy-ami.yml b/.github/workflows/build-then-deploy-ami.yml index ce6be1e0bd..4423d70659 100644 --- a/.github/workflows/build-then-deploy-ami.yml +++ b/.github/workflows/build-then-deploy-ami.yml @@ -109,7 +109,7 @@ jobs: - name: Terraform validation run: terraform validate -no-color - - name: List workspaces + - name: List workspaces run: ls workspaces - name: Terraform Apply diff --git a/.github/workflows/check-vulnerabilities.yml b/.github/workflows/check-vulnerabilities.yml index 18e5f60de8..67d806ab6e 100644 --- a/.github/workflows/check-vulnerabilities.yml +++ b/.github/workflows/check-vulnerabilities.yml @@ -33,7 +33,7 @@ jobs: - name: Run govulncheck uses: golang/govulncheck-action@v1 with: - go-version-input: "1.20" + go-version-input: "1.21" go-package: ./... check-latest: true cache: true diff --git a/.github/workflows/combine-bot-prs.yml b/.github/workflows/combine-bot-prs.yml index 59a7910ced..abe21143d8 100644 --- a/.github/workflows/combine-bot-prs.yml +++ b/.github/workflows/combine-bot-prs.yml @@ -22,10 +22,10 @@ on: default: 'dependabot' mustBeGreen: - description: 'Only combine PRs that are green (status is success). Set to false if repo does not run checks' + description: 'Only combine PRs that are green (status is success). Keep false if repo does not run checks' type: boolean required: true - default: true + default: false combineBranchName: description: 'Name of the branch to combine PRs into' @@ -35,7 +35,7 @@ on: ignoreLabel: description: 'Exclude PRs with this label' required: true - default: 'nocombine' + default: 'DO NOT MERGE' jobs: combine-bot-prs: @@ -44,12 +44,12 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/github-script@v6 + - name: Set current date as env variable + run: echo "CURRENT_DATE=$(date +'%d-%m-%Y')" >> ${GITHUB_ENV} + - name: Create combined pr id: create-combined-pr - - name: Create combined pr - + uses: actions/github-script@v6 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | @@ -66,7 +66,7 @@ jobs: if (branch.startsWith('${{ github.event.inputs.branchPrefix }}')) { console.log('Branch matched prefix: ' + branch); let statusOK = true; - if(${{ github.event.inputs.mustBeGreen }}) { + if (${{ github.event.inputs.mustBeGreen }}) { console.log('Checking green status: ' + branch); const stateQuery = `query($owner: String!, $repo: String!, $pull_number: Int!) 
{ repository(owner: $owner, name: $repo) { @@ -92,17 +92,17 @@ jobs: const [{ commit }] = result.repository.pullRequest.commits.nodes; const state = commit.statusCheckRollup.state console.log('Validating status: ' + state); - if(state != 'SUCCESS') { + if (state != 'SUCCESS') { console.log('Discarding ' + branch + ' with status ' + state); statusOK = false; } } console.log('Checking labels: ' + branch); const labels = pull['labels']; - for(const label of labels) { + for (const label of labels) { const labelName = label['name']; console.log('Checking label: ' + labelName); - if(labelName == '${{ github.event.inputs.ignoreLabel }}') { + if (labelName == '${{ github.event.inputs.ignoreLabel }}') { console.log('Discarding ' + branch + ' with label ' + labelName); statusOK = false; } @@ -110,7 +110,10 @@ jobs: if (statusOK) { console.log('Adding branch to array: ' + branch); const prString = '#' + pull['number'] + ' ' + pull['title']; - branchesAndPRStrings.push({ branch, prString }); + branchesAndPRStrings.push({ + branch, + prString + }); baseBranch = pull['base']['ref']; baseBranchSHA = pull['base']['sha']; } @@ -135,7 +138,7 @@ jobs: let combinedPRs = []; let mergeFailedPRs = []; - for(const { branch, prString } of branchesAndPRStrings) { + for (const { branch, prString } of branchesAndPRStrings) { try { await github.rest.repos.merge({ owner: context.repo.owner, @@ -153,15 +156,15 @@ jobs: console.log('Creating combined PR'); const combinedPRsString = combinedPRs.join('\n'); - let body = '✅ This PR was created by the Combine PRs action by combining the following PRs:\n' + combinedPRsString; - if(mergeFailedPRs.length > 0) { + let body = '✅ This PR was created by combining the following PRs:\n' + combinedPRsString; + if (mergeFailedPRs.length > 0) { const mergeFailedPRsString = mergeFailedPRs.join('\n'); body += '\n\n⚠️ The following PRs were left out due to merge conflicts:\n' + mergeFailedPRsString } await github.rest.pulls.create({ owner: context.repo.owner, repo: context.repo.repo, - title: 'bot: Combined PRs', + title: 'bot: Update dependencies (bulk dependabot PRs) ${{ env.CURRENT_DATE }}', head: '${{ github.event.inputs.combineBranchName }}', base: baseBranch, body: body diff --git a/.github/workflows/lint-then-benchmark.yml b/.github/workflows/lint-then-benchmark.yml index 015c8725c2..c984cce3ef 100644 --- a/.github/workflows/lint-then-benchmark.yml +++ b/.github/workflows/lint-then-benchmark.yml @@ -57,7 +57,7 @@ jobs: - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: - go-version: "1.20" + go-version: "1.21" check-latest: true - name: Run the golangci-lint @@ -270,7 +270,8 @@ jobs: github.event_name == 'pull_request' && github.base_ref == 'develop' run: > - ${GOPATH}/bin/benchstat -html -alpha 1.1 develop.txt current.txt | sed -n "/<body>/,/<\/body>/p" > comparison.html && + ${GOPATH}/bin/benchstat -html -alpha 1.1 develop.txt current.txt | + sed -n "/<body>/,/<\/body>/p" > comparison.html && ./tools/scripts/pretty-benchstat-html.sh comparison.html > pretty-comparison.md - name: Comment Benchmark Results on PR diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index a08db5565a..b482903cad 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -24,8 +24,8 @@ permissions: contents: read jobs: - lint: - name: Lint job + lint-go: + name: Lint GoLang job runs-on: ubuntu-latest @@ -36,12 +36,11 @@ jobs: - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: - go-version: "1.20" + go-version: "1.21" check-latest: true - - name: 
Check linting through golangci-lint + - name: Run golangci-lint linter uses: golangci/golangci-lint-action@v3 - with: # Required: the version of golangci-lint is required. # Note: The version should not pick the patch version as the latest patch @@ -68,3 +67,18 @@ jobs: # anyways so there shouldn't be any linter errors anyways. The enforces us to # always have a clean lint state. only-new-issues: false + + lint-yaml: + name: Lint YAML job + + runs-on: ubuntu-latest + + steps: + - name: Checkout code into the directory + uses: actions/checkout@v3 + + - name: Run yamllint linter + uses: ibiqlik/action-yamllint@v3 + with: + config_file: tools/configs/yamllint.yaml + file_or_dir: . diff --git a/.github/workflows/preview-ami-with-terraform-plan.yml b/.github/workflows/preview-ami-with-terraform-plan.yml index ed2fef6f0c..25e975a247 100644 --- a/.github/workflows/preview-ami-with-terraform-plan.yml +++ b/.github/workflows/preview-ami-with-terraform-plan.yml @@ -131,5 +131,5 @@ jobs: if: steps.terraform-plan.outcome == 'failure' run: exit 1 - - name: List workspaces + - name: List workspaces run: ls workspaces diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index bfcdf9666f..6dd1a0ec00 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -28,79 +28,88 @@ jobs: matrix: os: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.os }} + steps: - name: Checkout code into the directory uses: actions/checkout@v3 with: fetch-depth: 0 + - name: Setup Go environment explicitly + uses: actions/setup-go@v3 + with: + go-version: "1.21" + check-latest: true + cache: true + - name: Apply tag run: git tag ${{ github.event.inputs.tag }} - + - name: Build modules run: make deps:modules - + - name: Set up QEMU - uses: docker/setup-qemu-action@v2 if: matrix.os == 'ubuntu-latest' + uses: docker/setup-qemu-action@v2 - name: Log in to Docker Hub - uses: docker/login-action@v2 if: matrix.os == 'ubuntu-latest' + uses: docker/login-action@v2 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Log in to the Container registry - uses: docker/login-action@v2 if: matrix.os == 'ubuntu-latest' + uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 + + - name: Run command to get SHA environment + shell: bash + run: echo "sha_short=$(git rev-parse --short HEAD)" >> ${GITHUB_ENV} + + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v5 with: - go-version: "1.20" - check-latest: true - cache: true + distribution: goreleaser-pro + version: latest + args: release --clean --split ${{ env.flags }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_REPOSITORY: ${{ github.repository }} + GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }} - - shell: bash - run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV - - uses: actions/cache@v4 + - name: Save cache on Linux if: matrix.os == 'ubuntu-latest' + uses: actions/cache/save@v4 with: path: dist/linux_amd64 key: linux-${{ env.sha_short }} - - uses: actions/cache@v4 + + - name: Save cache on MacOS if: matrix.os == 'macos-latest' + uses: actions/cache/save@v4 with: path: dist/darwin_amd64 key: darwin-${{ env.sha_short }} - - uses: actions/cache@v4 + + - name: Save cache on Windows if: matrix.os == 'windows-latest' + uses: actions/cache/save@v4 with: path: dist/windows_amd64 key: windows-${{ env.sha_short }} 
enableCrossOsArchive: true - - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v5 - with: - distribution: goreleaser-pro - version: latest - args: release --clean --split ${{ env.flags }} - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - GITHUB_REPOSITORY: ${{ github.repository }} - GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }} - release: runs-on: ubuntu-latest needs: prepare steps: - - uses: actions/checkout@v3 + - name: Checkout code into the directory + uses: actions/checkout@v3 with: fetch-depth: 0 @@ -110,37 +119,57 @@ jobs: - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: - go-version: "1.20" + go-version: "1.21" check-latest: true cache: true - + - name: Log in to Docker Hub uses: docker/login-action@v2 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - # copy the cashes from prepare - - shell: bash - run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV - - uses: actions/cache@v4 + - name: Run command to get SHA environment + shell: bash + run: echo "sha_short=$(git rev-parse --short HEAD)" >> ${GITHUB_ENV} + + # Restore the caches that were prepared for all OS + - name: Restore from cache on Linux + id: restore-linux + uses: actions/cache/restore@v4 with: path: dist/linux_amd64 key: linux-${{ env.sha_short }} - - uses: actions/cache@v4 + fail-on-cache-miss: true + + - name: Restore from cache on MacOS + id: restore-macos + uses: actions/cache/restore@v4 with: path: dist/darwin_amd64 key: darwin-${{ env.sha_short }} - - uses: actions/cache@v4 + fail-on-cache-miss: true + + - name: Restore from cache on Windows + id: restore-windows + uses: actions/cache/restore@v4 with: path: dist/windows_amd64 key: windows-${{ env.sha_short }} + fail-on-cache-miss: true enableCrossOsArchive: true + # Technically the following should never happen as we are using the `fail-on-cache-miss=true` + # so it would fail before reaching here, but leaving for now in case the option is removed.
+ - name: Exit if failed to restore cache for any OS + if: | + steps.restore-linux.outputs.cache-hit != 'true' || + steps.restore-macos.outputs.cache-hit != 'true' || + steps.restore-windows.outputs.cache-hit != 'true' + run: exit 1 - # release - - uses: goreleaser/goreleaser-action@v5 - if: steps.cache.outputs.cache-hit != 'true' # do not run if cache hit + - name: Do the release, only if all OS caches were restored + uses: goreleaser/goreleaser-action@v5 with: distribution: goreleaser-pro version: latest @@ -149,7 +178,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_REPOSITORY: ${{ github.repository }} GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }} - + pull-docker-image: name: Pull docker image job runs-on: ubuntu-latest @@ -168,9 +197,9 @@ jobs: with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - + - name: Pull Docker image run: docker pull ${{ matrix.image_tag }} - name: Test Docker image - run: docker run --rm ${{ matrix.image_tag }} \ No newline at end of file + run: docker run --rm ${{ matrix.image_tag }} diff --git a/.github/workflows/start-binary.yml b/.github/workflows/start-binary.yml index 267466b8a3..35fea3c022 100644 --- a/.github/workflows/start-binary.yml +++ b/.github/workflows/start-binary.yml @@ -37,7 +37,7 @@ jobs: - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: - go-version: "1.20" + go-version: "1.21" check-latest: true - name: Build modules diff --git a/.github/workflows/test-and-upload-coverage.yml b/.github/workflows/test-and-upload-coverage.yml index de7c9df848..60858b1f86 100644 --- a/.github/workflows/test-and-upload-coverage.yml +++ b/.github/workflows/test-and-upload-coverage.yml @@ -75,7 +75,7 @@ jobs: - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: - go-version: "1.20" + go-version: "1.21" check-latest: true - name: Build dependencies @@ -105,7 +105,7 @@ jobs: needs: run-tests - # Important to know: + # Important to know: # - We didn't use `if: always()` here, so this job doesn't run if we manually canceled. # - `if: success()` is always implied unless `always()` or `failure()` is specified. if: success() || failure() diff --git a/.github/workflows/validate-containerfile.yml b/.github/workflows/validate-containerfile.yml index b3315861ad..260e0dba89 100644 --- a/.github/workflows/validate-containerfile.yml +++ b/.github/workflows/validate-containerfile.yml @@ -54,4 +54,3 @@ jobs: - name: Test Docker image run: docker run --rm ${{ env.TEST_TAG }} - diff --git a/.gitignore b/.gitignore index 81c1a16d62..40eac1780c 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,12 @@ tests/lenses/rust_wasm32_remove/pkg tests/lenses/rust_wasm32_copy/Cargo.lock tests/lenses/rust_wasm32_copy/target tests/lenses/rust_wasm32_copy/pkg +tests/lenses/rust_wasm32_prepend/Cargo.lock +tests/lenses/rust_wasm32_prepend/target +tests/lenses/rust_wasm32_prepend/pkg +tests/lenses/rust_wasm32_filter/Cargo.lock +tests/lenses/rust_wasm32_filter/target +tests/lenses/rust_wasm32_filter/pkg # Ignore OS X metadata files. .history diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 119447d180..05f201200f 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -15,7 +15,7 @@ builds: goarch: - amd64 - arm64 - # A build with the playground included. + # A build with the playground included. 
- id: "defradb_playground" main: ./cmd/defradb flags: @@ -27,30 +27,44 @@ builds: goarch: - amd64 - arm64 - + partial: by: target archives: - id: defradb_playground - builds: + builds: - defradb_playground format: binary # this name template makes the OS and Arch compatible with the results of `uname`. - name_template: '{{ .Binary }}_playground_{{ .Version }}_{{ .Os }}_{{- if eq .Arch "amd64" }}x86_64{{- else }}{{ .Arch }}{{ end }}{{- if .Arm }}v{{ .Arm }}{{ end }}' + name_template: >- + {{ .Binary }}_playground_{{ .Version }}_{{ .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else }}{{ .Arch }}{{ end }} + {{- if .Arm }}v{{ .Arm }}{{ end }} - id: defradb - builds: + builds: - defradb format: binary # this name template makes the OS and Arch compatible with the results of `uname`. - name_template: '{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{- if eq .Arch "amd64" }}x86_64{{- else }}{{ .Arch }}{{ end }}{{- if .Arm }}v{{ .Arm }}{{ end }}' + name_template: >- + {{ .Binary }}_{{ .Version }}_{{ .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else }}{{ .Arch }}{{ end }} + {{- if .Arm }}v{{ .Arm }}{{ end }} release: target_commitish: '{{ .Commit }}' - header: | - DefraDB v{{ .Major }}.{{ .Minor }} is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + header: > + DefraDB v{{ .Major }}.{{ .Minor }} is a major pre-production release. + Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, + which will give the project freedom to experiment and explore potentially breaking changes. + + To get a full outline of the changes, we invite you to review the official changelog below. + This release does include a Breaking Change to existing v{{ .Major }}.{{ .Minor }}.x databases. + If you need help migrating an existing deployment, reach out at hello@source.network or join + our Discord at https://discord.gg/w7jYQVJ/. - To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v{{ .Major }}.{{ .Minor }}.x databases. If you need help migrating an existing deployment, reach out at hello@source.network or join our Discord at https://discord.gg/w7jYQVJ/. name_template: "v{{ .Version }} Release" changelog: @@ -85,21 +99,21 @@ milestones: name_template: "DefraDB v{{ .Major }}.{{ .Minor }}" dockers: -- ids: - - "defradb_playground" - image_templates: - - "{{ .Env.GITHUB_REPOSITORY }}:latest" - - "{{ .Env.GITHUB_REPOSITORY }}:{{ .Version }}" - - "ghcr.io/{{ .Env.GITHUB_REPOSITORY }}:{{ .Version }}" - use: buildx - build_flag_templates: - - "--pull" - - "--label=org.opencontainers.image.description=DefraDB is a Peer-to-Peer Edge Database." 
- - "--label=org.opencontainers.image.created={{ .Date }}" - - "--label=org.opencontainers.image.name={{ .ProjectName }}" - - "--label=org.opencontainers.image.revision={{ .FullCommit }}" - - "--label=org.opencontainers.image.version={{ .Version }}" - - "--label=org.opencontainers.image.source={{ .GitURL }}" - - "--platform=linux/amd64" - dockerfile: ./tools/goreleaser.containerfile - + - ids: + - "defradb_playground" + image_templates: + - "{{ .Env.GITHUB_REPOSITORY }}:latest" + - "{{ .Env.GITHUB_REPOSITORY }}:{{ .Version }}" + - "ghcr.io/{{ .Env.GITHUB_REPOSITORY }}:latest" + - "ghcr.io/{{ .Env.GITHUB_REPOSITORY }}:{{ .Version }}" + use: buildx + build_flag_templates: + - "--pull" + - "--label=org.opencontainers.image.description=DefraDB is a Peer-to-Peer Edge Database." + - "--label=org.opencontainers.image.created={{ .Date }}" + - "--label=org.opencontainers.image.name={{ .ProjectName }}" + - "--label=org.opencontainers.image.revision={{ .FullCommit }}" + - "--label=org.opencontainers.image.version={{ .Version }}" + - "--label=org.opencontainers.image.source={{ .GitURL }}" + - "--platform=linux/amd64" + dockerfile: ./tools/goreleaser.containerfile diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fa2ed67e2..342cfb3a53 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,71 @@ + +## [v0.10.0](https://github.com/sourcenetwork/defradb/compare/v0.9.0...v0.10.0) + +> 2024-03-08 + +DefraDB v0.10 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.9.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. + +### Features + +* Add case insensitive `like` operator ([#2368](https://github.com/sourcenetwork/defradb/issues/2368)) +* Reverted order for indexed fields ([#2335](https://github.com/sourcenetwork/defradb/issues/2335)) +* Rework GetCollection/SchemaByFoo funcs into single ([#2319](https://github.com/sourcenetwork/defradb/issues/2319)) +* Add support for views with Lens transforms ([#2311](https://github.com/sourcenetwork/defradb/issues/2311)) +* Model Col. 
SchemaVersions and migrations on Cols ([#2286](https://github.com/sourcenetwork/defradb/issues/2286)) +* Replace FieldDescription.RelationType with IsPrimary ([#2288](https://github.com/sourcenetwork/defradb/issues/2288)) +* Multiple docs with nil value on unique-indexed field ([#2276](https://github.com/sourcenetwork/defradb/issues/2276)) +* Allow setting null values on doc fields ([#2273](https://github.com/sourcenetwork/defradb/issues/2273)) +* Add JSON scalar ([#2254](https://github.com/sourcenetwork/defradb/issues/2254)) +* Generate OpenAPI command ([#2235](https://github.com/sourcenetwork/defradb/issues/2235)) +* Add composite indexes ([#2226](https://github.com/sourcenetwork/defradb/issues/2226)) + +### Fixes + +* Add `latest` image tag for ghcr ([#2340](https://github.com/sourcenetwork/defradb/issues/2340)) +* Move field id off of schema ([#2336](https://github.com/sourcenetwork/defradb/issues/2336)) +* Make returned collections respect explicit transactions ([#2385](https://github.com/sourcenetwork/defradb/issues/2385)) +* Update GetCollections behaviour ([#2378](https://github.com/sourcenetwork/defradb/issues/2378)) +* Add missing directive definitions ([#2369](https://github.com/sourcenetwork/defradb/issues/2369)) +* Add validation to JSON fields ([#2375](https://github.com/sourcenetwork/defradb/issues/2375)) +* Make peers sync secondary index ([#2390](https://github.com/sourcenetwork/defradb/issues/2390)) +* Load root dir before loading config ([#2266](https://github.com/sourcenetwork/defradb/issues/2266)) +* Mark docs as deleted when querying in delete mut ([#2298](https://github.com/sourcenetwork/defradb/issues/2298)) +* Add missing logs at startup ([#2391](https://github.com/sourcenetwork/defradb/issues/2391)) +* Add missing delta payload ([#2306](https://github.com/sourcenetwork/defradb/issues/2306)) +* Fix compound relational filters in aggregates ([#2297](https://github.com/sourcenetwork/defradb/issues/2297)) + +### Refactoring + +* Generate field ids using a sequence ([#2339](https://github.com/sourcenetwork/defradb/issues/2339)) +* Make config internal to CLI ([#2310](https://github.com/sourcenetwork/defradb/issues/2310)) +* Node config ([#2296](https://github.com/sourcenetwork/defradb/issues/2296)) +* HTTP config ([#2278](https://github.com/sourcenetwork/defradb/issues/2278)) +* Remove unused Delete field from client.Document ([#2275](https://github.com/sourcenetwork/defradb/issues/2275)) +* Decouple net config ([#2258](https://github.com/sourcenetwork/defradb/issues/2258)) +* Make CollectionDescription.Name Option ([#2223](https://github.com/sourcenetwork/defradb/issues/2223)) + +### Chore + +* Bump to GoLang v1.21 ([#2195](https://github.com/sourcenetwork/defradb/issues/2195)) + +### Bot + +* Update dependencies (bulk dependabot PRs) 05-02-2024 ([#2372](https://github.com/sourcenetwork/defradb/issues/2372)) +* Update dependencies (bulk dependabot PRs) 02-27-2024 ([#2353](https://github.com/sourcenetwork/defradb/issues/2353)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.21.0 to 7.0.1 in /playground ([#2331](https://github.com/sourcenetwork/defradb/issues/2331)) +* Bump google.golang.org/grpc from 1.61.0 to 1.61.1 ([#2320](https://github.com/sourcenetwork/defradb/issues/2320)) +* Update dependencies (bulk dependabot PRs) 2024-02-19 ([#2330](https://github.com/sourcenetwork/defradb/issues/2330)) +* Bump vite from 5.1.1 to 5.1.2 in /playground ([#2317](https://github.com/sourcenetwork/defradb/issues/2317)) +* Bump golang.org/x/net from 
0.20.0 to 0.21.0 ([#2301](https://github.com/sourcenetwork/defradb/issues/2301)) +* Update dependencies (bulk dependabot PRs) 2023-02-14 ([#2313](https://github.com/sourcenetwork/defradb/issues/2313)) +* Update dependencies (bulk dependabot PRs) 02-07-2024 ([#2294](https://github.com/sourcenetwork/defradb/issues/2294)) +* Update dependencies (bulk dependabot PRs) 30-01-2024 ([#2270](https://github.com/sourcenetwork/defradb/issues/2270)) +* Update dependencies (bulk dependabot PRs) 23-01-2024 ([#2252](https://github.com/sourcenetwork/defradb/issues/2252)) +* Bump vite from 5.0.11 to 5.0.12 in /playground ([#2236](https://github.com/sourcenetwork/defradb/issues/2236)) +* Bump github.com/evanphx/json-patch/v5 from 5.7.0 to 5.8.1 ([#2233](https://github.com/sourcenetwork/defradb/issues/2233)) + ## [v0.9.0](https://github.com/sourcenetwork/defradb/compare/v0.8.0...v0.9.0) diff --git a/Makefile b/Makefile index 0ddde9790f..cde535be4b 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,33 @@ ifndef VERBOSE MAKEFLAGS+=--no-print-directory endif +# Detect OS (`Linux`, `Darwin`, `Windows`) +# Note: can use `lsb_release --id --short` for more specific linux distro information. +OS_GENERAL := Unknown +ifeq ($(OS),Windows_NT) + OS_GENERAL := Windows +else + OS_GENERAL := $(shell sh -c 'uname 2>/dev/null || echo Unknown') +endif + +# Detect OS specific package manager if possible (`apt`, `yum`, `pacman`, `brew`, `choco`) +OS_PACKAGE_MANAGER := Unknown +ifeq ($(OS_GENERAL),Linux) + ifneq ($(shell which apt 2>/dev/null),) + OS_PACKAGE_MANAGER := apt + else ifneq ($(shell which yum 2>/dev/null),) + OS_PACKAGE_MANAGER := yum + else ifneq ($(shell which pacman 2>/dev/null),) + OS_PACKAGE_MANAGER := pacman + else ifneq ($(shell which dnf 2>/dev/null),) + OS_PACKAGE_MANAGER := dnf + endif +else ifeq ($(OS_GENERAL),Darwin) + OS_PACKAGE_MANAGER := brew +else ifeq ($(OS_GENERAL),Windows) + OS_PACKAGE_MANAGER := choco +endif + # Provide info from git to the version package using linker flags. ifeq (, $(shell which git)) $(error "No git in $(PATH), version information won't be included") @@ -18,6 +45,15 @@ else VERSION_GITRELEASE=$(shell git describe --tags) endif +$(info ----------------------------------------); +$(info OS = $(OS_GENERAL)); +$(info PACKAGE_MANAGER = $(OS_PACKAGE_MANAGER)); +$(info GOINFO = $(VERSION_GOINFO)); +$(info GITCOMMIT = $(VERSION_GITCOMMIT)); +$(info GITCOMMITDATE = $(VERSION_GITCOMMITDATE)); +$(info GITRELEASE = $(VERSION_GITRELEASE)); +$(info ----------------------------------------); + BUILD_FLAGS=-trimpath -ldflags "\ -X 'github.com/sourcenetwork/defradb/version.GoInfo=$(VERSION_GOINFO)'\ -X 'github.com/sourcenetwork/defradb/version.GitRelease=$(VERSION_GITRELEASE)'\ @@ -36,9 +72,8 @@ COVERAGE_FILE=coverage.txt COVERAGE_FLAGS=-covermode=atomic -coverpkg=./... -args -test.gocoverdir=$(COVERAGE_DIRECTORY) PLAYGROUND_DIRECTORY=playground -LENS_TEST_DIRECTORY=tests/integration/schema/migrations CHANGE_DETECTOR_TEST_DIRECTORY=tests/change_detector -DEFAULT_TEST_DIRECTORIES=$$(go list ./... | grep -v -e $(LENS_TEST_DIRECTORY)) +DEFAULT_TEST_DIRECTORIES=./... default: @go run $(BUILD_FLAGS) cmd/defradb/main.go @@ -47,6 +82,15 @@ default: install: @go install $(BUILD_FLAGS) ./cmd/defradb +.PHONY: install\:manpages +install\:manpages: +ifeq ($(OS_GENERAL),Linux) + cp build/man/* /usr/share/man/man1/ +endif +ifneq ($(OS_GENERAL),Linux) + @echo "Direct installation of Defradb's man pages is not supported on your system." +endif + # Usage: # - make build # - make build path="path/to/defradb-binary" @@ -78,16 +122,27 @@ client\:dump: client\:add-schema: ./build/defradb client schema add -f examples/schema/bookauthpub.graphql +.PHONY: deps\:lint-go +deps\:lint-go: + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54 + +.PHONY: deps\:lint-yaml +deps\:lint-yaml: +ifeq (, $(shell which yamllint)) + $(info YAML linter 'yamllint' not found on the system, please install it.) + $(info Can try using your local package manager: $(OS_PACKAGE_MANAGER)) +else + $(info YAML linter 'yamllint' already installed.) +endif + .PHONY: deps\:lint deps\:lint: - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54 + @$(MAKE) deps:lint-go && \ + $(MAKE) deps:lint-yaml .PHONY: deps\:test deps\:test: go install gotest.tools/gotestsum@latest - -.PHONY: deps\:lens -deps\:lens: rustup target add wasm32-unknown-unknown @$(MAKE) -C ./tests/lenses build @@ -147,7 +202,7 @@ verify: .PHONY: tidy tidy: - go mod tidy -go=1.20 + go mod tidy -go=1.21 .PHONY: clean clean: @@ -217,26 +272,6 @@ test\:cli: test\:names: gotestsum --format testname -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) -.PHONY: test\:lens -test\:lens: - @$(MAKE) deps:lens - gotestsum --format testname -- ./$(LENS_TEST_DIRECTORY)/... $(TEST_FLAGS) - -.PHONY: test\:lens-quick -test\:lens-quick: - @$(MAKE) deps:lens - gotestsum --format testname -- ./$(LENS_TEST_DIRECTORY)/... - -.PHONY: test\:all -test\:all: - @$(MAKE) test:names - @$(MAKE) test:lens - -.PHONY: test\:all-quick -test\:all-quick: - @$(MAKE) test:quick - @$(MAKE) test:lens-quick - .PHONY: test\:verbose test\:verbose: gotestsum --format standard-verbose -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) @@ -263,7 +298,6 @@ test\:scripts: .PHONY: test\:coverage test\:coverage: - @$(MAKE) deps:lens @$(MAKE) clean:coverage mkdir $(COVERAGE_DIRECTORY) ifeq ($(path),) @@ -300,6 +334,7 @@ validate\:circleci: .PHONY: lint lint: golangci-lint run --config tools/configs/golangci.yaml + yamllint -c tools/configs/yamllint.yaml . .PHONY: lint\:fix lint\:fix: @@ -334,13 +369,3 @@ docs\:manpages: docs\:godoc: godoc -http=:6060 # open http://localhost:6060/pkg/github.com/sourcenetwork/defradb/ - -detectedOS := $(shell uname) -.PHONY: install\:manpages -install\:manpages: -ifeq ($(detectedOS),Linux) - cp build/man/* /usr/share/man/man1/ -endif -ifneq ($(detectedOS),Linux) - @echo "Direct installation of Defradb's man pages is not supported on your system." -endif diff --git a/README.md b/README.md index 0e5997b902..a7156888b9 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,7 @@ Read the documentation on [docs.source.network](https://docs.source.network/). - [Install](#install) - [Start](#start) - [Configuration](#configuration) +- [External port binding](#external-port-binding) - [Add a schema type](#add-a-schema-type) - [Create a document instance](#create-a-document-instance) - [Query documents](#query-documents) @@ -72,6 +73,16 @@ In this document, we use the default configuration, which has the following beha The GraphQL endpoint can be used with a GraphQL client (e.g., Altair) to conveniently perform requests (`query`, `mutation`) and obtain schema introspection. +Read more about the configuration [here](./docs/config.md). + +## External port binding + +By default, the HTTP API and P2P network will use localhost. If you want to expose the ports externally, you need to specify the addresses in the config or command line parameters.
+ +``` +defradb start --p2paddr /ip4/0.0.0.0/tcp/9171 --url 0.0.0.0:9181 +``` + ## Add a schema type Schemas are used to structure documents using a type system. @@ -235,7 +246,6 @@ DQL is compatible with GraphQL but features various extensions. Read its documentation at [docs.source.network](https://docs.source.network/references/query-specification/query-language-overview) to discover its filtering, ordering, limiting, relationships, variables, aggregate functions, and other useful features. - ## Peer-to-peer data synchronization DefraDB leverages peer-to-peer networking for data exchange, synchronization, and replication of documents and commits. @@ -280,14 +290,14 @@ In this example, we use `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`, For *nodeB*, we provide the following configuration: ```shell -defradb start --rootdir ~/.defradb-nodeB --url localhost:9182 --p2paddr /ip4/0.0.0.0/tcp/9172 --peers /ip4/0.0.0.0/tcp/9171/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B +defradb start --rootdir ~/.defradb-nodeB --url localhost:9182 --p2paddr /ip4/127.0.0.1/tcp/9172 --peers /ip4/127.0.0.1/tcp/9171/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B ``` About the flags: - `--rootdir` specifies the root dir (config and data) to use - `--url` is the address to listen on for the client HTTP and GraphQL API -- `--p2paddr` is the multiaddress for the P2P networking to listen on +- `--p2paddr` is a comma-separated list of multiaddresses to listen on for p2p networking - `--peers` is a comma-separated list of peer multiaddresses This starts two nodes and connects them via pubsub networking. @@ -387,16 +397,6 @@ defradb start --tls --pubkeypath ~/path-to-pubkey.key --privkeypath ~/path-to-pr ``` -DefraDB also comes with automatic HTTPS for deployments on the public web. To enable HTTPS, - deploy DefraDB to a server with both port 80 and port 443 open. With your domain's DNS A record - pointed to the IP of your server, you can run the database using the following command: -```shell -sudo defradb start --tls --url=your-domain.net --email=email@example.com -``` -Note: `sudo` is needed above for the redirection server (to bind port 80). - -A valid email address is necessary for the creation of the certificate, and is important to get notifications from the Certificate Authority - in case the certificate is about to expire, etc. - ## Supporting CORS When accessing DefraDB through a frontend interface, you may be confronted with a CORS error. That is because, by default, DefraDB will not have any allowed origins set. 
To specify which origins should be allowed to access your DefraDB endpoint, you can specify them when starting the database: diff --git a/cli/backup_export.go b/cli/backup_export.go index 9e8d1c056e..b905bdf9c7 100644 --- a/cli/backup_export.go +++ b/cli/backup_export.go @@ -38,7 +38,7 @@ Example: export data for the 'Users' collection: defradb client export --collection Users user_data.json`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) if !isValidExportFormat(format) { return ErrInvalidExportFormat diff --git a/cli/backup_import.go b/cli/backup_import.go index 35af345a0a..56f1907643 100644 --- a/cli/backup_import.go +++ b/cli/backup_import.go @@ -24,7 +24,7 @@ Example: import data to the database: defradb client import user_data.json`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) return store.BasicImport(cmd.Context(), args[0]) }, } diff --git a/cli/cli.go b/cli/cli.go index 2ee882afce..4cdb8c443b 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -16,14 +16,13 @@ package cli import ( "github.com/spf13/cobra" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/logging" ) var log = logging.MustNewLogger("cli") // NewDefraCommand returns the root command instantiated with its tree of subcommands. -func NewDefraCommand(cfg *config.Config) *cobra.Command { +func NewDefraCommand() *cobra.Command { p2p_collection := MakeP2PCollectionCommand() p2p_collection.AddCommand( MakeP2PCollectionAddCommand(), @@ -48,7 +47,7 @@ func NewDefraCommand(cfg *config.Config) *cobra.Command { schema_migrate := MakeSchemaMigrationCommand() schema_migrate.AddCommand( MakeSchemaMigrationSetCommand(), - MakeSchemaMigrationGetCommand(), + MakeSchemaMigrationSetRegistryCommand(), MakeSchemaMigrationReloadCommand(), MakeSchemaMigrationUpCommand(), MakeSchemaMigrationDownCommand(), @@ -58,7 +57,7 @@ func NewDefraCommand(cfg *config.Config) *cobra.Command { schema.AddCommand( MakeSchemaAddCommand(), MakeSchemaPatchCommand(), - MakeSchemaSetDefaultCommand(), + MakeSchemaSetActiveCommand(), MakeSchemaDescribeCommand(), schema_migrate, ) @@ -83,12 +82,12 @@ func NewDefraCommand(cfg *config.Config) *cobra.Command { tx := MakeTxCommand() tx.AddCommand( - MakeTxCreateCommand(cfg), - MakeTxCommitCommand(cfg), - MakeTxDiscardCommand(cfg), + MakeTxCreateCommand(), + MakeTxCommitCommand(), + MakeTxDiscardCommand(), ) - collection := MakeCollectionCommand(cfg) + collection := MakeCollectionCommand() collection.AddCommand( MakeCollectionGetCommand(), MakeCollectionListDocIDsCommand(), @@ -98,7 +97,7 @@ func NewDefraCommand(cfg *config.Config) *cobra.Command { MakeCollectionDescribeCommand(), ) - client := MakeClientCommand(cfg) + client := MakeClientCommand() client.AddCommand( MakeDumpCommand(), MakeRequestCommand(), @@ -111,13 +110,12 @@ func NewDefraCommand(cfg *config.Config) *cobra.Command { collection, ) - root := MakeRootCommand(cfg) + root := MakeRootCommand() root.AddCommand( client, - MakeStartCommand(cfg), - MakeServerDumpCmd(cfg), + MakeStartCommand(), + MakeServerDumpCmd(), MakeVersionCommand(), - MakeInitCommand(cfg), ) return root diff --git a/cli/client.go b/cli/client.go index 8866294f69..532712e8f8 100644 --- a/cli/client.go +++ b/cli/client.go @@ -12,11 +12,9 @@ package cli import ( "github.com/spf13/cobra" - - "github.com/sourcenetwork/defradb/config" ) -func MakeClientCommand(cfg 
*config.Config) *cobra.Command { +func MakeClientCommand() *cobra.Command { var txID uint64 var cmd = &cobra.Command{ Use: "client", @@ -24,13 +22,16 @@ func MakeClientCommand(cfg *config.Config) *cobra.Command { Long: `Interact with a DefraDB node. Execute queries, add schema types, obtain node info, etc.`, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := loadConfig(cfg); err != nil { + if err := setContextRootDir(cmd); err != nil { + return err + } + if err := setContextConfig(cmd); err != nil { return err } - if err := setTransactionContext(cmd, cfg, txID); err != nil { + if err := setContextTransaction(cmd, txID); err != nil { return err } - return setStoreContext(cmd, cfg) + return setContextStore(cmd) }, } cmd.PersistentFlags().Uint64Var(&txID, "tx", 0, "Transaction ID") diff --git a/cli/collection.go b/cli/collection.go index 8af1839b47..23ef9194ae 100644 --- a/cli/collection.go +++ b/cli/collection.go @@ -13,86 +13,63 @@ package cli import ( "context" + "github.com/sourcenetwork/immutable" "github.com/spf13/cobra" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/datastore" ) -func MakeCollectionCommand(cfg *config.Config) *cobra.Command { +func MakeCollectionCommand() *cobra.Command { var txID uint64 var name string var schemaRoot string var versionID string + var getInactive bool var cmd = &cobra.Command{ Use: "collection [--name --schema --version ]", Short: "Interact with a collection.", Long: `Create, read, update, and delete documents within a collection.`, PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { // cobra does not chain pre run calls so we have to run them again here - if err := loadConfig(cfg); err != nil { + if err := setContextRootDir(cmd); err != nil { return err } - if err := setTransactionContext(cmd, cfg, txID); err != nil { + if err := setContextConfig(cmd); err != nil { return err } - if err := setStoreContext(cmd, cfg); err != nil { + if err := setContextTransaction(cmd, txID); err != nil { return err } - store := mustGetStoreContext(cmd) - - var col client.Collection - var cols []client.Collection - switch { - case versionID != "": - cols, err = store.GetCollectionsByVersionID(cmd.Context(), versionID) - - case schemaRoot != "": - cols, err = store.GetCollectionsBySchemaRoot(cmd.Context(), schemaRoot) - - case name != "": - col, err = store.GetCollectionByName(cmd.Context(), name) - cols = []client.Collection{col} - - default: - return nil - } - - if err != nil { + if err := setContextStore(cmd); err != nil { return err } + store := mustGetContextStore(cmd) - if schemaRoot != "" && versionID != "" && len(cols) > 0 { - if cols[0].SchemaRoot() != schemaRoot { - // If the a versionID has been provided that does not pair up with the given schema root - // we should error and let the user know they have provided impossible params. - // We only need to check the first item - they will all be the same. - return NewErrSchemaVersionNotOfSchema(schemaRoot, versionID) - } + options := client.CollectionFetchOptions{} + if versionID != "" { + options.SchemaVersionID = immutable.Some(versionID) + } + if schemaRoot != "" { + options.SchemaRoot = immutable.Some(schemaRoot) } - if name != "" { - // Multiple params may have been specified, and in some cases both are needed. - // For example if a schema version and a collection name have been provided, - // we need to ensure that a collection at the requested version is returned. 
- // Likewise we need to ensure that if a collection name and schema id are provided, - // but there are none matching both, that nothing is returned. - fetchedCols := cols - cols = nil - for _, c := range fetchedCols { - if c.Name() == name { - cols = append(cols, c) - break - } - } + options.Name = immutable.Some(name) + } + if getInactive { + options.IncludeInactive = immutable.Some(getInactive) + } + + cols, err := store.GetCollections(cmd.Context(), options) + if err != nil { + return err } if len(cols) != 1 { // If more than one collection matches the given criteria we cannot set the context collection return nil } - col = cols[0] + col := cols[0] if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { col = col.WithTxn(tx) @@ -107,5 +84,6 @@ func MakeCollectionCommand(cfg *config.Config) *cobra.Command { cmd.PersistentFlags().StringVar(&name, "name", "", "Collection name") cmd.PersistentFlags().StringVar(&schemaRoot, "schema", "", "Collection schema Root") cmd.PersistentFlags().StringVar(&versionID, "version", "", "Collection version ID") + cmd.PersistentFlags().BoolVar(&getInactive, "get-inactive", false, "Get inactive collections as well as active") return cmd } diff --git a/cli/collection_create.go b/cli/collection_create.go index 82e1e5db09..efeee61494 100644 --- a/cli/collection_create.go +++ b/cli/collection_create.go @@ -40,7 +40,7 @@ Example: create from stdin `, Args: cobra.RangeArgs(0, 1), RunE: func(cmd *cobra.Command, args []string) error { - col, ok := tryGetCollectionContext(cmd) + col, ok := tryGetContextCollection(cmd) if !ok { return cmd.Usage() } diff --git a/cli/collection_delete.go b/cli/collection_delete.go index dcd7c9d872..d1f945d9ae 100644 --- a/cli/collection_delete.go +++ b/cli/collection_delete.go @@ -31,7 +31,7 @@ Example: delete by filter defradb client collection delete --name User --filter '{ "_gte": { "points": 100 } }' `, RunE: func(cmd *cobra.Command, args []string) error { - col, ok := tryGetCollectionContext(cmd) + col, ok := tryGetContextCollection(cmd) if !ok { return cmd.Usage() } diff --git a/cli/collection_describe.go b/cli/collection_describe.go index a21c4d0c10..5d1a85ea5e 100644 --- a/cli/collection_describe.go +++ b/cli/collection_describe.go @@ -11,12 +11,17 @@ package cli import ( + "github.com/sourcenetwork/immutable" "github.com/spf13/cobra" "github.com/sourcenetwork/defradb/client" ) func MakeCollectionDescribeCommand() *cobra.Command { + var name string + var schemaRoot string + var versionID string + var getInactive bool var cmd = &cobra.Command{ Use: "describe", Short: "View collection description.", @@ -28,21 +33,33 @@ Example: view all collections Example: view collection by name defradb client collection describe --name User -Example: view collection by schema id +Example: view collection by schema root id defradb client collection describe --schema bae123 -Example: view collection by version id +Example: view collection by version id. 
This will also return inactive collections defradb client collection describe --version bae123 `, RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) - col, ok := tryGetCollectionContext(cmd) - if ok { - return writeJSON(cmd, col.Definition()) + options := client.CollectionFetchOptions{} + if versionID != "" { + options.SchemaVersionID = immutable.Some(versionID) } - // if no collection specified list all collections - cols, err := store.GetAllCollections(cmd.Context()) + if schemaRoot != "" { + options.SchemaRoot = immutable.Some(schemaRoot) + } + if name != "" { + options.Name = immutable.Some(name) + } + if getInactive { + options.IncludeInactive = immutable.Some(getInactive) + } + + cols, err := store.GetCollections( + cmd.Context(), + options, + ) if err != nil { return err } @@ -53,5 +70,9 @@ Example: view collection by version id return writeJSON(cmd, colDesc) }, } + cmd.Flags().StringVar(&name, "name", "", "Collection name") + cmd.Flags().StringVar(&schemaRoot, "schema", "", "Collection schema Root") + cmd.Flags().StringVar(&versionID, "version", "", "Collection version ID") + cmd.Flags().BoolVar(&getInactive, "get-inactive", false, "Get inactive collections as well as active") return cmd } diff --git a/cli/collection_get.go b/cli/collection_get.go index d753e0a8db..55c84d6289 100644 --- a/cli/collection_get.go +++ b/cli/collection_get.go @@ -28,7 +28,7 @@ Example: `, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - col, ok := tryGetCollectionContext(cmd) + col, ok := tryGetContextCollection(cmd) if !ok { return cmd.Usage() } diff --git a/cli/collection_list_doc_ids.go b/cli/collection_list_doc_ids.go index d7009cb300..7112a88817 100644 --- a/cli/collection_list_doc_ids.go +++ b/cli/collection_list_doc_ids.go @@ -26,7 +26,7 @@ Example: defradb client collection docIDs --name User `, RunE: func(cmd *cobra.Command, args []string) error { - col, ok := tryGetCollectionContext(cmd) + col, ok := tryGetContextCollection(cmd) if !ok { return cmd.Usage() } diff --git a/cli/collection_update.go b/cli/collection_update.go index 9fd2deed3f..42354948a9 100644 --- a/cli/collection_update.go +++ b/cli/collection_update.go @@ -38,7 +38,7 @@ Example: update by docIDs `, Args: cobra.RangeArgs(0, 1), RunE: func(cmd *cobra.Command, args []string) error { - col, ok := tryGetCollectionContext(cmd) + col, ok := tryGetContextCollection(cmd) if !ok { return cmd.Usage() } diff --git a/cli/config.go b/cli/config.go new file mode 100644 index 0000000000..bb57a8cb3d --- /dev/null +++ b/cli/config.go @@ -0,0 +1,185 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package cli + +import ( + "os" + "path/filepath" + "strings" + + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "github.com/sourcenetwork/defradb/logging" +) + +const ( + configStoreBadger = "badger" + configStoreMemory = "memory" + configLogFormatJSON = "json" + configLogFormatCSV = "csv" + configLogLevelInfo = "info" + configLogLevelDebug = "debug" + configLogLevelError = "error" + configLogLevelFatal = "fatal" +) + +// configPaths are config keys that will be made relative to the rootdir +var configPaths = []string{ + "datastore.badger.path", + "api.pubkeypath", + "api.privkeypath", +} + +// configFlags is a mapping of config keys to cli flags to bind to. +var configFlags = map[string]string{ + "log.level": "loglevel", + "log.output": "logoutput", + "log.format": "logformat", + "log.stacktrace": "logtrace", + "log.nocolor": "lognocolor", + "api.address": "url", + "datastore.maxtxnretries": "max-txn-retries", + "datastore.store": "store", + "datastore.badger.valuelogfilesize": "valuelogfilesize", + "net.peers": "peers", + "net.p2paddresses": "p2paddr", + "net.p2pdisabled": "no-p2p", + "api.allowed-origins": "allowed-origins", + "api.pubkeypath": "pubkeypath", + "api.privkeypath": "privkeypath", +} + +// defaultConfig returns a new config with default values. +func defaultConfig() *viper.Viper { + cfg := viper.New() + + cfg.AutomaticEnv() + cfg.SetEnvPrefix("DEFRA") + cfg.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")) + + cfg.SetConfigName("config") + cfg.SetConfigType("yaml") + + cfg.SetDefault("datastore.badger.path", "data") + cfg.SetDefault("net.pubSubEnabled", true) + cfg.SetDefault("net.relay", false) + cfg.SetDefault("log.caller", false) + + return cfg +} + +// createConfig writes the default config file if one does not exist. +func createConfig(rootdir string, flags *pflag.FlagSet) error { + cfg := defaultConfig() + cfg.AddConfigPath(rootdir) + + if err := bindConfigFlags(cfg, flags); err != nil { + return err + } + // make sure rootdir exists + if err := os.MkdirAll(rootdir, 0755); err != nil { + return err + } + err := cfg.SafeWriteConfig() + // error type is known and shouldn't be wrapped + // + //nolint:errorlint + if _, ok := err.(viper.ConfigFileAlreadyExistsError); ok { + return nil + } + return err +} + +// loadConfig returns a new config with values from the config in the given rootdir. +func loadConfig(rootdir string, flags *pflag.FlagSet) (*viper.Viper, error) { + cfg := defaultConfig() + cfg.AddConfigPath(rootdir) + + // attempt to read the existing config + err := cfg.ReadInConfig() + // error type is known and shouldn't be wrapped + // + //nolint:errorlint + if _, ok := err.(viper.ConfigFileNotFoundError); err != nil && !ok { + return nil, err + } + // bind cli flags to config keys + if err := bindConfigFlags(cfg, flags); err != nil { + return nil, err + } + + // make paths relative to the rootdir + for _, key := range configPaths { + path := cfg.GetString(key) + if path != "" && !filepath.IsAbs(path) { + cfg.Set(key, filepath.Join(rootdir, path)) + } + } + + logCfg := loggingConfig(cfg.Sub("log")) + logCfg.OverridesByLoggerName = make(map[string]logging.Config) + + // apply named logging overrides + for key := range cfg.GetStringMap("log.overrides") { + logCfg.OverridesByLoggerName[key] = loggingConfig(cfg.Sub("log.overrides." + key)) + } + logging.SetConfig(logCfg) + + return cfg, nil +} + +// bindConfigFlags binds the set of cli flags to config values. 
+func bindConfigFlags(cfg *viper.Viper, flags *pflag.FlagSet) error { + for key, flag := range configFlags { + err := cfg.BindPFlag(key, flags.Lookup(flag)) + if err != nil { + return err + } + } + return nil +} + +// loggingConfig returns a new logging config from the given config. +func loggingConfig(cfg *viper.Viper) logging.Config { + var level int8 + switch value := cfg.GetString("level"); value { + case configLogLevelDebug: + level = logging.Debug + case configLogLevelInfo: + level = logging.Info + case configLogLevelError: + level = logging.Error + case configLogLevelFatal: + level = logging.Fatal + default: + level = logging.Info + } + + var format logging.EncoderFormat + switch value := cfg.GetString("format"); value { + case configLogFormatJSON: + format = logging.JSON + case configLogFormatCSV: + format = logging.CSV + default: + format = logging.CSV + } + + return logging.Config{ + Level: logging.NewLogLevelOption(level), + EnableStackTrace: logging.NewEnableStackTraceOption(cfg.GetBool("stacktrace")), + DisableColor: logging.NewDisableColorOption(cfg.GetBool("nocolor")), + EncoderFormat: logging.NewEncoderFormatOption(format), + OutputPaths: []string{cfg.GetString("output")}, + EnableCaller: logging.NewEnableCallerOption(cfg.GetBool("caller")), + } +} diff --git a/cli/config_test.go b/cli/config_test.go new file mode 100644 index 0000000000..210743477c --- /dev/null +++ b/cli/config_test.go @@ -0,0 +1,61 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package cli + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCreateConfig(t *testing.T) { + rootdir := t.TempDir() + err := createConfig(rootdir, NewDefraCommand().PersistentFlags()) + require.NoError(t, err) + + // ensure no errors when config already exists + err = createConfig(rootdir, NewDefraCommand().PersistentFlags()) + require.NoError(t, err) + + assert.FileExists(t, filepath.Join(rootdir, "config.yaml")) +} + +func TestLoadConfigNotExist(t *testing.T) { + rootdir := t.TempDir() + cfg, err := loadConfig(rootdir, NewDefraCommand().PersistentFlags()) + require.NoError(t, err) + + assert.Equal(t, 5, cfg.GetInt("datastore.maxtxnretries")) + + assert.Equal(t, filepath.Join(rootdir, "data"), cfg.GetString("datastore.badger.path")) + assert.Equal(t, 1<<30, cfg.GetInt("datastore.badger.valuelogfilesize")) + assert.Equal(t, "badger", cfg.GetString("datastore.store")) + + assert.Equal(t, "127.0.0.1:9181", cfg.GetString("api.address")) + assert.Equal(t, []string{}, cfg.GetStringSlice("api.allowed-origins")) + assert.Equal(t, "", cfg.GetString("api.pubkeypath")) + assert.Equal(t, "", cfg.GetString("api.privkeypath")) + + assert.Equal(t, false, cfg.GetBool("net.p2pdisabled")) + assert.Equal(t, []string{"/ip4/127.0.0.1/tcp/9171"}, cfg.GetStringSlice("net.p2paddresses")) + assert.Equal(t, true, cfg.GetBool("net.pubsubenabled")) + assert.Equal(t, false, cfg.GetBool("net.relay")) + assert.Equal(t, []string{}, cfg.GetStringSlice("net.peers")) + + assert.Equal(t, "info", cfg.GetString("log.level")) + assert.Equal(t, false, cfg.GetBool("log.stacktrace")) + assert.Equal(t, "csv", cfg.GetString("log.format")) + assert.Equal(t, "stderr", cfg.GetString("log.output")) + assert.Equal(t, false, cfg.GetBool("log.nocolor")) + assert.Equal(t, false, cfg.GetBool("log.caller")) +} diff --git a/cli/index_create.go b/cli/index_create.go index 099eb7e7a6..bfe5ec64c2 100644 --- a/cli/index_create.go +++ b/cli/index_create.go @@ -37,7 +37,7 @@ Example: create a named index for 'Users' collection on 'name' field: defradb client index create --collection Users --fields name --name UsersByName`, ValidArgs: []string{"collection", "fields", "name"}, RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) var fields []client.IndexedFieldDescription for _, name := range fieldsArg { diff --git a/cli/index_drop.go b/cli/index_drop.go index 03639fb277..96f007268d 100644 --- a/cli/index_drop.go +++ b/cli/index_drop.go @@ -28,7 +28,7 @@ Example: drop the index 'UsersByName' for 'Users' collection: defradb client index create --collection Users --name UsersByName`, ValidArgs: []string{"collection", "name"}, RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) col, err := store.GetCollectionByName(cmd.Context(), collectionArg) if err != nil { diff --git a/cli/index_list.go b/cli/index_list.go index 92ada3e007..bf1fd21251 100644 --- a/cli/index_list.go +++ b/cli/index_list.go @@ -30,7 +30,7 @@ Example: show all index for 'Users' collection: defradb client index list --collection Users`, ValidArgs: []string{"collection"}, RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) switch { case collectionArg != "": diff --git a/cli/init.go b/cli/init.go deleted file mode 100644 index f9af1850b7..0000000000 --- a/cli/init.go +++ /dev/null 
@@ -1,83 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" -) - -/* -The `init` command initializes the configuration file and root directory. - -It covers three possible situations: -- root dir doesn't exist -- root dir exists and doesn't contain a config file -- root dir exists and contains a config file -*/ -func MakeInitCommand(cfg *config.Config) *cobra.Command { - var reinitialize bool - var cmd = &cobra.Command{ - Use: "init", - Short: "Initialize DefraDB's root directory and configuration file", - Long: `Initialize a directory for configuration and data at the given path. -Passed flags will be persisted in the stored configuration.`, - // Load a default configuration, considering env. variables and CLI flags. - PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { - if err := cfg.LoadWithRootdir(false); err != nil { - return errors.Wrap("failed to load configuration", err) - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - if config.FolderExists(cfg.Rootdir) { - if cfg.ConfigFileExists() { - if reinitialize { - if err := cfg.DeleteConfigFile(); err != nil { - return err - } - if err := cfg.WriteConfigFile(); err != nil { - return err - } - } else { - log.FeedbackError( - cmd.Context(), - fmt.Sprintf( - "Configuration file already exists at %v. 
Consider using --reinitialize", - cfg.ConfigFilePath(), - ), - ) - } - } else { - if err := cfg.WriteConfigFile(); err != nil { - return errors.Wrap("failed to create configuration file", err) - } - } - } else { - if err := cfg.CreateRootDirAndConfigFile(); err != nil { - return err - } - } - return nil - }, - } - - cmd.Flags().BoolVar( - &reinitialize, "reinitialize", false, - "Reinitialize the configuration file", - ) - - return cmd -} diff --git a/cli/p2p_collection_add.go b/cli/p2p_collection_add.go index dedae0a358..8a867e6abb 100644 --- a/cli/p2p_collection_add.go +++ b/cli/p2p_collection_add.go @@ -31,7 +31,7 @@ Example: add multiple collections `, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - p2p := mustGetP2PContext(cmd) + p2p := mustGetContextP2P(cmd) var collectionIDs []string for _, id := range strings.Split(args[0], ",") { diff --git a/cli/p2p_collection_getall.go b/cli/p2p_collection_getall.go index 10d98582c6..8a005df801 100644 --- a/cli/p2p_collection_getall.go +++ b/cli/p2p_collection_getall.go @@ -22,7 +22,7 @@ func MakeP2PCollectionGetAllCommand() *cobra.Command { This is the list of collections of the node that are synchronized on the pubsub network.`, Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { - p2p := mustGetP2PContext(cmd) + p2p := mustGetContextP2P(cmd) cols, err := p2p.GetAllP2PCollections(cmd.Context()) if err != nil { diff --git a/cli/p2p_collection_remove.go b/cli/p2p_collection_remove.go index 8aa0b5b7df..7def06e779 100644 --- a/cli/p2p_collection_remove.go +++ b/cli/p2p_collection_remove.go @@ -31,7 +31,7 @@ Example: remove multiple collections `, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - p2p := mustGetP2PContext(cmd) + p2p := mustGetContextP2P(cmd) var collectionIDs []string for _, id := range strings.Split(args[0], ",") { diff --git a/cli/p2p_replicator_delete.go b/cli/p2p_replicator_delete.go index 6cc2ddf785..debd0ac280 100644 --- a/cli/p2p_replicator_delete.go +++ b/cli/p2p_replicator_delete.go @@ -32,7 +32,7 @@ Example: `, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - p2p := mustGetP2PContext(cmd) + p2p := mustGetContextP2P(cmd) var info peer.AddrInfo if err := json.Unmarshal([]byte(args[0]), &info); err != nil { diff --git a/cli/p2p_replicator_getall.go b/cli/p2p_replicator_getall.go index 4bdf6e8487..0e5549fea6 100644 --- a/cli/p2p_replicator_getall.go +++ b/cli/p2p_replicator_getall.go @@ -25,7 +25,7 @@ Example: defradb client p2p replicator getall `, RunE: func(cmd *cobra.Command, args []string) error { - p2p := mustGetP2PContext(cmd) + p2p := mustGetContextP2P(cmd) reps, err := p2p.GetAllReplicators(cmd.Context()) if err != nil { diff --git a/cli/p2p_replicator_set.go b/cli/p2p_replicator_set.go index 5d9c712a82..29109a920a 100644 --- a/cli/p2p_replicator_set.go +++ b/cli/p2p_replicator_set.go @@ -32,7 +32,7 @@ Example: `, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - p2p := mustGetP2PContext(cmd) + p2p := mustGetContextP2P(cmd) var info peer.AddrInfo if err := json.Unmarshal([]byte(args[0]), &info); err != nil { diff --git a/cli/request.go b/cli/request.go index 56e33d7c4a..d5e37e79a3 100644 --- a/cli/request.go +++ b/cli/request.go @@ -45,7 +45,7 @@ with the database more conveniently. 
To learn more about the DefraDB GraphQL Query Language, refer to https://docs.source.network.`, RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) var request string switch { diff --git a/cli/root.go b/cli/root.go index 729b638f02..e4ba349f76 100644 --- a/cli/root.go +++ b/cli/root.go @@ -11,14 +11,10 @@ package cli import ( - "context" - "github.com/spf13/cobra" - - "github.com/sourcenetwork/defradb/config" ) -func MakeRootCommand(cfg *config.Config) *cobra.Command { +func MakeRootCommand() *cobra.Command { var cmd = &cobra.Command{ SilenceUsage: true, Use: "defradb", @@ -28,81 +24,108 @@ func MakeRootCommand(cfg *config.Config) *cobra.Command { Start a DefraDB node, interact with a local or remote node, and much more. `, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - return loadConfig(cfg) + if err := setContextRootDir(cmd); err != nil { + return err + } + return setContextConfig(cmd) }, } cmd.PersistentFlags().String( - "rootdir", "", - "Directory for data and configuration to use (default: $HOME/.defradb)", + "rootdir", + "", + "Directory for persistent data (default: $HOME/.defradb)", ) - err := cfg.BindFlag(config.RootdirKey, cmd.PersistentFlags().Lookup("rootdir")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind rootdir", err) - } cmd.PersistentFlags().String( - "loglevel", cfg.Log.Level, + "loglevel", + "info", "Log level to use. Options are debug, info, error, fatal", ) - err = cfg.BindFlag("log.level", cmd.PersistentFlags().Lookup("loglevel")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind log.loglevel", err) - } - - cmd.PersistentFlags().StringArray( - "logger", []string{}, - "Override logger parameters. Usage: --logger ,level=,output=,...", - ) - err = cfg.BindFlag("log.logger", cmd.PersistentFlags().Lookup("logger")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind log.logger", err) - } cmd.PersistentFlags().String( - "logoutput", cfg.Log.Output, + "logoutput", + "stderr", "Log output path", ) - err = cfg.BindFlag("log.output", cmd.PersistentFlags().Lookup("logoutput")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind log.output", err) - } cmd.PersistentFlags().String( - "logformat", cfg.Log.Format, + "logformat", + "csv", "Log format to use. 
Options are csv, json", ) - err = cfg.BindFlag("log.format", cmd.PersistentFlags().Lookup("logformat")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind log.format", err) - } cmd.PersistentFlags().Bool( - "logtrace", cfg.Log.Stacktrace, + "logtrace", + false, "Include stacktrace in error and fatal logs", ) - err = cfg.BindFlag("log.stacktrace", cmd.PersistentFlags().Lookup("logtrace")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind log.stacktrace", err) - } cmd.PersistentFlags().Bool( - "lognocolor", cfg.Log.NoColor, + "lognocolor", + false, "Disable colored log output", ) - err = cfg.BindFlag("log.nocolor", cmd.PersistentFlags().Lookup("lognocolor")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind log.nocolor", err) - } cmd.PersistentFlags().String( - "url", cfg.API.Address, + "url", + "127.0.0.1:9181", "URL of HTTP endpoint to listen on or connect to", ) - err = cfg.BindFlag("api.address", cmd.PersistentFlags().Lookup("url")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind api.address", err) - } + + cmd.PersistentFlags().StringArray( + "peers", + []string{}, + "List of peers to connect to", + ) + + cmd.PersistentFlags().Int( + "max-txn-retries", + 5, + "Specify the maximum number of retries per transaction", + ) + + cmd.PersistentFlags().String( + "store", + "badger", + "Specify the datastore to use (supported: badger, memory)", + ) + + cmd.PersistentFlags().Int( + "valuelogfilesize", + 1<<30, + "Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize", + ) + + cmd.PersistentFlags().StringSlice( + "p2paddr", + []string{"/ip4/127.0.0.1/tcp/9171"}, + "Listen addresses for the p2p network (formatted as a libp2p MultiAddr)", + ) + + cmd.PersistentFlags().Bool( + "no-p2p", + false, + "Disable the peer-to-peer network synchronization system", + ) + + cmd.PersistentFlags().StringArray( + "allowed-origins", + []string{}, + "List of origins to allow for CORS requests", + ) + + cmd.PersistentFlags().String( + "pubkeypath", + "", + "Path to the public key for tls", + ) + + cmd.PersistentFlags().String( + "privkeypath", + "", + "Path to the private key for tls", + ) return cmd } diff --git a/cli/schema_add.go b/cli/schema_add.go index b93427a883..f987d062df 100644 --- a/cli/schema_add.go +++ b/cli/schema_add.go @@ -36,7 +36,7 @@ Example: add from stdin: Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) var schema string switch { diff --git a/cli/schema_describe.go b/cli/schema_describe.go index 72d8eda474..c4133baa8c 100644 --- a/cli/schema_describe.go +++ b/cli/schema_describe.go @@ -11,6 +11,7 @@ package cli import ( + "github.com/sourcenetwork/immutable" "github.com/spf13/cobra" "github.com/sourcenetwork/defradb/client" @@ -39,37 +40,22 @@ Example: view a single schema by version id defradb client schema describe --version bae123 `, RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) - var schemas []client.SchemaDescription - switch { - case versionID != "": - schema, err := store.GetSchemaByVersionID(cmd.Context(), versionID) - if err != nil { - return err - } - return writeJSON(cmd, schema) - - case root != "": - s, err := store.GetSchemasByRoot(cmd.Context(), root) - if err != nil { - return err - } - 
schemas = s - - case name != "": - s, err := store.GetSchemasByName(cmd.Context(), name) - if err != nil { - return err - } - schemas = s + options := client.SchemaFetchOptions{} + if versionID != "" { + options.ID = immutable.Some(versionID) + } + if root != "" { + options.Root = immutable.Some(root) + } + if name != "" { + options.Name = immutable.Some(name) + } - default: - s, err := store.GetAllSchemas(cmd.Context()) - if err != nil { - return err - } - schemas = s + schemas, err := store.GetSchemas(cmd.Context(), options) + if err != nil { + return err } return writeJSON(cmd, schemas) diff --git a/cli/schema_migration_down.go b/cli/schema_migration_down.go index 1dcb5e64da..1d7622257c 100644 --- a/cli/schema_migration_down.go +++ b/cli/schema_migration_down.go @@ -23,25 +23,25 @@ import ( func MakeSchemaMigrationDownCommand() *cobra.Command { var file string - var schemaVersionID string + var collectionID uint32 var cmd = &cobra.Command{ - Use: "down --version ", - Short: "Reverses the migration from the specified schema version.", - Long: `Reverses the migration from the specified schema version. + Use: "down --collection ", + Short: "Reverses the migration to the specified collection version.", + Long: `Reverses the migration to the specified collection version. Documents is a list of documents to reverse the migration from. Example: migrate from string - defradb client schema migration down --version bae123 '[{"name": "Bob"}]' + defradb client schema migration down --collection 2 '[{"name": "Bob"}]' Example: migrate from file - defradb client schema migration down --version bae123 -f documents.json + defradb client schema migration down --collection 2 -f documents.json Example: migrate from stdin - cat documents.json | defradb client schema migration down --version bae123 - + cat documents.json | defradb client schema migration down --collection 2 - `, Args: cobra.RangeArgs(0, 1), RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) var srcData []byte switch { @@ -71,7 +71,8 @@ Example: migrate from stdin if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { lens = lens.WithTxn(tx) } - out, err := lens.MigrateDown(cmd.Context(), enumerable.New(src), schemaVersionID) + + out, err := lens.MigrateDown(cmd.Context(), enumerable.New(src), collectionID) if err != nil { return err } @@ -86,6 +87,6 @@ Example: migrate from stdin }, } cmd.Flags().StringVarP(&file, "file", "f", "", "File containing document(s)") - cmd.Flags().StringVar(&schemaVersionID, "version", "", "Schema version id") + cmd.Flags().Uint32Var(&collectionID, "collection", 0, "Collection id") return cmd } diff --git a/cli/schema_migration_get.go b/cli/schema_migration_get.go deleted file mode 100644 index 43b66599b7..0000000000 --- a/cli/schema_migration_get.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
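The schema_describe refactor above folds the four specialised schema getters into a single options-based lookup. A minimal sketch of a caller, assuming only the GetSchemas signature and option fields shown in this diff (the example value is a placeholder):

	options := client.SchemaFetchOptions{
		Name: immutable.Some("Users"),
	}
	schemas, err := store.GetSchemas(cmd.Context(), options)
	if err != nil {
		return err
	}
	return writeJSON(cmd, schemas)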
- -package cli - -import ( - "github.com/spf13/cobra" -) - -func MakeSchemaMigrationGetCommand() *cobra.Command { - var cmd = &cobra.Command{ - Use: "get", - Short: "Gets the schema migrations within DefraDB", - Long: `Gets the schema migrations within the local DefraDB node. - -Example: - defradb client schema migration get' - -Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, - RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) - - cfgs, err := store.LensRegistry().Config(cmd.Context()) - if err != nil { - return err - } - return writeJSON(cmd, cfgs) - }, - } - return cmd -} diff --git a/cli/schema_migration_reload.go b/cli/schema_migration_reload.go index d04aebed65..4266b3ec3f 100644 --- a/cli/schema_migration_reload.go +++ b/cli/schema_migration_reload.go @@ -22,7 +22,7 @@ func MakeSchemaMigrationReloadCommand() *cobra.Command { Short: "Reload the schema migrations within DefraDB", Long: `Reload the schema migrations within DefraDB`, RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) lens := store.LensRegistry() if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { diff --git a/cli/schema_migration_set.go b/cli/schema_migration_set.go index 280130b8db..f7b32103b9 100644 --- a/cli/schema_migration_set.go +++ b/cli/schema_migration_set.go @@ -27,7 +27,8 @@ func MakeSchemaMigrationSetCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "set [src] [dst] [cfg]", Short: "Set a schema migration within DefraDB", - Long: `Set a migration between two schema versions within the local DefraDB node. + Long: `Set a migration from a source schema version to a destination schema version for +all collections that are on the given source schema version within the local DefraDB node. Example: set from an argument string: defradb client schema migration set bae123 bae456 '{"lenses": [...' @@ -41,7 +42,7 @@ Example: add from stdin: Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, Args: cobra.RangeArgs(2, 3), RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) var lensCfgJson string switch { @@ -80,7 +81,7 @@ Learn more about the DefraDB GraphQL Schema Language on https://docs.source.netw Lens: lensCfg, } - return store.LensRegistry().SetMigration(cmd.Context(), migrationCfg) + return store.SetMigration(cmd.Context(), migrationCfg) }, } cmd.Flags().StringVarP(&lensFile, "file", "f", "", "Lens configuration file") diff --git a/cli/schema_migration_set_registry.go b/cli/schema_migration_set_registry.go new file mode 100644 index 0000000000..cc5098afae --- /dev/null +++ b/cli/schema_migration_set_registry.go @@ -0,0 +1,54 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
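The schema_migration_set change above routes persisted migrations through the Store, while the new set-registry command that follows writes only to the in-memory LensRegistry. A rough sketch of the two entry points; the LensConfig field names are an assumption based on the wider codebase, not this diff:

	// Persisted: applied to all collections on the source schema version,
	// and survives a node restart.
	err := store.SetMigration(cmd.Context(), client.LensConfig{
		SourceSchemaVersionID:      "bae123", // placeholder version IDs
		DestinationSchemaVersionID: "bae456",
		Lens:                       lensCfg,
	})

	// Registry-only: keyed by collection ID and dropped on restart.
	err = store.LensRegistry().SetMigration(cmd.Context(), collectionID, lensCfg)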
+ +package cli + +import ( + "encoding/json" + "strconv" + "strings" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/spf13/cobra" +) + +func MakeSchemaMigrationSetRegistryCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "set-registry [collectionID] [cfg]", + Short: "Set a schema migration within the DefraDB LensRegistry", + Long: `Set a migration for a collection within the LensRegistry of the local DefraDB node. +Does not persist the migration after restart. + +Example: set from an argument string: + defradb client schema migration set-registry 2 '{"lenses": [...' + +Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetContextStore(cmd) + + decoder := json.NewDecoder(strings.NewReader(args[1])) + decoder.DisallowUnknownFields() + + var lensCfg model.Lens + if err := decoder.Decode(&lensCfg); err != nil { + return NewErrInvalidLensConfig(err) + } + + collectionID, err := strconv.ParseUint(args[0], 10, 32) + if err != nil { + return err + } + + return store.LensRegistry().SetMigration(cmd.Context(), uint32(collectionID), lensCfg) + }, + } + return cmd +} diff --git a/cli/schema_migration_up.go b/cli/schema_migration_up.go index 3b0b522349..577b87d4c7 100644 --- a/cli/schema_migration_up.go +++ b/cli/schema_migration_up.go @@ -23,25 +23,25 @@ import ( func MakeSchemaMigrationUpCommand() *cobra.Command { var file string - var schemaVersionID string + var collectionID uint32 var cmd = &cobra.Command{ - Use: "up --version ", - Short: "Applies the migration to the specified schema version.", - Long: `Applies the migration to the specified schema version. + Use: "up --collection ", + Short: "Applies the migration to the specified collection version.", + Long: `Applies the migration to the specified collection version. Documents is a list of documents to apply the migration to.
Example: migrate from string - defradb client schema migration up --version bae123 '[{"name": "Bob"}]' + defradb client schema migration up --collection 2 '[{"name": "Bob"}]' Example: migrate from file - defradb client schema migration up --version bae123 -f documents.json + defradb client schema migration up --collection 2 -f documents.json Example: migrate from stdin - cat documents.json | defradb client schema migration up --version bae123 - + cat documents.json | defradb client schema migration up --collection 2 - `, Args: cobra.RangeArgs(0, 1), RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) var srcData []byte switch { @@ -71,7 +71,8 @@ Example: migrate from stdin if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { lens = lens.WithTxn(tx) } - out, err := lens.MigrateUp(cmd.Context(), enumerable.New(src), schemaVersionID) + + out, err := lens.MigrateUp(cmd.Context(), enumerable.New(src), collectionID) if err != nil { return err } @@ -86,6 +87,6 @@ Example: migrate from stdin }, } cmd.Flags().StringVarP(&file, "file", "f", "", "File containing document(s)") - cmd.Flags().StringVar(&schemaVersionID, "version", "", "Schema version id") + cmd.Flags().Uint32Var(&collectionID, "collection", 0, "Collection id") return cmd } diff --git a/cli/schema_patch.go b/cli/schema_patch.go index 70f4283c85..23f425396d 100644 --- a/cli/schema_patch.go +++ b/cli/schema_patch.go @@ -11,25 +11,30 @@ package cli import ( + "encoding/json" "fmt" "io" "os" + "strings" + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" "github.com/spf13/cobra" ) func MakeSchemaPatchCommand() *cobra.Command { var patchFile string - var setDefault bool + var lensFile string + var setActive bool var cmd = &cobra.Command{ - Use: "patch [schema]", + Use: "patch [schema] [migration]", Short: "Patch an existing schema type", Long: `Patch an existing schema. Uses JSON Patch to modify schema types. Example: patch from an argument string: - defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' + defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' '{"lenses": [...' 
Example: patch from file: defradb client schema patch -f patch.json @@ -39,7 +44,7 @@ Example: patch from stdin: To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network.`, RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + store := mustGetContextStore(cmd) var patch string switch { @@ -55,16 +60,42 @@ To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.s return err } patch = string(data) - case len(args) > 0: + case len(args) >= 1: patch = args[0] default: return fmt.Errorf("patch cannot be empty") } - return store.PatchSchema(cmd.Context(), patch, setDefault) + var lensCfgJson string + switch { + case lensFile != "": + data, err := os.ReadFile(lensFile) + if err != nil { + return err + } + lensCfgJson = string(data) + case len(args) == 2: + lensCfgJson = args[1] + } + + decoder := json.NewDecoder(strings.NewReader(lensCfgJson)) + decoder.DisallowUnknownFields() + + var migration immutable.Option[model.Lens] + if lensCfgJson != "" { + var lensCfg model.Lens + if err := decoder.Decode(&lensCfg); err != nil { + return NewErrInvalidLensConfig(err) + } + migration = immutable.Some(lensCfg) + } + + return store.PatchSchema(cmd.Context(), patch, migration, setActive) }, } - cmd.Flags().BoolVar(&setDefault, "set-default", false, "Set default schema version") - cmd.Flags().StringVarP(&patchFile, "file", "f", "", "File to load a patch from") + cmd.Flags().BoolVar(&setActive, "set-active", false, + "Set the active schema version for all collections using the root schema") + cmd.Flags().StringVarP(&patchFile, "patch-file", "p", "", "File to load a patch from") + cmd.Flags().StringVarP(&lensFile, "lens-file", "t", "", "File to load a lens config from") return cmd } diff --git a/cli/schema_set_default.go b/cli/schema_set_active.go similarity index 56% rename from cli/schema_set_default.go rename to cli/schema_set_active.go index cdb6bd8bd8..2b13713461 100644 --- a/cli/schema_set_default.go +++ b/cli/schema_set_active.go @@ -14,15 +14,16 @@ import ( "github.com/spf13/cobra" ) -func MakeSchemaSetDefaultCommand() *cobra.Command { +func MakeSchemaSetActiveCommand() *cobra.Command { var cmd = &cobra.Command{ - Use: "set-default [versionID]", - Short: "Set the default schema version", - Long: `Set the default schema version`, - Args: cobra.ExactArgs(1), + Use: "set-active [versionID]", + Short: "Set the active collection version", + Long: `Activates all collection versions with the given schema version, and deactivates all +those without it (if they share the same schema root).`, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) - return store.SetDefaultSchemaVersion(cmd.Context(), args[0]) + store := mustGetContextStore(cmd) + return store.SetActiveSchemaVersion(cmd.Context(), args[0]) }, } return cmd diff --git a/cli/server_dump.go b/cli/server_dump.go index 0ba638d268..eb364a247f 100644 --- a/cli/server_dump.go +++ b/cli/server_dump.go @@ -11,65 +11,39 @@ package cli import ( - "fmt" - "os" - "os/signal" - "github.com/spf13/cobra" - "github.com/sourcenetwork/defradb/config" - ds "github.com/sourcenetwork/defradb/datastore" - badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/defradb/node" ) -func MakeServerDumpCmd(cfg *config.Config) *cobra.Command { - var datastore string -
+func MakeServerDumpCmd() *cobra.Command { cmd := &cobra.Command{ Use: "server-dump", Short: "Dumps the state of the entire database", RunE: func(cmd *cobra.Command, _ []string) error { - log.FeedbackInfo(cmd.Context(), "Starting DefraDB process...") - - // setup signal handlers - signalCh := make(chan os.Signal, 1) - signal.Notify(signalCh, os.Interrupt) + cfg := mustGetContextConfig(cmd) + log.FeedbackInfo(cmd.Context(), "Dumping DB state...") - var rootstore ds.RootStore - var err error - if datastore == badgerDatastoreName { - info, err := os.Stat(cfg.Datastore.Badger.Path) - exists := (err == nil && info.IsDir()) - if !exists { - return errors.New(fmt.Sprintf( - "badger store does not exist at %s. Try with an existing directory", - cfg.Datastore.Badger.Path, - )) - } - log.FeedbackInfo(cmd.Context(), "Opening badger store", logging.NewKV("Path", cfg.Datastore.Badger.Path)) - rootstore, err = badgerds.NewDatastore(cfg.Datastore.Badger.Path, cfg.Datastore.Badger.Options) - if err != nil { - return errors.Wrap("could not open badger datastore", err) - } - } else { + if cfg.GetString("datastore.store") != configStoreBadger { return errors.New("server-side dump is only supported for the Badger datastore") } - + storeOpts := []node.StoreOpt{ + node.WithPath(cfg.GetString("datastore.badger.path")), + } + rootstore, err := node.NewStore(storeOpts...) + if err != nil { + return err + } db, err := db.NewDB(cmd.Context(), rootstore) if err != nil { return errors.Wrap("failed to initialize database", err) } + defer db.Close() - log.FeedbackInfo(cmd.Context(), "Dumping DB state...") return db.PrintDump(cmd.Context()) }, } - cmd.Flags().StringVar( - &datastore, "store", cfg.Datastore.Store, - "Datastore to use. Options are badger, memory", - ) return cmd } diff --git a/cli/start.go b/cli/start.go index 62cc281dbf..d4e789cbc6 100644 --- a/cli/start.go +++ b/cli/start.go @@ -11,317 +11,125 @@ package cli import ( - "context" "fmt" - "net/http" "os" "os/signal" "path/filepath" - "strings" "syscall" - badger "github.com/sourcenetwork/badger/v4" + "github.com/libp2p/go-libp2p/core/peer" "github.com/spf13/cobra" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - ds "github.com/sourcenetwork/defradb/datastore" - badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" - httpapi "github.com/sourcenetwork/defradb/http" - "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/defradb/http" "github.com/sourcenetwork/defradb/net" netutils "github.com/sourcenetwork/defradb/net/utils" + "github.com/sourcenetwork/defradb/node" ) -const badgerDatastoreName = "badger" - -func MakeStartCommand(cfg *config.Config) *cobra.Command { +func MakeStartCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "start", Short: "Start a DefraDB node", Long: "Start a DefraDB node.", // Load the root config if it exists, otherwise create it. 
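// (Sketch for orientation, not part of the patch.) With the helpers added in
// cli/utils.go, the pre-run below replaces the old shared-config loading with
// a context pipeline, roughly:
//
//	setContextRootDir(cmd)                              // resolve --rootdir, defaulting to $HOME/.defradb
//	createConfig(rootdir, cmd.Root().PersistentFlags()) // write config.yaml if it does not exist
//	setContextConfig(cmd)                               // load config into the context as a *viper.Viper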
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { - if err := loadConfig(cfg); err != nil { + if err := setContextRootDir(cmd); err != nil { return err } - if !cfg.ConfigFileExists() { - return createConfig(cfg) - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - di, err := start(cmd.Context(), cfg) - if err != nil { + rootdir := mustGetContextRootDir(cmd) + if err := createConfig(rootdir, cmd.Root().PersistentFlags()); err != nil { return err } - - return wait(cmd.Context(), di) + return setContextConfig(cmd) }, - } - - cmd.Flags().String( - "peers", cfg.Net.Peers, - "List of peers to connect to", - ) - err := cfg.BindFlag("net.peers", cmd.Flags().Lookup("peers")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind net.peers", err) - } - - cmd.Flags().Int( - "max-txn-retries", cfg.Datastore.MaxTxnRetries, - "Specify the maximum number of retries per transaction", - ) - err = cfg.BindFlag("datastore.maxtxnretries", cmd.Flags().Lookup("max-txn-retries")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind datastore.maxtxnretries", err) - } - - cmd.Flags().String( - "store", cfg.Datastore.Store, - "Specify the datastore to use (supported: badger, memory)", - ) - err = cfg.BindFlag("datastore.store", cmd.Flags().Lookup("store")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind datastore.store", err) - } - - cmd.Flags().Var( - &cfg.Datastore.Badger.ValueLogFileSize, "valuelogfilesize", - "Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize", - ) - err = cfg.BindFlag("datastore.badger.valuelogfilesize", cmd.Flags().Lookup("valuelogfilesize")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind datastore.badger.valuelogfilesize", err) - } - - cmd.Flags().String( - "p2paddr", cfg.Net.P2PAddress, - "Listener address for the p2p network (formatted as a libp2p MultiAddr)", - ) - err = cfg.BindFlag("net.p2paddress", cmd.Flags().Lookup("p2paddr")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind net.p2paddress", err) - } - - cmd.Flags().Bool( - "no-p2p", cfg.Net.P2PDisabled, - "Disable the peer-to-peer network synchronization system", - ) - err = cfg.BindFlag("net.p2pdisabled", cmd.Flags().Lookup("no-p2p")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind net.p2pdisabled", err) - } - - cmd.Flags().Bool( - "tls", cfg.API.TLS, - "Enable serving the API over https", - ) - err = cfg.BindFlag("api.tls", cmd.Flags().Lookup("tls")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind api.tls", err) - } - - cmd.Flags().StringArray( - "allowed-origins", cfg.API.AllowedOrigins, - "List of origins to allow for CORS requests", - ) - err = cfg.BindFlag("api.allowed-origins", cmd.Flags().Lookup("allowed-origins")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind api.allowed-origins", err) - } - - cmd.Flags().String( - "pubkeypath", cfg.API.PubKeyPath, - "Path to the public key for tls", - ) - err = cfg.BindFlag("api.pubkeypath", cmd.Flags().Lookup("pubkeypath")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind api.pubkeypath", err) - } - - cmd.Flags().String( - "privkeypath", cfg.API.PrivKeyPath, - "Path to the private key for tls", - ) - err = cfg.BindFlag("api.privkeypath", cmd.Flags().Lookup("privkeypath")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind 
api.privkeypath", err) - } - - cmd.Flags().String( - "email", cfg.API.Email, - "Email address used by the CA for notifications", - ) - err = cfg.BindFlag("api.email", cmd.Flags().Lookup("email")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind api.email", err) - } - return cmd -} - -type defraInstance struct { - node *net.Node - db client.DB - server *httpapi.Server -} - -func (di *defraInstance) close(ctx context.Context) { - if di.node != nil { - di.node.Close() - } else { - di.db.Close() - } - if err := di.server.Close(); err != nil { - log.FeedbackInfo( - ctx, - "The server could not be closed successfully", - logging.NewKV("Error", err.Error()), - ) - } -} + RunE: func(cmd *cobra.Command, args []string) error { + cfg := mustGetContextConfig(cmd) -func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { - log.FeedbackInfo(ctx, "Starting DefraDB service...") + dbOpts := []db.Option{ + db.WithUpdateEvents(), + db.WithMaxRetries(cfg.GetInt("datastore.MaxTxnRetries")), + } - var rootstore ds.RootStore + netOpts := []net.NodeOpt{ + net.WithListenAddresses(cfg.GetStringSlice("net.p2pAddresses")...), + net.WithEnablePubSub(cfg.GetBool("net.pubSubEnabled")), + net.WithEnableRelay(cfg.GetBool("net.relayEnabled")), + } - var err error - if cfg.Datastore.Store == badgerDatastoreName { - log.FeedbackInfo(ctx, "Opening badger store", logging.NewKV("Path", cfg.Datastore.Badger.Path)) - rootstore, err = badgerds.NewDatastore( - cfg.Datastore.Badger.Path, - cfg.Datastore.Badger.Options, - ) - } else if cfg.Datastore.Store == "memory" { - log.FeedbackInfo(ctx, "Building new memory store") - opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} - rootstore, err = badgerds.NewDatastore("", &opts) - } + serverOpts := []http.ServerOpt{ + http.WithAddress(cfg.GetString("api.address")), + http.WithAllowedOrigins(cfg.GetStringSlice("api.allowed-origins")...), + http.WithTLSCertPath(cfg.GetString("api.pubKeyPath")), + http.WithTLSKeyPath(cfg.GetString("api.privKeyPath")), + } - if err != nil { - return nil, errors.Wrap("failed to open datastore", err) - } + storeOpts := []node.StoreOpt{ + node.WithPath(cfg.GetString("datastore.badger.path")), + node.WithInMemory(cfg.GetString("datastore.store") == configStoreMemory), + } - options := []db.Option{ - db.WithUpdateEvents(), - db.WithMaxRetries(cfg.Datastore.MaxTxnRetries), - } + var peers []peer.AddrInfo + if val := cfg.GetStringSlice("net.peers"); len(val) > 0 { + addrs, err := netutils.ParsePeers(val) + if err != nil { + return errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %s", val), err) + } + peers = addrs + } - db, err := db.NewDB(ctx, rootstore, options...) - if err != nil { - return nil, errors.Wrap("failed to create database", err) - } + if cfg.GetString("datastore.store") != configStoreMemory { + // It would be ideal to not have the key path tied to the datastore. + // Running with memory store mode will always generate a random key. + // Adding support for an ephemeral mode and moving the key to the + // config would solve both of these issues. 
+ rootdir := mustGetContextRootDir(cmd) + key, err := loadOrGeneratePrivateKey(filepath.Join(rootdir, "data", "key")) + if err != nil { + return err + } + netOpts = append(netOpts, net.WithPrivateKey(key)) + } - // init the p2p node - var node *net.Node - if !cfg.Net.P2PDisabled { - nodeOpts := []net.NodeOpt{ - net.WithConfig(cfg), - } - if cfg.Datastore.Store == badgerDatastoreName { - // It would be ideal to not have the key path tied to the datastore. - // Running with memory store mode will always generate a random key. - // Adding support for an ephemeral mode and moving the key to the - // config would solve both of these issues. - key, err := loadOrGeneratePrivateKey(filepath.Join(cfg.Rootdir, "data", "key")) - if err != nil { - return nil, err + opts := []node.NodeOpt{ + node.WithPeers(peers...), + node.WithStoreOpts(storeOpts...), + node.WithDatabaseOpts(dbOpts...), + node.WithNetOpts(netOpts...), + node.WithServerOpts(serverOpts...), + node.WithDisableP2P(cfg.GetBool("net.p2pDisabled")), } - nodeOpts = append(nodeOpts, net.WithPrivateKey(key)) - } - log.FeedbackInfo(ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress)) - node, err = net.NewNode(ctx, db, nodeOpts...) - if err != nil { - db.Close() - return nil, errors.Wrap("failed to start P2P node", err) - } - // parse peers and bootstrap - if len(cfg.Net.Peers) != 0 { - log.Debug(ctx, "Parsing bootstrap peers", logging.NewKV("Peers", cfg.Net.Peers)) - addrs, err := netutils.ParsePeers(strings.Split(cfg.Net.Peers, ",")) + n, err := node.NewNode(cmd.Context(), opts...) if err != nil { - return nil, errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %v", cfg.Net.Peers), err) + return err } - log.Debug(ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) - node.Bootstrap(addrs) - } - if err := node.Start(); err != nil { - node.Close() - return nil, errors.Wrap("failed to start P2P listeners", err) - } - } - - sOpt := []func(*httpapi.Server){ - httpapi.WithAddress(cfg.API.Address), - httpapi.WithRootDir(cfg.Rootdir), - httpapi.WithAllowedOrigins(cfg.API.AllowedOrigins...), - } - - if cfg.API.TLS { - sOpt = append( - sOpt, - httpapi.WithTLS(), - httpapi.WithSelfSignedCert(cfg.API.PubKeyPath, cfg.API.PrivKeyPath), - httpapi.WithCAEmail(cfg.API.Email), - ) - } - - var server *httpapi.Server - if node != nil { - server, err = httpapi.NewServer(node, sOpt...) - } else { - server, err = httpapi.NewServer(db, sOpt...) 
- } - if err != nil { - return nil, errors.Wrap("failed to create http server", err) - } - if err := server.Listen(ctx); err != nil { - return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", server.Addr), err) - } - // save the address on the config in case the port number was set to random - cfg.API.Address = server.AssignedAddr() + defer func() { + if err := n.Close(cmd.Context()); err != nil { + log.FeedbackErrorE(cmd.Context(), "Stopping DefraDB", err) + } + }() - // run the server in a separate goroutine - go func() { - log.FeedbackInfo(ctx, fmt.Sprintf("Providing HTTP API at %s.", cfg.API.AddressToURL())) - if err := server.Run(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) { - log.FeedbackErrorE(ctx, "Failed to run the HTTP server", err) - if node != nil { - node.Close() - } else { - db.Close() + log.FeedbackInfo(cmd.Context(), "Starting DefraDB") + if err := n.Start(cmd.Context()); err != nil { + return err } - os.Exit(1) - } - }() - return &defraInstance{ - node: node, - db: db, - server: server, - }, nil -} + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) -// wait waits for an interrupt signal to close the program. -func wait(ctx context.Context, di *defraInstance) error { - // setup signal handlers - signalCh := make(chan os.Signal, 1) - signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) + select { + case <-cmd.Context().Done(): + log.FeedbackInfo(cmd.Context(), "Received context cancellation; shutting down...") + case <-signalCh: + log.FeedbackInfo(cmd.Context(), "Received interrupt; shutting down...") + } - select { - case <-ctx.Done(): - log.FeedbackInfo(ctx, "Received context cancellation; closing database...") - di.close(ctx) - return ctx.Err() - case <-signalCh: - log.FeedbackInfo(ctx, "Received interrupt; closing database...") - di.close(ctx) - return ctx.Err() + return nil + }, } + + return cmd } diff --git a/cli/tx_commit.go b/cli/tx_commit.go index 260a274a08..f7ef112988 100644 --- a/cli/tx_commit.go +++ b/cli/tx_commit.go @@ -15,22 +15,23 @@ import ( "github.com/spf13/cobra" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/http" ) -func MakeTxCommitCommand(cfg *config.Config) *cobra.Command { +func MakeTxCommitCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "commit [id]", Short: "Commit a DefraDB transaction.", Long: `Commit a DefraDB transaction.`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) (err error) { + cfg := mustGetContextConfig(cmd) + id, err := strconv.ParseUint(args[0], 10, 64) if err != nil { return err } - tx, err := http.NewTransaction(cfg.API.Address, id) + tx, err := http.NewTransaction(cfg.GetString("api.address"), id) if err != nil { return err } diff --git a/cli/tx_create.go b/cli/tx_create.go index 987a784077..da239b6943 100644 --- a/cli/tx_create.go +++ b/cli/tx_create.go @@ -14,11 +14,10 @@ import ( "github.com/spf13/cobra" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/datastore" ) -func MakeTxCreateCommand(cfg *config.Config) *cobra.Command { +func MakeTxCreateCommand() *cobra.Command { var concurrent bool var readOnly bool var cmd = &cobra.Command{ diff --git a/cli/tx_discard.go b/cli/tx_discard.go index 351f919f53..0a980a63f5 100644 --- a/cli/tx_discard.go +++ b/cli/tx_discard.go @@ -15,22 +15,23 @@ import ( "github.com/spf13/cobra" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/http" ) -func 
MakeTxDiscardCommand(cfg *config.Config) *cobra.Command { +func MakeTxDiscardCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "discard [id]", Short: "Discard a DefraDB transaction.", Long: `Discard a DefraDB transaction.`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) (err error) { + cfg := mustGetContextConfig(cmd) + id, err := strconv.ParseUint(args[0], 10, 64) if err != nil { return err } - tx, err := http.NewTransaction(cfg.API.Address, id) + tx, err := http.NewTransaction(cfg.GetString("api.address"), id) if err != nil { return err } diff --git a/cli/utils.go b/cli/utils.go index 0f3fa0e565..caeb282606 100644 --- a/cli/utils.go +++ b/cli/utils.go @@ -14,12 +14,13 @@ import ( "context" "encoding/json" "os" + "path/filepath" "github.com/libp2p/go-libp2p/core/crypto" "github.com/spf13/cobra" + "github.com/spf13/viper" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/http" ) @@ -27,6 +28,10 @@ import ( type contextKey string var ( + // cfgContextKey is the context key for the config. + cfgContextKey = contextKey("cfg") + // rootDirContextKey is the context key for the root directory. + rootDirContextKey = contextKey("rootDir") // txContextKey is the context key for the datastore.Txn // // This will only be set if a transaction id is specified. @@ -45,33 +50,61 @@ var ( colContextKey = contextKey("col") ) -// mustGetStoreContext returns the store for the current command context. +// mustGetContextStore returns the store for the current command context. // // If a store is not set in the current context this function panics. -func mustGetStoreContext(cmd *cobra.Command) client.Store { +func mustGetContextStore(cmd *cobra.Command) client.Store { return cmd.Context().Value(storeContextKey).(client.Store) } -// mustGetP2PContext returns the p2p implementation for the current command context. +// mustGetContextP2P returns the p2p implementation for the current command context. // // If a p2p implementation is not set in the current context this function panics. -func mustGetP2PContext(cmd *cobra.Command) client.P2P { +func mustGetContextP2P(cmd *cobra.Command) client.P2P { return cmd.Context().Value(dbContextKey).(client.P2P) } -// tryGetCollectionContext returns the collection for the current command context +// mustGetContextConfig returns the config for the current command context. +// +// If a config is not set in the current context this function panics. +func mustGetContextConfig(cmd *cobra.Command) *viper.Viper { + return cmd.Context().Value(cfgContextKey).(*viper.Viper) +} + +// mustGetContextRootDir returns the rootdir for the current command context. +// +// If a rootdir is not set in the current context this function panics. +func mustGetContextRootDir(cmd *cobra.Command) string { + return cmd.Context().Value(rootDirContextKey).(string) +} + +// tryGetContextCollection returns the collection for the current command context // and a boolean indicating if the collection was set. -func tryGetCollectionContext(cmd *cobra.Command) (client.Collection, bool) { +func tryGetContextCollection(cmd *cobra.Command) (client.Collection, bool) { col, ok := cmd.Context().Value(colContextKey).(client.Collection) return col, ok } -// setTransactionContext sets the transaction for the current command context. 
-func setTransactionContext(cmd *cobra.Command, cfg *config.Config, txId uint64) error { +// setContextConfig sets the config for the current command context. +func setContextConfig(cmd *cobra.Command) error { + rootdir := mustGetContextRootDir(cmd) + flags := cmd.Root().PersistentFlags() + cfg, err := loadConfig(rootdir, flags) + if err != nil { + return err + } + ctx := context.WithValue(cmd.Context(), cfgContextKey, cfg) + cmd.SetContext(ctx) + return nil +} + +// setContextTransaction sets the transaction for the current command context. +func setContextTransaction(cmd *cobra.Command, txId uint64) error { if txId == 0 { return nil } - tx, err := http.NewTransaction(cfg.API.Address, txId) + cfg := mustGetContextConfig(cmd) + tx, err := http.NewTransaction(cfg.GetString("api.address"), txId) if err != nil { return err } @@ -80,9 +113,10 @@ func setTransactionContext(cmd *cobra.Command, cfg *config.Config, txId uint64) return nil } -// setStoreContext sets the store for the current command context. -func setStoreContext(cmd *cobra.Command, cfg *config.Config) error { - db, err := http.NewClient(cfg.API.Address) +// setContextStore sets the store for the current command context. +func setContextStore(cmd *cobra.Command) error { + cfg := mustGetContextConfig(cmd) + db, err := http.NewClient(cfg.GetString("api.address")) if err != nil { return err } @@ -96,19 +130,22 @@ func setStoreContext(cmd *cobra.Command, cfg *config.Config) error { return nil } -// loadConfig loads the rootDir containing the configuration file, -// otherwise warn about it and load a default configuration. -func loadConfig(cfg *config.Config) error { - return cfg.LoadWithRootdir(cfg.ConfigFileExists()) -} - -// createConfig creates the config directories and writes -// the current config to a file. -func createConfig(cfg *config.Config) error { - if config.FolderExists(cfg.Rootdir) { - return cfg.WriteConfigFile() +// setContextRootDir sets the rootdir for the current command context. +func setContextRootDir(cmd *cobra.Command) error { + rootdir, err := cmd.Root().PersistentFlags().GetString("rootdir") + if err != nil { + return err + } + home, err := os.UserHomeDir() + if err != nil { + return err } - return cfg.CreateRootDirAndConfigFile() + if rootdir == "" { + rootdir = filepath.Join(home, ".defradb") + } + ctx := context.WithValue(cmd.Context(), rootDirContextKey, rootdir) + cmd.SetContext(ctx) + return nil } // loadOrGeneratePrivateKey loads the private key from the given path @@ -135,6 +172,10 @@ func generatePrivateKey(path string) (crypto.PrivKey, error) { if err != nil { return nil, err } + err = os.MkdirAll(filepath.Dir(path), 0755) + if err != nil { + return nil, err + } return key, os.WriteFile(path, data, 0644) } diff --git a/cli/view_add.go b/cli/view_add.go index 46779fb784..9c7d42b723 100644 --- a/cli/view_add.go +++ b/cli/view_add.go @@ -10,34 +10,72 @@ package cli -import "github.com/spf13/cobra" +import ( + "encoding/json" + "io" + "os" + "strings" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + "github.com/spf13/cobra" +) func MakeViewAddCommand() *cobra.Command { + var lensFile string var cmd = &cobra.Command{ - Use: "add [query] [sdl]", + Use: "add [query] [sdl] [transform]", Short: "Add new view", Long: `Add new database view. Example: add from an argument string: - defradb client view add 'Foo { name, ...}' 'type Foo { ... }' + defradb client view add 'Foo { name, ...}' 'type Foo { ... }' '{"lenses": [...'
Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, + Args: cobra.RangeArgs(2, 4), RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) - - if len(args) != 2 { - return ErrViewAddMissingArgs - } + store := mustGetContextStore(cmd) query := args[0] sdl := args[1] - defs, err := store.AddView(cmd.Context(), query, sdl) + var lensCfgJson string + switch { + case lensFile != "": + data, err := os.ReadFile(lensFile) + if err != nil { + return err + } + lensCfgJson = string(data) + case len(args) == 3 && args[2] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) + if err != nil { + return err + } + lensCfgJson = string(data) + case len(args) == 3: + lensCfgJson = args[2] + } + + var transform immutable.Option[model.Lens] + if lensCfgJson != "" { + decoder := json.NewDecoder(strings.NewReader(lensCfgJson)) + decoder.DisallowUnknownFields() + + var lensCfg model.Lens + if err := decoder.Decode(&lensCfg); err != nil { + return NewErrInvalidLensConfig(err) + } + transform = immutable.Some(lensCfg) + } + + defs, err := store.AddView(cmd.Context(), query, sdl, transform) if err != nil { return err } return writeJSON(cmd, defs) }, } + cmd.Flags().StringVarP(&lensFile, "file", "f", "", "Lens configuration file") return cmd } diff --git a/client/collection.go b/client/collection.go index 3a42871c62..58b53c3af0 100644 --- a/client/collection.go +++ b/client/collection.go @@ -13,17 +13,11 @@ package client import ( "context" + "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/datastore" ) -// CollectionDefinition contains the metadata defining what a Collection is. -type CollectionDefinition struct { - // Description returns the CollectionDescription of this Collection. - Description CollectionDescription `json:"description"` - // Schema returns the SchemaDescription used to define this Collection. - Schema SchemaDescription `json:"schema"` -} - // Collection represents a defradb collection. // // A Collection is mostly analogous to a SQL table, however a collection is specific to its @@ -32,7 +26,7 @@ type CollectionDefinition struct { // Many functions on this object will interact with the underlying datastores. type Collection interface { // Name returns the name of this collection. - Name() string + Name() immutable.Option[string] // ID returns the ID of this Collection. ID() uint32 @@ -175,6 +169,21 @@ type Collection interface { // GetIndexes returns all the indexes that exist on the collection. GetIndexes(ctx context.Context) ([]IndexDescription, error) + + // CreateDocIndex creates an index for the given document. + // WARNING: This method is only for internal use and is not supposed to be called by the client + // as it might compromise the integrity of the database. This method will be removed in the future + CreateDocIndex(context.Context, *Document) error + + // UpdateDocIndex updates the index for the given document. + // WARNING: This method is only for internal use and is not supposed to be called by the client + // as it might compromise the integrity of the database. This method will be removed in the future + UpdateDocIndex(ctx context.Context, oldDoc, newDoc *Document) error + + // DeleteDocIndex deletes the index for the given document. + // WARNING: This method is only for internal use and is not supposed to be called by the client + // as it might compromise the integrity of the database. 
This method will be removed in the future + DeleteDocIndex(context.Context, *Document) error } // DocIDResult wraps the result of an attempt at a DocID retrieval operation. diff --git a/client/ctype.go b/client/ctype.go index 7c194c73bf..c5f792df86 100644 --- a/client/ctype.go +++ b/client/ctype.go @@ -39,7 +39,7 @@ func (t CType) IsSupportedFieldCType() bool { func (t CType) IsCompatibleWith(kind FieldKind) bool { switch t { case PN_COUNTER: - if kind == FieldKind_INT || kind == FieldKind_FLOAT { + if kind == FieldKind_NILLABLE_INT || kind == FieldKind_NILLABLE_FLOAT { return true } return false diff --git a/client/db.go b/client/db.go index 240d2d5dfc..7b0cc8060f 100644 --- a/client/db.go +++ b/client/db.go @@ -14,6 +14,8 @@ import ( "context" blockstore "github.com/ipfs/boxo/blockstore" + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/events" @@ -98,8 +100,10 @@ type Store interface { AddSchema(context.Context, string) ([]CollectionDescription, error) // PatchSchema takes the given JSON patch string and applies it to the set of SchemaDescriptions - // present in the database. If true is provided, the new schema versions will be made default, otherwise - // [SetDefaultSchemaVersion] should be called to set them so. + // present in the database. + // + // If true is provided, the new schema versions will be made active and previous versions deactivated, otherwise + // [SetActiveSchemaVersion] should be called to do so. // // It will also update the GQL types used by the query system. It will error and not apply any of the // requested, valid updates should the net result of the patch result in an invalid state. The @@ -112,16 +116,18 @@ type Store interface { // // Field [FieldKind] values may be provided in either their raw integer form, or as string as per // [FieldKindStringToEnumMapping]. - PatchSchema(context.Context, string, bool) error + // + // A lens configuration may also be provided, it will be added to all collections using the schema. + PatchSchema(context.Context, string, immutable.Option[model.Lens], bool) error - // SetDefaultSchemaVersion sets the default schema version to the ID provided. It will be applied to all - // collections using the schema. + // SetActiveSchemaVersion activates all collection versions with the given schema version, and deactivates all + // those without it (if they share the same schema root). // // This will affect all operations interacting with the schema where a schema version is not explicitly // provided. This includes GQL queries and Collection operations. // // It will return an error if the provided schema version ID does not exist. - SetDefaultSchemaVersion(context.Context, string) error + SetActiveSchemaVersion(context.Context, string) error // AddView creates a new Defra View. // @@ -149,12 +155,20 @@ type Store interface { // // It will return the collection definitions of the types defined in the SDL if successful, otherwise an error // will be returned. This function does not execute the given query. - AddView(ctx context.Context, gqlQuery string, sdl string) ([]CollectionDefinition, error) - - // SetMigration sets the migration for the given source-destination schema version IDs. Is equivalent to - // calling `LensRegistry().SetMigration(ctx, cfg)`. // - // There may only be one migration per schema version id. 
If another migration was registered it will be + // Optionally, a lens transform configuration may also be provided - it will execute after the query has run. + // The transform is not limited to just transforming the input documents, it may also yield new ones, or filter out + // those passed in from the underlying query. + AddView( + ctx context.Context, + gqlQuery string, + sdl string, + transform immutable.Option[model.Lens], + ) ([]CollectionDefinition, error) + + // SetMigration sets the migration for all collections using the given source-destination schema version IDs. + // + // There may only be one migration per collection version. If another migration was registered it will be // overwritten by this migration. // // Neither of the schema version IDs specified in the configuration need to exist at the time of calling. @@ -173,24 +187,20 @@ type Store interface { // GetCollectionByName attempts to retrieve a collection matching the given name. // // If no matching collection is found an error will be returned. + // + // If a transaction was explicitly provided to this [Store] via [DB].[WithTxn], any function calls + // made via the returned [Collection] will respect that transaction. GetCollectionByName(context.Context, CollectionName) (Collection, error) - // GetCollectionsBySchemaRoot attempts to retrieve all collections using the given schema ID. + // GetCollections returns all collections and their descriptions matching the given options + // that currently exist within this [Store]. // - // If no matching collection is found an empty set will be returned. - GetCollectionsBySchemaRoot(context.Context, string) ([]Collection, error) - - // GetCollectionsByVersionID attempts to retrieve all collections using the given schema version ID. + // Inactive collections are not returned by default unless a specific schema version ID + // is provided. // - // If no matching collections are found an empty set will be returned. - GetCollectionsByVersionID(context.Context, string) ([]Collection, error) - - // GetAllCollections returns all the collections and their descriptions that currently exist within - // this [Store]. - GetAllCollections(context.Context) ([]Collection, error) - - // GetSchemasByName returns the all schema versions with the given name. - GetSchemasByName(context.Context, string) ([]SchemaDescription, error) + // If a transaction was explicitly provided to this [Store] via [DB].[WithTxn], any function calls + // made via the returned [Collection]s will respect that transaction. + GetCollections(context.Context, CollectionFetchOptions) ([]Collection, error) // GetSchemaByVersionID returns the schema description for the schema version of the // ID provided. @@ -198,12 +208,9 @@ type Store interface { // Will return an error if it is not found. GetSchemaByVersionID(context.Context, string) (SchemaDescription, error) - // GetSchemasByRoot returns the all schema versions for the given root. - GetSchemasByRoot(context.Context, string) ([]SchemaDescription, error) - - // GetAllSchemas returns all schema versions that currently exist within + // GetSchemas returns all schema versions that currently exist within // this [Store]. - GetAllSchemas(context.Context) ([]SchemaDescription, error) + GetSchemas(context.Context, SchemaFetchOptions) ([]SchemaDescription, error) // GetAllIndexes returns all the indexes that currently exist within this [Store]. 
GetAllIndexes(context.Context) (map[CollectionName][]IndexDescription, error) @@ -237,3 +244,30 @@ type RequestResult struct { // if the request was a GQL subscription. Pub *events.Publisher[events.Update] } + +// CollectionFetchOptions represents a set of options used for fetching collections. +type CollectionFetchOptions struct { + // If provided, only collections with this schema version id will be returned. + SchemaVersionID immutable.Option[string] + + // If provided, only collections with schemas of this root will be returned. + SchemaRoot immutable.Option[string] + + // If provided, only collections with this name will be returned. + Name immutable.Option[string] + + // If IncludeInactive is true, then inactive collections will also be returned. + IncludeInactive immutable.Option[bool] +} + +// SchemaFetchOptions represents a set of options used for fetching schemas. +type SchemaFetchOptions struct { + // If provided, only schemas of this root will be returned. + Root immutable.Option[string] + + // If provided, only schemas with this name will be returned. + Name immutable.Option[string] + + // If provided, only the schema with this id will be returned. + ID immutable.Option[string] +} diff --git a/client/definitions.go b/client/definitions.go new file mode 100644 index 0000000000..e521a69fcf --- /dev/null +++ b/client/definitions.go @@ -0,0 +1,115 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package client + +// CollectionDefinition contains the metadata defining what a Collection is. +// +// The definition types ([CollectionDefinition], [FieldDefinition]) are read-only types returned +// from various functions as a convenient means to access the computed convergence of schema +// and collection descriptions. +type CollectionDefinition struct { + // Description returns the CollectionDescription of this Collection. + Description CollectionDescription `json:"description"` + // Schema returns the SchemaDescription used to define this Collection. + Schema SchemaDescription `json:"schema"` +} + +// GetFieldByName returns the field for the given field name. If such a field is found it +// will return it and true, if it is not found it will return false. +func (def CollectionDefinition) GetFieldByName(fieldName string) (FieldDefinition, bool) { + collectionField, ok := def.Description.GetFieldByName(fieldName) + if ok { + schemaField, ok := def.Schema.GetFieldByName(fieldName) + if ok { + return NewFieldDefinition( + collectionField, + schemaField, + ), true + } + } + return FieldDefinition{}, false +} + +// GetFields returns the combined local and global field elements on this [CollectionDefinition] +// as a single set. +func (def CollectionDefinition) GetFields() []FieldDefinition { + fields := []FieldDefinition{} + for _, localField := range def.Description.Fields { + globalField, ok := def.Schema.GetFieldByName(localField.Name) + if ok { + fields = append( + fields, + NewFieldDefinition(localField, globalField), + ) + } + } + return fields +} + +// FieldDefinition describes the combined local and global set of properties that constitutes +// a field on a collection.
+// FieldDefinition describes the combined local and global set of properties that constitutes
+// a field on a collection.
+//
+// It draws its information from the [CollectionFieldDescription] on the [CollectionDescription],
+// and the [SchemaFieldDescription] on the [SchemaDescription].
+//
+// It is to [CollectionFieldDescription] and [SchemaFieldDescription] what [CollectionDefinition]
+// is to [CollectionDescription] and [SchemaDescription].
+//
+// The definition types ([CollectionDefinition], [FieldDefinition]) are read-only types returned
+// from various functions as a convenient means to access the computed convergence of schema
+// and collection descriptions.
+type FieldDefinition struct {
+	// Name contains the name of this field.
+	Name string
+
+	// ID contains the local, internal ID of this field.
+	ID FieldID
+
+	// The data type that this field holds.
+	//
+	// Must contain a valid value. It is currently immutable.
+	Kind FieldKind
+
+	// Schema contains the schema name of the type this field contains if this field is
+	// a relation field. Otherwise this will be empty.
+	Schema string
+
+	// RelationName contains the name of the relationship that this field represents if this field is
+	// a relation field. Otherwise this will be empty.
+	RelationName string
+
+	// The CRDT Type of this field. If no type has been provided it will default to [LWW_REGISTER].
+	//
+	// It is currently immutable.
+	Typ CType
+
+	// If true, this is the primary half of a relation, otherwise it is false.
+	IsPrimaryRelation bool
+}
+
+// NewFieldDefinition returns a new [FieldDefinition], combining the given local and global elements
+// into a single object.
+func NewFieldDefinition(local CollectionFieldDescription, global SchemaFieldDescription) FieldDefinition {
+	return FieldDefinition{
+		Name:              global.Name,
+		ID:                local.ID,
+		Kind:              global.Kind,
+		Schema:            global.Schema,
+		RelationName:      global.RelationName,
+		Typ:               global.Typ,
+		IsPrimaryRelation: global.IsPrimaryRelation,
+	}
+}
+
+// IsRelation returns true if this field is a relation.
+func (f FieldDefinition) IsRelation() bool {
+	return f.RelationName != ""
+}
diff --git a/client/descriptions.go b/client/descriptions.go
index 7a4ec0ba7e..dd12e9cf00 100644
--- a/client/descriptions.go
+++ b/client/descriptions.go
@@ -11,33 +11,57 @@ package client

import (
+	"encoding/json"
	"fmt"
+	"math"
+
+	"github.com/lens-vm/lens/host-go/config/model"
+	"github.com/sourcenetwork/immutable"

	"github.com/sourcenetwork/defradb/client/request"
)

+// CollectionDescriptions with no known root will take this ID as their temporary RootID.
+//
+// Orphan CollectionDescriptions are typically created when setting migrations from schema versions
+// that do not yet exist. The OrphanRootID will be replaced with the actual RootID once a full chain
+// of schema versions leading back to a schema version used by a collection with a non-orphan RootID
+// has been established.
+const OrphanRootID uint32 = math.MaxUint32
+
// CollectionDescription describes a Collection and all its associated metadata.
type CollectionDescription struct {
	// Name contains the name of the collection.
	//
	// It is conceptually local to the node hosting the DefraDB instance, but currently there
	// is no means to update the local value so that it differs from the (global) schema name.
-	Name string
+	Name immutable.Option[string]

	// ID is the local identifier of this collection.
	//
	// It is immutable.
	ID uint32
+	// RootID is the local root identifier of this collection, linking together a chain of
+	// collection instances on different schema versions.
+	//
+	// Collections sharing the same RootID will be compatible with each other, with the documents
+	// within them shared and yielded as if they were in the same set, using Lens transforms to
+	// migrate between schema versions when provided.
+	RootID uint32
+
	// The ID of the schema version that this collection is at.
	SchemaVersionID string

-	// BaseQuery contains the base query of this view, if this collection is a view.
+	// Sources is the set of sources from which this collection draws data.
	//
-	// The query will be saved, and then may be accessed by other actors on demand. Actor defined
-	// aggregates, filters and other logic (such as LensVM transforms) will execute on top of this
-	// base query before the result is returned to the actor.
-	BaseQuery *request.Select
+	// Currently supported source types are:
+	// - [QuerySource]
+	// - [CollectionSource]
+	Sources []any
+
+	// Fields contains the fields within this Collection.
+	Fields []CollectionFieldDescription

	// Indexes contains the secondary indexes that this Collection has.
	Indexes []IndexDescription
@@ -48,26 +72,26 @@ func (col CollectionDescription) IDString() string {
	return fmt.Sprint(col.ID)
}

-// GetFieldByID searches for a field with the given ID. If such a field is found it
+// GetFieldByName returns the field for the given field name. If such a field is found it
// will return it and true, if it is not found it will return false.
-func (col CollectionDescription) GetFieldByID(id FieldID, schema *SchemaDescription) (FieldDescription, bool) {
-	for _, field := range schema.Fields {
-		if field.ID == id {
+func (col CollectionDescription) GetFieldByName(fieldName string) (CollectionFieldDescription, bool) {
+	for _, field := range col.Fields {
+		if field.Name == fieldName {
			return field, true
		}
	}
-	return FieldDescription{}, false
+	return CollectionFieldDescription{}, false
}

// GetFieldByName returns the field for the given field name. If such a field is found it
// will return it and true, if it is not found it will return false.
-func (col CollectionDescription) GetFieldByName(fieldName string, schema *SchemaDescription) (FieldDescription, bool) {
-	for _, field := range schema.Fields {
+func (s SchemaDescription) GetFieldByName(fieldName string) (SchemaFieldDescription, bool) {
+	for _, field := range s.Fields {
		if field.Name == fieldName {
			return field, true
		}
	}
-	return FieldDescription{}, false
+	return SchemaFieldDescription{}, false
}
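A sketch, not part of the patch, of how the split lookups now return different shapes; `col` and `schema` are assumed to be a matching CollectionDescription/SchemaDescription pair and "author" a hypothetical relation field (imports of "fmt" and the client package assumed):

    // relationInfo shows the local half (name + local ID) versus the global
    // half (kind, CRDT type, relation info) of the same field.
    func relationInfo(col client.CollectionDescription, schema client.SchemaDescription) {
        if localField, ok := col.GetFieldByName("author"); ok {
            fmt.Println("local field ID:", localField.ID) // CollectionFieldDescription
        }
        if schemaField, ok := schema.GetFieldByName("author"); ok {
            fmt.Println("kind:", schemaField.Kind, "relation:", schemaField.RelationName) // SchemaFieldDescription
        }
    }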

// GetFieldByRelation returns the field that supports the relation of the given name.
@@ -76,13 +100,73 @@ func (col CollectionDescription) GetFieldByRelation(
	otherCollectionName string,
	otherFieldName string,
	schema *SchemaDescription,
-) (FieldDescription, bool) {
+) (SchemaFieldDescription, bool) {
	for _, field := range schema.Fields {
-		if field.RelationName == relationName && !(col.Name == otherCollectionName && otherFieldName == field.Name) {
+		if field.RelationName == relationName &&
+			!(col.Name.Value() == otherCollectionName && otherFieldName == field.Name) &&
+			field.Kind != FieldKind_DocID {
			return field, true
		}
	}
-	return FieldDescription{}, false
+	return SchemaFieldDescription{}, false
+}
+
+// QuerySources returns all the Sources of type [QuerySource]
+func (col CollectionDescription) QuerySources() []*QuerySource {
+	return sourcesOfType[*QuerySource](col)
+}
+
+// CollectionSources returns all the Sources of type [CollectionSource]
+func (col CollectionDescription) CollectionSources() []*CollectionSource {
+	return sourcesOfType[*CollectionSource](col)
+}
+
+func sourcesOfType[ResultType any](col CollectionDescription) []ResultType {
+	result := []ResultType{}
+	for _, source := range col.Sources {
+		if typedSource, isOfType := source.(ResultType); isOfType {
+			result = append(result, typedSource)
+		}
+	}
+	return result
+}
+
+// QuerySource represents a collection data source from a query.
+//
+// The query will be executed when data from this source is requested, and the query results
+// yielded to the consumer.
+type QuerySource struct {
+	// Query contains the base query of this data source.
+	Query request.Select
+
+	// Transform is an optional Lens configuration. If specified, data drawn from the [Query] will have the
+	// transform applied before being returned.
+	//
+	// The transform is not limited to just transforming the input documents; it may also yield new ones, or filter out
+	// those passed in from the underlying query.
+	Transform immutable.Option[model.Lens]
+}
+
+// CollectionSource represents a collection data source from another collection instance.
+//
+// Data against all collection instances in a CollectionSource chain will be returned as if
+// from the same dataset when queried. Lens transforms may be applied between instances.
+//
+// Typically these are used to link together multiple schema versions into the same dataset.
+type CollectionSource struct {
+	// SourceCollectionID is the local identifier of the source [CollectionDescription] from which to
+	// share data.
+	//
+	// This is a bi-directional relationship, and documents in the host collection instance will also
+	// be available to the source collection instance.
+	SourceCollectionID uint32
+
+	// Transform is an optional Lens configuration. If specified, data drawn from the source will have the
+	// transform applied before being returned by any operation on the host collection instance.
+	//
+	// If the transform supports an inverse operation, that inverse will be applied when the source collection
+	// draws data from this host.
+	Transform immutable.Option[model.Lens]
}

// SchemaDescription describes a Schema and its associated metadata.
@@ -109,17 +193,7 @@ type SchemaDescription struct {
	// Fields contains the fields within this Schema.
	//
	// Currently new fields may be added after initial declaration, but they cannot be removed.
-	Fields []FieldDescription
-}
-
-// GetField returns the field of the given name.
-func (sd SchemaDescription) GetField(name string) (FieldDescription, bool) { - for _, field := range sd.Fields { - if field.Name == name { - return field, true - } - } - return FieldDescription{}, false + Fields []SchemaFieldDescription } // FieldKind describes the type of a field. @@ -129,57 +203,84 @@ func (f FieldKind) String() string { switch f { case FieldKind_DocID: return "ID" - case FieldKind_BOOL: + case FieldKind_NILLABLE_BOOL: return "Boolean" case FieldKind_NILLABLE_BOOL_ARRAY: return "[Boolean]" case FieldKind_BOOL_ARRAY: return "[Boolean!]" - case FieldKind_INT: + case FieldKind_NILLABLE_INT: return "Int" case FieldKind_NILLABLE_INT_ARRAY: return "[Int]" case FieldKind_INT_ARRAY: return "[Int!]" - case FieldKind_DATETIME: + case FieldKind_NILLABLE_DATETIME: return "DateTime" - case FieldKind_FLOAT: + case FieldKind_NILLABLE_FLOAT: return "Float" case FieldKind_NILLABLE_FLOAT_ARRAY: return "[Float]" case FieldKind_FLOAT_ARRAY: return "[Float!]" - case FieldKind_STRING: + case FieldKind_NILLABLE_STRING: return "String" case FieldKind_NILLABLE_STRING_ARRAY: return "[String]" case FieldKind_STRING_ARRAY: return "[String!]" - case FieldKind_BLOB: + case FieldKind_NILLABLE_BLOB: return "Blob" + case FieldKind_NILLABLE_JSON: + return "JSON" default: return fmt.Sprint(uint8(f)) } } +// IsObject returns true if this FieldKind is an object type. +func (f FieldKind) IsObject() bool { + return f == FieldKind_FOREIGN_OBJECT || + f == FieldKind_FOREIGN_OBJECT_ARRAY +} + +// IsObjectArray returns true if this FieldKind is an object array type. +func (f FieldKind) IsObjectArray() bool { + return f == FieldKind_FOREIGN_OBJECT_ARRAY +} + +// IsArray returns true if this FieldKind is an array type which includes inline arrays as well +// as relation arrays. +func (f FieldKind) IsArray() bool { + return f == FieldKind_BOOL_ARRAY || + f == FieldKind_INT_ARRAY || + f == FieldKind_FLOAT_ARRAY || + f == FieldKind_STRING_ARRAY || + f == FieldKind_FOREIGN_OBJECT_ARRAY || + f == FieldKind_NILLABLE_BOOL_ARRAY || + f == FieldKind_NILLABLE_INT_ARRAY || + f == FieldKind_NILLABLE_FLOAT_ARRAY || + f == FieldKind_NILLABLE_STRING_ARRAY +} + // Note: These values are serialized and persisted in the database, avoid modifying existing values. 
const ( - FieldKind_None FieldKind = 0 - FieldKind_DocID FieldKind = 1 - FieldKind_BOOL FieldKind = 2 - FieldKind_BOOL_ARRAY FieldKind = 3 - FieldKind_INT FieldKind = 4 - FieldKind_INT_ARRAY FieldKind = 5 - FieldKind_FLOAT FieldKind = 6 - FieldKind_FLOAT_ARRAY FieldKind = 7 - _ FieldKind = 8 // safe to repurpose (was never used) - _ FieldKind = 9 // safe to repurpose (previously old field) - FieldKind_DATETIME FieldKind = 10 - FieldKind_STRING FieldKind = 11 - FieldKind_STRING_ARRAY FieldKind = 12 - FieldKind_BLOB FieldKind = 13 - _ FieldKind = 14 // safe to repurpose (was never used) - _ FieldKind = 15 // safe to repurpose (was never used) + FieldKind_None FieldKind = 0 + FieldKind_DocID FieldKind = 1 + FieldKind_NILLABLE_BOOL FieldKind = 2 + FieldKind_BOOL_ARRAY FieldKind = 3 + FieldKind_NILLABLE_INT FieldKind = 4 + FieldKind_INT_ARRAY FieldKind = 5 + FieldKind_NILLABLE_FLOAT FieldKind = 6 + FieldKind_FLOAT_ARRAY FieldKind = 7 + _ FieldKind = 8 // safe to repurpose (was never used) + _ FieldKind = 9 // safe to repurpose (previously old field) + FieldKind_NILLABLE_DATETIME FieldKind = 10 + FieldKind_NILLABLE_STRING FieldKind = 11 + FieldKind_STRING_ARRAY FieldKind = 12 + FieldKind_NILLABLE_BLOB FieldKind = 13 + FieldKind_NILLABLE_JSON FieldKind = 14 + _ FieldKind = 15 // safe to repurpose (was never used) // Embedded object, but accessed via foreign keys FieldKind_FOREIGN_OBJECT FieldKind = 16 @@ -202,37 +303,26 @@ const ( // equality is not guaranteed. var FieldKindStringToEnumMapping = map[string]FieldKind{ "ID": FieldKind_DocID, - "Boolean": FieldKind_BOOL, + "Boolean": FieldKind_NILLABLE_BOOL, "[Boolean]": FieldKind_NILLABLE_BOOL_ARRAY, "[Boolean!]": FieldKind_BOOL_ARRAY, - "Int": FieldKind_INT, + "Int": FieldKind_NILLABLE_INT, "[Int]": FieldKind_NILLABLE_INT_ARRAY, "[Int!]": FieldKind_INT_ARRAY, - "DateTime": FieldKind_DATETIME, - "Float": FieldKind_FLOAT, + "DateTime": FieldKind_NILLABLE_DATETIME, + "Float": FieldKind_NILLABLE_FLOAT, "[Float]": FieldKind_NILLABLE_FLOAT_ARRAY, "[Float!]": FieldKind_FLOAT_ARRAY, - "String": FieldKind_STRING, + "String": FieldKind_NILLABLE_STRING, "[String]": FieldKind_NILLABLE_STRING_ARRAY, "[String!]": FieldKind_STRING_ARRAY, - "Blob": FieldKind_BLOB, + "Blob": FieldKind_NILLABLE_BLOB, + "JSON": FieldKind_NILLABLE_JSON, } // RelationType describes the type of relation between two types. type RelationType uint8 -// Note: These values are serialized and persisted in the database, avoid modifying existing values -const ( - Relation_Type_ONE RelationType = 1 // 0b0000 0001 - Relation_Type_MANY RelationType = 2 // 0b0000 0010 - Relation_Type_ONEONE RelationType = 4 // 0b0000 0100 - Relation_Type_ONEMANY RelationType = 8 // 0b0000 1000 - Relation_Type_MANYMANY RelationType = 16 // 0b0001 0000 - _ RelationType = 32 // 0b0010 0000 - Relation_Type_INTERNAL_ID RelationType = 64 // 0b0100 0000 - Relation_Type_Primary RelationType = 128 // 0b1000 0000 Primary reference entity on relation -) - // FieldID is a unique identifier for a field in a schema. type FieldID uint32 @@ -240,21 +330,13 @@ func (f FieldID) String() string { return fmt.Sprint(uint32(f)) } -// FieldDescription describes a field on a Schema and its associated metadata. -type FieldDescription struct { +// SchemaFieldDescription describes a field on a Schema and its associated metadata. +type SchemaFieldDescription struct { // Name contains the name of this field. // // It is currently immutable. Name string - // ID contains the internal ID of this field. 
-	//
-	// Whilst this ID will typically match the field's index within the Schema's Fields
-	// slice, there is no guarantee that they will be the same.
-	//
-	// It is immutable.
-	ID FieldID
-
	// The data type that this field holds.
	//
	// Must contain a valid value. It is currently immutable.
@@ -273,52 +355,93 @@ type FieldDescription struct {
	// It is currently immutable.
	Typ CType

-	// RelationType contains the relationship type if this field is a relation field. Otherwise this
-	// will be empty.
-	RelationType RelationType
+	// If true, this is the primary half of a relation, otherwise it is false.
+	IsPrimaryRelation bool
}

-// IsInternal returns true if this field is internally generated.
-func (f FieldDescription) IsInternal() bool {
-	return (f.Name == request.DocIDFieldName) || f.RelationType&Relation_Type_INTERNAL_ID != 0
-}
+// CollectionFieldDescription describes the local components of a field on a collection.
+type CollectionFieldDescription struct {
+	// Name contains the name of the [SchemaFieldDescription] that this field uses.
+	Name string

-// IsObject returns true if this field is an object type.
-func (f FieldDescription) IsObject() bool {
-	return (f.Kind == FieldKind_FOREIGN_OBJECT) ||
-		(f.Kind == FieldKind_FOREIGN_OBJECT_ARRAY)
+	// ID contains the local, internal ID of this field.
+	ID FieldID
}

-// IsObjectArray returns true if this field is an object array type.
-func (f FieldDescription) IsObjectArray() bool {
-	return (f.Kind == FieldKind_FOREIGN_OBJECT_ARRAY)
+// IsRelation returns true if this field is a relation.
+func (f SchemaFieldDescription) IsRelation() bool {
+	return f.RelationName != ""
}

-// IsPrimaryRelation returns true if this field is a relation, and is the primary side.
-func (f FieldDescription) IsPrimaryRelation() bool {
-	return f.RelationType > 0 && f.RelationType&Relation_Type_Primary != 0
+// IsSet returns true if the target relation type is set.
+func (m RelationType) IsSet(target RelationType) bool {
+	return m&target > 0
}

-// IsRelation returns true if this field is a relation.
-func (f FieldDescription) IsRelation() bool {
-	return f.RelationType > 0
-}
+// collectionDescription is a private type used to facilitate the unmarshalling
+// of json to a [CollectionDescription].
+type collectionDescription struct {
+	// These properties are unmarshalled using the default json unmarshaller
+	Name immutable.Option[string]
+	ID uint32
+	RootID uint32
+	SchemaVersionID string
+	Indexes []IndexDescription
+	Fields []CollectionFieldDescription

-// IsArray returns true if this field is an array type which includes inline arrays as well
-// as relation arrays.
-func (f FieldDescription) IsArray() bool {
-	return f.Kind == FieldKind_BOOL_ARRAY ||
-		f.Kind == FieldKind_INT_ARRAY ||
-		f.Kind == FieldKind_FLOAT_ARRAY ||
-		f.Kind == FieldKind_STRING_ARRAY ||
-		f.Kind == FieldKind_FOREIGN_OBJECT_ARRAY ||
-		f.Kind == FieldKind_NILLABLE_BOOL_ARRAY ||
-		f.Kind == FieldKind_NILLABLE_INT_ARRAY ||
-		f.Kind == FieldKind_NILLABLE_FLOAT_ARRAY ||
-		f.Kind == FieldKind_NILLABLE_STRING_ARRAY
+	// Properties below this line are unmarshalled using custom logic in [UnmarshalJSON]
+	Sources []map[string]json.RawMessage
}
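A sketch, not part of the patch, of the key-based detection performed by the custom unmarshaller that follows: a "Query" key yields a *QuerySource, a "SourceCollectionID" key yields a *CollectionSource. The field values here are illustrative only (imports of "encoding/json", "fmt", and the client package assumed):

    // decodeDescription round-trips a hypothetical description through JSON.
    func decodeDescription() error {
        raw := []byte(`{"ID": 2, "RootID": 1, "Sources": [{"SourceCollectionID": 1}]}`)

        var desc client.CollectionDescription
        if err := json.Unmarshal(raw, &desc); err != nil {
            return err
        }
        _, isCollectionSource := desc.Sources[0].(*client.CollectionSource)
        fmt.Println(isCollectionSource) // true
        return nil
    }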
-// IsSet returns true if the target relation type is set.
-func (m RelationType) IsSet(target RelationType) bool {
-	return m&target > 0
+func (c *CollectionDescription) UnmarshalJSON(bytes []byte) error {
+	var descMap collectionDescription
+	err := json.Unmarshal(bytes, &descMap)
+	if err != nil {
+		return err
+	}
+
+	c.Name = descMap.Name
+	c.ID = descMap.ID
+	c.RootID = descMap.RootID
+	c.SchemaVersionID = descMap.SchemaVersionID
+	c.Indexes = descMap.Indexes
+	c.Fields = descMap.Fields
+	c.Sources = make([]any, len(descMap.Sources))
+
+	for i, source := range descMap.Sources {
+		sourceJson, err := json.Marshal(source)
+		if err != nil {
+			return err
+		}
+
+		var sourceValue any
+		// We detect which concrete type each `Source` object is by detecting
+		// non-nillable fields: if the key is present it must be of that type.
+		// They must be non-nillable as nil values may have their keys omitted from
+		// the json. This also relies on the fields being unique. We may wish to change
+		// this later to custom-serialize with a `_type` property.
+		if _, ok := source["Query"]; ok {
+			// This must be a QuerySource, as only the `QuerySource` type has a `Query` field
+			var querySource QuerySource
+			err := json.Unmarshal(sourceJson, &querySource)
+			if err != nil {
+				return err
+			}
+			sourceValue = &querySource
+		} else if _, ok := source["SourceCollectionID"]; ok {
+			// This must be a CollectionSource, as only the `CollectionSource` type has a `SourceCollectionID` field
+			var collectionSource CollectionSource
+			err := json.Unmarshal(sourceJson, &collectionSource)
+			if err != nil {
+				return err
+			}
+			sourceValue = &collectionSource
+		} else {
+			return ErrFailedToUnmarshalCollection
+		}
+
+		c.Sources[i] = sourceValue
+	}
+
+	return nil
}
diff --git a/client/document.go b/client/document.go
index 93e06df27e..6c837260ba 100644
--- a/client/document.go
+++ b/client/document.go
@@ -12,6 +12,7 @@ package client

import (
	"encoding/json"
+	"errors"
	"regexp"
	"strings"
	"sync"
@@ -171,13 +172,34 @@ func NewDocsFromJSON(obj []byte, sd SchemaDescription) ([]*Document, error) {
	return docs, nil
}

+// IsNillableKind returns true if the given FieldKind is nillable.
+func IsNillableKind(kind FieldKind) bool {
+	switch kind {
+	case FieldKind_NILLABLE_STRING, FieldKind_NILLABLE_BLOB, FieldKind_NILLABLE_JSON,
+		FieldKind_NILLABLE_BOOL, FieldKind_NILLABLE_FLOAT, FieldKind_NILLABLE_DATETIME,
+		FieldKind_NILLABLE_INT:
+		return true
+	default:
+		return false
+	}
+}
+
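A quick illustration, not part of the patch, of the new nillable-kind check that validateFieldSchema below relies on (import of "fmt" and the client package assumed):

    fmt.Println(client.IsNillableKind(client.FieldKind_NILLABLE_INT)) // true: an explicit null is accepted
    fmt.Println(client.IsNillableKind(client.FieldKind_INT_ARRAY))    // false: a non-nillable array kind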
// validateFieldSchema takes a given value as an interface,
// and ensures it matches the supplied field description.
// It will do any minor parsing, like dates, and return
// the typed value again as an interface.
-func validateFieldSchema(val any, field FieldDescription) (any, error) {
+func validateFieldSchema(val any, field SchemaFieldDescription) (any, error) {
+	if IsNillableKind(field.Kind) {
+		if val == nil {
+			return nil, nil
+		}
+		if v, ok := val.(*fastjson.Value); ok && v.Type() == fastjson.TypeNull {
+			return nil, nil
+		}
+	}
+
	switch field.Kind {
-	case FieldKind_DocID, FieldKind_STRING, FieldKind_BLOB:
+	case FieldKind_DocID, FieldKind_NILLABLE_STRING, FieldKind_NILLABLE_BLOB:
		return getString(val)

	case FieldKind_STRING_ARRAY:
@@ -186,7 +208,7 @@
	case FieldKind_NILLABLE_STRING_ARRAY:
		return getNillableArray(val, getString)

-	case FieldKind_BOOL:
+	case FieldKind_NILLABLE_BOOL:
		return getBool(val)

	case FieldKind_BOOL_ARRAY:
@@ -195,7 +217,7 @@
	case FieldKind_NILLABLE_BOOL_ARRAY:
		return getNillableArray(val, getBool)

-	case FieldKind_FLOAT:
+	case FieldKind_NILLABLE_FLOAT:
		return getFloat64(val)

	case FieldKind_FLOAT_ARRAY:
@@ -204,10 +226,10 @@
	case FieldKind_NILLABLE_FLOAT_ARRAY:
		return getNillableArray(val, getFloat64)

-	case FieldKind_DATETIME:
+	case FieldKind_NILLABLE_DATETIME:
		return getDateTime(val)

-	case FieldKind_INT:
+	case FieldKind_NILLABLE_INT:
		return getInt64(val)

	case FieldKind_INT_ARRAY:
@@ -221,6 +243,9 @@

	case FieldKind_FOREIGN_OBJECT_ARRAY:
		return nil, NewErrFieldOrAliasToFieldNotExist(field.Name)
+
+	case FieldKind_NILLABLE_JSON:
+		return getJSON(val)
	}

	return nil, NewErrUnhandledType("FieldKind", field.Kind)
@@ -296,6 +321,18 @@ func getDateTime(v any) (time.Time, error) {
	return time.Parse(time.RFC3339, s)
}

+func getJSON(v any) (string, error) {
+	s, err := getString(v)
+	if err != nil {
+		return "", err
+	}
+	val, err := fastjson.Parse(s)
+	if err != nil {
+		return "", NewErrInvalidJSONPaylaod(s)
+	}
+	return val.String(), nil
+}
+
func getArray[T any](
	v any,
	typeGetter func(any) (T, error),
@@ -448,6 +485,16 @@ func (doc *Document) GetValue(field string) (*FieldValue, error) {
	}
}

+// TryGetValue returns the value for a given field, if it exists.
+// If the field does not exist, nil is returned without an error.
+func (doc *Document) TryGetValue(field string) (*FieldValue, error) {
+	val, err := doc.GetValue(field)
+	if err != nil && errors.Is(err, ErrFieldNotExist) {
+		return nil, nil
+	}
+	return val, err
+}
+
// GetValueWithField gets the Value type from a given Field type
func (doc *Document) GetValueWithField(f Field) (*FieldValue, error) {
	doc.mu.RLock()
@@ -491,15 +538,15 @@ func (doc *Document) setWithFastJSONObject(obj *fastjson.Object) error {

// Set the value of a field.
func (doc *Document) Set(field string, value any) error { - fd, exists := doc.schemaDescription.GetField(field) + fd, exists := doc.schemaDescription.GetFieldByName(field) if !exists { return NewErrFieldNotExist(field) } - if fd.IsRelation() && !fd.IsObjectArray() { + if fd.IsRelation() && !fd.Kind.IsObjectArray() { if !strings.HasSuffix(field, request.RelatedObjectID) { field = field + request.RelatedObjectID } - fd, exists = doc.schemaDescription.GetField(field) + fd, exists = doc.schemaDescription.GetFieldByName(field) if !exists { return NewErrFieldNotExist(field) } @@ -511,20 +558,6 @@ func (doc *Document) Set(field string, value any) error { return doc.setCBOR(fd.Typ, field, val) } -// Delete removes a field, and marks it to be deleted on the following db.Update() call. -func (doc *Document) Delete(fields ...string) error { - doc.mu.Lock() - defer doc.mu.Unlock() - for _, f := range fields { - field, exists := doc.fields[f] - if !exists { - return NewErrFieldNotExist(f) - } - doc.values[field].Delete() - } - return nil -} - func (doc *Document) set(t CType, field string, value *FieldValue) error { doc.mu.Lock() defer doc.mu.Unlock() @@ -622,9 +655,6 @@ func (doc *Document) ToJSONPatch() ([]byte, error) { if !value.IsDirty() { delete(docMap, field.Name()) } - if value.IsDelete() { - docMap[field.Name()] = nil - } } return json.Marshal(docMap) @@ -635,9 +665,6 @@ func (doc *Document) Clean() { for _, v := range doc.Fields() { val, _ := doc.GetValueWithField(v) if val.IsDirty() { - if val.IsDelete() { - doc.Set(v.Name(), nil) //nolint:errcheck - } val.Clean() } } diff --git a/client/document_test.go b/client/document_test.go index dc5867b562..593876705f 100644 --- a/client/document_test.go +++ b/client/document_test.go @@ -30,16 +30,21 @@ var ( schemaDescriptions = []SchemaDescription{ { Name: "User", - Fields: []FieldDescription{ + Fields: []SchemaFieldDescription{ { Name: "Name", Typ: LWW_REGISTER, - Kind: FieldKind_STRING, + Kind: FieldKind_NILLABLE_STRING, }, { Name: "Age", Typ: LWW_REGISTER, - Kind: FieldKind_INT, + Kind: FieldKind_NILLABLE_INT, + }, + { + Name: "Custom", + Typ: LWW_REGISTER, + Kind: FieldKind_NILLABLE_JSON, }, }, }, @@ -135,3 +140,53 @@ func TestNewDocsFromJSON_WithObjectInsteadOfArray_Error(t *testing.T) { _, err := NewDocsFromJSON(testJSONObj, schemaDescriptions[0]) require.ErrorContains(t, err, "value doesn't contain array; it contains object") } + +func TestNewFromJSON_WithValidJSONFieldValue_NoError(t *testing.T) { + objWithJSONField := []byte(`{ + "Name": "John", + "Age": 26, + "Custom": "{\"tree\":\"maple\", \"age\": 260}" + }`) + doc, err := NewDocFromJSON(objWithJSONField, schemaDescriptions[0]) + if err != nil { + t.Error("Error creating new doc from JSON:", err) + return + } + + // check field/value + // fields + assert.Equal(t, doc.fields["Name"].Name(), "Name") + assert.Equal(t, doc.fields["Name"].Type(), LWW_REGISTER) + assert.Equal(t, doc.fields["Age"].Name(), "Age") + assert.Equal(t, doc.fields["Age"].Type(), LWW_REGISTER) + assert.Equal(t, doc.fields["Custom"].Name(), "Custom") + assert.Equal(t, doc.fields["Custom"].Type(), LWW_REGISTER) + + //values + assert.Equal(t, doc.values[doc.fields["Name"]].Value(), "John") + assert.Equal(t, doc.values[doc.fields["Name"]].IsDocument(), false) + assert.Equal(t, doc.values[doc.fields["Age"]].Value(), int64(26)) + assert.Equal(t, doc.values[doc.fields["Age"]].IsDocument(), false) + assert.Equal(t, doc.values[doc.fields["Custom"]].Value(), "{\"tree\":\"maple\",\"age\":260}") + assert.Equal(t, 
		doc.values[doc.fields["Custom"]].IsDocument(), false)
+}
+
+func TestNewFromJSON_WithInvalidJSONFieldValue_Error(t *testing.T) {
+	objWithJSONField := []byte(`{
+		"Name": "John",
+		"Age": 26,
+		"Custom": "{\"tree\":\"maple, \"age\": 260}"
+	}`)
+	_, err := NewDocFromJSON(objWithJSONField, schemaDescriptions[0])
+	require.ErrorContains(t, err, "invalid JSON payload. Payload: {\"tree\":\"maple, \"age\": 260}")
+}
+
+func TestNewFromJSON_WithInvalidJSONFieldValueSimpleString_Error(t *testing.T) {
+	objWithJSONField := []byte(`{
+		"Name": "John",
+		"Age": 26,
+		"Custom": "blah"
+	}`)
+	_, err := NewDocFromJSON(objWithJSONField, schemaDescriptions[0])
+	require.ErrorContains(t, err, "invalid JSON payload. Payload: blah")
+}
diff --git a/client/errors.go b/client/errors.go
index 78daf3531b..c86ac274c7 100644
--- a/client/errors.go
+++ b/client/errors.go
@@ -17,17 +17,20 @@ import (
)

const (
-	errFieldNotExist               string = "The given field does not exist"
-	errUnexpectedType              string = "unexpected type"
-	errParsingFailed               string = "failed to parse argument"
-	errUninitializeProperty        string = "invalid state, required property is uninitialized"
-	errMaxTxnRetries               string = "reached maximum transaction reties"
-	errRelationOneSided            string = "relation must be defined on both schemas"
-	errCollectionNotFound          string = "collection not found"
-	errFieldOrAliasToFieldNotExist string = "The given field or alias to field does not exist"
-	errUnknownCRDT                 string = "unknown crdt"
-	errCRDTKindMismatch            string = "CRDT type %s can't be assigned to field kind %s"
-	errInvalidCRDTType             string = "CRDT type not supported"
+	errFieldNotExist                       string = "The given field does not exist"
+	errUnexpectedType                      string = "unexpected type"
+	errParsingFailed                       string = "failed to parse argument"
+	errUninitializeProperty                string = "invalid state, required property is uninitialized"
+	errMaxTxnRetries                       string = "reached maximum transaction retries"
+	errRelationOneSided                    string = "relation must be defined on both schemas"
+	errCollectionNotFound                  string = "collection not found"
+	errFieldOrAliasToFieldNotExist         string = "The given field or alias to field does not exist"
+	errUnknownCRDT                         string = "unknown crdt"
+	errCRDTKindMismatch                    string = "CRDT type %s can't be assigned to field kind %s"
+	errInvalidCRDTType                     string = "CRDT type not supported"
+	errFailedToUnmarshalCollection         string = "failed to unmarshal collection json"
+	errOperationNotPermittedOnNamelessCols string = "operation not permitted on nameless collection"
+	errInvalidJSONPayload                  string = "invalid JSON payload"
)

// Errors returnable from this package.
@@ -35,16 +38,19 @@ const (
// This list is incomplete and undefined errors may also be returned.
// Errors returned from this package may be tested against these errors with errors.Is.
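A sketch, not part of the patch, of matching the new sentinel with errors.Is; `payload` and `schema` are assumed to exist, and "errors" here is the defradb errors package the client package itself uses:

    _, err := client.NewDocFromJSON(payload, schema)
    if errors.Is(err, client.ErrInvalidJSONPayload) {
        // the value supplied for a JSON field was not valid JSON
    }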
var ( - ErrFieldNotExist = errors.New(errFieldNotExist) - ErrUnexpectedType = errors.New(errUnexpectedType) - ErrFieldNotObject = errors.New("trying to access field on a non object type") - ErrValueTypeMismatch = errors.New("value does not match indicated type") - ErrDocumentNotFound = errors.New("no document for the given ID exists") - ErrInvalidUpdateTarget = errors.New("the target document to update is of invalid type") - ErrInvalidUpdater = errors.New("the updater of a document is of invalid type") - ErrInvalidDeleteTarget = errors.New("the target document to delete is of invalid type") - ErrMalformedDocID = errors.New("malformed document ID, missing either version or cid") - ErrInvalidDocIDVersion = errors.New("invalid document ID version") + ErrFieldNotExist = errors.New(errFieldNotExist) + ErrUnexpectedType = errors.New(errUnexpectedType) + ErrFailedToUnmarshalCollection = errors.New(errFailedToUnmarshalCollection) + ErrOperationNotPermittedOnNamelessCols = errors.New(errOperationNotPermittedOnNamelessCols) + ErrFieldNotObject = errors.New("trying to access field on a non object type") + ErrValueTypeMismatch = errors.New("value does not match indicated type") + ErrDocumentNotFound = errors.New("no document for the given ID exists") + ErrInvalidUpdateTarget = errors.New("the target document to update is of invalid type") + ErrInvalidUpdater = errors.New("the updater of a document is of invalid type") + ErrInvalidDeleteTarget = errors.New("the target document to delete is of invalid type") + ErrMalformedDocID = errors.New("malformed document ID, missing either version or cid") + ErrInvalidDocIDVersion = errors.New("invalid document ID version") + ErrInvalidJSONPayload = errors.New(errInvalidJSONPayload) ) // NewErrFieldNotExist returns an error indicating that the given field does not exist. @@ -145,3 +151,7 @@ func NewErrInvalidCRDTType(name, crdtType string) error { func NewErrCRDTKindMismatch(cType, kind string) error { return errors.New(fmt.Sprintf(errCRDTKindMismatch, cType, kind)) } + +func NewErrInvalidJSONPaylaod(payload string) error { + return errors.New(errInvalidJSONPayload, errors.NewKV("Payload", payload)) +} diff --git a/client/field.go b/client/field.go index a2c80c7ff8..40f130e428 100644 --- a/client/field.go +++ b/client/field.go @@ -14,23 +14,18 @@ package client type Field interface { Name() string Type() CType //TODO Abstract into a Field Type interface - SchemaType() string } type simpleField struct { - name string - crdtType CType - schemaType string + name string + crdtType CType } -func (doc *Document) newField(t CType, name string, schemaType ...string) Field { +func (doc *Document) newField(t CType, name string) Field { f := simpleField{ name: name, crdtType: t, } - if len(schemaType) > 0 { - f.schemaType = schemaType[0] - } return f } @@ -43,8 +38,3 @@ func (field simpleField) Name() string { func (field simpleField) Type() CType { return field.crdtType } - -// SchemaType returns the schema type of the field. -func (field simpleField) SchemaType() string { - return field.schemaType -} diff --git a/client/index.go b/client/index.go index 5e2d397394..6f87626c98 100644 --- a/client/index.go +++ b/client/index.go @@ -10,22 +10,18 @@ package client -// IndexDirection is the direction of an index. 
-type IndexDirection string - -const ( - // Ascending is the value to use for an ascending fields - Ascending IndexDirection = "ASC" - // Descending is the value to use for an descending fields - Descending IndexDirection = "DESC" +import ( + "context" + + "github.com/sourcenetwork/defradb/datastore" ) // IndexFieldDescription describes how a field is being indexed. type IndexedFieldDescription struct { // Name contains the name of the field. Name string - // Direction contains the direction of the index. - Direction IndexDirection + // Descending indicates whether the field is indexed in descending order. + Descending bool } // IndexDescription describes an index. @@ -40,21 +36,48 @@ type IndexDescription struct { Unique bool } +// CollectionIndex is an interface for indexing documents in a collection. +type CollectionIndex interface { + // Save indexes a document by storing it + Save(context.Context, datastore.Txn, *Document) error + // Update updates an existing document in the index + Update(context.Context, datastore.Txn, *Document, *Document) error + // Delete deletes an existing document from the index + Delete(context.Context, datastore.Txn, *Document) error + // Name returns the name of the index + Name() string + // Description returns the description of the index + Description() IndexDescription +} + // CollectIndexedFields returns all fields that are indexed by all collection indexes. -func (d CollectionDescription) CollectIndexedFields(schema *SchemaDescription) []FieldDescription { +func (d CollectionDefinition) CollectIndexedFields() []FieldDefinition { fieldsMap := make(map[string]bool) - fields := make([]FieldDescription, 0, len(d.Indexes)) - for _, index := range d.Indexes { + fields := make([]FieldDefinition, 0, len(d.Description.Indexes)) + for _, index := range d.Description.Indexes { for _, field := range index.Fields { - for i := range schema.Fields { - colField := schema.Fields[i] - if field.Name == colField.Name && !fieldsMap[field.Name] { - fieldsMap[field.Name] = true - fields = append(fields, colField) - break - } + if fieldsMap[field.Name] { + // If the FieldDescription has already been added to the result do not add it a second time + // this can happen if a field is referenced by multiple indexes + continue + } + colField, ok := d.GetFieldByName(field.Name) + if ok { + fields = append(fields, colField) } } } return fields } + +// GetIndexesOnField returns all indexes that are indexing the given field. +// If the field is not the first field of a composite index, the index is not returned. +func (d CollectionDescription) GetIndexesOnField(fieldName string) []IndexDescription { + result := []IndexDescription{} + for _, index := range d.Indexes { + if index.Fields[0].Name == fieldName { + result = append(result, index) + } + } + return result +} diff --git a/client/index_test.go b/client/index_test.go new file mode 100644 index 0000000000..d10450ece1 --- /dev/null +++ b/client/index_test.go @@ -0,0 +1,129 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
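A sketch, not part of the patch, mirroring the table-driven test below: GetIndexesOnField returns only indexes whose first field matches the given name (the client package import is assumed):

    desc := client.CollectionDescription{
        Indexes: []client.IndexDescription{
            {Name: "idx_name", Fields: []client.IndexedFieldDescription{{Name: "name"}}},
            {Name: "idx_age_name", Fields: []client.IndexedFieldDescription{{Name: "age"}, {Name: "name"}}},
        },
    }
    // Only "idx_name" is returned: in idx_age_name, "name" is not the first field.
    matches := desc.GetIndexesOnField("name")
    _ = matches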
+ +package client + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCollectIndexesOnField(t *testing.T) { + tests := []struct { + name string + desc CollectionDescription + field string + expected []IndexDescription + }{ + { + name: "no indexes", + desc: CollectionDescription{ + Indexes: []IndexDescription{}, + }, + field: "test", + expected: []IndexDescription{}, + }, + { + name: "single index on field", + desc: CollectionDescription{ + Indexes: []IndexDescription{ + { + Name: "index1", + Fields: []IndexedFieldDescription{ + {Name: "test"}, + }, + }, + }, + }, + field: "test", + expected: []IndexDescription{ + { + Name: "index1", + Fields: []IndexedFieldDescription{ + {Name: "test"}, + }, + }, + }, + }, + { + name: "multiple indexes on field", + desc: CollectionDescription{ + Indexes: []IndexDescription{ + { + Name: "index1", + Fields: []IndexedFieldDescription{ + {Name: "test"}, + }, + }, + { + Name: "index2", + Fields: []IndexedFieldDescription{ + {Name: "test", Descending: true}, + }, + }, + }, + }, + field: "test", + expected: []IndexDescription{ + { + Name: "index1", + Fields: []IndexedFieldDescription{ + {Name: "test"}, + }, + }, + { + Name: "index2", + Fields: []IndexedFieldDescription{ + {Name: "test", Descending: true}, + }, + }, + }, + }, + { + name: "no indexes on field", + desc: CollectionDescription{ + Indexes: []IndexDescription{ + { + Name: "index1", + Fields: []IndexedFieldDescription{ + {Name: "other"}, + }, + }, + }, + }, + field: "test", + expected: []IndexDescription{}, + }, + { + name: "second field in composite index", + desc: CollectionDescription{ + Indexes: []IndexDescription{ + { + Name: "index1", + Fields: []IndexedFieldDescription{ + {Name: "other"}, + {Name: "test"}, + }, + }, + }, + }, + field: "test", + expected: []IndexDescription{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := tt.desc.GetIndexesOnField(tt.field) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/client/lens.go b/client/lens.go index 35ef9f1ee3..1a6b423991 100644 --- a/client/lens.go +++ b/client/lens.go @@ -49,49 +49,35 @@ type LensRegistry interface { // after this has been created, the results of those commits will be visible within this scope. WithTxn(datastore.Txn) LensRegistry - // SetMigration sets the migration for the given source-destination schema version IDs. Is equivalent to - // calling `Store.SetMigration(ctx, cfg)`. + // SetMigration caches the migration for the given collection ID. It does not persist the migration in long + // term storage, for that one should call [Store.SetMigration(ctx, cfg)]. // - // There may only be one migration per schema version id. If another migration was registered it will be + // There may only be one migration per collection. If another migration was registered it will be // overwritten by this migration. // - // Neither of the schema version IDs specified in the configuration need to exist at the time of calling. - // This is to allow the migration of documents of schema versions unknown to the local node received by the - // P2P system. - // // Migrations will only run if there is a complete path from the document schema version to the latest local // schema version. - SetMigration(context.Context, LensConfig) error + SetMigration(context.Context, uint32, model.Lens) error // ReloadLenses clears any cached migrations, loads their configurations from the database and re-initializes // them. It is run on database start if the database already existed. 
ReloadLenses(context.Context) error // MigrateUp returns an enumerable that feeds the given source through the Lens migration for the given - // schema version id if one is found, if there is no matching migration the given source will be returned. + // collection id if one is found, if there is no matching migration the given source will be returned. MigrateUp( context.Context, enumerable.Enumerable[map[string]any], - string, + uint32, ) (enumerable.Enumerable[map[string]any], error) - // MigrateDown returns an enumerable that feeds the given source through the Lens migration for the schema - // version that precedes the given schema version id in reverse, if one is found, if there is no matching - // migration the given source will be returned. + // MigrateDown returns an enumerable that feeds the given source through the Lens migration for the given + // collection id in reverse if one is found, if there is no matching migration the given source will be returned. // // This downgrades any documents in the source enumerable if/when enumerated. MigrateDown( context.Context, enumerable.Enumerable[map[string]any], - string, + uint32, ) (enumerable.Enumerable[map[string]any], error) - - // Config returns a slice of the configurations of the currently loaded migrations. - // - // Modifying the slice does not affect the loaded configurations. - Config(context.Context) ([]LensConfig, error) - - // HasMigration returns true if there is a migration registered for the given schema version id, otherwise - // will return false. - HasMigration(context.Context, string) (bool, error) } diff --git a/client/mocks/collection.go b/client/mocks/collection.go index b1fac9c243..6e6c7afae3 100644 --- a/client/mocks/collection.go +++ b/client/mocks/collection.go @@ -9,6 +9,8 @@ import ( datastore "github.com/sourcenetwork/defradb/datastore" + immutable "github.com/sourcenetwork/immutable" + mock "github.com/stretchr/testify/mock" ) @@ -821,14 +823,14 @@ func (_c *Collection_ID_Call) RunAndReturn(run func() uint32) *Collection_ID_Cal } // Name provides a mock function with given fields: -func (_m *Collection) Name() string { +func (_m *Collection) Name() immutable.Option[string] { ret := _m.Called() - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { + var r0 immutable.Option[string] + if rf, ok := ret.Get(0).(func() immutable.Option[string]); ok { r0 = rf() } else { - r0 = ret.Get(0).(string) + r0 = ret.Get(0).(immutable.Option[string]) } return r0 @@ -851,12 +853,12 @@ func (_c *Collection_Name_Call) Run(run func()) *Collection_Name_Call { return _c } -func (_c *Collection_Name_Call) Return(_a0 string) *Collection_Name_Call { +func (_c *Collection_Name_Call) Return(_a0 immutable.Option[string]) *Collection_Name_Call { _c.Call.Return(_a0) return _c } -func (_c *Collection_Name_Call) RunAndReturn(run func() string) *Collection_Name_Call { +func (_c *Collection_Name_Call) RunAndReturn(run func() immutable.Option[string]) *Collection_Name_Call { _c.Call.Return(run) return _c } diff --git a/client/mocks/db.go b/client/mocks/db.go index 90dc8986d0..aeb54ea4cd 100644 --- a/client/mocks/db.go +++ b/client/mocks/db.go @@ -12,7 +12,11 @@ import ( events "github.com/sourcenetwork/defradb/events" + immutable "github.com/sourcenetwork/immutable" + mock "github.com/stretchr/testify/mock" + + model "github.com/lens-vm/lens/host-go/config/model" ) // DB is an autogenerated mock type for the DB type @@ -83,25 +87,25 @@ func (_c *DB_AddSchema_Call) RunAndReturn(run func(context.Context, string) ([]c return _c } -// 
AddView provides a mock function with given fields: ctx, gqlQuery, sdl -func (_m *DB) AddView(ctx context.Context, gqlQuery string, sdl string) ([]client.CollectionDefinition, error) { - ret := _m.Called(ctx, gqlQuery, sdl) +// AddView provides a mock function with given fields: ctx, gqlQuery, sdl, transform +func (_m *DB) AddView(ctx context.Context, gqlQuery string, sdl string, transform immutable.Option[model.Lens]) ([]client.CollectionDefinition, error) { + ret := _m.Called(ctx, gqlQuery, sdl, transform) var r0 []client.CollectionDefinition var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]client.CollectionDefinition, error)); ok { - return rf(ctx, gqlQuery, sdl) + if rf, ok := ret.Get(0).(func(context.Context, string, string, immutable.Option[model.Lens]) ([]client.CollectionDefinition, error)); ok { + return rf(ctx, gqlQuery, sdl, transform) } - if rf, ok := ret.Get(0).(func(context.Context, string, string) []client.CollectionDefinition); ok { - r0 = rf(ctx, gqlQuery, sdl) + if rf, ok := ret.Get(0).(func(context.Context, string, string, immutable.Option[model.Lens]) []client.CollectionDefinition); ok { + r0 = rf(ctx, gqlQuery, sdl, transform) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]client.CollectionDefinition) } } - if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { - r1 = rf(ctx, gqlQuery, sdl) + if rf, ok := ret.Get(1).(func(context.Context, string, string, immutable.Option[model.Lens]) error); ok { + r1 = rf(ctx, gqlQuery, sdl, transform) } else { r1 = ret.Error(1) } @@ -118,13 +122,14 @@ type DB_AddView_Call struct { // - ctx context.Context // - gqlQuery string // - sdl string -func (_e *DB_Expecter) AddView(ctx interface{}, gqlQuery interface{}, sdl interface{}) *DB_AddView_Call { - return &DB_AddView_Call{Call: _e.mock.On("AddView", ctx, gqlQuery, sdl)} +// - transform immutable.Option[model.Lens] +func (_e *DB_Expecter) AddView(ctx interface{}, gqlQuery interface{}, sdl interface{}, transform interface{}) *DB_AddView_Call { + return &DB_AddView_Call{Call: _e.mock.On("AddView", ctx, gqlQuery, sdl, transform)} } -func (_c *DB_AddView_Call) Run(run func(ctx context.Context, gqlQuery string, sdl string)) *DB_AddView_Call { +func (_c *DB_AddView_Call) Run(run func(ctx context.Context, gqlQuery string, sdl string, transform immutable.Option[model.Lens])) *DB_AddView_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string), args[2].(string)) + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(immutable.Option[model.Lens])) }) return _c } @@ -134,7 +139,7 @@ func (_c *DB_AddView_Call) Return(_a0 []client.CollectionDefinition, _a1 error) return _c } -func (_c *DB_AddView_Call) RunAndReturn(run func(context.Context, string, string) ([]client.CollectionDefinition, error)) *DB_AddView_Call { +func (_c *DB_AddView_Call) RunAndReturn(run func(context.Context, string, string, immutable.Option[model.Lens]) ([]client.CollectionDefinition, error)) *DB_AddView_Call { _c.Call.Return(run) return _c } @@ -386,60 +391,6 @@ func (_c *DB_ExecRequest_Call) RunAndReturn(run func(context.Context, string) *c return _c } -// GetAllCollections provides a mock function with given fields: _a0 -func (_m *DB) GetAllCollections(_a0 context.Context) ([]client.Collection, error) { - ret := _m.Called(_a0) - - var r0 []client.Collection - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) ([]client.Collection, error)); ok { - return rf(_a0) - } - if rf, ok := 
ret.Get(0).(func(context.Context) []client.Collection); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]client.Collection) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DB_GetAllCollections_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllCollections' -type DB_GetAllCollections_Call struct { - *mock.Call -} - -// GetAllCollections is a helper method to define mock.On call -// - _a0 context.Context -func (_e *DB_Expecter) GetAllCollections(_a0 interface{}) *DB_GetAllCollections_Call { - return &DB_GetAllCollections_Call{Call: _e.mock.On("GetAllCollections", _a0)} -} - -func (_c *DB_GetAllCollections_Call) Run(run func(_a0 context.Context)) *DB_GetAllCollections_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *DB_GetAllCollections_Call) Return(_a0 []client.Collection, _a1 error) *DB_GetAllCollections_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *DB_GetAllCollections_Call) RunAndReturn(run func(context.Context) ([]client.Collection, error)) *DB_GetAllCollections_Call { - _c.Call.Return(run) - return _c -} - // GetAllIndexes provides a mock function with given fields: _a0 func (_m *DB) GetAllIndexes(_a0 context.Context) (map[string][]client.IndexDescription, error) { ret := _m.Called(_a0) @@ -494,60 +445,6 @@ func (_c *DB_GetAllIndexes_Call) RunAndReturn(run func(context.Context) (map[str return _c } -// GetAllSchemas provides a mock function with given fields: _a0 -func (_m *DB) GetAllSchemas(_a0 context.Context) ([]client.SchemaDescription, error) { - ret := _m.Called(_a0) - - var r0 []client.SchemaDescription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) ([]client.SchemaDescription, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(context.Context) []client.SchemaDescription); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]client.SchemaDescription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DB_GetAllSchemas_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllSchemas' -type DB_GetAllSchemas_Call struct { - *mock.Call -} - -// GetAllSchemas is a helper method to define mock.On call -// - _a0 context.Context -func (_e *DB_Expecter) GetAllSchemas(_a0 interface{}) *DB_GetAllSchemas_Call { - return &DB_GetAllSchemas_Call{Call: _e.mock.On("GetAllSchemas", _a0)} -} - -func (_c *DB_GetAllSchemas_Call) Run(run func(_a0 context.Context)) *DB_GetAllSchemas_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *DB_GetAllSchemas_Call) Return(_a0 []client.SchemaDescription, _a1 error) *DB_GetAllSchemas_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *DB_GetAllSchemas_Call) RunAndReturn(run func(context.Context) ([]client.SchemaDescription, error)) *DB_GetAllSchemas_Call { - _c.Call.Return(run) - return _c -} - // GetCollectionByName provides a mock function with given fields: _a0, _a1 func (_m *DB) GetCollectionByName(_a0 context.Context, _a1 string) (client.Collection, error) { ret := _m.Called(_a0, _a1) @@ -603,71 +500,16 @@ func (_c *DB_GetCollectionByName_Call) RunAndReturn(run func(context.Context, st return _c } -// GetCollectionsBySchemaRoot provides a 
mock function with given fields: _a0, _a1 -func (_m *DB) GetCollectionsBySchemaRoot(_a0 context.Context, _a1 string) ([]client.Collection, error) { - ret := _m.Called(_a0, _a1) - - var r0 []client.Collection - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) ([]client.Collection, error)); ok { - return rf(_a0, _a1) - } - if rf, ok := ret.Get(0).(func(context.Context, string) []client.Collection); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]client.Collection) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DB_GetCollectionsBySchemaRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionsBySchemaRoot' -type DB_GetCollectionsBySchemaRoot_Call struct { - *mock.Call -} - -// GetCollectionsBySchemaRoot is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 string -func (_e *DB_Expecter) GetCollectionsBySchemaRoot(_a0 interface{}, _a1 interface{}) *DB_GetCollectionsBySchemaRoot_Call { - return &DB_GetCollectionsBySchemaRoot_Call{Call: _e.mock.On("GetCollectionsBySchemaRoot", _a0, _a1)} -} - -func (_c *DB_GetCollectionsBySchemaRoot_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionsBySchemaRoot_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string)) - }) - return _c -} - -func (_c *DB_GetCollectionsBySchemaRoot_Call) Return(_a0 []client.Collection, _a1 error) *DB_GetCollectionsBySchemaRoot_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *DB_GetCollectionsBySchemaRoot_Call) RunAndReturn(run func(context.Context, string) ([]client.Collection, error)) *DB_GetCollectionsBySchemaRoot_Call { - _c.Call.Return(run) - return _c -} - -// GetCollectionsByVersionID provides a mock function with given fields: _a0, _a1 -func (_m *DB) GetCollectionsByVersionID(_a0 context.Context, _a1 string) ([]client.Collection, error) { +// GetCollections provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetCollections(_a0 context.Context, _a1 client.CollectionFetchOptions) ([]client.Collection, error) { ret := _m.Called(_a0, _a1) var r0 []client.Collection var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) ([]client.Collection, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, client.CollectionFetchOptions) ([]client.Collection, error)); ok { return rf(_a0, _a1) } - if rf, ok := ret.Get(0).(func(context.Context, string) []client.Collection); ok { + if rf, ok := ret.Get(0).(func(context.Context, client.CollectionFetchOptions) []client.Collection); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -675,7 +517,7 @@ func (_m *DB) GetCollectionsByVersionID(_a0 context.Context, _a1 string) ([]clie } } - if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, client.CollectionFetchOptions) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -684,31 +526,31 @@ func (_m *DB) GetCollectionsByVersionID(_a0 context.Context, _a1 string) ([]clie return r0, r1 } -// DB_GetCollectionsByVersionID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionsByVersionID' -type DB_GetCollectionsByVersionID_Call struct { +// DB_GetCollections_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollections' +type 
DB_GetCollections_Call struct { *mock.Call } -// GetCollectionsByVersionID is a helper method to define mock.On call +// GetCollections is a helper method to define mock.On call // - _a0 context.Context -// - _a1 string -func (_e *DB_Expecter) GetCollectionsByVersionID(_a0 interface{}, _a1 interface{}) *DB_GetCollectionsByVersionID_Call { - return &DB_GetCollectionsByVersionID_Call{Call: _e.mock.On("GetCollectionsByVersionID", _a0, _a1)} +// - _a1 client.CollectionFetchOptions +func (_e *DB_Expecter) GetCollections(_a0 interface{}, _a1 interface{}) *DB_GetCollections_Call { + return &DB_GetCollections_Call{Call: _e.mock.On("GetCollections", _a0, _a1)} } -func (_c *DB_GetCollectionsByVersionID_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionsByVersionID_Call { +func (_c *DB_GetCollections_Call) Run(run func(_a0 context.Context, _a1 client.CollectionFetchOptions)) *DB_GetCollections_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string)) + run(args[0].(context.Context), args[1].(client.CollectionFetchOptions)) }) return _c } -func (_c *DB_GetCollectionsByVersionID_Call) Return(_a0 []client.Collection, _a1 error) *DB_GetCollectionsByVersionID_Call { +func (_c *DB_GetCollections_Call) Return(_a0 []client.Collection, _a1 error) *DB_GetCollections_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *DB_GetCollectionsByVersionID_Call) RunAndReturn(run func(context.Context, string) ([]client.Collection, error)) *DB_GetCollectionsByVersionID_Call { +func (_c *DB_GetCollections_Call) RunAndReturn(run func(context.Context, client.CollectionFetchOptions) ([]client.Collection, error)) *DB_GetCollections_Call { _c.Call.Return(run) return _c } @@ -766,16 +608,16 @@ func (_c *DB_GetSchemaByVersionID_Call) RunAndReturn(run func(context.Context, s return _c } -// GetSchemasByName provides a mock function with given fields: _a0, _a1 -func (_m *DB) GetSchemasByName(_a0 context.Context, _a1 string) ([]client.SchemaDescription, error) { +// GetSchemas provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetSchemas(_a0 context.Context, _a1 client.SchemaFetchOptions) ([]client.SchemaDescription, error) { ret := _m.Called(_a0, _a1) var r0 []client.SchemaDescription var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) ([]client.SchemaDescription, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, client.SchemaFetchOptions) ([]client.SchemaDescription, error)); ok { return rf(_a0, _a1) } - if rf, ok := ret.Get(0).(func(context.Context, string) []client.SchemaDescription); ok { + if rf, ok := ret.Get(0).(func(context.Context, client.SchemaFetchOptions) []client.SchemaDescription); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -783,7 +625,7 @@ func (_m *DB) GetSchemasByName(_a0 context.Context, _a1 string) ([]client.Schema } } - if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, client.SchemaFetchOptions) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -792,86 +634,31 @@ func (_m *DB) GetSchemasByName(_a0 context.Context, _a1 string) ([]client.Schema return r0, r1 } -// DB_GetSchemasByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSchemasByName' -type DB_GetSchemasByName_Call struct { +// DB_GetSchemas_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSchemas' +type DB_GetSchemas_Call struct { *mock.Call } -// 
GetSchemasByName is a helper method to define mock.On call +// GetSchemas is a helper method to define mock.On call // - _a0 context.Context -// - _a1 string -func (_e *DB_Expecter) GetSchemasByName(_a0 interface{}, _a1 interface{}) *DB_GetSchemasByName_Call { - return &DB_GetSchemasByName_Call{Call: _e.mock.On("GetSchemasByName", _a0, _a1)} +// - _a1 client.SchemaFetchOptions +func (_e *DB_Expecter) GetSchemas(_a0 interface{}, _a1 interface{}) *DB_GetSchemas_Call { + return &DB_GetSchemas_Call{Call: _e.mock.On("GetSchemas", _a0, _a1)} } -func (_c *DB_GetSchemasByName_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetSchemasByName_Call { +func (_c *DB_GetSchemas_Call) Run(run func(_a0 context.Context, _a1 client.SchemaFetchOptions)) *DB_GetSchemas_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string)) - }) - return _c -} - -func (_c *DB_GetSchemasByName_Call) Return(_a0 []client.SchemaDescription, _a1 error) *DB_GetSchemasByName_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *DB_GetSchemasByName_Call) RunAndReturn(run func(context.Context, string) ([]client.SchemaDescription, error)) *DB_GetSchemasByName_Call { - _c.Call.Return(run) - return _c -} - -// GetSchemasByRoot provides a mock function with given fields: _a0, _a1 -func (_m *DB) GetSchemasByRoot(_a0 context.Context, _a1 string) ([]client.SchemaDescription, error) { - ret := _m.Called(_a0, _a1) - - var r0 []client.SchemaDescription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) ([]client.SchemaDescription, error)); ok { - return rf(_a0, _a1) - } - if rf, ok := ret.Get(0).(func(context.Context, string) []client.SchemaDescription); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]client.SchemaDescription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DB_GetSchemasByRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSchemasByRoot' -type DB_GetSchemasByRoot_Call struct { - *mock.Call -} - -// GetSchemasByRoot is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 string -func (_e *DB_Expecter) GetSchemasByRoot(_a0 interface{}, _a1 interface{}) *DB_GetSchemasByRoot_Call { - return &DB_GetSchemasByRoot_Call{Call: _e.mock.On("GetSchemasByRoot", _a0, _a1)} -} - -func (_c *DB_GetSchemasByRoot_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetSchemasByRoot_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string)) + run(args[0].(context.Context), args[1].(client.SchemaFetchOptions)) }) return _c } -func (_c *DB_GetSchemasByRoot_Call) Return(_a0 []client.SchemaDescription, _a1 error) *DB_GetSchemasByRoot_Call { +func (_c *DB_GetSchemas_Call) Return(_a0 []client.SchemaDescription, _a1 error) *DB_GetSchemas_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *DB_GetSchemasByRoot_Call) RunAndReturn(run func(context.Context, string) ([]client.SchemaDescription, error)) *DB_GetSchemasByRoot_Call { +func (_c *DB_GetSchemas_Call) RunAndReturn(run func(context.Context, client.SchemaFetchOptions) ([]client.SchemaDescription, error)) *DB_GetSchemas_Call { _c.Call.Return(run) return _c } @@ -1070,13 +857,13 @@ func (_c *DB_NewTxn_Call) RunAndReturn(run func(context.Context, bool) (datastor return _c } -// PatchSchema provides a mock function with given fields: _a0, _a1, _a2 -func (_m *DB) 
PatchSchema(_a0 context.Context, _a1 string, _a2 bool) error { - ret := _m.Called(_a0, _a1, _a2) +// PatchSchema provides a mock function with given fields: _a0, _a1, _a2, _a3 +func (_m *DB) PatchSchema(_a0 context.Context, _a1 string, _a2 immutable.Option[model.Lens], _a3 bool) error { + ret := _m.Called(_a0, _a1, _a2, _a3) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, bool) error); ok { - r0 = rf(_a0, _a1, _a2) + if rf, ok := ret.Get(0).(func(context.Context, string, immutable.Option[model.Lens], bool) error); ok { + r0 = rf(_a0, _a1, _a2, _a3) } else { r0 = ret.Error(0) } @@ -1092,14 +879,15 @@ type DB_PatchSchema_Call struct { // PatchSchema is a helper method to define mock.On call // - _a0 context.Context // - _a1 string -// - _a2 bool -func (_e *DB_Expecter) PatchSchema(_a0 interface{}, _a1 interface{}, _a2 interface{}) *DB_PatchSchema_Call { - return &DB_PatchSchema_Call{Call: _e.mock.On("PatchSchema", _a0, _a1, _a2)} +// - _a2 immutable.Option[model.Lens] +// - _a3 bool +func (_e *DB_Expecter) PatchSchema(_a0 interface{}, _a1 interface{}, _a2 interface{}, _a3 interface{}) *DB_PatchSchema_Call { + return &DB_PatchSchema_Call{Call: _e.mock.On("PatchSchema", _a0, _a1, _a2, _a3)} } -func (_c *DB_PatchSchema_Call) Run(run func(_a0 context.Context, _a1 string, _a2 bool)) *DB_PatchSchema_Call { +func (_c *DB_PatchSchema_Call) Run(run func(_a0 context.Context, _a1 string, _a2 immutable.Option[model.Lens], _a3 bool)) *DB_PatchSchema_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string), args[2].(bool)) + run(args[0].(context.Context), args[1].(string), args[2].(immutable.Option[model.Lens]), args[3].(bool)) }) return _c } @@ -1109,7 +897,7 @@ func (_c *DB_PatchSchema_Call) Return(_a0 error) *DB_PatchSchema_Call { return _c } -func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string, bool) error) *DB_PatchSchema_Call { +func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string, immutable.Option[model.Lens], bool) error) *DB_PatchSchema_Call { _c.Call.Return(run) return _c } @@ -1242,8 +1030,8 @@ func (_c *DB_Root_Call) RunAndReturn(run func() datastore.RootStore) *DB_Root_Ca return _c } -// SetDefaultSchemaVersion provides a mock function with given fields: _a0, _a1 -func (_m *DB) SetDefaultSchemaVersion(_a0 context.Context, _a1 string) error { +// SetActiveSchemaVersion provides a mock function with given fields: _a0, _a1 +func (_m *DB) SetActiveSchemaVersion(_a0 context.Context, _a1 string) error { ret := _m.Called(_a0, _a1) var r0 error @@ -1256,31 +1044,31 @@ func (_m *DB) SetDefaultSchemaVersion(_a0 context.Context, _a1 string) error { return r0 } -// DB_SetDefaultSchemaVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetDefaultSchemaVersion' -type DB_SetDefaultSchemaVersion_Call struct { +// DB_SetActiveSchemaVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetActiveSchemaVersion' +type DB_SetActiveSchemaVersion_Call struct { *mock.Call } -// SetDefaultSchemaVersion is a helper method to define mock.On call +// SetActiveSchemaVersion is a helper method to define mock.On call // - _a0 context.Context // - _a1 string -func (_e *DB_Expecter) SetDefaultSchemaVersion(_a0 interface{}, _a1 interface{}) *DB_SetDefaultSchemaVersion_Call { - return &DB_SetDefaultSchemaVersion_Call{Call: _e.mock.On("SetDefaultSchemaVersion", _a0, _a1)} +func (_e *DB_Expecter) SetActiveSchemaVersion(_a0 
interface{}, _a1 interface{}) *DB_SetActiveSchemaVersion_Call { + return &DB_SetActiveSchemaVersion_Call{Call: _e.mock.On("SetActiveSchemaVersion", _a0, _a1)} } -func (_c *DB_SetDefaultSchemaVersion_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_SetDefaultSchemaVersion_Call { +func (_c *DB_SetActiveSchemaVersion_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_SetActiveSchemaVersion_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(string)) }) return _c } -func (_c *DB_SetDefaultSchemaVersion_Call) Return(_a0 error) *DB_SetDefaultSchemaVersion_Call { +func (_c *DB_SetActiveSchemaVersion_Call) Return(_a0 error) *DB_SetActiveSchemaVersion_Call { _c.Call.Return(_a0) return _c } -func (_c *DB_SetDefaultSchemaVersion_Call) RunAndReturn(run func(context.Context, string) error) *DB_SetDefaultSchemaVersion_Call { +func (_c *DB_SetActiveSchemaVersion_Call) RunAndReturn(run func(context.Context, string) error) *DB_SetActiveSchemaVersion_Call { _c.Call.Return(run) return _c } diff --git a/client/value.go b/client/value.go index ae462b74f2..261535d8d2 100644 --- a/client/value.go +++ b/client/value.go @@ -19,7 +19,6 @@ type FieldValue struct { t CType value any isDirty bool - delete bool } func NewFieldValue(t CType, val any) *FieldValue { @@ -50,16 +49,6 @@ func (val FieldValue) IsDirty() bool { func (val *FieldValue) Clean() { val.isDirty = false - val.delete = false -} - -func (val *FieldValue) Delete() { - val.delete = true - val.isDirty = true -} - -func (val FieldValue) IsDelete() bool { - return val.delete } func (val *FieldValue) SetType(t CType) { diff --git a/cmd/defradb/main.go b/cmd/defradb/main.go index 2406885a76..e827177132 100644 --- a/cmd/defradb/main.go +++ b/cmd/defradb/main.go @@ -15,12 +15,11 @@ import ( "os" "github.com/sourcenetwork/defradb/cli" - "github.com/sourcenetwork/defradb/config" ) // Execute adds all child commands to the root command and sets flags appropriately. 
func main() { - defraCmd := cli.NewDefraCommand(config.DefaultConfig()) + defraCmd := cli.NewDefraCommand() if err := defraCmd.Execute(); err != nil { // this error is okay to discard because cobra // logs any errors encountered during execution diff --git a/cmd/genclidocs/main.go b/cmd/genclidocs/main.go index f556c26d20..a9a6f198c4 100644 --- a/cmd/genclidocs/main.go +++ b/cmd/genclidocs/main.go @@ -21,7 +21,6 @@ import ( "github.com/spf13/cobra/doc" "github.com/sourcenetwork/defradb/cli" - "github.com/sourcenetwork/defradb/config" ) var path string @@ -33,7 +32,7 @@ func init() { func main() { flag.Parse() - defraCmd := cli.NewDefraCommand(config.DefaultConfig()) + defraCmd := cli.NewDefraCommand() defraCmd.DisableAutoGenTag = true if err := os.MkdirAll(path, os.ModePerm); err != nil { diff --git a/cmd/gendocs/main.go b/cmd/gendocs/main.go index 44901b0faf..4c1f7dcf9a 100644 --- a/cmd/gendocs/main.go +++ b/cmd/gendocs/main.go @@ -16,13 +16,11 @@ package main import ( "os" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/tests/gen/cli" ) func main() { - conf := config.DefaultConfig() - gendocsCmd := cli.MakeGenDocCommand(conf) + gendocsCmd := cli.MakeGenDocCommand() if err := gendocsCmd.Execute(); err != nil { // this error is okay to discard because cobra // logs any errors encountered during execution diff --git a/cmd/genmanpages/main.go b/cmd/genmanpages/main.go index 1a9b43df7c..ecdc78762d 100644 --- a/cmd/genmanpages/main.go +++ b/cmd/genmanpages/main.go @@ -22,7 +22,6 @@ import ( "github.com/spf13/cobra/doc" "github.com/sourcenetwork/defradb/cli" - "github.com/sourcenetwork/defradb/config" ) const defaultPerm os.FileMode = 0o777 @@ -41,7 +40,7 @@ func init() { func main() { flag.Parse() - defraCmd := cli.NewDefraCommand(config.DefaultConfig()) + defraCmd := cli.NewDefraCommand() if err := os.MkdirAll(dir, defaultPerm); err != nil { log.Fatal("Failed to create directory", err) diff --git a/cmd/genopenapi/main.go b/cmd/genopenapi/main.go new file mode 100644 index 0000000000..ed655eb932 --- /dev/null +++ b/cmd/genopenapi/main.go @@ -0,0 +1,33 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +/* +genopenapi is a tool to generate and print an OpenAPI specification. +*/ +package main + +import ( + "fmt" + "os" + + "github.com/sourcenetwork/defradb/http" +) + +func main() { + router, err := http.NewApiRouter() + if err != nil { + panic(err) + } + json, err := router.OpenAPI().MarshalJSON() + if err != nil { + panic(err) + } + fmt.Fprint(os.Stdout, string(json)) +} diff --git a/config/config.go b/config/config.go deleted file mode 100644 index c179fc5db3..0000000000 --- a/config/config.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -/* -Package config provides the central point for DefraDB's configuration and related facilities. 
- -[Config] embeds component-specific config structs. Each config struct can have a function providing -default options, a method providing test configurations, a method for validation, a method handling -deprecated fields (e.g. with warnings). This is extensible. - -The 'root directory' is where the configuration file and data of a DefraDB instance exists. -It is specified as a global flag `defradb --rootdir path/to/somewhere. - -Some packages of DefraDB provide their own configuration approach (logging, node). -For each, a way to go from top-level configuration to package-specific configuration is provided. - -Parameters are determined by, in order of least importance: defaults, configuration file, env. variables, and then CLI -flags. That is, CLI flags can override everything else. - -For example `DEFRA_DATASTORE_BADGER_PATH` matches [Config.Datastore.Badger.Path] and in the config file: - - datastore: - badger: - path: /tmp/badger - -This implementation does not support online modification of configuration. - -How to use, e.g. without using a rootdir: - - cfg := config.DefaultConfig() - cfg.NetConfig.P2PDisabled = true // as example - err := cfg.LoadWithRootdir(false) - if err != nil { - ... -*/ -package config - -import ( - "bytes" - "encoding/json" - "fmt" - "net" - "path/filepath" - "strconv" - "strings" - "text/template" - - "github.com/mitchellh/mapstructure" - ma "github.com/multiformats/go-multiaddr" - "github.com/spf13/pflag" - "github.com/spf13/viper" - "golang.org/x/net/idna" - - badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/logging" -) - -var log = logging.MustNewLogger("config") - -const ( - DefaultAPIEmail = "example@example.com" - RootdirKey = "rootdircli" - defraEnvPrefix = "DEFRA" - logLevelDebug = "debug" - logLevelInfo = "info" - logLevelError = "error" - logLevelFatal = "fatal" -) - -// Config is DefraDB's main configuration struct, embedding component-specific config structs. -type Config struct { - Datastore *DatastoreConfig - API *APIConfig - Net *NetConfig - Log *LoggingConfig - Rootdir string - v *viper.Viper -} - -// DefaultConfig returns the default configuration (or panics). -func DefaultConfig() *Config { - cfg := &Config{ - Datastore: defaultDatastoreConfig(), - API: defaultAPIConfig(), - Net: defaultNetConfig(), - Log: defaultLogConfig(), - Rootdir: "", - v: viper.New(), - } - - cfg.v.SetEnvPrefix(defraEnvPrefix) - cfg.v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) - - cfg.v.SetConfigName(DefaultConfigFileName) - cfg.v.SetConfigType(configType) - - cfg.Persist() - - return cfg -} - -// Persist persists manually set config parameters to the viper config. -func (cfg *Config) Persist() { - // Load new values in viper. - b, err := cfg.toBytes() - if err != nil { - panic(err) - } - if err = cfg.v.ReadConfig(bytes.NewReader(b)); err != nil { - panic(NewErrReadingConfigFile(err)) - } -} - -// LoadWithRootdir loads a Config with parameters from defaults, config file, environment variables, and CLI flags. -// It loads from config file when `fromFile` is true, otherwise it loads directly from a default configuration. -// Use on a Config struct already loaded with default values from DefaultConfig(). -// To be executed once at the beginning of the program. -func (cfg *Config) LoadWithRootdir(withRootdir bool) error { - // Use default logging configuration here, so that - // we can log errors in a consistent way even in the case of early failure. 
- defaultLogCfg := defaultLogConfig() - if err := defaultLogCfg.load(); err != nil { - return err - } - - if err := cfg.LoadRootDirFromFlagOrDefault(); err != nil { - return err - } - - if withRootdir { - if err := cfg.v.ReadInConfig(); err != nil { - return NewErrReadingConfigFile(err) - } - } - - cfg.v.AutomaticEnv() - - if err := cfg.paramsPreprocessing(); err != nil { - return err - } - // We load the viper configuration in the Config struct. - if err := cfg.v.Unmarshal(cfg, viper.DecodeHook(mapstructure.TextUnmarshallerHookFunc())); err != nil { - return NewErrLoadingConfig(err) - } - if err := cfg.validate(); err != nil { - return err - } - if err := cfg.load(); err != nil { - return err - } - - return nil -} - -func (cfg *Config) LoadRootDirFromFlagOrDefault() error { - if cfg.Rootdir == "" { - rootdir := cfg.v.GetString(RootdirKey) - if rootdir != "" { - return cfg.setRootdir(rootdir) - } - - return cfg.setRootdir(DefaultRootDir()) - } - - return nil -} - -func (cfg *Config) setRootdir(rootdir string) error { - var err error - if rootdir == "" { - return NewErrInvalidRootDir(rootdir) - } - // using absolute rootdir for robustness. - cfg.Rootdir, err = filepath.Abs(rootdir) - if err != nil { - return err - } - cfg.v.AddConfigPath(cfg.Rootdir) - return nil -} - -func (cfg *Config) validate() error { - if err := cfg.Datastore.validate(); err != nil { - return NewErrFailedToValidateConfig(err) - } - if err := cfg.API.validate(); err != nil { - return NewErrFailedToValidateConfig(err) - } - if err := cfg.Net.validate(); err != nil { - return NewErrFailedToValidateConfig(err) - } - if err := cfg.Log.validate(); err != nil { - return NewErrFailedToValidateConfig(err) - } - return nil -} - -func (cfg *Config) paramsPreprocessing() error { - // We prefer using absolute paths, relative to the rootdir. - if !filepath.IsAbs(cfg.v.GetString("datastore.badger.path")) { - cfg.v.Set("datastore.badger.path", filepath.Join(cfg.Rootdir, cfg.v.GetString("datastore.badger.path"))) - } - if !filepath.IsAbs(cfg.v.GetString("api.privkeypath")) { - cfg.v.Set("api.privkeypath", filepath.Join(cfg.Rootdir, cfg.v.GetString("api.privkeypath"))) - } - if !filepath.IsAbs(cfg.v.GetString("api.pubkeypath")) { - cfg.v.Set("api.pubkeypath", filepath.Join(cfg.Rootdir, cfg.v.GetString("api.pubkeypath"))) - } - - // log.logger configuration as a string - logloggerAsStringSlice := cfg.v.GetStringSlice("log.logger") - if logloggerAsStringSlice != nil { - cfg.v.Set("log.logger", strings.Join(logloggerAsStringSlice, ";")) - } - - // Expand the passed in `~` if it wasn't expanded properly by the shell. - // That can happen when the parameters are passed from outside of a shell. - if err := expandHomeDir(&cfg.API.PrivKeyPath); err != nil { - return err - } - if err := expandHomeDir(&cfg.API.PubKeyPath); err != nil { - return err - } - - var bs ByteSize - if err := bs.Set(cfg.v.GetString("datastore.badger.valuelogfilesize")); err != nil { - return err - } - cfg.Datastore.Badger.ValueLogFileSize = bs - - return nil -} - -func (cfg *Config) load() error { - if err := cfg.Log.load(); err != nil { - return err - } - return nil -} - -// DatastoreConfig configures datastores. -type DatastoreConfig struct { - Store string - Memory MemoryConfig - Badger BadgerConfig - MaxTxnRetries int -} - -// BadgerConfig configures Badger's on-disk / filesystem mode. -type BadgerConfig struct { - Path string - ValueLogFileSize ByteSize - *badgerds.Options -} - -// MemoryConfig configures of Badger's memory mode. 
-type MemoryConfig struct { - Size uint64 -} - -func defaultDatastoreConfig() *DatastoreConfig { - // create a copy of the default badger options - opts := badgerds.DefaultOptions - return &DatastoreConfig{ - Store: "badger", - Badger: BadgerConfig{ - Path: "data", - ValueLogFileSize: 1 * GiB, - Options: &opts, - }, - MaxTxnRetries: 5, - } -} - -func (dbcfg DatastoreConfig) validate() error { - switch dbcfg.Store { - case "badger", "memory": - default: - return NewErrInvalidDatastoreType(dbcfg.Store) - } - return nil -} - -// APIConfig configures the API endpoints. -type APIConfig struct { - Address string - TLS bool - AllowedOrigins []string `mapstructure:"allowed-origins"` - PubKeyPath string - PrivKeyPath string - Email string -} - -func defaultAPIConfig() *APIConfig { - return &APIConfig{ - Address: "localhost:9181", - TLS: false, - AllowedOrigins: []string{}, - PubKeyPath: "certs/server.key", - PrivKeyPath: "certs/server.crt", - Email: DefaultAPIEmail, - } -} - -func (apicfg *APIConfig) validate() error { - if apicfg.Address == "" { - return ErrInvalidDatabaseURL - } - - if apicfg.Address == "localhost" || net.ParseIP(apicfg.Address) != nil { //nolint:goconst - return ErrMissingPortNumber - } - - if isValidDomainName(apicfg.Address) { - return nil - } - - host, _, err := net.SplitHostPort(apicfg.Address) - if err != nil { - return NewErrInvalidDatabaseURL(err) - } - if host == "localhost" { - return nil - } - if net.ParseIP(host) == nil { - return ErrNoPortWithDomain - } - - return nil -} - -func isValidDomainName(domain string) bool { - asciiDomain, err := idna.Registration.ToASCII(domain) - if err != nil { - return false - } - return asciiDomain == domain -} - -// AddressToURL provides the API address as URL. -func (apicfg *APIConfig) AddressToURL() string { - if apicfg.TLS { - return fmt.Sprintf("https://%s", apicfg.Address) - } - return fmt.Sprintf("http://%s", apicfg.Address) -} - -// NetConfig configures aspects of network and peer-to-peer. -type NetConfig struct { - P2PAddress string - P2PDisabled bool - Peers string - PubSubEnabled bool `mapstructure:"pubsub"` - RelayEnabled bool `mapstructure:"relay"` -} - -func defaultNetConfig() *NetConfig { - return &NetConfig{ - P2PAddress: "/ip4/0.0.0.0/tcp/9171", - P2PDisabled: false, - Peers: "", - PubSubEnabled: true, - RelayEnabled: false, - } -} - -func (netcfg *NetConfig) validate() error { - _, err := ma.NewMultiaddr(netcfg.P2PAddress) - if err != nil { - return NewErrInvalidP2PAddress(err, netcfg.P2PAddress) - } - if len(netcfg.Peers) > 0 { - peers := strings.Split(netcfg.Peers, ",") - maddrs := make([]ma.Multiaddr, len(peers)) - for i, addr := range peers { - addr, err := ma.NewMultiaddr(addr) - if err != nil { - return NewErrInvalidBootstrapPeers(err, netcfg.Peers) - } - maddrs[i] = addr - } - } - return nil -} - -// LogConfig configures output and logger. -type LoggingConfig struct { - Level string - Stacktrace bool - Format string - Output string // logging actually supports multiple output paths, but here only one is supported - Caller bool - NoColor bool - Logger string - NamedOverrides map[string]*NamedLoggingConfig -} - -// NamedLoggingConfig is a named logging config, used for named overrides of the default config. 
-type NamedLoggingConfig struct { - Name string - LoggingConfig -} - -func defaultLogConfig() *LoggingConfig { - return &LoggingConfig{ - Level: logLevelInfo, - Stacktrace: false, - Format: "csv", - Output: "stderr", - Caller: false, - NoColor: false, - Logger: "", - NamedOverrides: make(map[string]*NamedLoggingConfig), - } -} - -// validate ensures that the logging config is valid. -func (logcfg *LoggingConfig) validate() error { - /* - `loglevel` is either a single value, or a single value with comma-separated list of key=value pairs, for which - the key is the name of the logger and the value is the log level, each logger name is unique, and value is valid. - - `--loglevels ,=,...` - */ - kvs := []map[string]string{} - validLevel := func(level string) bool { - for _, l := range []string{ - logLevelDebug, - logLevelInfo, - logLevelError, - logLevelFatal, - } { - if l == level { - return true - } - } - return false - } - ensureUniqueKeys := func(kvs []map[string]string) error { - keys := make(map[string]bool) - for _, kv := range kvs { - for k := range kv { - if keys[k] { - return NewErrDuplicateLoggerName(k) - } - keys[k] = true - } - } - return nil - } - - parts := strings.Split(logcfg.Level, ",") - if len(parts) > 0 { - if !validLevel(parts[0]) { - return NewErrInvalidLogLevel(parts[0]) - } - for _, kv := range parts[1:] { - parsedKV, err := parseKV(kv) - if err != nil { - return err - } - // ensure each value is a valid loglevel validLevel - if !validLevel(parsedKV[1]) { - return NewErrInvalidLogLevel(parsedKV[1]) - } - kvs = append(kvs, map[string]string{parsedKV[0]: parsedKV[1]}) - } - if err := ensureUniqueKeys(kvs); err != nil { - return err - } - } - - // logger: expect format like: `net,nocolor=true,level=debug;config,output=stdout,level=info` - if len(logcfg.Logger) != 0 { - namedconfigs := strings.Split(logcfg.Logger, ";") - for _, c := range namedconfigs { - parts := strings.Split(c, ",") - if len(parts) < 2 { - return NewErrLoggerConfig("unexpected format (expected: `module,key=value;module,key=value;...`") - } - if parts[0] == "" { - return ErrLoggerNameEmpty - } - for _, pair := range parts[1:] { - parsedKV, err := parseKV(pair) - if err != nil { - return err - } - if !isLowercaseAlpha(parsedKV[0]) { - return NewErrInvalidLoggerName(parsedKV[0]) - } - switch parsedKV[0] { - case "format", "output", "nocolor", "stacktrace", "caller": //nolint:goconst - // valid logger parameters - case "level": //nolint:goconst - // ensure each value is a valid loglevel validLevel - if !validLevel(parsedKV[1]) { - return NewErrInvalidLogLevel(parsedKV[1]) - } - default: - return NewErrUnknownLoggerParameter(parsedKV[0]) - } - } - } - } - - return nil -} - -func (logcfg *LoggingConfig) load() error { - // load loglevel - parts := strings.Split(logcfg.Level, ",") - if len(parts) > 0 { - logcfg.Level = parts[0] - } - if len(parts) > 1 { - for _, kv := range parts[1:] { - parsedKV := strings.Split(kv, "=") - if len(parsedKV) != 2 { - return NewErrInvalidLogLevel(kv) - } - c, err := logcfg.GetOrCreateNamedLogger(parsedKV[0]) - if err != nil { - return NewErrCouldNotObtainLoggerConfig(err, parsedKV[0]) - } - c.Level = parsedKV[1] - } - } - - // load logger - // e.g. 
`net,nocolor=true,level=debug;config,output=stdout,level=info` - // logger has higher priority over loglevel whenever both touch the same parameters - if len(logcfg.Logger) != 0 { - s := strings.Split(logcfg.Logger, ";") - for _, v := range s { - vs := strings.Split(v, ",") - override, err := logcfg.GetOrCreateNamedLogger(vs[0]) - if err != nil { - return NewErrCouldNotObtainLoggerConfig(err, vs[0]) - } - override.Name = vs[0] - for _, v := range vs[1:] { - parsedKV := strings.Split(v, "=") - if len(parsedKV) != 2 { - return NewErrNotProvidedAsKV(v) - } - switch param := strings.ToLower(parsedKV[0]); param { - case "level": // string - override.Level = parsedKV[1] - case "format": // string - override.Format = parsedKV[1] - case "output": // string - override.Output = parsedKV[1] - case "stacktrace": // bool - if override.Stacktrace, err = strconv.ParseBool(parsedKV[1]); err != nil { - return NewErrCouldNotParseType(err, "bool") - } - case "nocolor": // bool - if override.NoColor, err = strconv.ParseBool(parsedKV[1]); err != nil { - return NewErrCouldNotParseType(err, "bool") - } - case "caller": // bool - if override.Caller, err = strconv.ParseBool(parsedKV[1]); err != nil { - return NewErrCouldNotParseType(err, "bool") - } - default: - return NewErrUnknownLoggerParameter(param) - } - } - } - } - - c, err := logcfg.toLoggerConfig() - if err != nil { - return err - } - logging.SetConfig(c) - return nil -} - -func convertLoglevel(level string) (logging.LogLevel, error) { - switch level { - case logLevelDebug: - return logging.Debug, nil - case logLevelInfo: - return logging.Info, nil - case logLevelError: - return logging.Error, nil - case logLevelFatal: - return logging.Fatal, nil - default: - return logging.LogLevel(0), NewErrInvalidLogLevel(level) - } -} - -// Exports the logging config to the logging library's config. -func (logcfg LoggingConfig) toLoggerConfig() (logging.Config, error) { - loglevel, err := convertLoglevel(logcfg.Level) - if err != nil { - return logging.Config{}, err - } - - var encfmt logging.EncoderFormat - switch logcfg.Format { - case "json": - encfmt = logging.JSON - case "csv": - encfmt = logging.CSV - default: - return logging.Config{}, NewErrInvalidLogFormat(logcfg.Format) - } - - // handle logger named overrides - overrides := make(map[string]logging.Config) - for name, cfg := range logcfg.NamedOverrides { - c, err := cfg.toLoggerConfig() - if err != nil { - return logging.Config{}, NewErrOverrideConfigConvertFailed(err, name) - } - overrides[name] = c - } - - c := logging.Config{ - Level: logging.NewLogLevelOption(loglevel), - EnableStackTrace: logging.NewEnableStackTraceOption(logcfg.Stacktrace), - DisableColor: logging.NewDisableColorOption(logcfg.NoColor), - EncoderFormat: logging.NewEncoderFormatOption(encfmt), - OutputPaths: []string{logcfg.Output}, - EnableCaller: logging.NewEnableCallerOption(logcfg.Caller), - OverridesByLoggerName: overrides, - } - return c, nil -} - -// this is a copy that doesn't deep copy the NamedOverrides map -// copy is handled by runtime "pass-by-value" -func (logcfg LoggingConfig) copy() LoggingConfig { - logcfg.NamedOverrides = make(map[string]*NamedLoggingConfig) - return logcfg -} - -// GetOrCreateNamedLogger returns a named logger config, or creates a default one if it doesn't exist. 
-func (logcfg *LoggingConfig) GetOrCreateNamedLogger(name string) (*NamedLoggingConfig, error) { - if name == "" { - return nil, ErrLoggerNameEmpty - } - if namedCfg, exists := logcfg.NamedOverrides[name]; exists { - return namedCfg, nil - } - // create default and save to overrides - namedCfg := &NamedLoggingConfig{ - Name: name, - LoggingConfig: logcfg.copy(), - } - logcfg.NamedOverrides[name] = namedCfg - - return namedCfg, nil -} - -// BindFlag binds a CLI flag to a config key. -func (cfg *Config) BindFlag(key string, flag *pflag.Flag) error { - return cfg.v.BindPFlag(key, flag) -} - -// ToJSON serializes the config to a JSON byte array. -func (c *Config) ToJSON() ([]byte, error) { - jsonbytes, err := json.Marshal(c) - if err != nil { - return []byte{}, NewErrConfigToJSONFailed(err) - } - return jsonbytes, nil -} - -// String serializes the config to a JSON string. -func (c *Config) String() string { - jsonbytes, err := c.ToJSON() - if err != nil { - return fmt.Sprintf("failed to convert config to string: %s", err) - } - return string(jsonbytes) -} - -func (c *Config) toBytes() ([]byte, error) { - var buffer bytes.Buffer - tmpl := template.New("configTemplate") - configTemplate, err := tmpl.Parse(defaultConfigTemplate) - if err != nil { - return nil, NewErrConfigTemplateFailed(err) - } - if err := configTemplate.Execute(&buffer, c); err != nil { - return nil, NewErrConfigTemplateFailed(err) - } - return buffer.Bytes(), nil -} diff --git a/config/config_test.go b/config/config_test.go deleted file mode 100644 index e29ef8aa81..0000000000 --- a/config/config_test.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
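Worth recording before the tests below disappear: the removed config package composed its values in a fixed precedence — defaults, then config file, then DEFRA_-prefixed environment variables, then CLI flags. A minimal sketch of how that API was driven, assembled from the deleted code above (illustrative only, not part of this diff; note the Persist() call, which the package doc's own example omits but which the code above suggests is needed so the later Unmarshal does not clobber manual overrides):

```go
package main

import (
	"fmt"

	"github.com/sourcenetwork/defradb/config"
)

func main() {
	cfg := config.DefaultConfig() // defaults, already serialized into viper
	cfg.Net.P2PDisabled = true    // manual override on the struct

	// Push the override back into viper so Unmarshal keeps it.
	cfg.Persist()

	// false: skip the rootdir config file; env vars and flags still apply.
	if err := cfg.LoadWithRootdir(false); err != nil {
		panic(err)
	}
	fmt.Println(cfg.API.AddressToURL()) // e.g. http://localhost:9181
}
```

The replacement wiring lives behind cli.NewDefraCommand(), which, per the cmd/ changes above, no longer takes a config argument.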
- -package config - -import ( - "encoding/json" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" -) - -var envVarsDifferent = map[string]string{ - "DEFRA_DATASTORE_STORE": "memory", - "DEFRA_DATASTORE_BADGER_PATH": "defra_data", - "DEFRA_API_ADDRESS": "localhost:9999", - "DEFRA_NET_P2PDISABLED": "true", - "DEFRA_NET_P2PADDRESS": "/ip4/0.0.0.0/tcp/9876", - "DEFRA_NET_PUBSUB": "false", - "DEFRA_NET_RELAY": "false", - "DEFRA_LOG_LEVEL": "error", - "DEFRA_LOG_STACKTRACE": "true", - "DEFRA_LOG_FORMAT": "json", -} - -var envVarsInvalid = map[string]string{ - "DEFRA_DATASTORE_STORE": "^=+()&**()*(&))", - "DEFRA_DATASTORE_BADGER_PATH": "^=+()&**()*(&))", - "DEFRA_API_ADDRESS": "^=+()&**()*(&))", - "DEFRA_NET_P2PDISABLED": "^=+()&**()*(&))", - "DEFRA_NET_P2PADDRESS": "^=+()&**()*(&))", - "DEFRA_NET_PUBSUB": "^=+()&**()*(&))", - "DEFRA_NET_RELAY": "^=+()&**()*(&))", - "DEFRA_LOG_LEVEL": "^=+()&**()*(&))", - "DEFRA_LOG_STACKTRACE": "^=+()&**()*(&))", - "DEFRA_LOG_FORMAT": "^=+()&**()*(&))", -} - -func FixtureEnvKeyValue(t *testing.T, key, value string) { - t.Helper() - os.Setenv(key, value) - t.Cleanup(func() { - os.Unsetenv(key) - }) -} - -func FixtureEnvVars(t *testing.T, envVars map[string]string) { - t.Helper() - for k, v := range envVars { - os.Setenv(k, v) - } - t.Cleanup(func() { - for k := range envVars { - os.Unsetenv(k) - } - }) -} - -func TestConfigValidateBasic(t *testing.T) { - cfg := DefaultConfig() - assert.NoError(t, cfg.validate()) - - err := cfg.validate() - assert.NoError(t, err) - // asserting equality of some unlikely-to-change default values - assert.Equal(t, "stderr", cfg.Log.Output) - assert.Equal(t, "csv", cfg.Log.Format) - assert.Equal(t, false, cfg.API.TLS) - assert.Equal(t, false, cfg.Net.RelayEnabled) -} - -func TestLoadIncorrectValuesFromConfigFile(t *testing.T) { - var cfg *Config - - testcases := []struct { - setter func() - err error - }{ - { - setter: func() { - cfg.Datastore.Store = "antibadger" - }, - err: ErrInvalidDatastoreType, - }, - { - setter: func() { - cfg.Log.Level = "antilevel" - }, - err: ErrInvalidLogLevel, - }, - { - setter: func() { - cfg.Log.Format = "antiformat" - }, - - err: ErrInvalidLogFormat, - }, - } - - for _, tc := range testcases { - cfg = DefaultConfig() - err := cfg.setRootdir(t.TempDir()) - assert.NoError(t, err) - tc.setter() - err = cfg.WriteConfigFile() - assert.NoError(t, err) - err = cfg.LoadWithRootdir(true) - assert.ErrorIs(t, err, tc.err) - } -} - -func TestJSONSerialization(t *testing.T) { - cfg := DefaultConfig() - var m map[string]any - - b, errSerialize := cfg.ToJSON() - errUnmarshal := json.Unmarshal(b, &m) - - assert.NoError(t, errUnmarshal) - assert.NoError(t, errSerialize) - for k, v := range m { - if k != "Rootdir" { // Rootdir is not serialized - assert.NotEmpty(t, v) - } - } -} - -func TestLoadValidationDefaultsConfigFileEnv(t *testing.T) { - tmpdir := t.TempDir() - cfg := DefaultConfig() - err := cfg.setRootdir(tmpdir) - assert.NoError(t, err) - FixtureEnvVars(t, envVarsDifferent) - errWriteConfig := cfg.WriteConfigFile() - - errLoad := cfg.LoadWithRootdir(true) - - assert.NoError(t, errWriteConfig) - assert.NoError(t, errLoad) - assert.Equal(t, "localhost:9999", cfg.API.Address) - assert.Equal(t, filepath.Join(tmpdir, "defra_data"), cfg.Datastore.Badger.Path) -} - -func TestLoadDefaultsEnv(t *testing.T) { - cfg := DefaultConfig() - FixtureEnvVars(t, envVarsDifferent) - - err := cfg.LoadWithRootdir(false) - - assert.NoError(t, err) - assert.Equal(t, "localhost:9999", cfg.API.Address) - 
assert.Equal(t, filepath.Join(cfg.Rootdir, "defra_data"), cfg.Datastore.Badger.Path) -} - -func TestEnvVariablesAllConsidered(t *testing.T) { - cfg := DefaultConfig() - FixtureEnvVars(t, envVarsDifferent) - - err := cfg.LoadWithRootdir(false) - - assert.NoError(t, err) - assert.Equal(t, "localhost:9999", cfg.API.Address) - assert.Equal(t, filepath.Join(cfg.Rootdir, "defra_data"), cfg.Datastore.Badger.Path) - assert.Equal(t, "memory", cfg.Datastore.Store) - assert.Equal(t, true, cfg.Net.P2PDisabled) - assert.Equal(t, "/ip4/0.0.0.0/tcp/9876", cfg.Net.P2PAddress) - assert.Equal(t, false, cfg.Net.PubSubEnabled) - assert.Equal(t, false, cfg.Net.RelayEnabled) - assert.Equal(t, "error", cfg.Log.Level) - assert.Equal(t, true, cfg.Log.Stacktrace) - assert.Equal(t, "json", cfg.Log.Format) -} - -func TestLoadNonExistingConfigFile(t *testing.T) { - cfg := DefaultConfig() - err := cfg.setRootdir(t.TempDir()) - assert.NoError(t, err) - err = cfg.LoadWithRootdir(true) - assert.ErrorIs(t, err, ErrReadingConfigFile) -} - -func TestLoadInvalidConfigFile(t *testing.T) { - cfg := DefaultConfig() - tmpdir := t.TempDir() - - errWrite := os.WriteFile( - filepath.Join(tmpdir, DefaultConfigFileName), - []byte("{"), - 0644, - ) - assert.NoError(t, errWrite) - - err := cfg.setRootdir(tmpdir) - assert.NoError(t, err) - errLoad := cfg.LoadWithRootdir(true) - assert.ErrorIs(t, errLoad, ErrReadingConfigFile) -} - -func TestInvalidEnvVars(t *testing.T) { - cfg := DefaultConfig() - FixtureEnvVars(t, envVarsInvalid) - - err := cfg.LoadWithRootdir(false) - - assert.ErrorIs(t, err, ErrLoadingConfig) -} - -func TestCreateAndLoadCustomConfig(t *testing.T) { - testdir := t.TempDir() - - cfg := DefaultConfig() - err := cfg.setRootdir(testdir) - assert.NoError(t, err) - // a few valid but non-default changes - cfg.Net.PubSubEnabled = false - cfg.Log.Level = "fatal" - - err = cfg.CreateRootDirAndConfigFile() - assert.NoError(t, err) - - assert.True(t, cfg.ConfigFileExists()) - - // check that the config file loads properly - cfg2 := DefaultConfig() - err = cfg2.setRootdir(testdir) - assert.NoError(t, err) - err = cfg2.LoadWithRootdir(true) - assert.NoError(t, err) - assert.Equal(t, cfg.Net.PubSubEnabled, cfg2.Net.PubSubEnabled) - assert.Equal(t, cfg.Log.Level, cfg2.Log.Level) -} - -func TestLoadValidationEnvLoggingConfig(t *testing.T) { - FixtureEnvKeyValue(t, "DEFRA_LOG_LEVEL", "debug,net=info,log=error,cli=fatal") - cfg := DefaultConfig() - err := cfg.LoadWithRootdir(false) - assert.NoError(t, err) - assert.Equal(t, "debug", cfg.Log.Level) - for _, override := range cfg.Log.NamedOverrides { - switch override.Name { - case "net": - assert.Equal(t, "info", override.Level) - case "log": - assert.Equal(t, "error", override.Level) - case "cli": - assert.Equal(t, "fatal", override.Level) - default: - t.Fatal("unexpected named override") - } - } -} - -func TestLoadValidationEnvLoggerConfig(t *testing.T) { - FixtureEnvKeyValue(t, "DEFRA_LOG_LOGGER", "net,nocolor=true,level=debug;config,output=stdout,level=info") - cfg := DefaultConfig() - err := cfg.LoadWithRootdir(false) - assert.NoError(t, err) - for _, override := range cfg.Log.NamedOverrides { - switch override.Name { - case "net": - assert.Equal(t, true, override.NoColor) - assert.Equal(t, "debug", override.Level) - case "config": - assert.Equal(t, "info", override.Level) - assert.Equal(t, "stdout", override.Output) - default: - t.Fatal("unexpected named override") - } - } -} - -func TestLoadValidationEnvLoggerConfigInvalid(t *testing.T) { - // logging config parameter not provided 
as = pair - FixtureEnvKeyValue(t, "DEFRA_LOG_LOGGER", "net,nocolor,true,level,debug;config,output,stdout,level,info") - cfg := DefaultConfig() - err := cfg.LoadWithRootdir(false) - assert.ErrorIs(t, err, ErrFailedToValidateConfig) - - // invalid logger names - FixtureEnvKeyValue(t, "DEFRA_LOG_LOGGER", "13;2134;™¡£¡™£∞¡™∞¡™£¢;1234;1") - cfg = DefaultConfig() - err = cfg.LoadWithRootdir(false) - assert.ErrorIs(t, err, ErrFailedToValidateConfig) -} - -func TestLoadValidationLoggerConfigFromEnvExhaustive(t *testing.T) { - FixtureEnvKeyValue(t, "DEFRA_LOG_LOGGER", "net,nocolor=true,level=debug;config,output=stdout,caller=false;logging,stacktrace=true,format=json") - cfg := DefaultConfig() - err := cfg.LoadWithRootdir(false) - assert.NoError(t, err) - for _, override := range cfg.Log.NamedOverrides { - switch override.Name { - case "net": - assert.Equal(t, true, override.NoColor) - assert.Equal(t, "debug", override.Level) - case "config": - assert.Equal(t, "stdout", override.Output) - assert.Equal(t, false, override.Caller) - case "logging": - assert.Equal(t, true, override.Stacktrace) - assert.Equal(t, "json", override.Format) - default: - t.Fatal("unexpected named override") - } - } -} - -func TestLoadValidationLoggerConfigFromEnvUnknownParam(t *testing.T) { - FixtureEnvKeyValue(t, "DEFRA_LOG_LOGGER", "net,unknown=true,level=debug") - cfg := DefaultConfig() - err := cfg.LoadWithRootdir(false) - assert.ErrorIs(t, err, ErrUnknownLoggerParameter) -} - -func TestLoadValidationInvalidDatastoreConfig(t *testing.T) { - FixtureEnvKeyValue(t, "DEFRA_DATASTORE_STORE", "antibadger") - cfg := DefaultConfig() - err := cfg.LoadWithRootdir(false) - assert.ErrorIs(t, err, ErrInvalidDatastoreType) -} - -func TestValidationLogger(t *testing.T) { - testCases := []struct { - input string - expectedErr error - }{ - {"node,level=debug,output=stdout", nil}, - {"node,level=fatal,format=csv", nil}, - {"node,level=warn", ErrInvalidLogLevel}, - {"node,level=debug;cli,", ErrNotProvidedAsKV}, - {"node,level", ErrNotProvidedAsKV}, - - {";", ErrInvalidLoggerConfig}, - {";;", ErrInvalidLoggerConfig}, - {",level=debug", ErrLoggerNameEmpty}, - {"node,bar=baz", ErrUnknownLoggerParameter}, // unknown parameter - {"m,level=debug,output-json", ErrNotProvidedAsKV}, // key-value pair with invalid separator - {"myModule,level=debug,extraPart", ErrNotProvidedAsKV}, // additional part after last key-value pair - {"myModule,=myValue", ErrNotProvidedAsKV}, // empty key - {",k=v", ErrLoggerNameEmpty}, // empty module - {";foo", ErrInvalidLoggerConfig}, // empty module name - {"k=v", ErrInvalidLoggerConfig}, // missing module - {"debug,net=,log=error,cli=fatal", ErrNotProvidedAsKV}, // empty value - - } - - for _, tc := range testCases { - cfg := DefaultConfig() - cfg.Log.Logger = tc.input - t.Log(tc.input) - err := cfg.validate() - assert.ErrorIs(t, err, tc.expectedErr) - } -} - -func TestValidationInvalidEmptyAPIAddress(t *testing.T) { - cfg := DefaultConfig() - cfg.API.Address = "" - err := cfg.validate() - assert.ErrorIs(t, err, ErrInvalidDatabaseURL) -} - -func TestValidationNetConfigPeers(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.Peers = "/ip4/127.0.0.1/udp/1234,/ip4/7.7.7.7/tcp/4242/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N" - err := cfg.validate() - assert.NoError(t, err) -} - -func TestValidationInvalidNetConfigPeers(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.Peers = "&(*^(*&^(*&^(*&^))), mmmmh,123123" - err := cfg.validate() - assert.ErrorIs(t, err, ErrFailedToValidateConfig) -} - -func 
TestValidationInvalidLoggingConfig(t *testing.T) { - cfg := DefaultConfig() - cfg.Log.Level = "546578" - cfg.Log.Format = "*&)*&" - err := cfg.validate() - assert.ErrorIs(t, err, ErrInvalidLogLevel) -} - -func TestValidationAddressBasicIncomplete(t *testing.T) { - cfg := DefaultConfig() - cfg.API.Address = "localhost" - err := cfg.validate() - assert.ErrorIs(t, err, ErrFailedToValidateConfig) -} - -func TestValidationAddressLocalhostValid(t *testing.T) { - cfg := DefaultConfig() - cfg.API.Address = "localhost:9876" - err := cfg.validate() - assert.NoError(t, err) -} - -func TestValidationAddress0000Incomplete(t *testing.T) { - cfg := DefaultConfig() - cfg.API.Address = "0.0.0.0" - err := cfg.validate() - assert.ErrorIs(t, err, ErrFailedToValidateConfig) -} - -func TestValidationAddress0000Valid(t *testing.T) { - cfg := DefaultConfig() - cfg.API.Address = "0.0.0.0:9876" - err := cfg.validate() - assert.NoError(t, err) -} - -func TestValidationAddressDomainWithSubdomainValidWithTLSCorrectPortIsInvalid(t *testing.T) { - cfg := DefaultConfig() - cfg.API.Address = "sub.example.com:443" - cfg.API.TLS = true - err := cfg.validate() - assert.ErrorIs(t, err, ErrNoPortWithDomain) -} - -func TestValidationAddressDomainWithSubdomainWrongPortIsInvalid(t *testing.T) { - cfg := DefaultConfig() - cfg.API.Address = "sub.example.com:9876" - err := cfg.validate() - assert.ErrorIs(t, err, ErrNoPortWithDomain) -} diff --git a/config/config_utils.go b/config/config_utils.go deleted file mode 100644 index 46e5762570..0000000000 --- a/config/config_utils.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package config - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "unicode" -) - -type ByteSize uint64 - -const ( - B ByteSize = 1 - KiB = B << 10 - MiB = KiB << 10 - GiB = MiB << 10 - TiB = GiB << 10 - PiB = TiB << 10 -) - -// UnmarshalText calls Set on ByteSize with the given text -func (bs *ByteSize) UnmarshalText(text []byte) error { - return bs.Set(string(text)) -} - -// String returns the string formatted output of ByteSize -func (bs *ByteSize) String() string { - const unit = 1024 - bsInt := int64(*bs) - if bsInt < unit { - return fmt.Sprintf("%d", bsInt) - } - div, exp := int64(unit), 0 - for n := bsInt / unit; n >= unit; n /= unit { - div *= unit - exp++ - } - return fmt.Sprintf("%d%ciB", bsInt/div, "KMGTP"[exp]) -} - -// Type returns the type as a string. 
-func (bs *ByteSize) Type() string { - return "ByteSize" -} - -// Set parses a string into ByteSize -func (bs *ByteSize) Set(s string) error { - digitString := "" - unit := "" - for _, char := range s { - if unicode.IsDigit(char) { - digitString += string(char) - } else { - unit += string(char) - } - } - digits, err := strconv.Atoi(digitString) - if err != nil { - return NewErrUnableToParseByteSize(err) - } - - switch strings.ToUpper(strings.Trim(unit, " ")) { - case "B": - *bs = ByteSize(digits) * B - case "KB", "KIB": - *bs = ByteSize(digits) * KiB - case "MB", "MIB": - *bs = ByteSize(digits) * MiB - case "GB", "GIB": - *bs = ByteSize(digits) * GiB - case "TB", "TIB": - *bs = ByteSize(digits) * TiB - case "PB", "PIB": - *bs = ByteSize(digits) * PiB - default: - *bs = ByteSize(digits) - } - - return nil -} - -// expandHomeDir expands paths if they were passed in as `~` rather than `${HOME}` -// converts `~/.defradb/certs/server.crt` to `/home/username/.defradb/certs/server.crt`. -func expandHomeDir(path *string) error { - if *path == "~" { - return ErrPathCannotBeHomeDir - } else if strings.HasPrefix(*path, "~/") { - homeDir, err := os.UserHomeDir() - if err != nil { - return NewErrUnableToExpandHomeDir(err) - } - - // Use strings.HasPrefix so we don't match paths like "/x/~/x/" - *path = filepath.Join(homeDir, (*path)[2:]) - } - - return nil -} - -func isLowercaseAlpha(s string) bool { - for i := 0; i < len(s); i++ { - c := s[i] - if c < 'a' || c > 'z' { - return false - } - } - return true -} - -func parseKV(kv string) ([]string, error) { - parsedKV := strings.Split(kv, "=") - if len(parsedKV) != 2 { - return nil, NewErrNotProvidedAsKV(kv) - } - if parsedKV[0] == "" || parsedKV[1] == "" { - return nil, NewErrNotProvidedAsKV(kv) - } - return parsedKV, nil -} diff --git a/config/config_utils_test.go b/config/config_utils_test.go deleted file mode 100644 index f9eea47c16..0000000000 --- a/config/config_utils_test.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
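The ByteSize parser above has one behavior worth calling out, which the tests below exercise: decimal suffixes are normalized to binary units ("KB" parses as KiB), and a missing or unrecognized suffix falls back to raw bytes. A small runnable illustration, assuming the package as it existed before this deletion:

```go
package main

import (
	"fmt"

	"github.com/sourcenetwork/defradb/config"
)

func main() {
	var bs config.ByteSize

	// Decimal and binary suffixes are deliberately conflated:
	// "KB" and "KiB" both parse as 1024 bytes.
	_ = bs.Set("500MB")
	fmt.Println(bs.String()) // 500MiB

	// A missing or unrecognized unit falls through to raw bytes.
	_ = bs.Set("1024")
	fmt.Println(bs.String()) // 1KiB
}
```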
- -package config - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestUnmarshalByteSize(t *testing.T) { - var bs ByteSize - - b := []byte("10") - err := bs.UnmarshalText(b) - assert.NoError(t, err) - assert.Equal(t, 10*B, bs) - - b = []byte("10B") - err = bs.UnmarshalText(b) - assert.NoError(t, err) - assert.Equal(t, 10*B, bs) - - b = []byte("10 B") - err = bs.UnmarshalText(b) - assert.NoError(t, err) - assert.Equal(t, 10*B, bs) - - kb := []byte("10KB") - err = bs.UnmarshalText(kb) - assert.NoError(t, err) - assert.Equal(t, 10*KiB, bs) - - kb = []byte("10KiB") - err = bs.UnmarshalText(kb) - assert.NoError(t, err) - assert.Equal(t, 10*KiB, bs) - - kb = []byte("10 kb") - err = bs.UnmarshalText(kb) - assert.NoError(t, err) - assert.Equal(t, 10*KiB, bs) - - mb := []byte("10MB") - err = bs.UnmarshalText(mb) - assert.NoError(t, err) - assert.Equal(t, 10*MiB, bs) - - mb = []byte("10MiB") - err = bs.UnmarshalText(mb) - assert.NoError(t, err) - assert.Equal(t, 10*MiB, bs) - - gb := []byte("10GB") - err = bs.UnmarshalText(gb) - assert.NoError(t, err) - assert.Equal(t, 10*GiB, bs) - - gb = []byte("10GiB") - err = bs.UnmarshalText(gb) - assert.NoError(t, err) - assert.Equal(t, 10*GiB, bs) - - tb := []byte("10TB") - err = bs.UnmarshalText(tb) - assert.NoError(t, err) - assert.Equal(t, 10*TiB, bs) - - tb = []byte("10TiB") - err = bs.UnmarshalText(tb) - assert.NoError(t, err) - assert.Equal(t, 10*TiB, bs) - - pb := []byte("10PB") - err = bs.UnmarshalText(pb) - assert.NoError(t, err) - assert.Equal(t, 10*PiB, bs) - - pb = []byte("10PiB") - err = bs.UnmarshalText(pb) - assert.NoError(t, err) - assert.Equal(t, 10*PiB, bs) - - eb := []byte("༧") - err = bs.UnmarshalText(eb) - assert.ErrorIs(t, err, ErrUnableToParseByteSize) -} - -func TestByteSizeType(t *testing.T) { - var bs ByteSize - assert.Equal(t, "ByteSize", bs.Type()) -} - -func TestByteSizeToString(t *testing.T) { - b := 999 * B - assert.Equal(t, "999", b.String()) - - mb := 10 * MiB - assert.Equal(t, "10MiB", mb.String()) -} diff --git a/config/configfile.go b/config/configfile.go deleted file mode 100644 index f13b6695e1..0000000000 --- a/config/configfile.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package config - -import ( - "context" - _ "embed" - "fmt" - "os" - "path/filepath" -) - -const ( - DefaultConfigFileName = "config.yaml" - configType = "yaml" - defaultDirPerm = 0o700 - defaultConfigFilePerm = 0o644 -) - -// defaultConfigTemplate must reflect the Config struct in content and configuration. -// All parameters must be represented here, to support Viper's automatic environment variable handling. -// We embed the default config template for clarity and to avoid autoformatters from breaking it. 
-// -//go:embed configfile_yaml.gotmpl -var defaultConfigTemplate string - -func (cfg *Config) ConfigFilePath() string { - return filepath.Join(cfg.Rootdir, DefaultConfigFileName) -} - -func (cfg *Config) WriteConfigFile() error { - path := cfg.ConfigFilePath() - buffer, err := cfg.toBytes() - if err != nil { - return err - } - if err := os.WriteFile(path, buffer, defaultConfigFilePerm); err != nil { - return NewErrFailedToWriteFile(err, path) - } - log.FeedbackInfo(context.Background(), fmt.Sprintf("Created config file at %v", path)) - return nil -} - -func (cfg *Config) DeleteConfigFile() error { - if err := os.Remove(cfg.ConfigFilePath()); err != nil { - return NewErrFailedToRemoveConfigFile(err) - } - log.FeedbackInfo(context.Background(), fmt.Sprintf("Deleted config file at %v", cfg.ConfigFilePath())) - return nil -} - -func (cfg *Config) CreateRootDirAndConfigFile() error { - if err := os.MkdirAll(cfg.Rootdir, defaultDirPerm); err != nil { - return err - } - log.FeedbackInfo(context.Background(), fmt.Sprintf("Created DefraDB root directory at %v", cfg.Rootdir)) - if err := cfg.WriteConfigFile(); err != nil { - return err - } - return nil -} - -func (cfg *Config) ConfigFileExists() bool { - statInfo, err := os.Stat(cfg.ConfigFilePath()) - existsAsFile := (err == nil && !statInfo.IsDir()) - return existsAsFile -} - -func DefaultRootDir() string { - home, err := os.UserHomeDir() - if err != nil { - log.FatalE(context.Background(), "error determining user directory", err) - } - return filepath.Join(home, ".defradb") -} - -func FolderExists(folderPath string) bool { - statInfo, err := os.Stat(folderPath) - existsAsFolder := (err == nil && statInfo.IsDir()) - return existsAsFolder -} diff --git a/config/configfile_test.go b/config/configfile_test.go deleted file mode 100644 index 5f7aed26aa..0000000000 --- a/config/configfile_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
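For reference, the on-disk lifecycle implemented by the deleted configfile.go helpers above, as a short sketch using only names that appear in this diff (illustrative, not part of the change; the tests that follow cover the same flow against a temp directory):

```go
package main

import (
	"fmt"

	"github.com/sourcenetwork/defradb/config"
)

func main() {
	cfg := config.DefaultConfig()
	cfg.Rootdir = config.DefaultRootDir() // $HOME/.defradb

	if !cfg.ConfigFileExists() {
		// MkdirAll on the rootdir (0o700), then config.yaml (0o644)
		// rendered from the embedded template.
		if err := cfg.CreateRootDirAndConfigFile(); err != nil {
			panic(err)
		}
	}
	fmt.Println(cfg.ConfigFilePath()) // <rootdir>/config.yaml
}
```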
- -package config - -import ( - "bytes" - "os" - "path/filepath" - "runtime" - "testing" - "text/template" - - "github.com/stretchr/testify/assert" -) - -func TestConfigTemplateSerialize(t *testing.T) { - var buffer bytes.Buffer - cfg := DefaultConfig() - tmpl := template.New("configTemplate") - configTemplate, err := tmpl.Parse(defaultConfigTemplate) - assert.NoError(t, err) - err = configTemplate.Execute(&buffer, cfg) - assert.NoError(t, err) - _, err = cfg.ToJSON() - assert.NoError(t, err) -} - -func TestConfigTemplateExecutes(t *testing.T) { - cfg := DefaultConfig() - var buffer bytes.Buffer - tmpl := template.New("configTemplate") - configTemplate, err := tmpl.Parse(defaultConfigTemplate) - assert.NoError(t, err) - err = configTemplate.Execute(&buffer, cfg) - assert.NoError(t, err) -} - -func TestWritesConfigFile(t *testing.T) { - cfg := DefaultConfig() - tmpdir := t.TempDir() - cfg.Rootdir = tmpdir - err := cfg.WriteConfigFile() - assert.NoError(t, err) - path := filepath.Join(tmpdir, DefaultConfigFileName) - _, err = os.Stat(path) - assert.Nil(t, err) -} - -func TestWritesConfigFileErroneousPath(t *testing.T) { - cfg := DefaultConfig() - cfg.Rootdir = filepath.Join(t.TempDir(), "////*&^^(*8769876////bar") - err := cfg.WriteConfigFile() - assert.Error(t, err) -} - -func TestReadConfigFileForLogger(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skipf("Test is not supported on windows as it leaks resources, see https://github.com/sourcenetwork/defradb/issues/2057") - } - - cfg := DefaultConfig() - tmpdir := t.TempDir() - cfg.Rootdir = tmpdir - cfg.Log.Caller = true - cfg.Log.Format = "json" - cfg.Log.Level = logLevelDebug - cfg.Log.NoColor = true - cfg.Log.Output = filepath.Join(tmpdir, "log.txt") - cfg.Log.Stacktrace = true - - err := cfg.WriteConfigFile() - assert.NoError(t, err) - - assert.True(t, cfg.ConfigFileExists()) - - cfgFromFile := DefaultConfig() - err = cfgFromFile.setRootdir(tmpdir) - assert.NoError(t, err) - err = cfgFromFile.LoadWithRootdir(true) - assert.NoError(t, err) - - assert.Equal(t, cfg.Log.Caller, cfgFromFile.Log.Caller) - assert.Equal(t, cfg.Log.Format, cfgFromFile.Log.Format) - assert.Equal(t, cfg.Log.Level, cfgFromFile.Log.Level) - assert.Equal(t, cfg.Log.NoColor, cfgFromFile.Log.NoColor) - assert.Equal(t, cfg.Log.Output, cfgFromFile.Log.Output) - assert.Equal(t, cfg.Log.Stacktrace, cfgFromFile.Log.Stacktrace) -} - -func TestReadConfigFileForDatastore(t *testing.T) { - tmpdir := t.TempDir() - - cfg := DefaultConfig() - err := cfg.setRootdir(tmpdir) - assert.NoError(t, err) - cfg.Datastore.Store = "badger" - cfg.Datastore.Badger.Path = "dataPath" - cfg.Datastore.Badger.ValueLogFileSize = 512 * MiB - - err = cfg.WriteConfigFile() - assert.NoError(t, err) - - configPath := filepath.Join(tmpdir, DefaultConfigFileName) - _, err = os.Stat(configPath) - assert.NoError(t, err) - - cfgFromFile := DefaultConfig() - err = cfgFromFile.setRootdir(tmpdir) - assert.NoError(t, err) - err = cfgFromFile.LoadWithRootdir(true) - assert.NoError(t, err) - - assert.Equal(t, cfg.Datastore.Store, cfgFromFile.Datastore.Store) - assert.Equal(t, filepath.Join(tmpdir, cfg.Datastore.Badger.Path), cfgFromFile.Datastore.Badger.Path) - assert.Equal(t, cfg.Datastore.Badger.ValueLogFileSize, cfgFromFile.Datastore.Badger.ValueLogFileSize) -} - -func TestConfigFileExists(t *testing.T) { - cfg := DefaultConfig() - err := cfg.setRootdir(t.TempDir()) - assert.NoError(t, err) - assert.False(t, cfg.ConfigFileExists()) - - err = cfg.WriteConfigFile() - assert.NoError(t, err) - assert.True(t, 
cfg.ConfigFileExists()) -} - -func TestConfigFileExistsErroneousPath(t *testing.T) { - cfg := DefaultConfig() - cfg.Rootdir = filepath.Join(t.TempDir(), "////*&^^(*8769876////bar") - assert.False(t, cfg.ConfigFileExists()) -} - -func TestDeleteConfigFile(t *testing.T) { - cfg := DefaultConfig() - tmpdir := t.TempDir() - cfg.Rootdir = tmpdir - err := cfg.WriteConfigFile() - assert.NoError(t, err) - - assert.True(t, cfg.ConfigFileExists()) - - err = cfg.DeleteConfigFile() - assert.NoError(t, err) - assert.False(t, cfg.ConfigFileExists()) -} diff --git a/config/configfile_yaml.gotmpl b/config/configfile_yaml.gotmpl deleted file mode 100644 index 5346e41378..0000000000 --- a/config/configfile_yaml.gotmpl +++ /dev/null @@ -1,62 +0,0 @@ -# DefraDB configuration (YAML) - -# The default DefraDB directory is "$HOME/.defradb". It can be changed via the --rootdir CLI flag. -# Relative paths are interpreted as being rooted in the DefraDB directory. - -datastore: - # Store can be badger | memory - # badger: fast pure Go key-value store optimized for SSDs (https://github.com/dgraph-io/badger) - # memory: in-memory version of badger - store: {{ .Datastore.Store }} - badger: - # The path to the database data file(s). - path: {{ .Datastore.Badger.Path }} - # Maximum file size of the value log files. The in-memory file size will be 2*valuelogfilesize. - # Human friendly units can be used (ex: 500MB). - valuelogfilesize: {{ .Datastore.Badger.ValueLogFileSize }} - maxtxnretries: {{ .Datastore.MaxTxnRetries }} - # memory: - # size: {{ .Datastore.Memory.Size }} - -api: - # Address of the HTTP API to listen on or connect to - address: {{ .API.Address }} - # Whether the API server should listen over HTTPS - tls: {{ .API.TLS }} - # The list of origins a cross-domain request can be executed from. - # allowed-origins: {{ .API.AllowedOrigins }} - # The path to the public key file. Ignored if domains is set. - pubkeypath: {{ .API.PubKeyPath }} - # The path to the private key file. Ignored if domains is set. - privkeypath: {{ .API.PrivKeyPath }} - # Email address to let the CA (Let's Encrypt) send notifications via email when there are issues (optional). - # email: {{ .API.Email }} - -net: - # Whether the P2P is disabled - p2pdisabled: {{ .Net.P2PDisabled }} - # Listening address of the P2P network - p2paddress: {{ .Net.P2PAddress }} - # Whether the node has pubsub enabled or not - pubsub: {{ .Net.PubSubEnabled }} - # Enable libp2p's Circuit relay transport protocol https://docs.libp2p.io/concepts/circuit-relay/ - relay: {{ .Net.RelayEnabled }} - # List of peers to boostrap with, specified as multiaddresses (https://docs.libp2p.io/concepts/addressing/) - peers: {{ .Net.Peers }} - -log: - # Log level. Options are debug, info, error, fatal - level: {{ .Log.Level }} - # Include stacktrace in error and fatal logs - stacktrace: {{ .Log.Stacktrace }} - # Supported log formats are json, csv - format: {{ .Log.Format }} - # Where the log output is written to - output: {{ .Log.Output }} - # Disable colored log output - nocolor: {{ .Log.NoColor }} - # Caller location in log output - caller: {{ .Log.Caller }} - # Provide specific named component logger configuration - # e.g. 
net,nocolor=true,level=debug;config,output=stdout,format=json - logger: {{ .Log.Logger }} \ No newline at end of file diff --git a/config/errors.go b/config/errors.go deleted file mode 100644 index 872b362b66..0000000000 --- a/config/errors.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package config - -import ( - "github.com/sourcenetwork/defradb/errors" -) - -const ( - errFailedToWriteFile string = "failed to write file" - errFailedToRemoveConfigFile string = "failed to remove config file" - errCannotBeHomeDir string = "path cannot be just ~ (home directory)" - errUnableToExpandHomeDir string = "unable to expand home directory" - errNoDatabaseURLProvided string = "no database URL provided" - errLoggingConfigNotObtained string = "could not get logging config" - errFailedToValidateConfig string = "failed to validate config" - errOverrideConfigConvertFailed string = "invalid override config" - errConfigToJSONFailed string = "failed to marshal Config to JSON" - errInvalidDatabaseURL string = "invalid database URL" - errInvalidRPCTimeout string = "invalid RPC timeout" - errInvalidRPCMaxConnectionIdle string = "invalid RPC MaxConnectionIdle" - errInvalidP2PAddress string = "invalid P2P address" - errInvalidRPCAddress string = "invalid RPC address" - errInvalidBootstrapPeers string = "invalid bootstrap peers" - errInvalidLogLevel string = "invalid log level" - errInvalidDatastoreType string = "invalid store type" - errInvalidLogFormat string = "invalid log format" - errInvalidNamedLoggerName string = "invalid named logger name" - errInvalidLoggerConfig string = "invalid logger config" - errConfigTemplateFailed string = "could not process config template" - errCouldNotObtainLoggerConfig string = "could not get named logger config" - errNotProvidedAsKV string = "logging config parameter was not provided as = pair" - errLoggerNameEmpty string = "logger name cannot be empty" - errCouldNotParseType string = "could not parse type" - errUnknownLoggerParameter string = "unknown logger parameter" - errInvalidLoggerName string = "invalid logger name" - errDuplicateLoggerName string = "duplicate logger name" - errReadingConfigFile string = "failed to read config" - errLoadingConfig string = "failed to load config" - errUnableToParseByteSize string = "unable to parse byte size" - errInvalidDatastorePath string = "invalid datastore path" - errMissingPortNumber string = "missing port number" - errNoPortWithDomain string = "cannot provide port with domain name" - errInvalidRootDir string = "invalid root directory" -) - -var ( - ErrFailedToWriteFile = errors.New(errFailedToWriteFile) - ErrFailedToRemoveConfigFile = errors.New(errFailedToRemoveConfigFile) - ErrPathCannotBeHomeDir = errors.New(errCannotBeHomeDir) - ErrUnableToExpandHomeDir = errors.New(errUnableToExpandHomeDir) - ErrNoDatabaseURLProvided = errors.New(errNoDatabaseURLProvided) - ErrInvalidDatabaseURL = errors.New(errInvalidDatabaseURL) - ErrLoggingConfigNotObtained = errors.New(errLoggingConfigNotObtained) - ErrFailedToValidateConfig = errors.New(errFailedToValidateConfig) - ErrInvalidRPCTimeout = errors.New(errInvalidRPCTimeout) - ErrInvalidRPCMaxConnectionIdle = 
errors.New(errInvalidRPCMaxConnectionIdle) - ErrInvalidP2PAddress = errors.New(errInvalidP2PAddress) - ErrInvalidRPCAddress = errors.New(errInvalidRPCAddress) - ErrInvalidBootstrapPeers = errors.New(errInvalidBootstrapPeers) - ErrInvalidLogLevel = errors.New(errInvalidLogLevel) - ErrInvalidDatastoreType = errors.New(errInvalidDatastoreType) - ErrOverrideConfigConvertFailed = errors.New(errOverrideConfigConvertFailed) - ErrInvalidLogFormat = errors.New(errInvalidLogFormat) - ErrConfigToJSONFailed = errors.New(errConfigToJSONFailed) - ErrInvalidNamedLoggerName = errors.New(errInvalidNamedLoggerName) - ErrConfigTemplateFailed = errors.New(errConfigTemplateFailed) - ErrCouldNotObtainLoggerConfig = errors.New(errCouldNotObtainLoggerConfig) - ErrNotProvidedAsKV = errors.New(errNotProvidedAsKV) - ErrLoggerNameEmpty = errors.New(errLoggerNameEmpty) - ErrCouldNotParseType = errors.New(errCouldNotParseType) - ErrUnknownLoggerParameter = errors.New(errUnknownLoggerParameter) - ErrInvalidLoggerName = errors.New(errInvalidLoggerName) - ErrDuplicateLoggerName = errors.New(errDuplicateLoggerName) - ErrReadingConfigFile = errors.New(errReadingConfigFile) - ErrLoadingConfig = errors.New(errLoadingConfig) - ErrUnableToParseByteSize = errors.New(errUnableToParseByteSize) - ErrInvalidLoggerConfig = errors.New(errInvalidLoggerConfig) - ErrorInvalidDatastorePath = errors.New(errInvalidDatastorePath) - ErrMissingPortNumber = errors.New(errMissingPortNumber) - ErrNoPortWithDomain = errors.New(errNoPortWithDomain) - ErrorInvalidRootDir = errors.New(errInvalidRootDir) -) - -func NewErrFailedToWriteFile(inner error, path string) error { - return errors.Wrap(errFailedToWriteFile, inner, errors.NewKV("path", path)) -} - -func NewErrFailedToRemoveConfigFile(inner error) error { - return errors.Wrap(errFailedToRemoveConfigFile, inner) -} - -func NewErrPathCannotBeHomeDir(inner error) error { - return errors.Wrap(errCannotBeHomeDir, inner) -} - -func NewErrUnableToExpandHomeDir(inner error) error { - return errors.Wrap(errUnableToExpandHomeDir, inner) -} - -func NewErrNoDatabaseURLProvided(inner error) error { - return errors.Wrap(errNoDatabaseURLProvided, inner) -} - -func NewErrInvalidDatabaseURL(inner error) error { - return errors.Wrap(errInvalidDatabaseURL, inner) -} - -func NewErrLoggingConfigNotObtained(inner error) error { - return errors.Wrap(errLoggingConfigNotObtained, inner) -} - -func NewErrFailedToValidateConfig(inner error) error { - return errors.Wrap(errFailedToValidateConfig, inner) -} - -func NewErrInvalidRPCTimeout(inner error, timeout string) error { - return errors.Wrap(errInvalidRPCTimeout, inner, errors.NewKV("timeout", timeout)) -} - -func NewErrInvalidRPCMaxConnectionIdle(inner error, timeout string) error { - return errors.Wrap(errInvalidRPCMaxConnectionIdle, inner, errors.NewKV("timeout", timeout)) -} - -func NewErrInvalidP2PAddress(inner error, address string) error { - return errors.Wrap(errInvalidP2PAddress, inner, errors.NewKV("address", address)) -} - -func NewErrInvalidRPCAddress(inner error, address string) error { - return errors.Wrap(errInvalidRPCAddress, inner, errors.NewKV("address", address)) -} - -func NewErrInvalidBootstrapPeers(inner error, peers string) error { - return errors.Wrap(errInvalidBootstrapPeers, inner, errors.NewKV("peers", peers)) -} - -func NewErrInvalidLogLevel(level string) error { - return errors.New(errInvalidLogLevel, errors.NewKV("level", level)) -} - -func NewErrInvalidDatastoreType(storeType string) error { - return errors.New(errInvalidDatastoreType, 
errors.NewKV("store_type", storeType)) -} - -func NewErrOverrideConfigConvertFailed(inner error, name string) error { - return errors.Wrap(errOverrideConfigConvertFailed, inner, errors.NewKV("name", name)) -} - -func NewErrInvalidLogFormat(format string) error { - return errors.New(errInvalidLogFormat, errors.NewKV("format", format)) -} - -func NewErrConfigToJSONFailed(inner error) error { - return errors.Wrap(errConfigToJSONFailed, inner) -} - -func NewErrInvalidNamedLoggerName(name string) error { - return errors.New(errInvalidNamedLoggerName, errors.NewKV("name", name)) -} - -func NewErrConfigTemplateFailed(inner error) error { - return errors.Wrap(errConfigTemplateFailed, inner) -} - -func NewErrCouldNotObtainLoggerConfig(inner error, name string) error { - return errors.Wrap(errCouldNotObtainLoggerConfig, inner, errors.NewKV("name", name)) -} - -func NewErrNotProvidedAsKV(kv string) error { - return errors.New(errNotProvidedAsKV, errors.NewKV("KV", kv)) -} - -func NewErrCouldNotParseType(inner error, name string) error { - return errors.Wrap(errCouldNotParseType, inner, errors.NewKV("name", name)) -} - -func NewErrUnknownLoggerParameter(name string) error { - return errors.New(errUnknownLoggerParameter, errors.NewKV("param", name)) -} - -func NewErrInvalidLoggerName(name string) error { - return errors.New(errInvalidLoggerName, errors.NewKV("name", name)) -} - -func NewErrDuplicateLoggerName(name string) error { - return errors.New(errDuplicateLoggerName, errors.NewKV("name", name)) -} - -func NewErrReadingConfigFile(inner error) error { - return errors.Wrap(errReadingConfigFile, inner) -} - -func NewErrLoadingConfig(inner error) error { - return errors.Wrap(errLoadingConfig, inner) -} - -func NewErrUnableToParseByteSize(err error) error { - return errors.Wrap(errUnableToParseByteSize, err) -} - -func NewErrLoggerConfig(s string) error { - return errors.New(errInvalidLoggerConfig, errors.NewKV("explanation", s)) -} - -func NewErrInvalidDatastorePath(path string) error { - return errors.New(errInvalidDatastorePath, errors.NewKV("path", path)) -} - -func NewErrInvalidRootDir(path string) error { - return errors.New(errInvalidRootDir, errors.NewKV("path", path)) -} diff --git a/connor/connor.go b/connor/connor.go index 4b174bc45c..927b1dfffd 100644 --- a/connor/connor.go +++ b/connor/connor.go @@ -40,6 +40,10 @@ func matchWith(op string, conditions, data any) (bool, error) { return like(conditions, data) case "_nlike": return nlike(conditions, data) + case "_ilike": + return ilike(conditions, data) + case "_nilike": + return nilike(conditions, data) case "_not": return not(conditions, data) default: diff --git a/connor/ilike.go b/connor/ilike.go new file mode 100644 index 0000000000..84181affb9 --- /dev/null +++ b/connor/ilike.go @@ -0,0 +1,30 @@ +package connor + +import ( + "strings" + + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" +) + +// ilike is an operator which performs case insensitive string equality tests. 
+func ilike(condition, data any) (bool, error) {
+	switch d := data.(type) {
+	case immutable.Option[string]:
+		if !d.HasValue() {
+			return condition == nil, nil
+		}
+		data = d.Value()
+	}
+
+	switch cn := condition.(type) {
+	case string:
+		if d, ok := data.(string); ok {
+			return like(strings.ToLower(cn), strings.ToLower(d))
+		}
+		return false, nil
+	default:
+		return false, client.NewErrUnhandledType("condition", cn)
+	}
+}
diff --git a/connor/ilike_test.go b/connor/ilike_test.go
new file mode 100644
index 0000000000..cf9b38c40e
--- /dev/null
+++ b/connor/ilike_test.go
@@ -0,0 +1,41 @@
+package connor
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestILike(t *testing.T) {
+	const testString = "Source Is The Glue of Web3"
+
+	// case insensitive exact match
+	result, err := ilike("source is the glue of web3", testString)
+	require.NoError(t, err)
+	require.True(t, result)
+
+	// case insensitive no match
+	result, err = ilike("source is the glue", testString)
+	require.NoError(t, err)
+	require.False(t, result)
+
+	// case insensitive match prefix
+	result, err = ilike("source%", testString)
+	require.NoError(t, err)
+	require.True(t, result)
+
+	// case insensitive match suffix
+	result, err = ilike("%web3", testString)
+	require.NoError(t, err)
+	require.True(t, result)
+
+	// case insensitive match contains
+	result, err = ilike("%glue%", testString)
+	require.NoError(t, err)
+	require.True(t, result)
+
+	// case insensitive match start and end with
+	result, err = ilike("source%web3", testString)
+	require.NoError(t, err)
+	require.True(t, result)
+}
diff --git a/connor/like.go b/connor/like.go
index 0b1903eea0..63b8de5289 100644
--- a/connor/like.go
+++ b/connor/like.go
@@ -11,12 +11,12 @@ import (
 // like is an operator which performs string equality
 // tests.
 func like(condition, data any) (bool, error) {
-	switch arr := data.(type) {
+	switch d := data.(type) {
 	case immutable.Option[string]:
-		if !arr.HasValue() {
+		if !d.HasValue() {
 			return condition == nil, nil
 		}
-		data = arr.Value()
+		data = d.Value()
 	}
 
 	switch cn := condition.(type) {
diff --git a/connor/nilike.go b/connor/nilike.go
new file mode 100644
index 0000000000..be45d958ac
--- /dev/null
+++ b/connor/nilike.go
@@ -0,0 +1,12 @@
+package connor
+
+// nilike performs case insensitive string inequality comparisons by inverting
+// the result of the ilike operator for non-error cases.
+func nilike(conditions, data any) (bool, error) { + m, err := ilike(conditions, data) + if err != nil { + return false, err + } + + return !m, err +} diff --git a/connor/nilike_test.go b/connor/nilike_test.go new file mode 100644 index 0000000000..aa1a1350d0 --- /dev/null +++ b/connor/nilike_test.go @@ -0,0 +1,41 @@ +package connor + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNILike(t *testing.T) { + const testString = "Source Is The Glue of Web3" + + // case insensitive exact match + result, err := nilike("source is the glue of web3", testString) + require.NoError(t, err) + require.False(t, result) + + // case insensitive no match + result, err = nilike("source is the glue", testString) + require.NoError(t, err) + require.True(t, result) + + // case insensitive match prefix + result, err = nilike("source%", testString) + require.NoError(t, err) + require.False(t, result) + + // case insensitive match suffix + result, err = nilike("%web3", testString) + require.NoError(t, err) + require.False(t, result) + + // case insensitive match contains + result, err = nilike("%glue%", testString) + require.NoError(t, err) + require.False(t, result) + + // case insensitive match start and end with + result, err = nilike("source%web3", testString) + require.NoError(t, err) + require.False(t, result) +} diff --git a/connor/nlike_test.go b/connor/nlike_test.go new file mode 100644 index 0000000000..ae19bc5494 --- /dev/null +++ b/connor/nlike_test.go @@ -0,0 +1,41 @@ +package connor + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNLike(t *testing.T) { + const testString = "Source is the glue of web3" + + // exact match + result, err := nlike(testString, testString) + require.NoError(t, err) + require.False(t, result) + + // exact match error + result, err = nlike("Source is the glue", testString) + require.NoError(t, err) + require.True(t, result) + + // match prefix + result, err = nlike("Source%", testString) + require.NoError(t, err) + require.False(t, result) + + // match suffix + result, err = nlike("%web3", testString) + require.NoError(t, err) + require.False(t, result) + + // match contains + result, err = nlike("%glue%", testString) + require.NoError(t, err) + require.False(t, result) + + // match start and end with + result, err = nlike("Source%web3", testString) + require.NoError(t, err) + require.False(t, result) +} diff --git a/core/data_test.go b/core/data_test.go index ccad8163a7..1ba5a71611 100644 --- a/core/data_test.go +++ b/core/data_test.go @@ -25,8 +25,8 @@ func TestMergeAscending_ReturnsEmpty_GivenEmpty(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenSingle(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" input := []Span{NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1))} result := MergeAscending(input) @@ -37,10 +37,10 @@ func TestMergeAscending_ReturnsSingle_GivenSingle(t *testing.T) { } func TestMergeAscending_ReturnsSecondBeforeFirst_GivenKeysInReverseOrder(t *testing.T) { - start1 := "/p/0/0/k4" - end1 := "/p/0/0/k5" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k2" + start1 := "/1/p/0/k4" + end1 := "/1/p/0/k5" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k2" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), @@ -57,12 +57,12 @@ func TestMergeAscending_ReturnsSecondBeforeFirst_GivenKeysInReverseOrder(t *test } func TestMergeAscending_ReturnsItemsInOrder_GivenKeysInMixedOrder(t *testing.T) { - start1 := 
"/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k7" - end2 := "/p/0/0/k8" - start3 := "/p/0/0/k4" - end3 := "/p/0/0/k5" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k7" + end2 := "/1/p/0/k8" + start3 := "/1/p/0/k4" + end3 := "/1/p/0/k5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), @@ -83,10 +83,10 @@ func TestMergeAscending_ReturnsItemsInOrder_GivenKeysInMixedOrder(t *testing.T) } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndEqualToStart(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k4" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k3" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k4" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k3" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -100,10 +100,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndEqualToStart(t *testing } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentToStart(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k4" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k2" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k4" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k2" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -117,10 +117,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentToStart(t *test } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndWithin(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k4" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k3.5" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k4" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k3.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -134,10 +134,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndWithin(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndWithin(t *testing.T) { - start1 := "/p/0/0/k1.1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k2.5" + start1 := "/1/p/0/k1.1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k2.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -151,10 +151,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndWithin(t *testing.T) } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndWithinEndPrefix(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k4" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k4.5" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k4" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k4.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -168,10 +168,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndWithinEndPrefix(t *test } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndWithinEndPrefix(t *testing.T) { - start1 := "/p/0/0/k1.1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k3.5" + start1 := "/1/p/0/k1.1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k3.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -185,10 +185,10 @@ func 
TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndWithinEndPrefix(t *te } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndEqual(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k4" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k4" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k4" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k4" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -202,10 +202,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndEqual(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentAndBefore(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k5" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k4" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k5" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k4" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -219,10 +219,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentAndBefore(t *te } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentAndGreater(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k4" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k5" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k4" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -236,10 +236,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentAndGreater(t *t } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndEqual(t *testing.T) { - start1 := "/p/0/0/k1.1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k3" + start1 := "/1/p/0/k1.1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k3" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -253,10 +253,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndEqual(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndAdjacentAndBefore(t *testing.T) { - start1 := "/p/0/0/k1.1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k2" + start1 := "/1/p/0/k1.1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k2" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -270,10 +270,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndAdjacentAndBefore(t * } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndAdjacentAndAfter(t *testing.T) { - start1 := "/p/0/0/k1.1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k4" + start1 := "/1/p/0/k1.1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k4" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -287,16 +287,16 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndAdjacentAndAfter(t *t } func TestMergeAscending_ReturnsMiddleSpansMerged_GivenSpanCoveringMiddleSpans(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k6" - end2 := "/p/0/0/k7" - start3 := "/p/0/0/k9" - end3 := "/p/0/0/ka" - start4 := "/p/0/0/kc" - end4 := "/p/0/0/kd" - start5 := "/p/0/0/k4" - end5 := 
"/p/0/0/ka" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k6" + end2 := "/1/p/0/k7" + start3 := "/1/p/0/k9" + end3 := "/1/p/0/ka" + start4 := "/1/p/0/kc" + end4 := "/1/p/0/kd" + start5 := "/1/p/0/k4" + end5 := "/1/p/0/ka" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -318,10 +318,10 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenSpanCoveringMiddleSpans(t } func TestMergeAscending_ReturnsSingle_GivenStartEqualEndWithin(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k1.5" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k1.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -335,10 +335,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartEqualEndWithin(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartEqualEndWithinEndPrefix(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k2.5" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k2.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -352,8 +352,8 @@ func TestMergeAscending_ReturnsSingle_GivenStartEqualEndWithinEndPrefix(t *testi } func TestMergeAscending_ReturnsSingle_GivenDuplicates(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), @@ -367,10 +367,10 @@ func TestMergeAscending_ReturnsSingle_GivenDuplicates(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndWithin(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k1.2" - end2 := "/p/0/0/k1.5" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k1.2" + end2 := "/1/p/0/k1.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -384,10 +384,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndWithin(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndWithinEndPrefix(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k1.2" - end2 := "/p/0/0/k2.5" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k1.2" + end2 := "/1/p/0/k2.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -401,10 +401,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndWithinEndPrefix(t *test } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndEqual(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k1.2" - end2 := "/p/0/0/k2" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k1.2" + end2 := "/1/p/0/k2" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -418,10 +418,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndEqual(t *testing.T) { } func 
TestMergeAscending_ReturnsSingle_GivenStartWithinEndAdjacentAndBefore(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1.2" - end2 := "/p/0/0/k2" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1.2" + end2 := "/1/p/0/k2" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -435,10 +435,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndAdjacentAndBefore(t *te } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndAdjacentAndAfter(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1.2" - end2 := "/p/0/0/k4" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1.2" + end2 := "/1/p/0/k4" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -454,16 +454,16 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndAdjacentAndAfter(t *tes func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartEqualEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k4" - end2 := "/p/0/0/k5" - start3 := "/p/0/0/k7" - end3 := "/p/0/0/k8" - start4 := "/p/0/0/kc" - end4 := "/p/0/0/kd" - start5 := "/p/0/0/k4" // equal to start2 - end5 := "/p/0/0/ka" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k4" + end2 := "/1/p/0/k5" + start3 := "/1/p/0/k7" + end3 := "/1/p/0/k8" + start4 := "/1/p/0/kc" + end4 := "/1/p/0/kd" + start5 := "/1/p/0/k4" // equal to start2 + end5 := "/1/p/0/ka" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -487,16 +487,16 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartEqualEndAfterSpanCove func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartWithinEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k4" - end2 := "/p/0/0/k5" - start3 := "/p/0/0/k7" - end3 := "/p/0/0/k8" - start4 := "/p/0/0/kc" - end4 := "/p/0/0/kd" - start5 := "/p/0/0/k4.5" // within span2 - end5 := "/p/0/0/ka" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k4" + end2 := "/1/p/0/k5" + start3 := "/1/p/0/k7" + end3 := "/1/p/0/k8" + start4 := "/1/p/0/kc" + end4 := "/1/p/0/kd" + start5 := "/1/p/0/k4.5" // within span2 + end5 := "/1/p/0/ka" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -519,16 +519,16 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartWithinEndAfterSpanCov func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartEqualToEndEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k4" - end2 := "/p/0/0/k5" - start3 := "/p/0/0/k7" - end3 := "/p/0/0/k8" - start4 := "/p/0/0/kc" - end4 := "/p/0/0/kd" - start5 := "/p/0/0/k5" // span2's end - end5 := "/p/0/0/ka" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k4" + end2 := "/1/p/0/k5" + start3 := "/1/p/0/k7" + end3 := "/1/p/0/k8" + start4 := "/1/p/0/kc" + end4 := "/1/p/0/kd" + start5 := "/1/p/0/k5" // span2's end + end5 := "/1/p/0/ka" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -551,16 +551,16 
@@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartEqualToEndEndAfterSpa func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartAdjacentAndBeforeEndEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k4" - end2 := "/p/0/0/k6" - start3 := "/p/0/0/k8" - end3 := "/p/0/0/k9" - start4 := "/p/0/0/kd" - end4 := "/p/0/0/ke" - start5 := "/p/0/0/k5" // adjacent but before span2's end - end5 := "/p/0/0/kb" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k4" + end2 := "/1/p/0/k6" + start3 := "/1/p/0/k8" + end3 := "/1/p/0/k9" + start4 := "/1/p/0/kd" + end4 := "/1/p/0/ke" + start5 := "/1/p/0/k5" // adjacent but before span2's end + end5 := "/1/p/0/kb" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -583,16 +583,16 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartAdjacentAndBeforeEndE func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartAdjacentAndAfterEndEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k4" - end2 := "/p/0/0/k5" - start3 := "/p/0/0/k8" - end3 := "/p/0/0/k9" - start4 := "/p/0/0/kd" - end4 := "/p/0/0/ke" - start5 := "/p/0/0/k6" // adjacent and after span2's end - end5 := "/p/0/0/kb" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k4" + end2 := "/1/p/0/k5" + start3 := "/1/p/0/k8" + end3 := "/1/p/0/k9" + start4 := "/1/p/0/kd" + end4 := "/1/p/0/ke" + start5 := "/1/p/0/k6" // adjacent and after span2's end + end5 := "/1/p/0/kb" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -613,10 +613,10 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartAdjacentAndAfterEndEn } func TestMergeAscending_ReturnsTwoItems_GivenSecondItemAfterFirst(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k4" - end2 := "/p/0/0/k5" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k4" + end2 := "/1/p/0/k5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -632,10 +632,10 @@ func TestMergeAscending_ReturnsTwoItems_GivenSecondItemAfterFirst(t *testing.T) } func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndEqual(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k6" - start2 := "/p/0/0/k5" - end2 := "/p/0/0/k6" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k6" + start2 := "/1/p/0/k5" + end2 := "/1/p/0/k6" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -651,10 +651,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndEqual(t * func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndAdjacentAndAfter( t *testing.T, ) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k6" - start2 := "/p/0/0/k5" - end2 := "/p/0/0/k7" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k6" + start2 := "/1/p/0/k5" + end2 := "/1/p/0/k7" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -668,10 +668,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndAdjacentA } func 
TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndAfter(t *testing.T) {
-	start1 := "/p/0/0/k3"
-	end1 := "/p/0/0/k6"
-	start2 := "/p/0/0/k5"
-	end2 := "/p/0/0/k8"
+	start1 := "/1/p/0/k3"
+	end1 := "/1/p/0/k6"
+	start2 := "/1/p/0/k5"
+	end2 := "/1/p/0/k8"
 	input := []Span{
 		NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)),
 		NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)),
@@ -685,10 +685,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndAfter(t *
 }
 
 func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndAfterEndEndAfter(t *testing.T) {
-	start1 := "/p/0/0/k3"
-	end1 := "/p/0/0/k6"
-	start2 := "/p/0/0/k7"
-	end2 := "/p/0/0/k8"
+	start1 := "/1/p/0/k3"
+	end1 := "/1/p/0/k6"
+	start2 := "/1/p/0/k7"
+	end2 := "/1/p/0/k8"
 	input := []Span{
 		NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)),
 		NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)),
diff --git a/core/encoding.go b/core/encoding.go
index f6b46a4381..40e74915b8 100644
--- a/core/encoding.go
+++ b/core/encoding.go
@@ -17,11 +17,13 @@ import (
 	"github.com/sourcenetwork/immutable"
 
 	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/client/request"
+	"github.com/sourcenetwork/defradb/encoding"
 )
 
-// DecodeFieldValue takes a field value and description and converts it to the
+// NormalizeFieldValue takes a field value and description and converts it to the
 // standardized Defra Go type.
-func DecodeFieldValue(fieldDesc client.FieldDescription, val any) (any, error) {
+func NormalizeFieldValue(fieldDesc client.FieldDefinition, val any) (any, error) {
 	if val == nil {
 		return nil, nil
 	}
@@ -96,7 +98,7 @@ func DecodeFieldValue(fieldDesc client.FieldDescription, val any) (any, error) {
 		}
 	} else { // CBOR often encodes values typed as floats as ints
 		switch fieldDesc.Kind {
-		case client.FieldKind_FLOAT:
+		case client.FieldKind_NILLABLE_FLOAT:
 			switch v := val.(type) {
 			case int64:
 				return float64(v), nil
@@ -107,7 +109,7 @@ func DecodeFieldValue(fieldDesc client.FieldDescription, val any) (any, error) {
 			case uint:
 				return float64(v), nil
 			}
-		case client.FieldKind_INT:
+		case client.FieldKind_NILLABLE_INT:
 			switch v := val.(type) {
 			case float64:
 				return int64(v), nil
@@ -120,11 +122,21 @@ func DecodeFieldValue(fieldDesc client.FieldDescription, val any) (any, error) {
 			case uint:
 				return int64(v), nil
 			}
-		case client.FieldKind_DATETIME:
+		case client.FieldKind_NILLABLE_DATETIME:
 			switch v := val.(type) {
 			case string:
 				return time.Parse(time.RFC3339, v)
 			}
+		case client.FieldKind_NILLABLE_BOOL:
+			switch v := val.(type) {
+			case int64:
+				return v != 0, nil
+			}
+		case client.FieldKind_NILLABLE_STRING:
+			switch v := val.(type) {
+			case []byte:
+				return string(v), nil
+			}
 		}
 	}
 
@@ -179,3 +191,126 @@ func convertToInt(propertyName string, untypedValue any) (int64, error) {
 		return 0, client.NewErrUnexpectedType[string](propertyName, untypedValue)
 	}
 }
+
+// DecodeIndexDataStoreKey decodes an IndexDataStoreKey from bytes.
+// It expects the input bytes to be in the following format:
+//
+//	/[CollectionID]/[IndexID]/[FieldValue](/[FieldValue]...)
+//
+// Where [CollectionID] and [IndexID] are integers.
+//
+// All values of the fields are converted to the standardized Defra Go type
+// according to the field descriptions.
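The decoder below walks that layout byte by byte; the matching encoder, `EncodeIndexDataStoreKey`, appears further down in this file. As a minimal sketch of building and encoding such a key, assuming the `core` API introduced in this diff (`NewIndexDataStoreKey`, `IndexedField`) and that `encoding.EncodeFieldValue` accepts plain Go integer values as the updated key tests suggest:

```go
package main

import (
	"fmt"

	"github.com/sourcenetwork/defradb/core"
)

func main() {
	// Collection 1, index 2, one ascending and one descending field value.
	key := core.NewIndexDataStoreKey(1, 2, []core.IndexedField{
		{Value: int64(5)},
		{Value: int64(7), Descending: true},
	})

	// Bytes delegates to EncodeIndexDataStoreKey, yielding
	// '/' <uvarint 1> '/' <uvarint 2> '/' <value 5> '/' <value 7>.
	fmt.Printf("%x\n", key.Bytes())
}
```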
+func DecodeIndexDataStoreKey(
+	data []byte,
+	indexDesc *client.IndexDescription,
+	fields []client.FieldDefinition,
+) (IndexDataStoreKey, error) {
+	if len(data) == 0 {
+		return IndexDataStoreKey{}, ErrEmptyKey
+	}
+
+	if data[0] != '/' {
+		return IndexDataStoreKey{}, ErrInvalidKey
+	}
+	data = data[1:]
+
+	data, colID, err := encoding.DecodeUvarintAscending(data)
+	if err != nil {
+		return IndexDataStoreKey{}, err
+	}
+
+	key := IndexDataStoreKey{CollectionID: uint32(colID)}
+
+	if data[0] != '/' {
+		return IndexDataStoreKey{}, ErrInvalidKey
+	}
+	data = data[1:]
+
+	data, indID, err := encoding.DecodeUvarintAscending(data)
+	if err != nil {
+		return IndexDataStoreKey{}, err
+	}
+	key.IndexID = uint32(indID)
+
+	if len(data) == 0 {
+		return key, nil
+	}
+
+	for len(data) > 0 {
+		if data[0] != '/' {
+			return IndexDataStoreKey{}, ErrInvalidKey
+		}
+		data = data[1:]
+
+		i := len(key.Fields)
+		descending := false
+		// If the key has more values encoded than fields on the index description, the last
+		// value must be the docID and we treat it as a string.
+		if i < len(indexDesc.Fields) {
+			descending = indexDesc.Fields[i].Descending
+		} else if i > len(indexDesc.Fields) {
+			return IndexDataStoreKey{}, ErrInvalidKey
+		}
+
+		var val any
+		data, val, err = encoding.DecodeFieldValue(data, descending)
+		if err != nil {
+			return IndexDataStoreKey{}, err
+		}
+
+		key.Fields = append(key.Fields, IndexedField{Value: val, Descending: descending})
+	}
+
+	err = normalizeIndexDataStoreKeyValues(&key, fields)
+	return key, err
+}
+
+// normalizeIndexDataStoreKeyValues converts all field values to the standardized
+// Defra Go type according to the field descriptions.
+func normalizeIndexDataStoreKeyValues(key *IndexDataStoreKey, fields []client.FieldDefinition) error {
+	for i := range key.Fields {
+		if key.Fields[i].Value == nil {
+			continue
+		}
+		var err error
+		var val any
+		if i == len(key.Fields)-1 && len(key.Fields)-len(fields) == 1 {
+			bytes, ok := key.Fields[i].Value.([]byte)
+			if !ok {
+				return client.NewErrUnexpectedType[[]byte](request.DocIDArgName, key.Fields[i].Value)
+			}
+			val = string(bytes)
+		} else {
+			val, err = NormalizeFieldValue(fields[i], key.Fields[i].Value)
+		}
+		if err != nil {
+			return err
+		}
+		key.Fields[i].Value = val
+	}
+	return nil
+}
+
+// EncodeIndexDataStoreKey encodes an IndexDataStoreKey to bytes to be stored as a key
+// for secondary indexes.
+func EncodeIndexDataStoreKey(key *IndexDataStoreKey) []byte {
+	if key.CollectionID == 0 {
+		return []byte{}
+	}
+
+	b := encoding.EncodeUvarintAscending([]byte{'/'}, uint64(key.CollectionID))
+
+	if key.IndexID == 0 {
+		return b
+	}
+	b = append(b, '/')
+	b = encoding.EncodeUvarintAscending(b, uint64(key.IndexID))
+
+	for _, field := range key.Fields {
+		b = append(b, '/')
+		b = encoding.EncodeFieldValue(b, field.Value, field.Descending)
+	}
+
+	return b
+}
diff --git a/core/errors.go b/core/errors.go
index b672c1ed00..440e5778ac 100644
--- a/core/errors.go
+++ b/core/errors.go
@@ -16,15 +16,22 @@ import (
 
 const (
 	errFailedToGetFieldIdOfKey string = "failed to get FieldID of Key"
+	errInvalidFieldIndex       string = "invalid field index"
 )
 
 var (
 	ErrFailedToGetFieldIdOfKey = errors.New(errFailedToGetFieldIdOfKey)
 	ErrEmptyKey                = errors.New("received empty key string")
 	ErrInvalidKey              = errors.New("invalid key string")
+	ErrInvalidFieldIndex       = errors.New(errInvalidFieldIndex)
 )
 
 // NewErrFailedToGetFieldIdOfKey returns the error indicating failure to get FieldID of Key.
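`NewErrFailedToGetFieldIdOfKey` (continued below) and the new `NewErrInvalidFieldIndex` (defined just after it) both follow the structured-error convention used throughout DefraDB: a constant message string plus key-value context. A small usage sketch, hedged in that the exact printed rendering of the KV context is up to the `errors` package:

```go
package main

import (
	"fmt"

	"github.com/sourcenetwork/defradb/core"
)

func main() {
	// The offending index travels with the error as structured context
	// rather than being interpolated into the message string.
	err := core.NewErrInvalidFieldIndex(3)
	fmt.Println(err)
}
```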
func NewErrFailedToGetFieldIdOfKey(inner error) error { return errors.Wrap(errFailedToGetFieldIdOfKey, inner) } + +// NewErrInvalidFieldIndex returns the error indicating invalid field index. +func NewErrInvalidFieldIndex(i int) error { + return errors.New(errInvalidFieldIndex, errors.NewKV("index", i)) +} diff --git a/core/key.go b/core/key.go index 0c038b11dd..4017d445b0 100644 --- a/core/key.go +++ b/core/key.go @@ -17,6 +17,7 @@ import ( "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" @@ -46,10 +47,11 @@ const ( COLLECTION_NAME = "/collection/name" COLLECTION_SCHEMA_VERSION = "/collection/version" COLLECTION_INDEX = "/collection/index" - SCHEMA_MIGRATION = "/schema/migration" SCHEMA_VERSION = "/schema/version/v" - SCHEMA_VERSION_HISTORY = "/schema/version/h" - SEQ = "/seq" + SCHEMA_VERSION_ROOT = "/schema/version/r" + COLLECTION_SEQ = "/seq/collection" + INDEX_ID_SEQ = "/seq/index" + FIELD_ID_SEQ = "/seq/field" PRIMARY_KEY = "/pk" DATASTORE_DOC_VERSION_FIELD_ID = "v" REPLICATOR = "/replicator/id" @@ -65,29 +67,38 @@ type Key interface { // DataStoreKey is a type that represents a key in the database. type DataStoreKey struct { - CollectionID string - InstanceType InstanceType - DocID string - FieldId string + CollectionRootID uint32 + InstanceType InstanceType + DocID string + FieldId string } var _ Key = (*DataStoreKey)(nil) +// IndexedField contains information necessary for storing a single +// value of a field in an index. +type IndexedField struct { + // Value is the value of the field in the index + Value any + // Descending is true if the field is sorted in descending order + Descending bool +} + // IndexDataStoreKey is key of an indexed document in the database. type IndexDataStoreKey struct { // CollectionID is the id of the collection CollectionID uint32 // IndexID is the id of the index IndexID uint32 - // FieldValues is the values of the fields in the index - FieldValues [][]byte + // Fields is the values of the fields in the index + Fields []IndexedField } var _ Key = (*IndexDataStoreKey)(nil) type PrimaryDataStoreKey struct { - CollectionId string - DocID string + CollectionRootID uint32 + DocID string } var _ Key = (*PrimaryDataStoreKey)(nil) @@ -132,8 +143,8 @@ var _ Key = (*CollectionSchemaVersionKey)(nil) // CollectionIndexKey to a stored description of an index type CollectionIndexKey struct { - // CollectionName is the name of the collection that the index is on - CollectionName string + // CollectionID is the id of the collection that the index is on + CollectionID immutable.Option[uint32] // IndexName is the name of the index IndexName string } @@ -149,26 +160,15 @@ type SchemaVersionKey struct { var _ Key = (*SchemaVersionKey)(nil) -// SchemaHistoryKey holds the pathway through the schema version history for -// any given schema. +// SchemaRootKey indexes schema version ids by their root schema id. // -// The key points to the schema version id of the next version of the schema. -// If a SchemaHistoryKey does not exist for a given SchemaVersionID it means -// that that SchemaVersionID is for the latest version. -type SchemaHistoryKey struct { - SchemaRoot string - PreviousSchemaVersionID string -} - -var _ Key = (*SchemaHistoryKey)(nil) - -// SchemaVersionMigrationKey points to the jsonified configuration of a lens migration -// for the given source schema version id. 
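The `DataStoreKey` restructure above replaces the string `CollectionID` with a numeric `CollectionRootID`; the parsing rules live in the updated `NewDataStoreKey` further below. A hedged sketch of the new key shape (the docID is a placeholder value, and the input format mirrors the updated key tests later in this diff):

```go
package main

import (
	"fmt"

	"github.com/sourcenetwork/defradb/core"
)

func main() {
	// Keys now lead with the numeric collection root ID:
	// /[CollectionRootId]/[InstanceType]/[DocID]/[FieldId]
	k, err := core.NewDataStoreKey("2/v/bae-123/f1")
	if err != nil {
		panic(err)
	}
	fmt.Println(k.CollectionRootID) // 2
	fmt.Println(k.ToString())       // /2/v/bae-123/f1
}
```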
-type SchemaVersionMigrationKey struct { - SourceSchemaVersionID string +// The index is the key, there are no values stored against the key. +type SchemaRootKey struct { + SchemaRoot string + SchemaVersionID string } -var _ Key = (*SchemaVersionMigrationKey)(nil) +var _ Key = (*SchemaRootKey)(nil) type P2PCollectionKey struct { CollectionID string @@ -176,11 +176,29 @@ type P2PCollectionKey struct { var _ Key = (*P2PCollectionKey)(nil) -type SequenceKey struct { - SequenceName string +// CollectionIDSequenceKey is used to key the sequence used to generate collection ids. +type CollectionIDSequenceKey struct{} + +var _ Key = (*CollectionIDSequenceKey)(nil) + +// IndexIDSequenceKey is used to key the sequence used to generate index ids. +// +// The sequence is specific to each collection version. +type IndexIDSequenceKey struct { + CollectionID uint32 +} + +var _ Key = (*IndexIDSequenceKey)(nil) + +// FieldIDSequenceKey is used to key the sequence used to generate field ids. +// +// The sequence is specific to each collection root. Multiple collection of the same root +// must maintain consistent field ids. +type FieldIDSequenceKey struct { + CollectionRoot uint32 } -var _ Key = (*SequenceKey)(nil) +var _ Key = (*FieldIDSequenceKey)(nil) type ReplicatorKey struct { ReplicatorID string @@ -192,7 +210,7 @@ var _ Key = (*ReplicatorKey)(nil) // splitting the input using '/' as a field deliminator. It assumes // that the input string is in the following format: // -// /[CollectionId]/[InstanceType]/[DocID]/[FieldId] +// /[CollectionRootId]/[InstanceType]/[DocID]/[FieldId] // // Any properties before the above (assuming a '/' deliminator) are ignored func NewDataStoreKey(key string) (DataStoreKey, error) { @@ -210,7 +228,12 @@ func NewDataStoreKey(key string) (DataStoreKey, error) { return dataStoreKey, ErrInvalidKey } - dataStoreKey.CollectionID = elements[0] + colRootID, err := strconv.Atoi(elements[0]) + if err != nil { + return DataStoreKey{}, err + } + + dataStoreKey.CollectionRootID = uint32(colRootID) dataStoreKey.InstanceType = InstanceType(elements[1]) dataStoreKey.DocID = elements[2] if numberOfElements == 4 { @@ -291,14 +314,14 @@ func NewCollectionSchemaVersionKeyFromString(key string) (CollectionSchemaVersio } // NewCollectionIndexKey creates a new CollectionIndexKey from a collection name and index name. -func NewCollectionIndexKey(colID, indexName string) CollectionIndexKey { - return CollectionIndexKey{CollectionName: colID, IndexName: indexName} +func NewCollectionIndexKey(colID immutable.Option[uint32], indexName string) CollectionIndexKey { + return CollectionIndexKey{CollectionID: colID, IndexName: indexName} } // NewCollectionIndexKeyFromString creates a new CollectionIndexKey from a string. // It expects the input string is in the following format: // -// /collection/index/[CollectionName]/[IndexName] +// /collection/index/[CollectionID]/[IndexName] // // Where [IndexName] might be omitted. Anything else will return an error. 
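Since the collection reference is now an `immutable.Option[uint32]`, both the fully specified key and the bare prefix are expressible, mirroring the updated key_test.go cases later in this diff:

```go
package main

import (
	"fmt"

	"github.com/sourcenetwork/immutable"

	"github.com/sourcenetwork/defradb/core"
)

func main() {
	// Fully specified: collection 1, index "idx".
	key := core.NewCollectionIndexKey(immutable.Some[uint32](1), "idx")
	fmt.Println(key.ToString()) // /collection/index/1/idx

	// No collection ID: collapses to the bare prefix, which suits
	// prefix scans across all collection indexes.
	prefix := core.NewCollectionIndexKey(immutable.None[uint32](), "")
	fmt.Println(prefix.ToString()) // /collection/index
}
```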
func NewCollectionIndexKeyFromString(key string) (CollectionIndexKey, error) { @@ -306,7 +329,13 @@ func NewCollectionIndexKeyFromString(key string) (CollectionIndexKey, error) { if len(keyArr) < 4 || len(keyArr) > 5 || keyArr[1] != "collection" || keyArr[2] != "index" { return CollectionIndexKey{}, ErrInvalidKey } - result := CollectionIndexKey{CollectionName: keyArr[3]} + + colID, err := strconv.Atoi(keyArr[3]) + if err != nil { + return CollectionIndexKey{}, err + } + + result := CollectionIndexKey{CollectionID: immutable.Some(uint32(colID))} if len(keyArr) == 5 { result.IndexName = keyArr[4] } @@ -315,13 +344,13 @@ func NewCollectionIndexKeyFromString(key string) (CollectionIndexKey, error) { // ToString returns the string representation of the key // It is in the following format: -// /collection/index/[CollectionName]/[IndexName] -// if [CollectionName] is empty, the rest is ignored +// /collection/index/[CollectionID]/[IndexName] +// if [CollectionID] is empty, the rest is ignored func (k CollectionIndexKey) ToString() string { result := COLLECTION_INDEX - if k.CollectionName != "" { - result = result + "/" + k.CollectionName + if k.CollectionID.HasValue() { + result = result + "/" + fmt.Sprint(k.CollectionID.Value()) if k.IndexName != "" { result = result + "/" + k.IndexName } @@ -344,32 +373,32 @@ func NewSchemaVersionKey(schemaVersionID string) SchemaVersionKey { return SchemaVersionKey{SchemaVersionID: schemaVersionID} } -func NewSchemaHistoryKey(schemaRoot string, previousSchemaVersionID string) SchemaHistoryKey { - return SchemaHistoryKey{ - SchemaRoot: schemaRoot, - PreviousSchemaVersionID: previousSchemaVersionID, +func NewSchemaRootKey(schemaRoot string, schemaVersionID string) SchemaRootKey { + return SchemaRootKey{ + SchemaRoot: schemaRoot, + SchemaVersionID: schemaVersionID, } } -func NewSchemaVersionMigrationKey(schemaVersionID string) SchemaVersionMigrationKey { - return SchemaVersionMigrationKey{SourceSchemaVersionID: schemaVersionID} -} - -func NewSchemaHistoryKeyFromString(keyString string) (SchemaHistoryKey, error) { - keyString = strings.TrimPrefix(keyString, SCHEMA_VERSION_HISTORY+"/") +func NewSchemaRootKeyFromString(keyString string) (SchemaRootKey, error) { + keyString = strings.TrimPrefix(keyString, SCHEMA_VERSION_ROOT+"/") elements := strings.Split(keyString, "/") if len(elements) != 2 { - return SchemaHistoryKey{}, ErrInvalidKey + return SchemaRootKey{}, ErrInvalidKey } - return SchemaHistoryKey{ - SchemaRoot: elements[0], - PreviousSchemaVersionID: elements[1], + return SchemaRootKey{ + SchemaRoot: elements[0], + SchemaVersionID: elements[1], }, nil } -func NewSequenceKey(name string) SequenceKey { - return SequenceKey{SequenceName: name} +func NewIndexIDSequenceKey(collectionID uint32) IndexIDSequenceKey { + return IndexIDSequenceKey{CollectionID: collectionID} +} + +func NewFieldIDSequenceKey(collectionRoot uint32) FieldIDSequenceKey { + return FieldIDSequenceKey{CollectionRoot: collectionRoot} } func (k DataStoreKey) WithValueFlag() DataStoreKey { @@ -438,8 +467,8 @@ func (k HeadStoreKey) WithFieldId(fieldId string) HeadStoreKey { func (k DataStoreKey) ToString() string { var result string - if k.CollectionID != "" { - result = result + "/" + k.CollectionID + if k.CollectionRootID != 0 { + result = result + "/" + strconv.Itoa(int(k.CollectionRootID)) } if k.InstanceType != "" { result = result + "/" + string(k.InstanceType) @@ -463,7 +492,7 @@ func (k DataStoreKey) ToDS() ds.Key { } func (k DataStoreKey) Equal(other DataStoreKey) bool { - return 
k.CollectionID == other.CollectionID && + return k.CollectionRootID == other.CollectionRootID && k.DocID == other.DocID && k.FieldId == other.FieldId && k.InstanceType == other.InstanceType @@ -471,57 +500,24 @@ func (k DataStoreKey) Equal(other DataStoreKey) bool { func (k DataStoreKey) ToPrimaryDataStoreKey() PrimaryDataStoreKey { return PrimaryDataStoreKey{ - CollectionId: k.CollectionID, - DocID: k.DocID, + CollectionRootID: k.CollectionRootID, + DocID: k.DocID, } } -// NewIndexDataStoreKey creates a new IndexDataStoreKey from a string. -// It expects the input string is in the following format: -// -// /[CollectionID]/[IndexID]/[FieldValue](/[FieldValue]...) -// -// Where [CollectionID] and [IndexID] are integers -func NewIndexDataStoreKey(key string) (IndexDataStoreKey, error) { - if key == "" { - return IndexDataStoreKey{}, ErrEmptyKey - } - - if !strings.HasPrefix(key, "/") { - return IndexDataStoreKey{}, ErrInvalidKey - } - - elements := strings.Split(key[1:], "/") - - // With less than 3 elements, we know it's an invalid key - if len(elements) < 3 { - return IndexDataStoreKey{}, ErrInvalidKey - } - - colID, err := strconv.Atoi(elements[0]) - if err != nil { - return IndexDataStoreKey{}, ErrInvalidKey - } - - indexKey := IndexDataStoreKey{CollectionID: uint32(colID)} - - indID, err := strconv.Atoi(elements[1]) - if err != nil { - return IndexDataStoreKey{}, ErrInvalidKey +// NewIndexDataStoreKey creates a new IndexDataStoreKey from a collection ID, index ID and fields. +// It also validates values of the fields. +func NewIndexDataStoreKey(collectionID, indexID uint32, fields []IndexedField) IndexDataStoreKey { + return IndexDataStoreKey{ + CollectionID: collectionID, + IndexID: indexID, + Fields: fields, } - indexKey.IndexID = uint32(indID) - - // first 2 elements are the collection and index IDs, the rest are field values - for i := 2; i < len(elements); i++ { - indexKey.FieldValues = append(indexKey.FieldValues, []byte(elements[i])) - } - - return indexKey, nil } // Bytes returns the byte representation of the key func (k *IndexDataStoreKey) Bytes() []byte { - return []byte(k.ToString()) + return EncodeIndexDataStoreKey(k) } // ToDS returns the datastore key @@ -535,54 +531,13 @@ func (k *IndexDataStoreKey) ToDS() ds.Key { // If while composing the string from left to right, a component // is empty, the string is returned up to that point func (k *IndexDataStoreKey) ToString() string { - sb := strings.Builder{} - - if k.CollectionID == 0 { - return "" - } - sb.WriteByte('/') - sb.WriteString(strconv.Itoa(int(k.CollectionID))) - - if k.IndexID == 0 { - return sb.String() - } - sb.WriteByte('/') - sb.WriteString(strconv.Itoa(int(k.IndexID))) - - for _, v := range k.FieldValues { - if len(v) == 0 { - break - } - sb.WriteByte('/') - sb.WriteString(string(v)) - } - - return sb.String() -} - -// Equal returns true if the two keys are equal -func (k IndexDataStoreKey) Equal(other IndexDataStoreKey) bool { - if k.CollectionID != other.CollectionID { - return false - } - if k.IndexID != other.IndexID { - return false - } - if len(k.FieldValues) != len(other.FieldValues) { - return false - } - for i := range k.FieldValues { - if string(k.FieldValues[i]) != string(other.FieldValues[i]) { - return false - } - } - return true + return string(k.Bytes()) } func (k PrimaryDataStoreKey) ToDataStoreKey() DataStoreKey { return DataStoreKey{ - CollectionID: k.CollectionId, - DocID: k.DocID, + CollectionRootID: k.CollectionRootID, + DocID: k.DocID, } } @@ -597,8 +552,8 @@ func (k 
PrimaryDataStoreKey) ToDS() ds.Key { func (k PrimaryDataStoreKey) ToString() string { result := "" - if k.CollectionId != "" { - result = result + "/" + k.CollectionId + if k.CollectionRootID != 0 { + result = result + "/" + fmt.Sprint(k.CollectionRootID) } result = result + PRIMARY_KEY if k.DocID != "" { @@ -672,61 +627,61 @@ func (k SchemaVersionKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } -func (k SchemaHistoryKey) ToString() string { - result := SCHEMA_VERSION_HISTORY +func (k SchemaRootKey) ToString() string { + result := SCHEMA_VERSION_ROOT if k.SchemaRoot != "" { result = result + "/" + k.SchemaRoot } - if k.PreviousSchemaVersionID != "" { - result = result + "/" + k.PreviousSchemaVersionID + if k.SchemaVersionID != "" { + result = result + "/" + k.SchemaVersionID } return result } -func (k SchemaHistoryKey) Bytes() []byte { +func (k SchemaRootKey) Bytes() []byte { return []byte(k.ToString()) } -func (k SchemaHistoryKey) ToDS() ds.Key { +func (k SchemaRootKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } -func (k SchemaVersionMigrationKey) ToString() string { - result := SCHEMA_MIGRATION - - if k.SourceSchemaVersionID != "" { - result = result + "/" + k.SourceSchemaVersionID - } - - return result +func (k CollectionIDSequenceKey) ToString() string { + return COLLECTION_SEQ } -func (k SchemaVersionMigrationKey) Bytes() []byte { +func (k CollectionIDSequenceKey) Bytes() []byte { return []byte(k.ToString()) } -func (k SchemaVersionMigrationKey) ToDS() ds.Key { +func (k CollectionIDSequenceKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } -func (k SequenceKey) ToString() string { - result := SEQ +func (k IndexIDSequenceKey) ToString() string { + return INDEX_ID_SEQ + "/" + strconv.Itoa(int(k.CollectionID)) +} - if k.SequenceName != "" { - result = result + "/" + k.SequenceName - } +func (k IndexIDSequenceKey) Bytes() []byte { + return []byte(k.ToString()) +} - return result +func (k IndexIDSequenceKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + +func (k FieldIDSequenceKey) ToString() string { + return FIELD_ID_SEQ + "/" + strconv.Itoa(int(k.CollectionRoot)) } -func (k SequenceKey) Bytes() []byte { +func (k FieldIDSequenceKey) Bytes() []byte { return []byte(k.ToString()) } -func (k SequenceKey) ToDS() ds.Key { +func (k FieldIDSequenceKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } @@ -825,10 +780,11 @@ func (k DataStoreKey) PrefixEnd() DataStoreKey { newKey.InstanceType = InstanceType(bytesPrefixEnd([]byte(k.InstanceType))) return newKey } - if k.CollectionID != "" { - newKey.CollectionID = string(bytesPrefixEnd([]byte(k.CollectionID))) + if k.CollectionRootID != 0 { + newKey.CollectionRootID = k.CollectionRootID + 1 return newKey } + return newKey } diff --git a/core/key_test.go b/core/key_test.go index 4984c5b14f..3fa7f41a63 100644 --- a/core/key_test.go +++ b/core/key_test.go @@ -11,10 +11,15 @@ package core import ( + "fmt" "testing" ds "github.com/ipfs/go-datastore" + "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/encoding" ) func TestNewDataStoreKey_ReturnsEmptyStruct_GivenEmptyString(t *testing.T) { @@ -33,10 +38,10 @@ func TestNewDataStoreKey_ReturnsCollectionIdAndIndexIdAndDocIDAndFieldIdAndInsta t *testing.T, ) { instanceType := "anyType" - fieldId := "f1" + fieldID := "f1" docID := "docID" - collectionId := "1" - inputString := collectionId + "/" + instanceType + "/" + docID + "/" + fieldId + var collectionRootID uint32 = 2 + 
inputString := fmt.Sprintf("%v/%s/%s/%s", collectionRootID, instanceType, docID, fieldID) result, err := NewDataStoreKey(inputString) if err != nil { @@ -47,12 +52,12 @@ func TestNewDataStoreKey_ReturnsCollectionIdAndIndexIdAndDocIDAndFieldIdAndInsta assert.Equal( t, DataStoreKey{ - CollectionID: collectionId, - DocID: docID, - FieldId: fieldId, - InstanceType: InstanceType(instanceType)}, + CollectionRootID: collectionRootID, + DocID: docID, + FieldId: fieldID, + InstanceType: InstanceType(instanceType)}, result) - assert.Equal(t, "/"+collectionId+"/"+instanceType+"/"+docID+"/"+fieldId, resultString) + assert.Equal(t, fmt.Sprintf("/%v/%s/%s/%s", collectionRootID, instanceType, docID, fieldID), resultString) } func TestNewDataStoreKey_ReturnsEmptyStruct_GivenAStringWithMissingElements(t *testing.T) { @@ -66,8 +71,8 @@ func TestNewDataStoreKey_ReturnsEmptyStruct_GivenAStringWithMissingElements(t *t func TestNewDataStoreKey_GivenAShortObjectMarker(t *testing.T) { instanceType := "anyType" docID := "docID" - collectionId := "1" - inputString := collectionId + "/" + instanceType + "/" + docID + var collectionRootID uint32 = 2 + inputString := fmt.Sprintf("%v/%s/%s", collectionRootID, instanceType, docID) result, err := NewDataStoreKey(inputString) if err != nil { @@ -78,11 +83,11 @@ func TestNewDataStoreKey_GivenAShortObjectMarker(t *testing.T) { assert.Equal( t, DataStoreKey{ - CollectionID: collectionId, - DocID: docID, - InstanceType: InstanceType(instanceType)}, + CollectionRootID: collectionRootID, + DocID: docID, + InstanceType: InstanceType(instanceType)}, result) - assert.Equal(t, "/"+collectionId+"/"+instanceType+"/"+docID, resultString) + assert.Equal(t, fmt.Sprintf("/%v/%s/%s", collectionRootID, instanceType, docID), resultString) } func TestNewDataStoreKey_GivenAStringWithExtraPrefixes(t *testing.T) { @@ -110,23 +115,23 @@ func TestNewDataStoreKey_GivenAStringWithExtraSuffix(t *testing.T) { } func TestNewIndexKey_IfEmptyParam_ReturnPrefix(t *testing.T) { - key := NewCollectionIndexKey("", "") + key := NewCollectionIndexKey(immutable.None[uint32](), "") assert.Equal(t, "/collection/index", key.ToString()) } func TestNewIndexKey_IfParamsAreGiven_ReturnFullKey(t *testing.T) { - key := NewCollectionIndexKey("col", "idx") - assert.Equal(t, "/collection/index/col/idx", key.ToString()) + key := NewCollectionIndexKey(immutable.Some[uint32](1), "idx") + assert.Equal(t, "/collection/index/1/idx", key.ToString()) } func TestNewIndexKey_InNoCollectionName_ReturnJustPrefix(t *testing.T) { - key := NewCollectionIndexKey("", "idx") + key := NewCollectionIndexKey(immutable.None[uint32](), "idx") assert.Equal(t, "/collection/index", key.ToString()) } func TestNewIndexKey_InNoIndexName_ReturnWithoutIndexName(t *testing.T) { - key := NewCollectionIndexKey("col", "") - assert.Equal(t, "/collection/index/col", key.ToString()) + key := NewCollectionIndexKey(immutable.Some[uint32](1), "") + assert.Equal(t, "/collection/index/1", key.ToString()) } func TestNewIndexKeyFromString_IfInvalidString_ReturnError(t *testing.T) { @@ -144,280 +149,256 @@ func TestNewIndexKeyFromString_IfInvalidString_ReturnError(t *testing.T) { } func TestNewIndexKeyFromString_IfOnlyCollectionName_ReturnKey(t *testing.T) { - key, err := NewCollectionIndexKeyFromString("/collection/index/col") + key, err := NewCollectionIndexKeyFromString("/collection/index/1") assert.NoError(t, err) - assert.Equal(t, key.CollectionName, "col") - assert.Equal(t, key.IndexName, "") + assert.Equal(t, immutable.Some[uint32](1), key.CollectionID) + 
assert.Equal(t, "", key.IndexName) } func TestNewIndexKeyFromString_IfFullKeyString_ReturnKey(t *testing.T) { - key, err := NewCollectionIndexKeyFromString("/collection/index/col/idx") + key, err := NewCollectionIndexKeyFromString("/collection/index/1/idx") assert.NoError(t, err) - assert.Equal(t, key.CollectionName, "col") - assert.Equal(t, key.IndexName, "idx") + assert.Equal(t, immutable.Some[uint32](1), key.CollectionID) + assert.Equal(t, "idx", key.IndexName) +} + +func encodePrefix(colID, indexID uint32) []byte { + return encoding.EncodeUvarintAscending(append(encoding.EncodeUvarintAscending( + []byte{'/'}, uint64(colID)), '/'), uint64(indexID)) } -func toFieldValues(values ...string) [][]byte { - var result [][]byte = make([][]byte, 0, len(values)) - for _, value := range values { - result = append(result, []byte(value)) +func encodeKey(colID, indexID uint32, fieldParts ...any) []byte { + b := encodePrefix(colID, indexID) + const partSize = 2 + if len(fieldParts)%partSize != 0 { + panic(fmt.Sprintf("fieldParts must be a multiple of %d: value, descending", partSize)) + } + for i := 0; i < len(fieldParts)/partSize; i++ { + b = append(b, '/') + isDescending := fieldParts[i*partSize+1].(bool) + if fieldParts[i*partSize] == nil { + if isDescending { + b = encoding.EncodeNullDescending(b) + } else { + b = encoding.EncodeNullAscending(b) + } + } else { + if isDescending { + b = encoding.EncodeUvarintDescending(b, uint64(fieldParts[i*partSize].(int))) + } else { + b = encoding.EncodeUvarintAscending(b, uint64(fieldParts[i*partSize].(int))) + } + } } - return result + return b } -func TestIndexDatastoreKey_ToString(t *testing.T) { +func TestIndexDatastoreKey_Bytes(t *testing.T) { cases := []struct { - Key IndexDataStoreKey - Expected string + Name string + CollectionID uint32 + IndexID uint32 + Fields []IndexedField + Expected []byte }{ { - Key: IndexDataStoreKey{}, - Expected: "", - }, - { - Key: IndexDataStoreKey{ - CollectionID: 1, - }, - Expected: "/1", + Name: "empty", + Expected: []byte{}, }, { - Key: IndexDataStoreKey{ - CollectionID: 1, - IndexID: 2, - }, - Expected: "/1/2", + Name: "only collection", + CollectionID: 1, + Expected: encoding.EncodeUvarintAscending([]byte{'/'}, 1), }, { - Key: IndexDataStoreKey{ - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("3"), - }, - Expected: "/1/2/3", + Name: "only collection and index", + CollectionID: 1, + IndexID: 2, + Expected: encodePrefix(1, 2), }, { - Key: IndexDataStoreKey{ - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("3", "4"), - }, - Expected: "/1/2/3/4", + Name: "collection, index and one field", + CollectionID: 1, + IndexID: 2, + Fields: []IndexedField{{Value: 5}}, + Expected: encodeKey(1, 2, 5, false), }, { - Key: IndexDataStoreKey{ - CollectionID: 1, - FieldValues: toFieldValues("3"), - }, - Expected: "/1", + Name: "collection, index and two fields", + CollectionID: 1, + IndexID: 2, + Fields: []IndexedField{{Value: 5}, {Value: 7}}, + Expected: encodeKey(1, 2, 5, false, 7, false), }, { - Key: IndexDataStoreKey{ - IndexID: 2, - FieldValues: toFieldValues("3"), - }, - Expected: "", + Name: "no index", + CollectionID: 1, + Fields: []IndexedField{{Value: 5}}, + Expected: encoding.EncodeUvarintAscending([]byte{'/'}, 1), }, { - Key: IndexDataStoreKey{ - FieldValues: toFieldValues("3"), - }, - Expected: "", - }, - { - Key: IndexDataStoreKey{ - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("", ""), - }, - Expected: "/1/2", - }, - { - Key: IndexDataStoreKey{ - CollectionID: 1, - IndexID: 2, - 
FieldValues: toFieldValues("", "3"), - }, - Expected: "/1/2", - }, - { - Key: IndexDataStoreKey{ - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("3", "", "4"), - }, - Expected: "/1/2/3", + Name: "no collection", + IndexID: 2, + Fields: []IndexedField{{Value: 5}}, + Expected: []byte{}, }, } - for i, c := range cases { - assert.Equal(t, c.Key.ToString(), c.Expected, "case %d", i) + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + key := NewIndexDataStoreKey(c.CollectionID, c.IndexID, c.Fields) + actual := key.Bytes() + assert.Equal(t, c.Expected, actual, "upon calling key.Bytes()") + encKey := EncodeIndexDataStoreKey(&key) + assert.Equal(t, c.Expected, encKey, "upon calling EncodeIndexDataStoreKey") + }) } } -func TestIndexDatastoreKey_Bytes(t *testing.T) { - key := IndexDataStoreKey{ - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("3", "4"), - } - assert.Equal(t, key.Bytes(), []byte("/1/2/3/4")) +func TestIndexDatastoreKey_ToString(t *testing.T) { + key := NewIndexDataStoreKey(1, 2, []IndexedField{{Value: 5}}) + assert.Equal(t, key.ToString(), string(encodeKey(1, 2, 5, false))) } func TestIndexDatastoreKey_ToDS(t *testing.T) { - key := IndexDataStoreKey{ - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("3", "4"), + key := NewIndexDataStoreKey(1, 2, []IndexedField{{Value: 5}}) + assert.Equal(t, key.ToDS(), ds.NewKey(string(encodeKey(1, 2, 5, false)))) +} + +func TestCollectionIndexKey_Bytes(t *testing.T) { + key := CollectionIndexKey{ + CollectionID: immutable.Some[uint32](1), + IndexName: "idx", } - assert.Equal(t, key.ToDS(), ds.NewKey("/1/2/3/4")) + assert.Equal(t, []byte(COLLECTION_INDEX+"/1/idx"), key.Bytes()) } -func TestIndexDatastoreKey_EqualTrue(t *testing.T) { - cases := [][]IndexDataStoreKey{ +func TestDecodeIndexDataStoreKey(t *testing.T) { + const colID, indexID = 1, 2 + cases := []struct { + name string + desc client.IndexDescription + inputBytes []byte + expectedFields []IndexedField + fieldKinds []client.FieldKind + }{ { - { - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("3", "4"), - }, - { - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("3", "4"), + name: "one field", + desc: client.IndexDescription{ + ID: indexID, + Fields: []client.IndexedFieldDescription{{}}, }, + inputBytes: encodeKey(colID, indexID, 5, false), + expectedFields: []IndexedField{{Value: int64(5)}}, }, { - { - CollectionID: 1, - FieldValues: toFieldValues("3", "4"), - }, - { - CollectionID: 1, - FieldValues: toFieldValues("3", "4"), + name: "two fields (one descending)", + desc: client.IndexDescription{ + ID: indexID, + Fields: []client.IndexedFieldDescription{{}, {Descending: true}}, }, + inputBytes: encodeKey(colID, indexID, 5, false, 7, true), + expectedFields: []IndexedField{{Value: int64(5)}, {Value: int64(7), Descending: true}}, }, { - { - CollectionID: 1, - }, - { - CollectionID: 1, + name: "last encoded value without matching field description is docID", + desc: client.IndexDescription{ + ID: indexID, + Fields: []client.IndexedFieldDescription{{}}, }, + inputBytes: encoding.EncodeStringAscending(append(encodeKey(1, indexID, 5, false), '/'), "docID"), + expectedFields: []IndexedField{{Value: int64(5)}, {Value: "docID"}}, + fieldKinds: []client.FieldKind{client.FieldKind_NILLABLE_INT}, }, } - for i, c := range cases { - assert.True(t, c[0].Equal(c[1]), "case %d", i) + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + expectedKey := NewIndexDataStoreKey(colID, indexID, tc.expectedFields) + fieldDescs 
:= make([]client.FieldDefinition, len(tc.desc.Fields)) + for i := range tc.fieldKinds { + fieldDescs[i] = client.FieldDefinition{Kind: tc.fieldKinds[i]} + } + key, err := DecodeIndexDataStoreKey(tc.inputBytes, &tc.desc, fieldDescs) + assert.NoError(t, err) + assert.Equal(t, expectedKey, key) + }) } } -func TestCollectionIndexKey_Bytes(t *testing.T) { - key := CollectionIndexKey{ - CollectionName: "col", - IndexName: "idx", +func TestDecodeIndexDataStoreKey_InvalidKey(t *testing.T) { + replace := func(b []byte, i int, v byte) []byte { + b = append([]byte{}, b...) + b[i] = v + return b } - assert.Equal(t, []byte(COLLECTION_INDEX+"/col/idx"), key.Bytes()) -} + cutEnd := func(b []byte, l int) []byte { + return b[:len(b)-l] + } + + const colID, indexID = 1, 2 -func TestIndexDatastoreKey_EqualFalse(t *testing.T) { - cases := [][]IndexDataStoreKey{ + cases := []struct { + name string + val []byte + numFields int + }{ { - { - CollectionID: 1, - }, - { - CollectionID: 2, - }, + name: "empty", + val: []byte{}, }, { - { - CollectionID: 1, - IndexID: 2, - }, - { - CollectionID: 1, - IndexID: 3, - }, + name: "only slash", + val: []byte{'/'}, }, { - { - CollectionID: 1, - }, - { - IndexID: 1, - }, + name: "slash after collection", + val: append(encoding.EncodeUvarintAscending([]byte{'/'}, colID), '/'), }, { - { - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("4", "3"), - }, - { - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("3", "4"), - }, + name: "wrong prefix", + val: replace(encodeKey(colID, indexID, 5, false), 0, ' '), + numFields: 1, }, { - { - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("3"), - }, - { - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("3", "4"), - }, + name: "no slash before collection", + val: encodeKey(colID, indexID, 5, false)[1:], + numFields: 1, }, { - { - CollectionID: 1, - FieldValues: toFieldValues("3", "", "4"), - }, - { - CollectionID: 1, - FieldValues: toFieldValues("3", "4"), - }, + name: "no slash before index", + val: replace(encodeKey(colID, indexID, 5, false), 2, ' '), + numFields: 1, + }, + { + name: "no slash before field value", + val: replace(encodeKey(colID, indexID, 5, false), 4, ' '), + numFields: 1, + }, + { + name: "no field value", + val: cutEnd(encodeKey(colID, indexID, 5, false), 1), + numFields: 1, + }, + { + name: "no field description", + val: encodeKey(colID, indexID, 5, false, 7, false, 9, false), + numFields: 2, + }, + { + name: "invalid docID value", + val: encoding.EncodeUvarintAscending(append(encodeKey(colID, indexID, 5, false), '/'), 5), + numFields: 1, }, } - - for i, c := range cases { - assert.False(t, c[0].Equal(c[1]), "case %d", i) - } -} - -func TestNewIndexDataStoreKey_ValidKey(t *testing.T) { - str, err := NewIndexDataStoreKey("/1/2/3") - assert.NoError(t, err) - assert.Equal(t, str, IndexDataStoreKey{ - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("3"), - }) - - str, err = NewIndexDataStoreKey("/1/2/3/4") - assert.NoError(t, err) - assert.Equal(t, str, IndexDataStoreKey{ - CollectionID: 1, - IndexID: 2, - FieldValues: toFieldValues("3", "4"), - }) -} - -func TestNewIndexDataStoreKey_InvalidKey(t *testing.T) { - keys := []string{ - "", - "/", - "/1", - "/1/2", - " /1/2/3", - "1/2/3", - "/a/2/3", - "/1/b/3", - } - for i, key := range keys { - _, err := NewIndexDataStoreKey(key) - assert.Error(t, err, "case %d: %s", i, key) + indexDesc := client.IndexDescription{ID: indexID, Fields: []client.IndexedFieldDescription{{}}} + for _, c := range cases { + t.Run(c.name, 
func(t *testing.T) { + fieldDescs := make([]client.FieldDefinition, c.numFields) + for i := 0; i < c.numFields; i++ { + fieldDescs[i] = client.FieldDefinition{Kind: client.FieldKind_NILLABLE_INT} + } + _, err := DecodeIndexDataStoreKey(c.val, &indexDesc, fieldDescs) + assert.Error(t, err, c.name) + }) } } diff --git a/db/backup.go b/db/backup.go index d3a1138686..d47b3534e1 100644 --- a/db/backup.go +++ b/db/backup.go @@ -122,7 +122,7 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client cols := []client.Collection{} if len(config.Collections) == 0 { - cols, err = db.getAllCollections(ctx, txn) + cols, err = db.getCollections(ctx, txn, client.CollectionFetchOptions{}) if err != nil { return NewErrFailedToGetAllCollections(err) } @@ -137,7 +137,7 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client } colNameCache := map[string]struct{}{} for _, col := range cols { - colNameCache[col.Name()] = struct{}{} + colNameCache[col.Name().Value()] = struct{}{} } tempFile := config.Filepath + ".temp" @@ -181,8 +181,8 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client // set collection err = writeString( f, - fmt.Sprintf("\"%s\":[", col.Name()), - fmt.Sprintf(" \"%s\": [\n", col.Name()), + fmt.Sprintf("\"%s\":[", col.Name().Value()), + fmt.Sprintf(" \"%s\": [\n", col.Name().Value()), config.Pretty, ) if err != nil { diff --git a/db/base/collection_keys.go b/db/base/collection_keys.go index b2adc2f9e7..1277b96a81 100644 --- a/db/base/collection_keys.go +++ b/db/base/collection_keys.go @@ -20,7 +20,7 @@ import ( // MakeDataStoreKeyWithCollectionDescription returns the datastore key for the given collection description. func MakeDataStoreKeyWithCollectionDescription(col client.CollectionDescription) core.DataStoreKey { return core.DataStoreKey{ - CollectionID: col.IDString(), + CollectionRootID: col.RootID, } } @@ -30,28 +30,33 @@ func MakeDataStoreKeyWithCollectionAndDocID( docID string, ) core.DataStoreKey { return core.DataStoreKey{ - CollectionID: col.IDString(), - DocID: docID, + CollectionRootID: col.RootID, + DocID: docID, } } func MakePrimaryIndexKeyForCRDT( - c client.CollectionDescription, - schema client.SchemaDescription, + c client.CollectionDefinition, ctype client.CType, key core.DataStoreKey, fieldName string, ) (core.DataStoreKey, error) { switch ctype { case client.COMPOSITE: - return MakeDataStoreKeyWithCollectionDescription(c).WithInstanceInfo(key).WithFieldId(core.COMPOSITE_NAMESPACE), nil + return MakeDataStoreKeyWithCollectionDescription(c.Description). + WithInstanceInfo(key). + WithFieldId(core.COMPOSITE_NAMESPACE), + nil case client.LWW_REGISTER, client.PN_COUNTER: - field, ok := c.GetFieldByName(fieldName, &schema) + field, ok := c.GetFieldByName(fieldName) if !ok { return core.DataStoreKey{}, client.NewErrFieldNotExist(fieldName) } - return MakeDataStoreKeyWithCollectionDescription(c).WithInstanceInfo(key).WithFieldId(fmt.Sprint(field.ID)), nil + return MakeDataStoreKeyWithCollectionDescription(c.Description). + WithInstanceInfo(key). 
+ WithFieldId(fmt.Sprint(field.ID)), + nil } return core.DataStoreKey{}, ErrInvalidCrdtType } diff --git a/db/collection.go b/db/collection.go index f066c1d9fe..c9d311f01a 100644 --- a/db/collection.go +++ b/db/collection.go @@ -21,6 +21,7 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" ipld "github.com/ipfs/go-ipld-format" + "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" @@ -63,7 +64,7 @@ type collection struct { // to be auto generated based on a more controllable and user friendly // CollectionOptions object. -// NewCollection returns a pointer to a newly instanciated DB Collection +// newCollection returns a pointer to a newly instantiated DB Collection func (db *db) newCollection(desc client.CollectionDescription, schema client.SchemaDescription) *collection { return &collection{ db: db, @@ -87,7 +88,7 @@ func (c *collection) newFetcher() fetcher.Fetcher { } // createCollection creates a collection and saves it to the database in its system store. -// Note: Collection.ID is an autoincrementing value that is generated by the database. +// Note: Collection.ID is an auto-incrementing value that is generated by the database. func (db *db) createCollection( ctx context.Context, txn datastore.Txn, @@ -96,15 +97,17 @@ func (db *db) createCollection( schema := def.Schema desc := def.Description - exists, err := description.HasCollectionByName(ctx, txn, desc.Name) - if err != nil { - return nil, err - } - if exists { - return nil, ErrCollectionAlreadyExists + if desc.Name.HasValue() { + exists, err := description.HasCollectionByName(ctx, txn, desc.Name.Value()) + if err != nil { + return nil, err + } + if exists { + return nil, ErrCollectionAlreadyExists + } } - colSeq, err := db.getSequence(ctx, txn, core.COLLECTION) + colSeq, err := db.getSequence(ctx, txn, core.CollectionIDSequenceKey{}) if err != nil { return nil, err } @@ -112,13 +115,42 @@ func (db *db) createCollection( if err != nil { return nil, err } + + fieldSeq, err := db.getSequence(ctx, txn, core.NewFieldIDSequenceKey(uint32(colID))) + if err != nil { + return nil, err + } + desc.ID = uint32(colID) + desc.RootID = desc.ID schema, err = description.CreateSchemaVersion(ctx, txn, schema) if err != nil { return nil, err } desc.SchemaVersionID = schema.VersionID + for _, globalField := range schema.Fields { + var fieldID uint64 + if globalField.Name == request.DocIDFieldName { + // There is no hard technical requirement for this, we just think it looks nicer + // if the doc id is at the zero index. It makes it look a little nicer in commit + // queries too. 
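+			// (e.g. for a schema with fields [_docID, name, age], _docID is pinned to ID 0,
+			// while name and age take the next values handed out by the field ID sequence)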
+ fieldID = 0 + } else { + fieldID, err = fieldSeq.next(ctx, txn) + if err != nil { + return nil, err + } + } + + desc.Fields = append( + desc.Fields, + client.CollectionFieldDescription{ + Name: globalField.Name, + ID: client.FieldID(fieldID), + }, + ) + } desc, err = description.SaveCollection(ctx, txn, desc) if err != nil { @@ -132,7 +164,7 @@ func (db *db) createCollection( } } - return db.getCollectionByName(ctx, txn, desc.Name) + return db.getCollectionByID(ctx, txn, desc.ID) } // updateSchema updates the persisted schema description matching the name of the given @@ -149,11 +181,10 @@ func (db *db) updateSchema( existingSchemaByName map[string]client.SchemaDescription, proposedDescriptionsByName map[string]client.SchemaDescription, schema client.SchemaDescription, - setAsDefaultVersion bool, + migration immutable.Option[model.Lens], + setAsActiveVersion bool, ) error { hasChanged, err := db.validateUpdateSchema( - ctx, - txn, existingSchemaByName, proposedDescriptionsByName, schema, @@ -167,13 +198,12 @@ func (db *db) updateSchema( } for _, field := range schema.Fields { - if field.RelationType.IsSet(client.Relation_Type_ONE) { + if field.Kind == client.FieldKind_FOREIGN_OBJECT { idFieldName := field.Name + "_id" - if _, ok := schema.GetField(idFieldName); !ok { - schema.Fields = append(schema.Fields, client.FieldDescription{ + if _, ok := schema.GetFieldByName(idFieldName); !ok { + schema.Fields = append(schema.Fields, client.SchemaFieldDescription{ Name: idFieldName, Kind: client.FieldKind_DocID, - RelationType: client.Relation_Type_INTERNAL_ID, RelationName: field.RelationName, }) } @@ -194,24 +224,144 @@ func (db *db) updateSchema( return err } - if setAsDefaultVersion { - cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, previousVersionID) + // After creating the new schema version, we need to create new collection versions for + // any collection using the previous version. These will be inactive unless [setAsActiveVersion] + // is true. + + cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, previousVersionID) + if err != nil { + return err + } + + colSeq, err := db.getSequence(ctx, txn, core.CollectionIDSequenceKey{}) + if err != nil { + return err + } + + for _, col := range cols { + previousID := col.ID + + existingCols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schema.VersionID) if err != nil { return err } - for _, col := range cols { - col.SchemaVersionID = schema.VersionID + // The collection version may exist before the schema version was created locally. This is + // because migrations for the globally known schema version may have been registered locally + // (typically to handle documents synced over P2P at higher versions) before the local schema + // was updated. We need to check for them now, and update them instead of creating new ones + // if they exist. + var isExistingCol bool + existingColLoop: + for _, existingCol := range existingCols { + sources := existingCol.CollectionSources() + for _, source := range sources { + // Make sure that this collection is the parent of the current [col], and not part of + // another collection set that happens to be using the same schema. 
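+				// A match on [previousID] means [existingCol] was created from the
+				// collection version currently being replaced.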
+ if source.SourceCollectionID == previousID { + if existingCol.RootID == client.OrphanRootID { + existingCol.RootID = col.RootID + } + + fieldSeq, err := db.getSequence(ctx, txn, core.NewFieldIDSequenceKey(existingCol.RootID)) + if err != nil { + return err + } - col, err = description.SaveCollection(ctx, txn, col) + for _, globalField := range schema.Fields { + var fieldID client.FieldID + // We must check the source collection if the field already exists, and take its ID + // from there, otherwise the field must be generated by the sequence. + existingField, ok := col.GetFieldByName(globalField.Name) + if ok { + fieldID = existingField.ID + } else { + nextFieldID, err := fieldSeq.next(ctx, txn) + if err != nil { + return err + } + fieldID = client.FieldID(nextFieldID) + } + + existingCol.Fields = append( + existingCol.Fields, + client.CollectionFieldDescription{ + Name: globalField.Name, + ID: fieldID, + }, + ) + } + existingCol, err = description.SaveCollection(ctx, txn, existingCol) + if err != nil { + return err + } + isExistingCol = true + break existingColLoop + } + } + } + + if !isExistingCol { + colID, err := colSeq.next(ctx, txn) + if err != nil { + return err + } + + fieldSeq, err := db.getSequence(ctx, txn, core.NewFieldIDSequenceKey(col.RootID)) if err != nil { return err } - err = db.setDefaultSchemaVersionExplicit(ctx, txn, col.Name, schema.VersionID) + // Create any new collections without a name (inactive), if [setAsActiveVersion] is true + // they will be activated later along with any existing collection versions. + col.Name = immutable.None[string]() + col.ID = uint32(colID) + col.SchemaVersionID = schema.VersionID + col.Sources = []any{ + &client.CollectionSource{ + SourceCollectionID: previousID, + Transform: migration, + }, + } + + for _, globalField := range schema.Fields { + _, exists := col.GetFieldByName(globalField.Name) + if !exists { + fieldID, err := fieldSeq.next(ctx, txn) + if err != nil { + return err + } + + col.Fields = append( + col.Fields, + client.CollectionFieldDescription{ + Name: globalField.Name, + ID: client.FieldID(fieldID), + }, + ) + } + } + + _, err = description.SaveCollection(ctx, txn, col) if err != nil { return err } + + if migration.HasValue() { + err = db.LensRegistry().SetMigration(ctx, col.ID, migration.Value()) + if err != nil { + return err + } + } + } + } + + if setAsActiveVersion { + // activate collection versions using the new schema ID. This call must be made after + // all new collection versions have been saved. + err = db.setActiveSchemaVersion(ctx, txn, schema.VersionID) + if err != nil { + return err } } @@ -223,8 +373,6 @@ func (db *db) updateSchema( // Will return true if the given description differs from the current persisted state of the // schema. Will return an error if it fails validation. func (db *db) validateUpdateSchema( - ctx context.Context, - txn datastore.Txn, existingDescriptionsByName map[string]client.SchemaDescription, proposedDescriptionsByName map[string]client.SchemaDescription, proposedDesc client.SchemaDescription, @@ -253,7 +401,7 @@ func (db *db) validateUpdateSchema( } if proposedDesc.VersionID != "" && proposedDesc.VersionID != existingDesc.VersionID { - // If users specify this it will be overwritten, an error is prefered to quietly ignoring it. + // If users specify this it will be overwritten, an error is preferred to quietly ignoring it. 
return false, ErrCannotSetVersionID } @@ -271,26 +419,16 @@ func validateUpdateSchemaFields( proposedDesc client.SchemaDescription, ) (bool, error) { hasChanged := false - existingFieldsByID := map[client.FieldID]client.FieldDescription{} + existingFieldsByName := map[string]client.SchemaFieldDescription{} existingFieldIndexesByName := map[string]int{} for i, field := range existingDesc.Fields { existingFieldIndexesByName[field.Name] = i - existingFieldsByID[field.ID] = field + existingFieldsByName[field.Name] = field } newFieldNames := map[string]struct{}{} - newFieldIds := map[client.FieldID]struct{}{} for proposedIndex, proposedField := range proposedDesc.Fields { - var existingField client.FieldDescription - var fieldAlreadyExists bool - if proposedField.ID != client.FieldID(0) || - proposedField.Name == request.DocIDFieldName { - existingField, fieldAlreadyExists = existingFieldsByID[proposedField.ID] - } - - if proposedField.ID != client.FieldID(0) && !fieldAlreadyExists { - return false, NewErrCannotSetFieldID(proposedField.Name, proposedField.ID) - } + existingField, fieldAlreadyExists := existingFieldsByName[proposedField.Name] // If the field is new, then the collection has changed hasChanged = hasChanged || !fieldAlreadyExists @@ -307,40 +445,11 @@ func validateUpdateSchemaFields( return false, NewErrSchemaNotFound(proposedField.Name, proposedField.Schema) } - if proposedField.Kind == client.FieldKind_FOREIGN_OBJECT { - if !proposedField.RelationType.IsSet(client.Relation_Type_ONE) || - !(proposedField.RelationType.IsSet(client.Relation_Type_ONEONE) || - proposedField.RelationType.IsSet(client.Relation_Type_ONEMANY)) { - return false, NewErrRelationalFieldInvalidRelationType( - proposedField.Name, - fmt.Sprintf( - "%v and %v or %v, with optionally %v", - client.Relation_Type_ONE, - client.Relation_Type_ONEONE, - client.Relation_Type_ONEMANY, - client.Relation_Type_Primary, - ), - proposedField.RelationType, - ) - } - } - - if proposedField.Kind == client.FieldKind_FOREIGN_OBJECT_ARRAY { - if !proposedField.RelationType.IsSet(client.Relation_Type_MANY) || - !proposedField.RelationType.IsSet(client.Relation_Type_ONEMANY) { - return false, NewErrRelationalFieldInvalidRelationType( - proposedField.Name, - client.Relation_Type_MANY|client.Relation_Type_ONEMANY, - proposedField.RelationType, - ) - } - } - if proposedField.RelationName == "" { return false, NewErrRelationalFieldMissingRelationName(proposedField.Name) } - if proposedField.RelationType.IsSet(client.Relation_Type_Primary) { + if proposedField.IsPrimaryRelation { if proposedField.Kind == client.FieldKind_FOREIGN_OBJECT_ARRAY { return false, NewErrPrimarySideOnMany(proposedField.Name) } @@ -348,20 +457,12 @@ func validateUpdateSchemaFields( if proposedField.Kind == client.FieldKind_FOREIGN_OBJECT { idFieldName := proposedField.Name + request.RelatedObjectID - idField, idFieldFound := proposedDesc.GetField(idFieldName) + idField, idFieldFound := proposedDesc.GetFieldByName(idFieldName) if idFieldFound { if idField.Kind != client.FieldKind_DocID { return false, NewErrRelationalFieldIDInvalidType(idField.Name, client.FieldKind_DocID, idField.Kind) } - if idField.RelationType != client.Relation_Type_INTERNAL_ID { - return false, NewErrRelationalFieldInvalidRelationType( - idField.Name, - client.Relation_Type_INTERNAL_ID, - idField.RelationType, - ) - } - if idField.RelationName == "" { return false, NewErrRelationalFieldMissingRelationName(idField.Name) } @@ -369,10 +470,10 @@ func validateUpdateSchemaFields( } var 
relatedFieldFound bool - var relatedField client.FieldDescription + var relatedField client.SchemaFieldDescription for _, field := range relatedDesc.Fields { if field.RelationName == proposedField.RelationName && - !field.RelationType.IsSet(client.Relation_Type_INTERNAL_ID) && + field.Kind != client.FieldKind_DocID && !(relatedDesc.Name == proposedDesc.Name && field.Name == proposedField.Name) { relatedFieldFound = true relatedField = field @@ -384,43 +485,13 @@ func validateUpdateSchemaFields( return false, client.NewErrRelationOneSided(proposedField.Name, proposedField.Schema) } - if !(proposedField.RelationType.IsSet(client.Relation_Type_Primary) || - relatedField.RelationType.IsSet(client.Relation_Type_Primary)) { + if !(proposedField.IsPrimaryRelation || relatedField.IsPrimaryRelation) { return false, NewErrPrimarySideNotDefined(proposedField.RelationName) } - if proposedField.RelationType.IsSet(client.Relation_Type_Primary) && - relatedField.RelationType.IsSet(client.Relation_Type_Primary) { + if proposedField.IsPrimaryRelation && relatedField.IsPrimaryRelation { return false, NewErrBothSidesPrimary(proposedField.RelationName) } - - if proposedField.RelationType.IsSet(client.Relation_Type_ONEONE) && - relatedField.Kind != client.FieldKind_FOREIGN_OBJECT { - return false, NewErrRelatedFieldKindMismatch( - proposedField.RelationName, - client.FieldKind_FOREIGN_OBJECT, - relatedField.Kind, - ) - } - - if proposedField.RelationType.IsSet(client.Relation_Type_ONEMANY) && - proposedField.Kind == client.FieldKind_FOREIGN_OBJECT && - relatedField.Kind != client.FieldKind_FOREIGN_OBJECT_ARRAY { - return false, NewErrRelatedFieldKindMismatch( - proposedField.RelationName, - client.FieldKind_FOREIGN_OBJECT_ARRAY, - relatedField.Kind, - ) - } - - if proposedField.RelationType.IsSet(client.Relation_Type_ONEONE) && - !relatedField.RelationType.IsSet(client.Relation_Type_ONEONE) { - return false, NewErrRelatedFieldRelationTypeMismatch( - proposedField.RelationName, - client.Relation_Type_ONEONE, - relatedField.RelationType, - ) - } } if _, isDuplicate := newFieldNames[proposedField.Name]; isDuplicate { @@ -428,7 +499,7 @@ func validateUpdateSchemaFields( } if fieldAlreadyExists && proposedField != existingField { - return false, NewErrCannotMutateField(proposedField.ID, proposedField.Name) + return false, NewErrCannotMutateField(proposedField.Name) } if existingIndex := existingFieldIndexesByName[proposedField.Name]; fieldAlreadyExists && @@ -445,18 +516,24 @@ func validateUpdateSchemaFields( } newFieldNames[proposedField.Name] = struct{}{} - newFieldIds[proposedField.ID] = struct{}{} } for _, field := range existingDesc.Fields { - if _, stillExists := newFieldIds[field.ID]; !stillExists { - return false, NewErrCannotDeleteField(field.Name, field.ID) + if _, stillExists := newFieldNames[field.Name]; !stillExists { + return false, NewErrCannotDeleteField(field.Name) } } return hasChanged, nil } -func (db *db) setDefaultSchemaVersion( +// SetActiveSchemaVersion activates all collection versions with the given schema version, and deactivates all +// those without it (if they share the same schema root). +// +// This will affect all operations interacting with the schema where a schema version is not explicitly +// provided. This includes GQL queries and Collection operations. +// +// It will return an error if the provided schema version ID does not exist. 
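+//
+// A minimal calling sketch (the schema version ID below is hypothetical):
+//
+//	err := db.setActiveSchemaVersion(ctx, txn, "bafkreihypotheticalversionid")
+//	if err != nil {
+//		return err
+//	}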
+func (db *db) setActiveSchemaVersion( ctx context.Context, txn datastore.Txn, schemaVersionID string, @@ -465,86 +542,140 @@ func (db *db) setDefaultSchemaVersion( return ErrSchemaVersionIDEmpty } + cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schemaVersionID) + if err != nil { + return err + } + schema, err := description.GetSchemaVersion(ctx, txn, schemaVersionID) if err != nil { return err } - colDescs, err := description.GetCollectionsBySchemaRoot(ctx, txn, schema.Root) + colsWithRoot, err := description.GetCollectionsBySchemaRoot(ctx, txn, schema.Root) if err != nil { return err } - for _, col := range colDescs { - col.SchemaVersionID = schemaVersionID - col, err = description.SaveCollection(ctx, txn, col) + colsBySourceID := map[uint32][]client.CollectionDescription{} + colsByID := make(map[uint32]client.CollectionDescription, len(colsWithRoot)) + for _, col := range colsWithRoot { + colsByID[col.ID] = col + + sources := col.CollectionSources() + if len(sources) > 0 { + // For now, we assume that each collection can only have a single source. This will likely need + // to change later. + slice := colsBySourceID[sources[0].SourceCollectionID] + slice = append(slice, col) + colsBySourceID[sources[0].SourceCollectionID] = slice + } + } + + for _, col := range cols { + if col.Name.HasValue() { + // The collection is already active, so we can skip it and continue + continue + } + sources := col.CollectionSources() + + var activeCol client.CollectionDescription + var rootCol client.CollectionDescription + var isActiveFound bool + if len(sources) > 0 { + // For now, we assume that each collection can only have a single source. This will likely need + // to change later. + activeCol, rootCol, isActiveFound = db.getActiveCollectionDown(ctx, txn, colsByID, sources[0].SourceCollectionID) + } + if !isActiveFound { + // We need to look both down and up for the active version - the most recent is not necessarily the active one. + activeCol, isActiveFound = db.getActiveCollectionUp(ctx, txn, colsBySourceID, rootCol.ID) + } + + var newName string + if isActiveFound { + newName = activeCol.Name.Value() + } else { + // If there are no active versions in the collection set, take the name of the schema to be the name of the + // collection. + newName = schema.Name + } + col.Name = immutable.Some(newName) + + _, err = description.SaveCollection(ctx, txn, col) if err != nil { return err } + + if isActiveFound { + // Deactivate the currently active collection by setting its name to none. + activeCol.Name = immutable.None[string]() + _, err = description.SaveCollection(ctx, txn, activeCol) + if err != nil { + return err + } + } } + // Load the schema into the clients (e.g. 
GQL) return db.loadSchema(ctx, txn) } -func (db *db) setDefaultSchemaVersionExplicit( +func (db *db) getActiveCollectionDown( ctx context.Context, txn datastore.Txn, - collectionName string, - schemaVersionID string, -) error { - if schemaVersionID == "" { - return ErrSchemaVersionIDEmpty + colsByID map[uint32]client.CollectionDescription, + id uint32, +) (client.CollectionDescription, client.CollectionDescription, bool) { + col, ok := colsByID[id] + if !ok { + return client.CollectionDescription{}, client.CollectionDescription{}, false } - col, err := description.GetCollectionByName(ctx, txn, collectionName) - if err != nil { - return err + if col.Name.HasValue() { + return col, client.CollectionDescription{}, true } - col.SchemaVersionID = schemaVersionID + sources := col.CollectionSources() + if len(sources) == 0 { + // If a collection has zero sources it is likely the initial collection version, or + // this collection set is currently orphaned (can happen when setting migrations that + // do not yet link all the way back to a non-orphaned set) + return client.CollectionDescription{}, col, false + } - _, err = description.SaveCollection(ctx, txn, col) - return err + // For now, we assume that each collection can only have a single source. This will likely need + // to change later. + return db.getActiveCollectionDown(ctx, txn, colsByID, sources[0].SourceCollectionID) } -// getCollectionsByVersionId returns the [*collection]s at the given [schemaVersionId] version. -// -// Will return an error if the given key is empty, or if none are found. -func (db *db) getCollectionsByVersionID( +func (db *db) getActiveCollectionUp( ctx context.Context, txn datastore.Txn, - schemaVersionId string, -) ([]*collection, error) { - cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schemaVersionId) - if err != nil { - return nil, err + colsBySourceID map[uint32][]client.CollectionDescription, + id uint32, +) (client.CollectionDescription, bool) { + cols, ok := colsBySourceID[id] + if !ok { + // We have reached the top of the set, and have not found an active collection + return client.CollectionDescription{}, false } - collections := make([]*collection, len(cols)) - for i, col := range cols { - schema, err := description.GetSchemaVersion(ctx, txn, col.SchemaVersionID) - if err != nil { - return nil, err + for _, col := range cols { + if col.Name.HasValue() { + return col, true } - - collections[i] = db.newCollection(col, schema) - - err = collections[i].loadIndexes(ctx, txn) - if err != nil { - return nil, err + activeCol, isFound := db.getActiveCollectionUp(ctx, txn, colsBySourceID, col.ID) + if isFound { + return activeCol, isFound } } - return collections, nil + return client.CollectionDescription{}, false } -// getCollectionByName returns an existing collection within the database. -func (db *db) getCollectionByName(ctx context.Context, txn datastore.Txn, name string) (client.Collection, error) { - if name == "" { - return nil, ErrCollectionNameEmpty - } - - col, err := description.GetCollectionByName(ctx, txn, name) +func (db *db) getCollectionByID(ctx context.Context, txn datastore.Txn, id uint32) (client.Collection, error) { + col, err := description.GetCollectionByID(ctx, txn, id) if err != nil { return nil, err } @@ -563,56 +694,100 @@ func (db *db) getCollectionByName(ctx context.Context, txn datastore.Txn, name s return collection, nil } -// getCollectionsBySchemaRoot returns all existing collections using the schema root. 
-func (db *db) getCollectionsBySchemaRoot( - ctx context.Context, - txn datastore.Txn, - schemaRoot string, -) ([]client.Collection, error) { - if schemaRoot == "" { - return nil, ErrSchemaRootEmpty +// getCollectionByName returns an existing collection within the database. +func (db *db) getCollectionByName(ctx context.Context, txn datastore.Txn, name string) (client.Collection, error) { + if name == "" { + return nil, ErrCollectionNameEmpty } - cols, err := description.GetCollectionsBySchemaRoot(ctx, txn, schemaRoot) + cols, err := db.getCollections(ctx, txn, client.CollectionFetchOptions{Name: immutable.Some(name)}) if err != nil { return nil, err } - collections := make([]client.Collection, len(cols)) - for i, col := range cols { - schema, err := description.GetSchemaVersion(ctx, txn, col.SchemaVersionID) + // cols will always have length == 1 here + return cols[0], nil +} + +// getCollections returns all collections and their descriptions matching the given options +// that currently exist within this [Store]. +// +// Inactive collections are not returned by default unless a specific schema version ID +// is provided. +func (db *db) getCollections( + ctx context.Context, + txn datastore.Txn, + options client.CollectionFetchOptions, +) ([]client.Collection, error) { + var cols []client.CollectionDescription + + switch { + case options.Name.HasValue(): + col, err := description.GetCollectionByName(ctx, txn, options.Name.Value()) if err != nil { return nil, err } + cols = append(cols, col) - collection := db.newCollection(col, schema) - collections[i] = collection - - err = collection.loadIndexes(ctx, txn) + case options.SchemaVersionID.HasValue(): + var err error + cols, err = description.GetCollectionsBySchemaVersionID(ctx, txn, options.SchemaVersionID.Value()) if err != nil { return nil, err } - } - return collections, nil -} + case options.SchemaRoot.HasValue(): + var err error + cols, err = description.GetCollectionsBySchemaRoot(ctx, txn, options.SchemaRoot.Value()) + if err != nil { + return nil, err + } -// getAllCollections gets all the currently defined collections. -func (db *db) getAllCollections(ctx context.Context, txn datastore.Txn) ([]client.Collection, error) { - cols, err := description.GetCollections(ctx, txn) - if err != nil { - return nil, err + default: + if options.IncludeInactive.HasValue() && options.IncludeInactive.Value() { + var err error + cols, err = description.GetCollections(ctx, txn) + if err != nil { + return nil, err + } + } else { + var err error + cols, err = description.GetActiveCollections(ctx, txn) + if err != nil { + return nil, err + } + } } - collections := make([]client.Collection, len(cols)) - for i, col := range cols { + collections := []client.Collection{} + for _, col := range cols { + if options.SchemaVersionID.HasValue() { + if col.SchemaVersionID != options.SchemaVersionID.Value() { + continue + } + } + // By default, we don't return inactive collections unless a specific version is requested. + if !options.IncludeInactive.Value() && !col.Name.HasValue() && !options.SchemaVersionID.HasValue() { + continue + } + schema, err := description.GetSchemaVersion(ctx, txn, col.SchemaVersionID) if err != nil { - return nil, err + // If the schema is not found we leave it as empty and carry on. This can happen when + // a migration is registered before the schema is declared locally. 
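+			// Only ds.ErrNotFound is tolerated here; any other error is returned to the caller.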
+ if !errors.Is(err, ds.ErrNotFound) { + return nil, err + } + } + + if options.SchemaRoot.HasValue() { + if schema.Root != options.SchemaRoot.Value() { + continue + } } collection := db.newCollection(col, schema) - collections[i] = collection + collections = append(collections, collection) err = collection.loadIndexes(ctx, txn) if err != nil { @@ -625,7 +800,7 @@ func (db *db) getAllCollections(ctx context.Context, txn datastore.Txn) ([]clien // getAllActiveDefinitions returns all queryable collection/views and any embedded schema used by them. func (db *db) getAllActiveDefinitions(ctx context.Context, txn datastore.Txn) ([]client.CollectionDefinition, error) { - cols, err := description.GetCollections(ctx, txn) + cols, err := description.GetActiveCollections(ctx, txn) if err != nil { return nil, err } @@ -682,7 +857,7 @@ func (c *collection) getAllDocIDsChan( txn datastore.Txn, ) (<-chan client.DocIDResult, error) { prefix := core.PrimaryDataStoreKey{ // empty path for all keys prefix - CollectionId: fmt.Sprint(c.ID()), + CollectionRootID: c.Description().RootID, } q, err := txn.Datastore().Query(ctx, query.Query{ Prefix: prefix.ToString(), @@ -740,7 +915,7 @@ func (c *collection) Description() client.CollectionDescription { } // Name returns the collection name. -func (c *collection) Name() string { +func (c *collection) Name() immutable.Option[string] { return c.Description().Name } @@ -976,7 +1151,7 @@ func (c *collection) save( return cid.Undef, client.NewErrFieldNotExist(k) } - fieldDescription, valid := c.Schema().GetField(k) + fieldDescription, valid := c.Definition().GetFieldByName(k) if !valid { return cid.Undef, client.NewErrFieldNotExist(k) } @@ -989,7 +1164,7 @@ func (c *collection) save( if isSecondaryRelationID { primaryId := val.Value().(string) - err = c.patchPrimaryDoc(ctx, txn, c.Name(), relationFieldDescription, primaryKey.DocID, primaryId) + err = c.patchPrimaryDoc(ctx, txn, c.Name().Value(), relationFieldDescription, primaryKey.DocID, primaryId) if err != nil { return cid.Undef, err } @@ -1067,10 +1242,10 @@ func (c *collection) validateOneToOneLinkDoesntAlreadyExist( ctx context.Context, txn datastore.Txn, docID string, - fieldDescription client.FieldDescription, + fieldDescription client.FieldDefinition, value any, ) error { - if !fieldDescription.RelationType.IsSet(client.Relation_Type_INTERNAL_ID) { + if fieldDescription.Kind != client.FieldKind_DocID { return nil } @@ -1078,11 +1253,29 @@ func (c *collection) validateOneToOneLinkDoesntAlreadyExist( return nil } - objFieldDescription, ok := c.Schema().GetField(strings.TrimSuffix(fieldDescription.Name, request.RelatedObjectID)) + objFieldDescription, ok := c.Definition().GetFieldByName( + strings.TrimSuffix(fieldDescription.Name, request.RelatedObjectID), + ) if !ok { return client.NewErrFieldNotExist(strings.TrimSuffix(fieldDescription.Name, request.RelatedObjectID)) } - if !objFieldDescription.RelationType.IsSet(client.Relation_Type_ONEONE) { + if objFieldDescription.Kind != client.FieldKind_FOREIGN_OBJECT { + return nil + } + + otherCol, err := c.db.getCollectionByName(ctx, txn, objFieldDescription.Schema) + if err != nil { + return err + } + otherSchema := otherCol.Schema() + otherObjFieldDescription, _ := otherCol.Description().GetFieldByRelation( + fieldDescription.RelationName, + c.Name().Value(), + objFieldDescription.Name, + &otherSchema, + ) + if otherObjFieldDescription.Kind != client.FieldKind_FOREIGN_OBJECT { + // If the other field is not an object field then this is not a one to one relation and we 
can continue return nil } @@ -1259,16 +1452,16 @@ func (c *collection) commitImplicitTxn(ctx context.Context, txn datastore.Txn) e func (c *collection) getPrimaryKeyFromDocID(docID client.DocID) core.PrimaryDataStoreKey { return core.PrimaryDataStoreKey{ - CollectionId: fmt.Sprint(c.ID()), - DocID: docID.String(), + CollectionRootID: c.Description().RootID, + DocID: docID.String(), } } func (c *collection) getDataStoreKeyFromDocID(docID client.DocID) core.DataStoreKey { return core.DataStoreKey{ - CollectionID: fmt.Sprint(c.ID()), - DocID: docID.String(), - InstanceType: core.ValueKey, + CollectionRootID: c.Description().RootID, + DocID: docID.String(), + InstanceType: core.ValueKey, } } @@ -1279,18 +1472,18 @@ func (c *collection) tryGetFieldKey(primaryKey core.PrimaryDataStoreKey, fieldNa } return core.DataStoreKey{ - CollectionID: primaryKey.CollectionId, - DocID: primaryKey.DocID, - FieldId: strconv.FormatUint(uint64(fieldId), 10), + CollectionRootID: c.Description().RootID, + DocID: primaryKey.DocID, + FieldId: strconv.FormatUint(uint64(fieldId), 10), }, true } // tryGetSchemaFieldID returns the FieldID of the given fieldName. // Will return false if the field is not found. func (c *collection) tryGetSchemaFieldID(fieldName string) (uint32, bool) { - for _, field := range c.Schema().Fields { + for _, field := range c.Definition().GetFields() { if field.Name == fieldName { - if field.IsObject() || field.IsObjectArray() { + if field.Kind.IsObject() || field.Kind.IsObjectArray() { // We do not wish to match navigational properties, only // fields directly on the collection. return uint32(0), false diff --git a/db/collection_delete.go b/db/collection_delete.go index f91b8e38f2..785b2830d7 100644 --- a/db/collection_delete.go +++ b/db/collection_delete.go @@ -12,7 +12,6 @@ package db import ( "context" - "fmt" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" @@ -58,7 +57,7 @@ func (c *collection) DeleteWithDocID( defer c.discardImplicitTxn(ctx, txn) dsKey := c.getPrimaryKeyFromDocID(docID) - res, err := c.deleteWithKey(ctx, txn, dsKey, client.Deleted) + res, err := c.deleteWithKey(ctx, txn, dsKey) if err != nil { return nil, err } @@ -110,7 +109,6 @@ func (c *collection) deleteWithKey( ctx context.Context, txn datastore.Txn, key core.PrimaryDataStoreKey, - status client.DocumentStatus, ) (*client.DeleteResult, error) { // Check the key we have been given to delete with actually has a corresponding // document (i.e. document actually exists in the collection). @@ -132,7 +130,7 @@ func (c *collection) deleteWithIDs( ctx context.Context, txn datastore.Txn, docIDs []client.DocID, - status client.DocumentStatus, + _ client.DocumentStatus, ) (*client.DeleteResult, error) { results := &client.DeleteResult{ DocIDs: make([]string, 0), @@ -161,7 +159,7 @@ func (c *collection) deleteWithFilter( ctx context.Context, txn datastore.Txn, filter any, - status client.DocumentStatus, + _ client.DocumentStatus, ) (*client.DeleteResult, error) { // Make a selection plan that will scan through only the documents with matching filter. selectionPlan, err := c.makeSelectionPlan(ctx, txn, filter) @@ -207,8 +205,8 @@ func (c *collection) deleteWithFilter( docID := doc.GetID() primaryKey := core.PrimaryDataStoreKey{ - CollectionId: fmt.Sprint(c.ID()), - DocID: docID, + CollectionRootID: c.Description().RootID, + DocID: docID, } // Delete the document that is associated with this DS key we got from the filter. 
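Taken together, the key changes above replace the old string collection ID with the numeric collection root ID, and move secondary index keys from "/"-joined strings to binary encoding. The following is a minimal sketch of how the reworked keys compose, assuming the core package as modified in this diff; the root ID, index ID, field value and docID are made-up illustration values, not ones from the patch:

	package main

	import (
		"fmt"

		"github.com/sourcenetwork/defradb/core"
	)

	func main() {
		// Primary index key: collections are now addressed by their numeric root ID.
		primary := core.PrimaryDataStoreKey{
			CollectionRootID: 2,                    // hypothetical collection root ID
			DocID:            "bae-example-doc-id", // hypothetical docID
		}
		// Composes "/<root>" + PRIMARY_KEY + "/<docID>".
		fmt.Println(primary.ToString())

		// Data store key for a single field instance of the same document.
		data := core.DataStoreKey{
			CollectionRootID: 2,
			DocID:            "bae-example-doc-id",
			FieldId:          "1",
			InstanceType:     core.ValueKey,
		}
		fmt.Println(data.ToString())

		// Secondary index keys are built from IDs plus typed field values, and are
		// binary encoded via EncodeIndexDataStoreKey rather than string-joined.
		idx := core.NewIndexDataStoreKey(2, 1, []core.IndexedField{{Value: 5}})
		fmt.Printf("%x\n", idx.Bytes())
	}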
diff --git a/db/collection_get.go b/db/collection_get.go index e19ccd58c0..cf245fc678 100644 --- a/db/collection_get.go +++ b/db/collection_get.go @@ -48,7 +48,7 @@ func (c *collection) get( ctx context.Context, txn datastore.Txn, primaryKey core.PrimaryDataStoreKey, - fields []client.FieldDescription, + fields []client.FieldDefinition, showDeleted bool, ) (*client.Document, error) { // create a new document fetcher diff --git a/db/collection_index.go b/db/collection_index.go index 4367d8ebdf..7fb036498a 100644 --- a/db/collection_index.go +++ b/db/collection_index.go @@ -18,10 +18,13 @@ import ( "strconv" "strings" + "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/db/base" + "github.com/sourcenetwork/defradb/db/description" "github.com/sourcenetwork/defradb/db/fetcher" "github.com/sourcenetwork/defradb/request/graphql/schema" ) @@ -54,12 +57,12 @@ func (db *db) dropCollectionIndex( return col.DropIndex(ctx, indexName) } -// getAllIndexes returns all the indexes in the database. -func (db *db) getAllIndexes( +// getAllIndexDescriptions returns all the index descriptions in the database. +func (db *db) getAllIndexDescriptions( ctx context.Context, txn datastore.Txn, ) (map[client.CollectionName][]client.IndexDescription, error) { - prefix := core.NewCollectionIndexKey("", "") + prefix := core.NewCollectionIndexKey(immutable.None[uint32](), "") keys, indexDescriptions, err := datastore.DeserializePrefix[client.IndexDescription](ctx, prefix.ToString(), txn.Systemstore()) @@ -75,8 +78,14 @@ func (db *db) getAllIndexes( if err != nil { return nil, NewErrInvalidStoredIndexKey(indexKey.ToString()) } - indexes[indexKey.CollectionName] = append( - indexes[indexKey.CollectionName], + + col, err := description.GetCollectionByID(ctx, txn, indexKey.CollectionID.Value()) + if err != nil { + return nil, err + } + + indexes[col.Name.Value()] = append( + indexes[col.Name.Value()], indexDescriptions[i], ) } @@ -87,9 +96,9 @@ func (db *db) getAllIndexes( func (db *db) fetchCollectionIndexDescriptions( ctx context.Context, txn datastore.Txn, - colName string, + colID uint32, ) ([]client.IndexDescription, error) { - prefix := core.NewCollectionIndexKey(colName, "") + prefix := core.NewCollectionIndexKey(immutable.Some(colID), "") _, indexDescriptions, err := datastore.DeserializePrefix[client.IndexDescription](ctx, prefix.ToString(), txn.Systemstore()) if err != nil { @@ -98,6 +107,55 @@ func (db *db) fetchCollectionIndexDescriptions( return indexDescriptions, nil } +func (c *collection) CreateDocIndex(ctx context.Context, doc *client.Document) error { + txn, err := c.getTxn(ctx, false) + if err != nil { + return err + } + defer c.discardImplicitTxn(ctx, txn) + + err = c.indexNewDoc(ctx, txn, doc) + if err != nil { + return err + } + + return c.commitImplicitTxn(ctx, txn) +} + +func (c *collection) UpdateDocIndex(ctx context.Context, oldDoc, newDoc *client.Document) error { + txn, err := c.getTxn(ctx, false) + if err != nil { + return err + } + defer c.discardImplicitTxn(ctx, txn) + + err = c.deleteIndexedDoc(ctx, txn, oldDoc) + if err != nil { + return err + } + err = c.indexNewDoc(ctx, txn, newDoc) + if err != nil { + return err + } + + return c.commitImplicitTxn(ctx, txn) +} + +func (c *collection) DeleteDocIndex(ctx context.Context, doc *client.Document) error { + txn, err := c.getTxn(ctx, false) + if err != nil { + return err + } + defer 
c.discardImplicitTxn(ctx, txn) + + err = c.deleteIndexedDoc(ctx, txn, doc) + if err != nil { + return err + } + + return c.commitImplicitTxn(ctx, txn) +} + func (c *collection) indexNewDoc(ctx context.Context, txn datastore.Txn, doc *client.Document) error { err := c.loadIndexes(ctx, txn) if err != nil { @@ -121,12 +179,11 @@ func (c *collection) updateIndexedDoc( if err != nil { return err } - desc := c.Description() - schema := c.Schema() oldDoc, err := c.get( ctx, txn, - c.getPrimaryKeyFromDocID(doc.ID()), desc.CollectIndexedFields(&schema), + c.getPrimaryKeyFromDocID(doc.ID()), + c.Definition().CollectIndexedFields(), false, ) if err != nil { @@ -141,6 +198,24 @@ func (c *collection) updateIndexedDoc( return nil } +func (c *collection) deleteIndexedDoc( + ctx context.Context, + txn datastore.Txn, + doc *client.Document, +) error { + err := c.loadIndexes(ctx, txn) + if err != nil { + return err + } + for _, index := range c.indexes { + err = index.Delete(ctx, txn, doc) + if err != nil { + return err + } + } + return nil +} + // CreateIndex creates a new index on the collection. // // If the index name is empty, a name will be automatically generated. @@ -186,7 +261,7 @@ func (c *collection) createIndex( return nil, err } - err = c.checkExistingFields(ctx, desc.Fields) + err = c.checkExistingFields(desc.Fields) if err != nil { return nil, err } @@ -196,7 +271,11 @@ func (c *collection) createIndex( return nil, err } - colSeq, err := c.db.getSequence(ctx, txn, fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, c.ID())) + colSeq, err := c.db.getSequence( + ctx, + txn, + core.NewIndexIDSequenceKey(c.ID()), + ) if err != nil { return nil, err } @@ -223,7 +302,8 @@ func (c *collection) createIndex( c.indexes = append(c.indexes, colIndex) err = c.indexExistingDocs(ctx, txn, colIndex) if err != nil { - return nil, err + removeErr := colIndex.RemoveAll(ctx, txn) + return nil, errors.Join(err, removeErr) } return colIndex, nil } @@ -231,7 +311,7 @@ func (c *collection) createIndex( func (c *collection) iterateAllDocs( ctx context.Context, txn datastore.Txn, - fields []client.FieldDescription, + fields []client.FieldDefinition, exec func(doc *client.Document) error, ) error { df := c.newFetcher() @@ -275,14 +355,11 @@ func (c *collection) indexExistingDocs( txn datastore.Txn, index CollectionIndex, ) error { - fields := make([]client.FieldDescription, 0, 1) + fields := make([]client.FieldDefinition, 0, 1) for _, field := range index.Description().Fields { - for i := range c.Schema().Fields { - colField := c.Schema().Fields[i] - if field.Name == colField.Name { - fields = append(fields, colField) - break - } + colField, ok := c.Definition().GetFieldByName(field.Name) + if ok { + fields = append(fields, colField) } } @@ -338,7 +415,7 @@ func (c *collection) dropIndex(ctx context.Context, txn datastore.Txn, indexName break } } - key := core.NewCollectionIndexKey(c.Name(), indexName) + key := core.NewCollectionIndexKey(immutable.Some(c.ID()), indexName) err = txn.Systemstore().Delete(ctx, key.ToDS()) if err != nil { return err @@ -348,7 +425,7 @@ func (c *collection) dropIndex(ctx context.Context, txn datastore.Txn, indexName } func (c *collection) dropAllIndexes(ctx context.Context, txn datastore.Txn) error { - prefix := core.NewCollectionIndexKey(c.Name(), "") + prefix := core.NewCollectionIndexKey(immutable.Some(c.ID()), "") keys, err := datastore.FetchKeysForPrefix(ctx, prefix.ToString(), txn.Systemstore()) if err != nil { @@ -366,7 +443,7 @@ func (c *collection) dropAllIndexes(ctx context.Context, txn 
datastore.Txn) erro } func (c *collection) loadIndexes(ctx context.Context, txn datastore.Txn) error { - indexDescriptions, err := c.db.fetchCollectionIndexDescriptions(ctx, txn, c.Name()) + indexDescriptions, err := c.db.fetchCollectionIndexDescriptions(ctx, txn, c.ID()) if err != nil { return err } @@ -399,7 +476,6 @@ func (c *collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, } func (c *collection) checkExistingFields( - ctx context.Context, fields []client.IndexedFieldDescription, ) error { collectionFields := c.Schema().Fields @@ -428,7 +504,7 @@ func (c *collection) generateIndexNameIfNeededAndCreateKey( nameIncrement := 1 for { desc.Name = generateIndexName(c, desc.Fields, nameIncrement) - indexKey = core.NewCollectionIndexKey(c.Name(), desc.Name) + indexKey = core.NewCollectionIndexKey(immutable.Some(c.ID()), desc.Name) exists, err := txn.Systemstore().Has(ctx, indexKey.ToDS()) if err != nil { return core.CollectionIndexKey{}, err @@ -439,7 +515,7 @@ func (c *collection) generateIndexNameIfNeededAndCreateKey( nameIncrement++ } } else { - indexKey = core.NewCollectionIndexKey(c.Name(), desc.Name) + indexKey = core.NewCollectionIndexKey(immutable.Some(c.ID()), desc.Name) exists, err := txn.Systemstore().Has(ctx, indexKey.ToDS()) if err != nil { return core.CollectionIndexKey{}, err @@ -458,16 +534,10 @@ func validateIndexDescription(desc client.IndexDescription) error { if len(desc.Fields) == 0 { return ErrIndexMissingFields } - if len(desc.Fields) == 1 && desc.Fields[0].Direction == client.Descending { - return ErrIndexSingleFieldWrongDirection - } for i := range desc.Fields { if desc.Fields[i].Name == "" { return ErrIndexFieldMissingName } - if desc.Fields[i].Direction == "" { - desc.Fields[i].Direction = client.Ascending - } } return nil } @@ -477,7 +547,11 @@ func generateIndexName(col client.Collection, fields []client.IndexedFieldDescri // at the moment we support only single field indexes that can be stored only in // ascending order. This will change once we introduce composite indexes. direction := "ASC" - sb.WriteString(col.Name()) + if col.Name().HasValue() { + sb.WriteString(col.Name().Value()) + } else { + sb.WriteString(fmt.Sprint(col.ID())) + } sb.WriteByte('_') // we can safely assume that there is at least one field in the slice // because we validate it before calling this function diff --git a/db/collection_update.go b/db/collection_update.go index 4c1895602b..fc985d2c41 100644 --- a/db/collection_update.go +++ b/db/collection_update.go @@ -291,15 +291,15 @@ func (c *collection) updateWithFilter( } // isSecondaryIDField returns true if the given field description represents a secondary relation field ID. 
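For illustration only, before the reworked check below: a minimal sketch of the field shape it keys on, using a hypothetical one-to-many relation; none of these names or values come from this changeset.

	authorIDField := client.FieldDefinition{
		Name:         "author_id",            // hypothetical "<field>_id" relation ID field
		Kind:         client.FieldKind_DocID, // holds the related document's ID
		RelationName: "author_book",          // non-empty only for relation fields
	}
	// Mirrors the first half of the updated predicate: a DocID-kinded field
	// that belongs to a named relation is a candidate secondary relation ID.
	candidate := authorIDField.RelationName != "" && authorIDField.Kind == client.FieldKind_DocID
	_ = candidate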
-func (c *collection) isSecondaryIDField(fieldDesc client.FieldDescription) (client.FieldDescription, bool) { - if fieldDesc.RelationType != client.Relation_Type_INTERNAL_ID { - return client.FieldDescription{}, false +func (c *collection) isSecondaryIDField(fieldDesc client.FieldDefinition) (client.FieldDefinition, bool) { + if fieldDesc.RelationName == "" || fieldDesc.Kind != client.FieldKind_DocID { + return client.FieldDefinition{}, false } - relationFieldDescription, valid := c.Schema().GetField( + relationFieldDescription, valid := c.Definition().GetFieldByName( strings.TrimSuffix(fieldDesc.Name, request.RelatedObjectID), ) - return relationFieldDescription, valid && !relationFieldDescription.IsPrimaryRelation() + return relationFieldDescription, valid && !relationFieldDescription.IsPrimaryRelation } // patchPrimaryDoc patches the (primary) document linked to from the document of the given DocID via the @@ -312,7 +312,7 @@ func (c *collection) patchPrimaryDoc( ctx context.Context, txn datastore.Txn, secondaryCollectionName string, - relationFieldDescription client.FieldDescription, + relationFieldDescription client.FieldDefinition, docID string, fieldValue string, ) error { @@ -338,7 +338,7 @@ func (c *collection) patchPrimaryDoc( return client.NewErrFieldNotExist(relationFieldDescription.RelationName) } - primaryIDField, ok := primaryCol.Schema().GetField(primaryField.Name + request.RelatedObjectID) + primaryIDField, ok := primaryCol.Definition().GetFieldByName(primaryField.Name + request.RelatedObjectID) if !ok { return client.NewErrFieldNotExist(primaryField.Name + request.RelatedObjectID) } @@ -402,7 +402,7 @@ func (c *collection) makeSelectionPlan( return nil, ErrInvalidFilter } - f, err = c.db.parser.NewFilterFromString(c.Name(), fval) + f, err = c.db.parser.NewFilterFromString(c.Name().Value(), fval) if err != nil { return nil, err } @@ -432,14 +432,14 @@ func (c *collection) makeSelectionPlan( func (c *collection) makeSelectLocal(filter immutable.Option[request.Filter]) (*request.Select, error) { slct := &request.Select{ Field: request.Field{ - Name: c.Name(), + Name: c.Name().Value(), }, Filter: filter, Fields: make([]request.Selection, 0), } for _, fd := range c.Schema().Fields { - if fd.IsObject() { + if fd.Kind.IsObject() { continue } slct.Fields = append(slct.Fields, &request.Field{ diff --git a/db/db.go b/db/db.go index 1046b2db54..7b3ff7bcb8 100644 --- a/db/db.go +++ b/db/db.go @@ -226,7 +226,7 @@ func (db *db) initialize(ctx context.Context) error { // init meta data // collection sequence - _, err = db.getSequence(ctx, txn, core.COLLECTION) + _, err = db.getSequence(ctx, txn, core.CollectionIDSequenceKey{}) if err != nil { return err } diff --git a/db/description/collection.go b/db/description/collection.go index a334ec6384..8ffd473053 100644 --- a/db/description/collection.go +++ b/db/description/collection.go @@ -13,6 +13,7 @@ package description import ( "context" "encoding/json" + "sort" "github.com/ipfs/go-datastore/query" @@ -39,15 +40,17 @@ func SaveCollection( return client.CollectionDescription{}, err } - idBuf, err := json.Marshal(desc.ID) - if err != nil { - return client.CollectionDescription{}, err - } + if desc.Name.HasValue() { + idBuf, err := json.Marshal(desc.ID) + if err != nil { + return client.CollectionDescription{}, err + } - nameKey := core.NewCollectionNameKey(desc.Name) - err = txn.Systemstore().Put(ctx, nameKey.ToDS(), idBuf) - if err != nil { - return client.CollectionDescription{}, err + nameKey := 
core.NewCollectionNameKey(desc.Name.Value()) + err = txn.Systemstore().Put(ctx, nameKey.ToDS(), idBuf) + if err != nil { + return client.CollectionDescription{}, err + } } // The need for this key is temporary, we should replace it with the global collection ID @@ -61,6 +64,26 @@ func SaveCollection( return desc, nil } +func GetCollectionByID( + ctx context.Context, + txn datastore.Txn, + id uint32, +) (client.CollectionDescription, error) { + key := core.NewCollectionKey(id) + buf, err := txn.Systemstore().Get(ctx, key.ToDS()) + if err != nil { + return client.CollectionDescription{}, err + } + + var col client.CollectionDescription + err = json.Unmarshal(buf, &col) + if err != nil { + return client.CollectionDescription{}, err + } + + return col, nil +} + // GetCollectionByName returns the collection with the given name. // // If no collection of that name is found, it will return an error. @@ -81,19 +104,7 @@ func GetCollectionByName( return client.CollectionDescription{}, err } - key := core.NewCollectionKey(id) - buf, err := txn.Systemstore().Get(ctx, key.ToDS()) - if err != nil { - return client.CollectionDescription{}, err - } - - var col client.CollectionDescription - err = json.Unmarshal(buf, &col) - if err != nil { - return client.CollectionDescription{}, err - } - - return col, nil + return GetCollectionByID(ctx, txn, id) } // GetCollectionsBySchemaVersionID returns all collections that use the given @@ -183,6 +194,8 @@ func GetCollectionsBySchemaRoot( } // GetCollections returns all collections in the system. +// +// This includes inactive collections. func GetCollections( ctx context.Context, txn datastore.Txn, @@ -218,6 +231,47 @@ func GetCollections( return cols, nil } +// GetActiveCollections returns all active collections in the system. +func GetActiveCollections( + ctx context.Context, + txn datastore.Txn, +) ([]client.CollectionDescription, error) { + q, err := txn.Systemstore().Query(ctx, query.Query{ + Prefix: core.NewCollectionNameKey("").ToString(), + }) + if err != nil { + return nil, NewErrFailedToCreateCollectionQuery(err) + } + + cols := make([]client.CollectionDescription, 0) + for res := range q.Next() { + if res.Error != nil { + if err := q.Close(); err != nil { + return nil, NewErrFailedToCloseCollectionQuery(err) + } + return nil, err + } + + var id uint32 + err = json.Unmarshal(res.Value, &id) + if err != nil { + return nil, err + } + + col, err := GetCollectionByID(ctx, txn, id) + if err != nil { + return nil, err + } + + cols = append(cols, col) + } + + // Sort the results by ID, so that the order matches that of [GetCollections]. + sort.Slice(cols, func(i, j int) bool { return cols[i].ID < cols[j].ID }) + + return cols, nil +} + // HasCollectionByName returns true if there is a collection of the given name, // else returns false. func HasCollectionByName( diff --git a/db/description/schema.go b/db/description/schema.go index c486ee1a59..c46b1f7564 100644 --- a/db/description/schema.go +++ b/db/description/schema.go @@ -30,13 +30,6 @@ func CreateSchemaVersion( txn datastore.Txn, desc client.SchemaDescription, ) (client.SchemaDescription, error) { - for i := range desc.Fields { - // This is not wonderful and will probably break when we add the ability - // to delete fields, however it is good enough for now and matches the - // create behaviour. 
- desc.Fields[i].ID = client.FieldID(i) - } - buf, err := json.Marshal(desc) if err != nil { return client.SchemaDescription{}, err @@ -47,7 +40,6 @@ func CreateSchemaVersion( return client.SchemaDescription{}, err } versionID := scid.String() - previousSchemaVersionID := desc.VersionID isNew := desc.Root == "" desc.VersionID = versionID @@ -69,9 +61,9 @@ func CreateSchemaVersion( } if !isNew { - // We don't need to add a history key if this is the first version - schemaVersionHistoryKey := core.NewSchemaHistoryKey(desc.Root, previousSchemaVersionID) - err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte(desc.VersionID)) + // We don't need to add a root key if this is the first version + schemaVersionHistoryKey := core.NewSchemaRootKey(desc.Root, desc.VersionID) + err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte{}) if err != nil { return client.SchemaDescription{}, err } @@ -152,7 +144,7 @@ func GetSchemas( ctx context.Context, txn datastore.Txn, ) ([]client.SchemaDescription, error) { - cols, err := GetCollections(ctx, txn) + cols, err := GetActiveCollections(ctx, txn) if err != nil { return nil, err } @@ -253,7 +245,7 @@ func GetSchemaVersionIDs( // It is not present in the history prefix. schemaVersions := []string{schemaRoot} - prefix := core.NewSchemaHistoryKey(schemaRoot, "") + prefix := core.NewSchemaRootKey(schemaRoot, "") q, err := txn.Systemstore().Query(ctx, query.Query{ Prefix: prefix.ToString(), KeysOnly: true, @@ -270,15 +262,12 @@ func GetSchemaVersionIDs( return nil, err } - key, err := core.NewSchemaHistoryKeyFromString(res.Key) + key, err := core.NewSchemaRootKeyFromString(res.Key) if err != nil { - if err := q.Close(); err != nil { - return nil, NewErrFailedToCloseSchemaQuery(err) - } return nil, err } - schemaVersions = append(schemaVersions, key.PreviousSchemaVersionID) + schemaVersions = append(schemaVersions, key.SchemaVersionID) } return schemaVersions, nil diff --git a/db/errors.go b/db/errors.go index d8c9773926..34dd0d53b5 100644 --- a/db/errors.go +++ b/db/errors.go @@ -28,7 +28,6 @@ const ( errSchemaRootDoesntMatch string = "SchemaRoot does not match existing" errCannotModifySchemaName string = "modifying the schema name is not supported" errCannotSetVersionID string = "setting the VersionID is not supported. 
It is updated automatically" - errCannotSetFieldID string = "explicitly setting a field ID value is not supported" errRelationalFieldMissingSchema string = "a `Schema` [name] must be provided when adding a new relation field" errRelationalFieldInvalidRelationType string = "invalid RelationType" errRelationalFieldMissingIDField string = "missing id field for relation object field" @@ -52,7 +51,6 @@ const ( errNonZeroIndexIDProvided string = "non-zero index ID provided" errIndexFieldMissingName string = "index field missing name" errIndexFieldMissingDirection string = "index field missing direction" - errIndexSingleFieldWrongDirection string = "wrong direction for index with a single field" errIndexWithNameAlreadyExists string = "index with name already exists" errInvalidStoredIndex string = "invalid stored index" errInvalidStoredIndexKey string = "invalid stored index key" @@ -69,7 +67,6 @@ const ( errInvalidFieldValue string = "invalid field value" errUnsupportedIndexFieldType string = "unsupported index field type" errIndexDescriptionHasNoFields string = "index description has no fields" - errIndexDescHasNonExistingField string = "index description has non existing field" errFieldOrAliasToFieldNotExist string = "The given field or alias to field does not exist" errCreateFile string = "failed to create file" errRemoveFile string = "failed to remove file" @@ -86,28 +83,28 @@ const ( errExpectedJSONArray string = "expected JSON array" errOneOneAlreadyLinked string = "target document is already linked to another document" errIndexDoesNotMatchName string = "the index used does not match the given name" - errCanNotIndexNonUniqueField string = "can not index a doc's field that violates unique index" + errCanNotIndexNonUniqueFields string = "can not index a doc's field(s) that violates unique index" errInvalidViewQuery string = "the query provided is not valid as a View" ) var ( - ErrFailedToGetCollection = errors.New(errFailedToGetCollection) - ErrSubscriptionsNotAllowed = errors.New("server does not accept subscriptions") - ErrInvalidFilter = errors.New("invalid filter") - ErrCollectionAlreadyExists = errors.New("collection already exists") - ErrCollectionNameEmpty = errors.New("collection name can't be empty") - ErrSchemaNameEmpty = errors.New("schema name can't be empty") - ErrSchemaRootEmpty = errors.New("schema root can't be empty") - ErrSchemaVersionIDEmpty = errors.New("schema version ID can't be empty") - ErrKeyEmpty = errors.New("key cannot be empty") - ErrCannotSetVersionID = errors.New(errCannotSetVersionID) - ErrIndexMissingFields = errors.New(errIndexMissingFields) - ErrIndexFieldMissingName = errors.New(errIndexFieldMissingName) - ErrIndexSingleFieldWrongDirection = errors.New(errIndexSingleFieldWrongDirection) - ErrCorruptedIndex = errors.New(errCorruptedIndex) - ErrExpectedJSONObject = errors.New(errExpectedJSONObject) - ErrExpectedJSONArray = errors.New(errExpectedJSONArray) - ErrInvalidViewQuery = errors.New(errInvalidViewQuery) + ErrFailedToGetCollection = errors.New(errFailedToGetCollection) + ErrSubscriptionsNotAllowed = errors.New("server does not accept subscriptions") + ErrInvalidFilter = errors.New("invalid filter") + ErrCollectionAlreadyExists = errors.New("collection already exists") + ErrCollectionNameEmpty = errors.New("collection name can't be empty") + ErrSchemaNameEmpty = errors.New("schema name can't be empty") + ErrSchemaRootEmpty = errors.New("schema root can't be empty") + ErrSchemaVersionIDEmpty = errors.New("schema version ID can't be empty") + ErrKeyEmpty = 
errors.New("key cannot be empty") + ErrCannotSetVersionID = errors.New(errCannotSetVersionID) + ErrIndexMissingFields = errors.New(errIndexMissingFields) + ErrIndexFieldMissingName = errors.New(errIndexFieldMissingName) + ErrCorruptedIndex = errors.New(errCorruptedIndex) + ErrExpectedJSONObject = errors.New(errExpectedJSONObject) + ErrExpectedJSONArray = errors.New(errExpectedJSONArray) + ErrInvalidViewQuery = errors.New(errInvalidViewQuery) + ErrCanNotIndexNonUniqueFields = errors.New(errCanNotIndexNonUniqueFields) ) // NewErrFailedToGetHeads returns a new error indicating that the heads of a document @@ -237,14 +234,6 @@ func NewErrCannotModifySchemaName(existingName, proposedName string) error { ) } -func NewErrCannotSetFieldID(name string, id client.FieldID) error { - return errors.New( - errCannotSetFieldID, - errors.NewKV("Field", name), - errors.NewKV("ID", id), - ) -} - func NewErrRelationalFieldMissingSchema(name string, kind client.FieldKind) error { return errors.New( errRelationalFieldMissingSchema, @@ -253,15 +242,6 @@ func NewErrRelationalFieldMissingSchema(name string, kind client.FieldKind) erro ) } -func NewErrRelationalFieldInvalidRelationType(name string, expected any, actual client.RelationType) error { - return errors.New( - errRelationalFieldInvalidRelationType, - errors.NewKV("Field", name), - errors.NewKV("Expected", expected), - errors.NewKV("Actual", actual), - ) -} - func NewErrRelationalFieldMissingIDField(name string, expectedName string) error { return errors.New( errRelationalFieldMissingIDField, @@ -307,19 +287,6 @@ func NewErrRelatedFieldKindMismatch(relationName string, expected client.FieldKi ) } -func NewErrRelatedFieldRelationTypeMismatch( - relationName string, - expected client.RelationType, - actual client.RelationType, -) error { - return errors.New( - errRelatedFieldRelationTypeMismatch, - errors.NewKV("RelationName", relationName), - errors.NewKV("Expected", expected), - errors.NewKV("Actual", actual), - ) -} - func NewErrRelationalFieldIDInvalidType(name string, expected, actual client.FieldKind) error { return errors.New( errRelationalFieldIDInvalidType, @@ -356,10 +323,9 @@ func NewErrDuplicateField(name string) error { return errors.New(errDuplicateField, errors.NewKV("Name", name)) } -func NewErrCannotMutateField(id client.FieldID, name string) error { +func NewErrCannotMutateField(name string) error { return errors.New( errCannotMutateField, - errors.NewKV("ID", id), errors.NewKV("ProposedName", name), ) } @@ -373,11 +339,10 @@ func NewErrCannotMoveField(name string, proposedIndex, existingIndex int) error ) } -func NewErrCannotDeleteField(name string, id client.FieldID) error { +func NewErrCannotDeleteField(name string) error { return errors.New( errCannotDeleteField, errors.NewKV("Name", name), - errors.NewKV("ID", id), ) } @@ -468,16 +433,6 @@ func NewErrIndexDescHasNoFields(desc client.IndexDescription) error { ) } -// NewErrIndexDescHasNonExistingField returns a new error indicating that the given index -// description points to a field that does not exist. -func NewErrIndexDescHasNonExistingField(desc client.IndexDescription, fieldName string) error { - return errors.New( - errIndexDescHasNonExistingField, - errors.NewKV("Description", desc), - errors.NewKV("Field name", fieldName), - ) -} - // NewErrCreateFile returns a new error indicating there was a failure in creating a file. 
func NewErrCreateFile(inner error, filepath string) error { return errors.Wrap(errCreateFile, inner, errors.NewKV("Filepath", filepath)) @@ -566,13 +521,12 @@ func NewErrIndexDoesNotMatchName(index, name string) error { ) } -func NewErrCanNotIndexNonUniqueField(docID, fieldName string, value any) error { - return errors.New( - errCanNotIndexNonUniqueField, - errors.NewKV("DocID", docID), - errors.NewKV("Field name", fieldName), - errors.NewKV("Field value", value), - ) +func NewErrCanNotIndexNonUniqueFields(docID string, fieldValues ...errors.KV) error { + kvPairs := make([]errors.KV, 0, len(fieldValues)+1) + kvPairs = append(kvPairs, errors.NewKV("DocID", docID)) + kvPairs = append(kvPairs, fieldValues...) + + return errors.New(errCanNotIndexNonUniqueFields, kvPairs...) } func NewErrInvalidViewQueryCastFailed(query string) error { diff --git a/db/fetcher/encoded_doc.go b/db/fetcher/encoded_doc.go index e88ee80f9d..889aea848a 100644 --- a/db/fetcher/encoded_doc.go +++ b/db/fetcher/encoded_doc.go @@ -31,7 +31,7 @@ type EncodedDocument interface { // Properties returns a copy of the decoded property values mapped by their field // description. - Properties(onlyFilterProps bool) (map[client.FieldDescription]any, error) + Properties(onlyFilterProps bool) (map[client.FieldDefinition]any, error) // Reset re-initializes the EncodedDocument object. Reset() @@ -41,7 +41,7 @@ type EPTuple []encProperty // EncProperty is an encoded property of a EncodedDocument type encProperty struct { - Desc client.FieldDescription + Desc client.FieldDefinition Raw []byte // Filter flag to determine if this flag @@ -60,7 +60,7 @@ func (e encProperty) Decode() (any, error) { return nil, err } - return core.DecodeFieldValue(e.Desc, val) + return core.NormalizeFieldValue(e.Desc, val) } // @todo: Implement Encoded Document type @@ -68,8 +68,8 @@ type encodedDocument struct { id []byte schemaVersionID string status client.DocumentStatus - properties map[client.FieldDescription]*encProperty - decodedPropertyCache map[client.FieldDescription]any + properties map[client.FieldDefinition]*encProperty + decodedPropertyCache map[client.FieldDefinition]any // tracking bitsets // A value of 1 indicates a required field @@ -96,7 +96,7 @@ func (encdoc *encodedDocument) Status() client.DocumentStatus { // Reset re-initializes the EncodedDocument object. 
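For illustration only: a hedged sketch of a hypothetical consumer of the EncodedDocument interface above, whose property map is now keyed by client.FieldDefinition; the helper name and print format are invented, and imports are elided.

	func dumpProperties(encDoc fetcher.EncodedDocument) error {
		props, err := encDoc.Properties(false) // false: all selected properties, not only filter ones
		if err != nil {
			return err
		}
		for fieldDef, value := range props {
			fmt.Printf("%s (kind %v) = %v\n", fieldDef.Name, fieldDef.Kind, value)
		}
		return nil
	}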
func (encdoc *encodedDocument) Reset() { - encdoc.properties = make(map[client.FieldDescription]*encProperty, 0) + encdoc.properties = make(map[client.FieldDefinition]*encProperty, 0) encdoc.id = nil encdoc.filterSet = nil encdoc.selectSet = nil @@ -172,10 +172,10 @@ func DecodeToDoc(encdoc EncodedDocument, mapping *core.DocumentMapping, filter b return doc, nil } -func (encdoc *encodedDocument) Properties(onlyFilterProps bool) (map[client.FieldDescription]any, error) { - result := map[client.FieldDescription]any{} +func (encdoc *encodedDocument) Properties(onlyFilterProps bool) (map[client.FieldDefinition]any, error) { + result := map[client.FieldDefinition]any{} if encdoc.decodedPropertyCache == nil { - encdoc.decodedPropertyCache = map[client.FieldDescription]any{} + encdoc.decodedPropertyCache = map[client.FieldDefinition]any{} } for _, prop := range encdoc.properties { diff --git a/db/fetcher/errors.go b/db/fetcher/errors.go index 84d947c46f..2a2967bbdb 100644 --- a/db/fetcher/errors.go +++ b/db/fetcher/errors.go @@ -11,11 +11,13 @@ package fetcher import ( + "fmt" + "github.com/sourcenetwork/defradb/errors" ) const ( - errFieldIdNotFound string = "unable to find FieldDescription for given FieldId" + errFieldIdNotFound string = "unable to find SchemaFieldDescription for given FieldId" errFailedToDecodeCIDForVFetcher string = "failed to decode CID for VersionedFetcher" errFailedToSeek string = "seek failed" errFailedToMergeState string = "failed merging state" @@ -26,6 +28,9 @@ const ( errVFetcherFailedToGetDagLink string = "(version fetcher) failed to get node link from DAG" errFailedToGetDagNode string = "failed to get DAG Node" errMissingMapper string = "missing document mapper" + errInvalidInOperatorValue string = "invalid _in/_nin value" + errInvalidFilterOperator string = "invalid filter operator is provided" + errUnexpectedTypeValue string = "unexpected type value" ) var ( @@ -41,6 +46,9 @@ var ( ErrFailedToGetDagNode = errors.New(errFailedToGetDagNode) ErrMissingMapper = errors.New(errMissingMapper) ErrSingleSpanOnly = errors.New("spans must contain only a single entry") + ErrInvalidInOperatorValue = errors.New(errInvalidInOperatorValue) + ErrInvalidFilterOperator = errors.New(errInvalidFilterOperator) + ErrUnexpectedTypeValue = errors.New(errUnexpectedTypeValue) ) // NewErrFieldIdNotFound returns an error indicating that the given FieldId was not found. @@ -93,3 +101,14 @@ func NewErrVFetcherFailedToGetDagLink(inner error) error { func NewErrFailedToGetDagNode(inner error) error { return errors.Wrap(errFailedToGetDagNode, inner) } + +// NewErrInvalidFilterOperator returns an error indicating that the given filter operator is invalid. +func NewErrInvalidFilterOperator(operator string) error { + return errors.New(errInvalidFilterOperator, errors.NewKV("Operator", operator)) +} + +// NewErrUnexpectedTypeValue returns an error indicating that the given value is of an unexpected type. 
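For illustration only, ahead of the definition below: a sketch of the call-site pattern the generic constructor enables; the wrapping function is hypothetical.

	func requireInt64(value any) (int64, error) {
		intVal, ok := value.(int64)
		if !ok {
			// Reports both the offending value and the expected type (int64 here).
			return 0, NewErrUnexpectedTypeValue[int64](value)
		}
		return intVal, nil
	}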
+func NewErrUnexpectedTypeValue[T any](value any) error { + var t T + return errors.New(errUnexpectedTypeValue, errors.NewKV("Value", value), errors.NewKV("Type", fmt.Sprintf("%T", t))) +} diff --git a/db/fetcher/fetcher.go b/db/fetcher/fetcher.go index a9cb39d9d5..e4bb08cee4 100644 --- a/db/fetcher/fetcher.go +++ b/db/fetcher/fetcher.go @@ -58,7 +58,7 @@ type Fetcher interface { ctx context.Context, txn datastore.Txn, col client.Collection, - fields []client.FieldDescription, + fields []client.FieldDefinition, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, @@ -94,8 +94,8 @@ type DocumentFetcher struct { ranFilter bool // did we run the filter passedFilter bool // did we pass the filter - filterFields map[uint32]client.FieldDescription - selectFields map[uint32]client.FieldDescription + filterFields map[uint32]client.FieldDefinition + selectFields map[uint32]client.FieldDefinition // static bitset to which stores the IDs of fields // needed for filtering. @@ -138,7 +138,7 @@ func (df *DocumentFetcher) Init( ctx context.Context, txn datastore.Txn, col client.Collection, - fields []client.FieldDescription, + fields []client.FieldDefinition, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, @@ -164,7 +164,7 @@ func (df *DocumentFetcher) Init( func (df *DocumentFetcher) init( col client.Collection, - fields []client.FieldDescription, + fields []client.FieldDefinition, filter *mapper.Filter, docMapper *core.DocumentMapping, reverse bool, @@ -194,12 +194,12 @@ func (df *DocumentFetcher) init( } df.kvIter = nil - df.selectFields = make(map[uint32]client.FieldDescription, len(fields)) + df.selectFields = make(map[uint32]client.FieldDefinition, len(fields)) // if we haven't been told to get specific fields // get them all - var targetFields []client.FieldDescription + var targetFields []client.FieldDefinition if len(fields) == 0 { - targetFields = df.col.Schema().Fields + targetFields = df.col.Definition().GetFields() } else { targetFields = fields } @@ -210,11 +210,11 @@ func (df *DocumentFetcher) init( if df.filter != nil { conditions := df.filter.ToMap(df.mapping) - parsedfilterFields, err := parser.ParseFilterFieldsForDescription(conditions, df.col.Schema()) + parsedfilterFields, err := parser.ParseFilterFieldsForDescription(conditions, df.col.Definition()) if err != nil { return err } - df.filterFields = make(map[uint32]client.FieldDescription, len(parsedfilterFields)) + df.filterFields = make(map[uint32]client.FieldDefinition, len(parsedfilterFields)) df.filterSet = bitset.New(uint(len(col.Schema().Fields))) for _, field := range parsedfilterFields { df.filterFields[uint32(field.ID)] = field diff --git a/db/fetcher/indexer.go b/db/fetcher/indexer.go index b8608e2b7d..158c7cb88d 100644 --- a/db/fetcher/indexer.go +++ b/db/fetcher/indexer.go @@ -23,19 +23,18 @@ import ( // IndexFetcher is a fetcher that fetches documents by index. // It fetches only the indexed field and the rest of the fields are fetched by the internal fetcher. 
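For illustration only: a self-contained sketch of the split described above, assuming a requested field set and an index covering a subset of it; all names are illustrative.

	requested := []string{"name", "age", "email"} // fields the caller asked for
	indexed := map[string]bool{"name": true}      // fields covered by the index
	var docFields []string                        // fields the wrapped fetcher must still load
	for _, field := range requested {
		if !indexed[field] {
			docFields = append(docFields, field)
		}
	}
	// docFields == ["age", "email"]; "name" is read straight from the index key.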
type IndexFetcher struct { - docFetcher Fetcher - col client.Collection - txn datastore.Txn - indexFilter *mapper.Filter - docFilter *mapper.Filter - doc *encodedDocument - mapping *core.DocumentMapping - indexedField client.FieldDescription - docFields []client.FieldDescription - indexDesc client.IndexDescription - indexIter indexIterator - indexDataStoreKey core.IndexDataStoreKey - execInfo ExecInfo + docFetcher Fetcher + col client.Collection + txn datastore.Txn + indexFilter *mapper.Filter + docFilter *mapper.Filter + doc *encodedDocument + mapping *core.DocumentMapping + indexedFields []client.FieldDefinition + docFields []client.FieldDefinition + indexDesc client.IndexDescription + indexIter indexIterator + execInfo ExecInfo } var _ Fetcher = (*IndexFetcher)(nil) @@ -43,13 +42,13 @@ var _ Fetcher = (*IndexFetcher)(nil) // NewIndexFetcher creates a new IndexFetcher. func NewIndexFetcher( docFetcher Fetcher, - indexedFieldDesc client.FieldDescription, + indexDesc client.IndexDescription, indexFilter *mapper.Filter, ) *IndexFetcher { return &IndexFetcher{ - docFetcher: docFetcher, - indexedField: indexedFieldDesc, - indexFilter: indexFilter, + docFetcher: docFetcher, + indexDesc: indexDesc, + indexFilter: indexFilter, } } @@ -57,7 +56,7 @@ func (f *IndexFetcher) Init( ctx context.Context, txn datastore.Txn, col client.Collection, - fields []client.FieldDescription, + fields []client.FieldDefinition, filter *mapper.Filter, docMapper *core.DocumentMapping, reverse bool, @@ -69,24 +68,25 @@ func (f *IndexFetcher) Init( f.mapping = docMapper f.txn = txn - for _, index := range col.Description().Indexes { - if index.Fields[0].Name == f.indexedField.Name { - f.indexDesc = index - f.indexDataStoreKey.IndexID = index.ID - break + for _, indexedField := range f.indexDesc.Fields { + field, ok := f.col.Definition().GetFieldByName(indexedField.Name) + if ok { + f.indexedFields = append(f.indexedFields, field) } } - f.indexDataStoreKey.CollectionID = f.col.ID() - + f.docFields = make([]client.FieldDefinition, 0, len(fields)) +outer: for i := range fields { - if fields[i].Name == f.indexedField.Name { - f.docFields = append(fields[:i], fields[i+1:]...) - break + for j := range f.indexedFields { + if fields[i].Name == f.indexedFields[j].Name { + continue outer + } } + f.docFields = append(f.docFields, fields[i]) } - iter, err := createIndexIterator(f.indexDataStoreKey, f.indexFilter, &f.execInfo, f.indexDesc.Unique) + iter, err := f.createIndexIterator() if err != nil { return err } @@ -123,18 +123,36 @@ func (f *IndexFetcher) FetchNext(ctx context.Context) (EncodedDocument, ExecInfo return nil, f.execInfo, nil } - property := &encProperty{ - Desc: f.indexedField, - Raw: res.key.FieldValues[0], + hasNilField := false + for i, indexedField := range f.indexedFields { + property := &encProperty{Desc: indexedField} + + field := res.key.Fields[i] + if field.Value == nil { + hasNilField = true + } + + // We need to convert it to cbor bytes as this is what it will be encoded from on value retrieval. + // In the future we have to either get rid of CBOR or properly handle different encoding + // for properties in a single document. 
+			fieldBytes, err := client.NewFieldValue(client.NONE_CRDT, field.Value).Bytes()
+			if err != nil {
+				return nil, ExecInfo{}, err
+			}
+			property.Raw = fieldBytes
+
+			f.doc.properties[indexedField] = property
 		}
 
-		if f.indexDesc.Unique {
+		if f.indexDesc.Unique && !hasNilField {
 			f.doc.id = res.value
 		} else {
-			f.doc.id = res.key.FieldValues[1]
+			lastVal := res.key.Fields[len(res.key.Fields)-1].Value
+			docID, ok := lastVal.(string)
+			if !ok {
+				return nil, ExecInfo{}, NewErrUnexpectedTypeValue[string](lastVal)
+			}
+			f.doc.id = []byte(docID)
 		}
-		f.doc.properties[f.indexedField] = property
-		f.execInfo.FieldsFetched++
 
 		if f.docFetcher != nil && len(f.docFields) > 0 {
 			targetKey := base.MakeDataStoreKeyWithCollectionAndDocID(f.col.Description(), string(f.doc.id))
diff --git a/db/fetcher/indexer_iterators.go b/db/fetcher/indexer_iterators.go
index aa24605559..482c15d31a 100644
--- a/db/fetcher/indexer_iterators.go
+++ b/db/fetcher/indexer_iterators.go
@@ -11,12 +11,11 @@
 package fetcher
 
 import (
-	"bytes"
+	"cmp"
 	"context"
 	"errors"
 	"strings"
 
-	"github.com/fxamacker/cbor/v2"
 	ds "github.com/ipfs/go-datastore"
 
 	"github.com/sourcenetwork/defradb/client"
@@ -29,16 +28,24 @@ const (
-	opEq    = "_eq"
-	opGt    = "_gt"
-	opGe    = "_ge"
-	opLt    = "_lt"
-	opLe    = "_le"
-	opNe    = "_ne"
-	opIn    = "_in"
-	opNin   = "_nin"
-	opLike  = "_like"
-	opNlike = "_nlike"
+	opEq     = "_eq"
+	opGt     = "_gt"
+	opGe     = "_ge"
+	opLt     = "_lt"
+	opLe     = "_le"
+	opNe     = "_ne"
+	opIn     = "_in"
+	opNin    = "_nin"
+	opLike   = "_like"
+	opNlike  = "_nlike"
+	opILike  = "_ilike"
+	opNILike = "_nilike"
+	// It's just there for composite indexes. We construct a slice of value matchers, with
+	// every matcher being responsible for matching a corresponding field in the index.
+	// For some fields there might not be any criteria to match. For example, if you have a
+	// composite index of /name/age/email/ and the filter specifies only "name" and "email",
+	// then the "_any" matcher will be used for "age".
+	opAny = "_any"
 )
 
 // indexIterator is an iterator over index keys.
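For illustration only: a hedged sketch of how a filter decomposes against a composite index on (name, age, email), per the _any comment above. fieldFilterCond is defined later in this file; the values are invented.

	// Filter: {name: {_eq: "Alice"}, email: {_like: "%@source.network"}}
	conditions := []fieldFilterCond{
		{op: opEq, val: "Alice", kind: client.FieldKind_NILLABLE_STRING},              // name
		{op: opAny},                                                                   // age: no criterion in the filter
		{op: opLike, val: "%@source.network", kind: client.FieldKind_NILLABLE_STRING}, // email
	}
	_ = conditions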
@@ -57,71 +64,68 @@ type indexIterResult struct { } type queryResultIterator struct { - resultIter query.Results + resultIter query.Results + indexDesc client.IndexDescription + indexedFields []client.FieldDefinition } -func (i *queryResultIterator) Next() (indexIterResult, error) { - res, hasVal := i.resultIter.NextSync() +func (iter *queryResultIterator) Next() (indexIterResult, error) { + res, hasVal := iter.resultIter.NextSync() if res.Error != nil { return indexIterResult{}, res.Error } if !hasVal { return indexIterResult{}, nil } - key, err := core.NewIndexDataStoreKey(res.Key) + key, err := core.DecodeIndexDataStoreKey([]byte(res.Key), &iter.indexDesc, iter.indexedFields) if err != nil { return indexIterResult{}, err } + return indexIterResult{key: key, value: res.Value, foundKey: true}, nil } -func (i *queryResultIterator) Close() error { - return i.resultIter.Close() +func (iter *queryResultIterator) Close() error { + return iter.resultIter.Close() } type eqPrefixIndexIterator struct { - filterValueHolder + queryResultIterator indexKey core.IndexDataStoreKey execInfo *ExecInfo - - queryResultIterator + matchers []valueMatcher } -func (i *eqPrefixIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error { - i.indexKey.FieldValues = [][]byte{i.value} +func (iter *eqPrefixIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error { resultIter, err := store.Query(ctx, query.Query{ - Prefix: i.indexKey.ToString(), + Prefix: iter.indexKey.ToString(), }) if err != nil { return err } - i.resultIter = resultIter + iter.resultIter = resultIter return nil } -func (i *eqPrefixIndexIterator) Next() (indexIterResult, error) { - res, err := i.queryResultIterator.Next() - if res.foundKey { - i.execInfo.IndexesFetched++ +func (iter *eqPrefixIndexIterator) Next() (indexIterResult, error) { + for { + res, err := iter.queryResultIterator.Next() + if err != nil || !res.foundKey { + return res, err + } + iter.execInfo.IndexesFetched++ + doesMatch, err := executeValueMatchers(iter.matchers, res.key.Fields) + if err != nil { + return indexIterResult{}, err + } + if !doesMatch { + continue + } + return res, err } - return res, err -} - -type filterValueIndexIterator interface { - indexIterator - SetFilterValue([]byte) -} - -type filterValueHolder struct { - value []byte -} - -func (h *filterValueHolder) SetFilterValue(value []byte) { - h.value = value } type eqSingleIndexIterator struct { - filterValueHolder indexKey core.IndexDataStoreKey execInfo *ExecInfo @@ -129,27 +133,26 @@ type eqSingleIndexIterator struct { store datastore.DSReaderWriter } -func (i *eqSingleIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error { - i.ctx = ctx - i.store = store +func (iter *eqSingleIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error { + iter.ctx = ctx + iter.store = store return nil } -func (i *eqSingleIndexIterator) Next() (indexIterResult, error) { - if i.store == nil { +func (iter *eqSingleIndexIterator) Next() (indexIterResult, error) { + if iter.store == nil { return indexIterResult{}, nil } - i.indexKey.FieldValues = [][]byte{i.value} - val, err := i.store.Get(i.ctx, i.indexKey.ToDS()) + val, err := iter.store.Get(iter.ctx, iter.indexKey.ToDS()) if err != nil { if errors.Is(err, ds.ErrNotFound) { - return indexIterResult{key: i.indexKey}, nil + return indexIterResult{key: iter.indexKey}, nil } return indexIterResult{}, err } - i.store = nil - i.execInfo.IndexesFetched++ - return indexIterResult{key: i.indexKey, value: 
val, foundKey: true}, nil + iter.store = nil + iter.execInfo.IndexesFetched++ + return indexIterResult{key: iter.indexKey, value: val, foundKey: true}, nil } func (i *eqSingleIndexIterator) Close() error { @@ -157,61 +160,56 @@ func (i *eqSingleIndexIterator) Close() error { } type inIndexIterator struct { - filterValueIndexIterator - filterValues [][]byte + indexIterator + inValues []any nextValIndex int ctx context.Context store datastore.DSReaderWriter hasIterator bool } -func newInIndexIterator( - indexIter filterValueIndexIterator, - filterValues [][]byte, -) *inIndexIterator { - return &inIndexIterator{ - filterValueIndexIterator: indexIter, - filterValues: filterValues, - } -} - -func (i *inIndexIterator) nextIterator() (bool, error) { - if i.nextValIndex > 0 { - err := i.filterValueIndexIterator.Close() +func (iter *inIndexIterator) nextIterator() (bool, error) { + if iter.nextValIndex > 0 { + err := iter.indexIterator.Close() if err != nil { return false, err } } - if i.nextValIndex >= len(i.filterValues) { + if iter.nextValIndex >= len(iter.inValues) { return false, nil } - i.SetFilterValue(i.filterValues[i.nextValIndex]) - err := i.filterValueIndexIterator.Init(i.ctx, i.store) + switch fieldIter := iter.indexIterator.(type) { + case *eqPrefixIndexIterator: + fieldIter.indexKey.Fields[0].Value = iter.inValues[iter.nextValIndex] + case *eqSingleIndexIterator: + fieldIter.indexKey.Fields[0].Value = iter.inValues[iter.nextValIndex] + } + err := iter.indexIterator.Init(iter.ctx, iter.store) if err != nil { return false, err } - i.nextValIndex++ + iter.nextValIndex++ return true, nil } -func (i *inIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error { - i.ctx = ctx - i.store = store +func (iter *inIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error { + iter.ctx = ctx + iter.store = store var err error - i.hasIterator, err = i.nextIterator() + iter.hasIterator, err = iter.nextIterator() return err } -func (i *inIndexIterator) Next() (indexIterResult, error) { - for i.hasIterator { - res, err := i.filterValueIndexIterator.Next() +func (iter *inIndexIterator) Next() (indexIterResult, error) { + for iter.hasIterator { + res, err := iter.indexIterator.Next() if err != nil { return indexIterResult{}, err } if !res.foundKey { - i.hasIterator, err = i.nextIterator() + iter.hasIterator, err = iter.nextIterator() if err != nil { return indexIterResult{}, err } @@ -222,133 +220,182 @@ func (i *inIndexIterator) Next() (indexIterResult, error) { return indexIterResult{}, nil } -func (i *inIndexIterator) Close() error { +func (iter *inIndexIterator) Close() error { return nil } -type errorCheckingFilter struct { - matcher indexMatcher - err error -} - -func (f *errorCheckingFilter) Filter(e query.Entry) bool { - if f.err != nil { - return false - } - indexKey, err := core.NewIndexDataStoreKey(e.Key) - if err != nil { - f.err = err - return false - } - res, err := f.matcher.Match(indexKey) - if err != nil { - f.err = err - return false +func executeValueMatchers(matchers []valueMatcher, fields []core.IndexedField) (bool, error) { + for i := range matchers { + res, err := matchers[i].Match(fields[i].Value) + if err != nil { + return false, err + } + if !res { + return false, nil + } } - return res -} - -// execInfoIndexMatcherDecorator is a decorator for indexMatcher that counts the number -// of indexes fetched on every call to Match. 
-type execInfoIndexMatcherDecorator struct {
-	matcher  indexMatcher
-	execInfo *ExecInfo
-}
-
-func (d *execInfoIndexMatcherDecorator) Match(key core.IndexDataStoreKey) (bool, error) {
-	d.execInfo.IndexesFetched++
-	return d.matcher.Match(key)
+	return true, nil
 }
 
 type scanningIndexIterator struct {
 	queryResultIterator
 	indexKey core.IndexDataStoreKey
-	matcher  indexMatcher
-	filter   errorCheckingFilter
+	matchers []valueMatcher
 	execInfo *ExecInfo
 }
 
-func (i *scanningIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error {
-	i.filter.matcher = &execInfoIndexMatcherDecorator{matcher: i.matcher, execInfo: i.execInfo}
-
-	iter, err := store.Query(ctx, query.Query{
-		Prefix:  i.indexKey.ToString(),
-		Filters: []query.Filter{&i.filter},
+func (iter *scanningIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error {
+	resultIter, err := store.Query(ctx, query.Query{
+		Prefix: iter.indexKey.ToString(),
 	})
 	if err != nil {
 		return err
 	}
-	i.resultIter = iter
+	iter.resultIter = resultIter
 	return nil
 }
 
-func (i *scanningIndexIterator) Next() (indexIterResult, error) {
-	res, err := i.queryResultIterator.Next()
-	if i.filter.err != nil {
-		return indexIterResult{}, i.filter.err
+func (iter *scanningIndexIterator) Next() (indexIterResult, error) {
+	for {
+		res, err := iter.queryResultIterator.Next()
+		if err != nil || !res.foundKey {
+			return indexIterResult{}, err
+		}
+		iter.execInfo.IndexesFetched++
+
+		didMatch, err := executeValueMatchers(iter.matchers, res.key.Fields)
+		if err != nil {
+			return indexIterResult{}, err
+		}
+
+		if didMatch {
+			return res, nil
+		}
 	}
-	return res, err
 }
 
-// checks if the stored index value satisfies the condition
-type indexMatcher interface {
-	Match(core.IndexDataStoreKey) (bool, error)
+// checks if the value satisfies the condition
+type valueMatcher interface {
+	Match(any) (bool, error)
 }
 
-// indexByteValuesMatcher is a filter that compares the index value with a given value.
-// It uses bytes.Compare to compare the values and evaluate the result with evalFunc.
-type indexByteValuesMatcher struct {
-	value []byte
-	// evalFunc receives a result of bytes.Compare
-	evalFunc func(int) bool
+type intMatcher struct {
+	value    int64
+	evalFunc func(int64, int64) bool
 }
 
-func (m *indexByteValuesMatcher) Match(key core.IndexDataStoreKey) (bool, error) {
-	res := bytes.Compare(key.FieldValues[0], m.value)
-	return m.evalFunc(res), nil
+func (m *intMatcher) Match(value any) (bool, error) {
+	intVal, ok := value.(int64)
+	if !ok {
+		return false, NewErrUnexpectedTypeValue[int64](value)
+	}
+	return m.evalFunc(intVal, m.value), nil
 }
 
-// matcher if _ne condition is met
-type neIndexMatcher struct {
-	value []byte
+type floatMatcher struct {
+	value    float64
+	evalFunc func(float64, float64) bool
 }
 
-func (m *neIndexMatcher) Match(key core.IndexDataStoreKey) (bool, error) {
-	return !bytes.Equal(key.FieldValues[0], m.value), nil
+func (m *floatMatcher) Match(value any) (bool, error) {
+	floatVal, ok := value.(float64)
+	if !ok {
+		return false, NewErrUnexpectedTypeValue[float64](value)
+	}
+	return m.evalFunc(floatVal, m.value), nil
+}
+
+type stringMatcher struct {
+	value    string
+	evalFunc func(string, string) bool
+}
+
+func (m *stringMatcher) Match(value any) (bool, error) {
+	stringVal, ok := value.(string)
+	if !ok {
+		return false, NewErrUnexpectedTypeValue[string](value)
+	}
+	return m.evalFunc(stringVal, m.value), nil
+}
+
+type nilMatcher struct{}
+
+func (m *nilMatcher) Match(value any) (bool, error) {
+	return value == nil, nil
 }
 
 // checks if the index value is or is not in the given array
 type indexInArrayMatcher struct {
-	values map[string]bool
-	isIn   bool
+	inValues []any
+	isIn     bool
 }
 
-func newNinIndexCmp(values [][]byte, isIn bool) *indexInArrayMatcher {
-	valuesMap := make(map[string]bool)
-	for _, v := range values {
-		valuesMap[string(v)] = true
+func newNinIndexCmp(values []any, kind client.FieldKind, isIn bool) (*indexInArrayMatcher, error) {
+	normalizeValueFunc := getNormalizeValueFunc(kind)
+	for i := range values {
+		normalized, err := normalizeValueFunc(values[i])
+		if err != nil {
+			return nil, err
+		}
+		values[i] = normalized
 	}
-	return &indexInArrayMatcher{values: valuesMap, isIn: isIn}
+	return &indexInArrayMatcher{inValues: values, isIn: isIn}, nil
 }
 
-func (m *indexInArrayMatcher) Match(key core.IndexDataStoreKey) (bool, error) {
-	_, found := m.values[string(key.FieldValues[0])]
-	return found == m.isIn, nil
+func getNormalizeValueFunc(kind client.FieldKind) func(any) (any, error) {
+	switch kind {
+	case client.FieldKind_NILLABLE_INT:
+		return func(value any) (any, error) {
+			if v, ok := value.(int64); ok {
+				return v, nil
+			}
+			if v, ok := value.(int32); ok {
+				return int64(v), nil
+			}
+			return nil, ErrInvalidInOperatorValue
+		}
+	case client.FieldKind_NILLABLE_FLOAT:
+		return func(value any) (any, error) {
+			if v, ok := value.(float64); ok {
+				return v, nil
+			}
+			if v, ok := value.(float32); ok {
+				return float64(v), nil
+			}
+			return nil, ErrInvalidInOperatorValue
+		}
+	case client.FieldKind_NILLABLE_STRING:
+		return func(value any) (any, error) {
+			if v, ok := value.(string); ok {
+				return v, nil
+			}
+			return nil, ErrInvalidInOperatorValue
+		}
+	}
+	return nil
+}
+
+func (m *indexInArrayMatcher) Match(value any) (bool, error) {
+	for _, inVal := range m.inValues {
+		if inVal == value {
+			return m.isIn, nil
+		}
+	}
+	return !m.isIn, nil
 }
 
 // checks if the index value satisfies the LIKE condition
 type indexLikeMatcher struct {
-	hasPrefix   bool
-	hasSuffix   bool
-	startAndEnd []string
-	isLike      bool
-	value       string
+	hasPrefix         bool
+	hasSuffix         bool
+	startAndEnd       []string
+	isLike            bool
+	isCaseInsensitive bool
+	value             string
 }
 
-func newLikeIndexCmp(filterValue string, isLike bool) *indexLikeMatcher {
+func newLikeIndexCmp(filterValue string, isLike bool, isCaseInsensitive bool) (*indexLikeMatcher, error) {
 	matcher := &indexLikeMatcher{
-		isLike: isLike,
+		isLike:            isLike,
+		isCaseInsensitive: isCaseInsensitive,
 	}
 	if len(filterValue) >= 2 {
 		if filterValue[0] == '%' {
@@ -363,16 +410,23 @@ func newLikeIndexCmp(filterValue string, isLike bool) *indexLikeMatcher {
 			matcher.startAndEnd = strings.Split(filterValue, "%")
 		}
 	}
-	matcher.value = filterValue
+	if isCaseInsensitive {
+		matcher.value = strings.ToLower(filterValue)
+	} else {
+		matcher.value = filterValue
+	}
 
-	return matcher
+	return matcher, nil
 }
 
-func (m *indexLikeMatcher) Match(key core.IndexDataStoreKey) (bool, error) {
-	var currentVal string
-	err := cbor.Unmarshal(key.FieldValues[0], &currentVal)
-	if err != nil {
-		return false, err
+func (m *indexLikeMatcher) Match(value any) (bool, error) {
+	currentVal, ok := value.(string)
+	if !ok {
+		return false, NewErrUnexpectedTypeValue[string](currentVal)
+	}
+
+	if m.isCaseInsensitive {
+		currentVal = strings.ToLower(currentVal)
 	}
 
 	return m.doesMatch(currentVal) == m.isLike, nil
@@ -395,146 +449,306 @@ func (m *indexLikeMatcher) doesMatch(currentVal string) bool {
 	}
 }
 
-func createIndexIterator(
-	indexDataStoreKey core.IndexDataStoreKey,
-	indexFilterConditions *mapper.Filter,
-	execInfo *ExecInfo,
-	isUnique bool,
-) (indexIterator, error) {
-	var op string
-	var filterVal any
-	for _, indexFilterCond := range indexFilterConditions.Conditions {
-		condMap := indexFilterCond.(map[connor.FilterKey]any)
-		var key connor.FilterKey
-		for key, filterVal = range condMap {
+type anyMatcher struct{}
+
+func (m *anyMatcher) Match(any) (bool, error) { return true, nil }
+
+// newPrefixIndexIterator creates a new eqPrefixIndexIterator for fetching indexed data.
+// It can modify the input matchers slice.
+func (f *IndexFetcher) newPrefixIndexIterator(
+	fieldConditions []fieldFilterCond,
+	matchers []valueMatcher,
+) (*eqPrefixIndexIterator, error) {
+	keyFieldValues := make([]any, 0, len(fieldConditions))
+	for i := range fieldConditions {
+		if fieldConditions[i].op != opEq {
+			// a prefix can be built only from consecutive leading _eq conditions;
+			// once we hit any other condition, we have built the longest prefix we can
+			break
+		}
-		opKey := key.(*mapper.Operator)
-		op = opKey.Operation
-		break
+
+		keyFieldValues = append(keyFieldValues, fieldConditions[i].val)
 	}
 
-	switch op {
-	case opEq, opGt, opGe, opLt, opLe, opNe:
-		fieldValue := client.NewFieldValue(client.LWW_REGISTER, filterVal)
+	// iterators for an _eq filter already iterate over keys whose first field value
+	// matches the filter value, so we can skip the first matcher
+	if len(matchers) > 1 {
+		matchers[0] = &anyMatcher{}
+	}
 
-		valueBytes, err := fieldValue.Bytes()
-		if err != nil {
-			return nil, err
+	key := f.newIndexDataStoreKeyWithValues(keyFieldValues)
+
+	return &eqPrefixIndexIterator{
+		queryResultIterator: f.newQueryResultIterator(),
+		indexKey:            key,
+		execInfo:            &f.execInfo,
+		matchers:            matchers,
+	}, nil
+}
+
+func (f *IndexFetcher) newQueryResultIterator() queryResultIterator {
+	return queryResultIterator{indexDesc: f.indexDesc, indexedFields: f.indexedFields}
+}
+
+// newInIndexIterator creates a new inIndexIterator for fetching indexed data.
+// It can modify the input matchers slice.
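For illustration only, before the _in iterator defined next (which applies the same first-matcher elision as newPrefixIndexIterator above): a sketch with assumed conditions name _eq "Alice" and age _gt 30; getCompareValsFunc is defined near the end of this file.

	matchers := []valueMatcher{
		&stringMatcher{value: "Alice", evalFunc: getCompareValsFunc[string](opEq)},
		&intMatcher{value: 30, evalFunc: getCompareValsFunc[int64](opGt)},
	}
	if len(matchers) > 1 {
		// The key prefix already pins name == "Alice", so its matcher is redundant.
		matchers[0] = &anyMatcher{}
	}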
+func (f *IndexFetcher) newInIndexIterator( + fieldConditions []fieldFilterCond, + matchers []valueMatcher, +) (*inIndexIterator, error) { + inArr, ok := fieldConditions[0].val.([]any) + if !ok { + return nil, ErrInvalidInOperatorValue + } + inValues := make([]any, 0, len(inArr)) + for _, v := range inArr { + fieldVal := client.NewFieldValue(client.NONE_CRDT, v) + inValues = append(inValues, fieldVal.Value()) + } + + // iterators for _in filter already iterate over keys with first field value + // matching the filter value, so we can skip the first matcher + if len(matchers) > 1 { + matchers[0] = &anyMatcher{} + } + + var iter indexIterator + if isUniqueFetchByFullKey(&f.indexDesc, fieldConditions) { + keyFieldValues := make([]any, len(fieldConditions)) + for i := range fieldConditions { + keyFieldValues[i] = fieldConditions[i].val + } + + key := f.newIndexDataStoreKeyWithValues(keyFieldValues) + + iter = &eqSingleIndexIterator{ + indexKey: key, + execInfo: &f.execInfo, + } + } else { + indexKey := f.newIndexDataStoreKey() + indexKey.Fields = []core.IndexedField{{Descending: f.indexDesc.Fields[0].Descending}} + + iter = &eqPrefixIndexIterator{ + queryResultIterator: f.newQueryResultIterator(), + indexKey: indexKey, + execInfo: &f.execInfo, + matchers: matchers, } + } + return &inIndexIterator{ + indexIterator: iter, + inValues: inValues, + }, nil +} - switch op { - case opEq: - if isUnique { - return &eqSingleIndexIterator{ - indexKey: indexDataStoreKey, - filterValueHolder: filterValueHolder{ - value: valueBytes, - }, - execInfo: execInfo, - }, nil - } else { - return &eqPrefixIndexIterator{ - indexKey: indexDataStoreKey, - filterValueHolder: filterValueHolder{ - value: valueBytes, - }, - execInfo: execInfo, - }, nil +func (f *IndexFetcher) newIndexDataStoreKey() core.IndexDataStoreKey { + key := core.IndexDataStoreKey{CollectionID: f.col.ID(), IndexID: f.indexDesc.ID} + return key +} + +func (f *IndexFetcher) newIndexDataStoreKeyWithValues(values []any) core.IndexDataStoreKey { + fields := make([]core.IndexedField, len(values)) + for i := range values { + fields[i].Value = values[i] + fields[i].Descending = f.indexDesc.Fields[i].Descending + } + return core.NewIndexDataStoreKey(f.col.ID(), f.indexDesc.ID, fields) +} + +func (f *IndexFetcher) createIndexIterator() (indexIterator, error) { + fieldConditions := f.determineFieldFilterConditions() + + matchers, err := createValueMatchers(fieldConditions) + if err != nil { + return nil, err + } + + switch fieldConditions[0].op { + case opEq: + if isUniqueFetchByFullKey(&f.indexDesc, fieldConditions) { + keyFieldValues := make([]any, len(fieldConditions)) + for i := range fieldConditions { + keyFieldValues[i] = fieldConditions[i].val } - case opGt: - return &scanningIndexIterator{ - indexKey: indexDataStoreKey, - matcher: &indexByteValuesMatcher{ - value: valueBytes, - evalFunc: func(res int) bool { return res > 0 }, - }, - execInfo: execInfo, - }, nil - case opGe: - return &scanningIndexIterator{ - indexKey: indexDataStoreKey, - matcher: &indexByteValuesMatcher{ - value: valueBytes, - evalFunc: func(res int) bool { return res > 0 || res == 0 }, - }, - execInfo: execInfo, - }, nil - case opLt: - return &scanningIndexIterator{ - indexKey: indexDataStoreKey, - matcher: &indexByteValuesMatcher{ - value: valueBytes, - evalFunc: func(res int) bool { return res < 0 }, - }, - execInfo: execInfo, - }, nil - case opLe: - return &scanningIndexIterator{ - indexKey: indexDataStoreKey, - matcher: &indexByteValuesMatcher{ - value: valueBytes, - evalFunc: 
func(res int) bool { return res < 0 || res == 0 }, - }, - execInfo: execInfo, - }, nil - case opNe: - return &scanningIndexIterator{ - indexKey: indexDataStoreKey, - matcher: &neIndexMatcher{ - value: valueBytes, - }, - execInfo: execInfo, + + key := f.newIndexDataStoreKeyWithValues(keyFieldValues) + + return &eqSingleIndexIterator{ + indexKey: key, + execInfo: &f.execInfo, }, nil + } else { + return f.newPrefixIndexIterator(fieldConditions, matchers) + } + case opIn: + return f.newInIndexIterator(fieldConditions, matchers) + case opGt, opGe, opLt, opLe, opNe, opNin, opLike, opNlike, opILike, opNILike: + return &scanningIndexIterator{ + queryResultIterator: f.newQueryResultIterator(), + indexKey: f.newIndexDataStoreKey(), + matchers: matchers, + execInfo: &f.execInfo, + }, nil + } + + return nil, NewErrInvalidFilterOperator(fieldConditions[0].op) +} + +func createValueMatcher(condition *fieldFilterCond) (valueMatcher, error) { + if condition.op == "" { + return &anyMatcher{}, nil + } + + if client.IsNillableKind(condition.kind) && condition.val == nil { + return &nilMatcher{}, nil + } + + switch condition.op { + case opEq, opGt, opGe, opLt, opLe, opNe: + switch condition.kind { + case client.FieldKind_NILLABLE_INT: + var intVal int64 + switch v := condition.val.(type) { + case int64: + intVal = v + case int32: + intVal = int64(v) + case int: + intVal = int64(v) + default: + return nil, NewErrUnexpectedTypeValue[int64](condition.val) + } + return &intMatcher{value: intVal, evalFunc: getCompareValsFunc[int64](condition.op)}, nil + case client.FieldKind_NILLABLE_FLOAT: + floatVal, ok := condition.val.(float64) + if !ok { + return nil, NewErrUnexpectedTypeValue[float64](condition.val) + } + return &floatMatcher{value: floatVal, evalFunc: getCompareValsFunc[float64](condition.op)}, nil + case client.FieldKind_DocID, client.FieldKind_NILLABLE_STRING: + strVal, ok := condition.val.(string) + if !ok { + return nil, NewErrUnexpectedTypeValue[string](condition.val) + } + return &stringMatcher{value: strVal, evalFunc: getCompareValsFunc[string](condition.op)}, nil } case opIn, opNin: - inArr, ok := filterVal.([]any) + inArr, ok := condition.val.([]any) if !ok { - return nil, errors.New("invalid _in/_nin value") + return nil, ErrInvalidInOperatorValue } - valArr := make([][]byte, 0, len(inArr)) - for _, v := range inArr { - fieldValue := client.NewFieldValue(client.LWW_REGISTER, v) - valueBytes, err := fieldValue.Bytes() - if err != nil { - return nil, err - } - valArr = append(valArr, valueBytes) + return newNinIndexCmp(inArr, condition.kind, condition.op == opIn) + case opLike, opNlike, opILike, opNILike: + strVal, ok := condition.val.(string) + if !ok { + return nil, NewErrUnexpectedTypeValue[string](condition.val) + } + isLike := condition.op == opLike || condition.op == opILike + isCaseInsensitive := condition.op == opILike || condition.op == opNILike + return newLikeIndexCmp(strVal, isLike, isCaseInsensitive) + case opAny: + return &anyMatcher{}, nil + } + + return nil, NewErrInvalidFilterOperator(condition.op) +} + +func createValueMatchers(conditions []fieldFilterCond) ([]valueMatcher, error) { + matchers := make([]valueMatcher, 0, len(conditions)) + for i := range conditions { + m, err := createValueMatcher(&conditions[i]) + if err != nil { + return nil, err } - if op == opIn { - var iter filterValueIndexIterator - if isUnique { - iter = &eqSingleIndexIterator{ - indexKey: indexDataStoreKey, - execInfo: execInfo, - } - } else { - iter = &eqPrefixIndexIterator{ - indexKey: indexDataStoreKey, - 
execInfo: execInfo,
-			}
-		}
-		return newInIndexIterator(iter, valArr), nil
-	} else {
-		return &scanningIndexIterator{
-			indexKey: indexDataStoreKey,
-			matcher:  newNinIndexCmp(valArr, false),
-			execInfo: execInfo,
-		}, nil
+		matchers = append(matchers, m)
 	}
+	return matchers, nil
+}
+
+type fieldFilterCond struct {
+	op   string
+	val  any
+	kind client.FieldKind
+}
+
+// determineFieldFilterConditions determines the conditions and their corresponding operation
+// for each indexed field.
+// It returns a slice of fieldFilterCond, where each element corresponds to a field in the index.
+func (f *IndexFetcher) determineFieldFilterConditions() []fieldFilterCond {
+	result := make([]fieldFilterCond, 0, len(f.indexedFields))
+	for i := range f.indexedFields {
+		fieldInd := f.mapping.FirstIndexOfName(f.indexedFields[i].Name)
+		found := false
+		// iterate through the conditions and find the one that matches the current field
+		for filterKey, indexFilterCond := range f.indexFilter.Conditions {
+			propKey, ok := filterKey.(*mapper.PropertyIndex)
+			if !ok || fieldInd != propKey.Index {
+				continue
+			}
+
+			found = true
+
+			condMap := indexFilterCond.(map[connor.FilterKey]any)
+			for key, filterVal := range condMap {
+				opKey := key.(*mapper.Operator)
+				result = append(result, fieldFilterCond{
+					op:   opKey.Operation,
+					val:  filterVal,
+					kind: f.indexedFields[i].Kind,
+				})
+				break
+			}
+			break
+		}
+		if !found {
+			result = append(result, fieldFilterCond{op: opAny})
+		}
 	}
-	case opLike:
-		return &scanningIndexIterator{
-			indexKey: indexDataStoreKey,
-			matcher:  newLikeIndexCmp(filterVal.(string), true),
-			execInfo: execInfo,
-		}, nil
-	case opNlike:
-		return &scanningIndexIterator{
-			indexKey: indexDataStoreKey,
-			matcher:  newLikeIndexCmp(filterVal.(string), false),
-			execInfo: execInfo,
-		}, nil
 	}
+	return result
+}
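For illustration only: a sketch of the full-key fast path that the function below guards, assuming a unique composite index on (name, age) with invented values.

	conditions := []fieldFilterCond{
		{op: opEq, val: "Alice"}, // name
		{op: opEq, val: 30},      // age
	}
	// Every index field is pinned by a non-nil _eq condition, so the single
	// matching entry can be read with one exact-key lookup (eqSingleIndexIterator)
	// instead of a prefix scan.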
+func isUniqueFetchByFullKey(indexDesc *client.IndexDescription, conditions []fieldFilterCond) bool { + // we need to check the length of conditions because a full-key fetch is only possible + // if all fields of the index are specified in the filter + res := indexDesc.Unique && len(conditions) == len(indexDesc.Fields) + + // the first condition is not required to be _eq, but if it is, val must not be nil + res = res && (conditions[0].op != opEq || conditions[0].val != nil) + + // for the rest, the op must be _eq and val must not be nil + for i := 1; i < len(conditions); i++ { + res = res && (conditions[i].op == opEq && conditions[i].val != nil) + } + return res +} - return nil, errors.New("invalid index filter condition") +func getCompareValsFunc[T cmp.Ordered](op string) func(T, T) bool { + switch op { + case opGt: + return checkGT + case opGe: + return checkGE + case opLt: + return checkLT + case opLe: + return checkLE + case opEq: + return checkEQ + case opNe: + return checkNE + } + return nil } + +func checkGE[T cmp.Ordered](a, b T) bool { return a >= b } +func checkGT[T cmp.Ordered](a, b T) bool { return a > b } +func checkLE[T cmp.Ordered](a, b T) bool { return a <= b } +func checkLT[T cmp.Ordered](a, b T) bool { return a < b } +func checkEQ[T cmp.Ordered](a, b T) bool { return a == b } +func checkNE[T cmp.Ordered](a, b T) bool { return a != b } diff --git a/db/fetcher/mocks/encoded_document.go b/db/fetcher/mocks/encoded_document.go index 5d9382a14d..6a517e19dd 100644 --- a/db/fetcher/mocks/encoded_document.go +++ b/db/fetcher/mocks/encoded_document.go @@ -65,19 +65,19 @@ func (_c *EncodedDocument_ID_Call) RunAndReturn(run func() []byte) *EncodedDocum } // Properties provides a mock function with given fields: onlyFilterProps -func (_m *EncodedDocument) Properties(onlyFilterProps bool) (map[client.FieldDescription]interface{}, error) { +func (_m *EncodedDocument) Properties(onlyFilterProps bool) (map[client.FieldDefinition]interface{}, error) { ret := _m.Called(onlyFilterProps) - var r0 map[client.FieldDescription]interface{} + var r0 map[client.FieldDefinition]interface{} var r1 error - if rf, ok := ret.Get(0).(func(bool) (map[client.FieldDescription]interface{}, error)); ok { + if rf, ok := ret.Get(0).(func(bool) (map[client.FieldDefinition]interface{}, error)); ok { return rf(onlyFilterProps) } - if rf, ok := ret.Get(0).(func(bool) map[client.FieldDescription]interface{}); ok { + if rf, ok := ret.Get(0).(func(bool) map[client.FieldDefinition]interface{}); ok { r0 = rf(onlyFilterProps) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(map[client.FieldDescription]interface{}) + r0 = ret.Get(0).(map[client.FieldDefinition]interface{}) } } @@ -108,12 +108,12 @@ func (_c *EncodedDocument_Properties_Call) Run(run func(onlyFilterProps bool)) * return _c } -func (_c *EncodedDocument_Properties_Call) Return(_a0 map[client.FieldDescription]interface{}, _a1 error) *EncodedDocument_Properties_Call { +func (_c *EncodedDocument_Properties_Call) Return(_a0 map[client.FieldDefinition]interface{}, _a1 error) *EncodedDocument_Properties_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *EncodedDocument_Properties_Call) RunAndReturn(run func(bool) (map[client.FieldDescription]interface{}, error)) *EncodedDocument_Properties_Call { +func (_c *EncodedDocument_Properties_Call) RunAndReturn(run func(bool) (map[client.FieldDefinition]interface{}, error)) *EncodedDocument_Properties_Call { _c.Call.Return(run) return _c }
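getCompareValsFunc above resolves a filter operator to a typed predicate once per condition, leaning on Go's cmp.Ordered constraint (available since Go 1.21); the typed matchers then apply that predicate to each candidate index value. A minimal, self-contained sketch of the same dispatch pattern, where compareFn and the "_gt"/"_le" operator strings are illustrative assumptions rather than part of this change:

package main

import (
	"cmp"
	"fmt"
)

// compareFn mirrors the dispatch style of getCompareValsFunc: the filter
// operator is resolved to a comparison predicate exactly once, so the hot
// matching loop only performs the comparison itself.
func compareFn[T cmp.Ordered](op string) func(T, T) bool {
	switch op {
	case "_gt":
		return func(a, b T) bool { return a > b }
	case "_le":
		return func(a, b T) bool { return a <= b }
	}
	return nil
}

func main() {
	gt := compareFn[int64]("_gt")
	fmt.Println(gt(25, 21)) // prints true: 25 > 21
}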
diff --git a/db/fetcher/mocks/fetcher.go b/db/fetcher/mocks/fetcher.go index 1597b13b2e..044425c70b 100644 --- a/db/fetcher/mocks/fetcher.go +++ b/db/fetcher/mocks/fetcher.go @@ -134,11 +134,11 @@ func (_c *Fetcher_FetchNext_Call) RunAndReturn(run func(context.Context) (fetche } // Init provides a mock function with given fields: ctx, txn, col, fields, filter, docmapper, reverse, showDeleted -func (_m *Fetcher) Init(ctx context.Context, txn datastore.Txn, col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool) error { +func (_m *Fetcher) Init(ctx context.Context, txn datastore.Txn, col client.Collection, fields []client.FieldDefinition, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool) error { ret := _m.Called(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, datastore.Txn, client.Collection, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, datastore.Txn, client.Collection, []client.FieldDefinition, *mapper.Filter, *core.DocumentMapping, bool, bool) error); ok { r0 = rf(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted) } else { r0 = ret.Error(0) @@ -156,7 +156,7 @@ type Fetcher_Init_Call struct { // - ctx context.Context // - txn datastore.Txn // - col client.Collection -// - fields []client.FieldDescription +// - fields []client.FieldDefinition // - filter *mapper.Filter // - docmapper *core.DocumentMapping // - reverse bool @@ -165,9 +165,9 @@ func (_e *Fetcher_Expecter) Init(ctx interface{}, txn interface{}, col interface return &Fetcher_Init_Call{Call: _e.mock.On("Init", ctx, txn, col, fields, filter, docmapper, reverse, showDeleted)} } -func (_c *Fetcher_Init_Call) Run(run func(ctx context.Context, txn datastore.Txn, col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool)) *Fetcher_Init_Call { +func (_c *Fetcher_Init_Call) Run(run func(ctx context.Context, txn datastore.Txn, col client.Collection, fields []client.FieldDefinition, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool)) *Fetcher_Init_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(datastore.Txn), args[2].(client.Collection), args[3].([]client.FieldDescription), args[4].(*mapper.Filter), args[5].(*core.DocumentMapping), args[6].(bool), args[7].(bool)) + run(args[0].(context.Context), args[1].(datastore.Txn), args[2].(client.Collection), args[3].([]client.FieldDefinition), args[4].(*mapper.Filter), args[5].(*core.DocumentMapping), args[6].(bool), args[7].(bool)) }) return _c } @@ -177,7 +177,7 @@ func (_c *Fetcher_Init_Call) Return(_a0 error) *Fetcher_Init_Call { return _c } -func (_c *Fetcher_Init_Call) RunAndReturn(run func(context.Context, datastore.Txn, client.Collection, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error) *Fetcher_Init_Call { +func (_c *Fetcher_Init_Call) RunAndReturn(run func(context.Context, datastore.Txn, client.Collection, []client.FieldDefinition, *mapper.Filter, *core.DocumentMapping, bool, bool) error) *Fetcher_Init_Call { _c.Call.Return(run) return _c } diff --git a/db/fetcher/versioned.go b/db/fetcher/versioned.go index c33f1a35da..3f05f2c29a 100644 --- a/db/fetcher/versioned.go +++ b/db/fetcher/versioned.go @@ -101,7 +101,7 @@ func (vf *VersionedFetcher) Init( ctx
context.Context, txn datastore.Txn, col client.Collection, - fields []client.FieldDescription, + fields []client.FieldDefinition, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, @@ -356,8 +356,7 @@ func (vf *VersionedFetcher) merge(c cid.Cid) error { return err } - schema := vf.col.Schema() - field, ok := vf.col.Description().GetFieldByName(l.Name, &schema) + field, ok := vf.col.Definition().GetFieldByName(l.Name) if !ok { return client.NewErrFieldNotExist(l.Name) } @@ -379,7 +378,7 @@ func (vf *VersionedFetcher) processNode( // handle CompositeDAG mcrdt, exists := vf.mCRDTs[crdtIndex] if !exists { - dsKey, err := base.MakePrimaryIndexKeyForCRDT(vf.col.Description(), vf.col.Schema(), ctype, vf.dsKey, fieldName) + dsKey, err := base.MakePrimaryIndexKeyForCRDT(vf.col.Definition(), ctype, vf.dsKey, fieldName) if err != nil { return err } diff --git a/db/index.go b/db/index.go index 59fd25eaa9..319cdeb8a7 100644 --- a/db/index.go +++ b/db/index.go @@ -25,16 +25,9 @@ import ( // It abstracts away common index functionality to be implemented // by different index types: non-unique, unique, and composite type CollectionIndex interface { - // Save indexes a document by storing it - Save(context.Context, datastore.Txn, *client.Document) error - // Update updates an existing document in the index - Update(context.Context, datastore.Txn, *client.Document, *client.Document) error + client.CollectionIndex // RemoveAll removes all documents from the index RemoveAll(context.Context, datastore.Txn) error - // Name returns the name of the index - Name() string - // Description returns the description of the index - Description() client.IndexDescription } func canConvertIndexFieldValue[T any](val any) bool { @@ -44,15 +37,15 @@ func canConvertIndexFieldValue[T any](val any) bool { func getValidateIndexFieldFunc(kind client.FieldKind) func(any) bool { switch kind { - case client.FieldKind_STRING, client.FieldKind_FOREIGN_OBJECT: + case client.FieldKind_NILLABLE_STRING, client.FieldKind_FOREIGN_OBJECT: return canConvertIndexFieldValue[string] - case client.FieldKind_INT: + case client.FieldKind_NILLABLE_INT: return canConvertIndexFieldValue[int64] - case client.FieldKind_FLOAT: + case client.FieldKind_NILLABLE_FLOAT: return canConvertIndexFieldValue[float64] - case client.FieldKind_BOOL: + case client.FieldKind_NILLABLE_BOOL: return canConvertIndexFieldValue[bool] - case client.FieldKind_BLOB: + case client.FieldKind_NILLABLE_BLOB: return func(val any) bool { blobStrVal, ok := val.(string) if !ok { @@ -60,7 +53,7 @@ func getValidateIndexFieldFunc(kind client.FieldKind) func(any) bool { } return types.BlobPattern.MatchString(blobStrVal) } - case client.FieldKind_DATETIME: + case client.FieldKind_NILLABLE_DATETIME: return func(val any) bool { timeStrVal, ok := val.(string) if !ok { @@ -90,16 +83,20 @@ func NewCollectionIndex( if len(desc.Fields) == 0 { return nil, NewErrIndexDescHasNoFields(desc) } - field, foundField := collection.Schema().GetField(desc.Fields[0].Name) - if !foundField { - return nil, NewErrIndexDescHasNonExistingField(desc, desc.Fields[0].Name) - } base := collectionBaseIndex{collection: collection, desc: desc} - base.fieldDesc = field - var err error - base.validateFieldFunc, err = getFieldValidateFunc(field.Kind) - if err != nil { - return nil, err + base.validateFieldFuncs = make([]func(any) bool, len(desc.Fields)) + base.fieldsDescs = make([]client.SchemaFieldDescription, len(desc.Fields)) + for i := range desc.Fields { + field, foundField := 
collection.Schema().GetFieldByName(desc.Fields[i].Name) + if !foundField { + return nil, client.NewErrFieldNotExist(desc.Fields[i].Name) + } + base.fieldsDescs[i] = field + validateFunc, err := getFieldValidateFunc(field.Kind) + if err != nil { + return nil, err + } + base.validateFieldFuncs[i] = validateFunc } if desc.Unique { return &collectionUniqueIndex{collectionBaseIndex: base}, nil @@ -109,46 +106,45 @@ func NewCollectionIndex( } type collectionBaseIndex struct { - collection client.Collection - desc client.IndexDescription - validateFieldFunc func(any) bool - fieldDesc client.FieldDescription + collection client.Collection + desc client.IndexDescription + validateFieldFuncs []func(any) bool + fieldsDescs []client.SchemaFieldDescription } -func (i *collectionBaseIndex) getDocFieldValue(doc *client.Document) ([]byte, error) { - // collectionSimpleIndex only supports single field indexes, that's why we - // can safely access the first field - indexedFieldName := i.desc.Fields[0].Name - fieldVal, err := doc.GetValue(indexedFieldName) - if err != nil { - if errors.Is(err, client.ErrFieldNotExist) { - return client.NewFieldValue(client.LWW_REGISTER, nil).Bytes() - } else { +func (index *collectionBaseIndex) getDocFieldValues(doc *client.Document) ([]*client.FieldValue, error) { + result := make([]*client.FieldValue, 0, len(index.fieldsDescs)) + for iter := range index.fieldsDescs { + fieldVal, err := doc.TryGetValue(index.fieldsDescs[iter].Name) + if err != nil { return nil, err } + if fieldVal == nil || fieldVal.Value() == nil { + result = append(result, client.NewFieldValue(client.NONE_CRDT, nil)) + continue + } + result = append(result, fieldVal) } - if !i.validateFieldFunc(fieldVal.Value()) { - return nil, NewErrInvalidFieldValue(i.fieldDesc.Kind, fieldVal) - } - return fieldVal.Bytes() + return result, nil } -func (i *collectionBaseIndex) getDocumentsIndexKey( +func (index *collectionBaseIndex) getDocumentsIndexKey( doc *client.Document, ) (core.IndexDataStoreKey, error) { - fieldValue, err := i.getDocFieldValue(doc) + fieldValues, err := index.getDocFieldValues(doc) if err != nil { return core.IndexDataStoreKey{}, err } - indexDataStoreKey := core.IndexDataStoreKey{} - indexDataStoreKey.CollectionID = i.collection.ID() - indexDataStoreKey.IndexID = i.desc.ID - indexDataStoreKey.FieldValues = [][]byte{fieldValue} - return indexDataStoreKey, nil + fields := make([]core.IndexedField, len(index.fieldsDescs)) + for i := range index.fieldsDescs { + fields[i].Value = fieldValues[i].Value() + fields[i].Descending = index.desc.Fields[i].Descending + } + return core.NewIndexDataStoreKey(index.collection.ID(), index.desc.ID, fields), nil } -func (i *collectionBaseIndex) deleteIndexKey( +func (index *collectionBaseIndex) deleteIndexKey( ctx context.Context, txn datastore.Txn, key core.IndexDataStoreKey, @@ -158,17 +154,17 @@ func (i *collectionBaseIndex) deleteIndexKey( return err } if !exists { - return NewErrCorruptedIndex(i.desc.Name) + return NewErrCorruptedIndex(index.desc.Name) } return txn.Datastore().Delete(ctx, key.ToDS()) } // RemoveAll removes all artifacts of the index from the storage, i.e. all index // field values for all documents.
-func (i *collectionBaseIndex) RemoveAll(ctx context.Context, txn datastore.Txn) error { +func (index *collectionBaseIndex) RemoveAll(ctx context.Context, txn datastore.Txn) error { prefixKey := core.IndexDataStoreKey{} - prefixKey.CollectionID = i.collection.ID() - prefixKey.IndexID = i.desc.ID + prefixKey.CollectionID = index.collection.ID() + prefixKey.IndexID = index.desc.ID keys, err := datastore.FetchKeysForPrefix(ctx, prefixKey.ToString(), txn.Datastore()) if err != nil { @@ -186,13 +182,13 @@ func (i *collectionBaseIndex) RemoveAll(ctx context.Context, txn datastore.Txn) } // Name returns the name of the index -func (i *collectionBaseIndex) Name() string { - return i.desc.Name +func (index *collectionBaseIndex) Name() string { + return index.desc.Name } // Description returns the description of the index -func (i *collectionBaseIndex) Description() client.IndexDescription { - return i.desc +func (index *collectionBaseIndex) Description() client.IndexDescription { + return index.desc } // collectionSimpleIndex is a non-unique index that indexes documents by a single field. @@ -203,58 +199,76 @@ type collectionSimpleIndex struct { var _ CollectionIndex = (*collectionSimpleIndex)(nil) -func (i *collectionSimpleIndex) getDocumentsIndexKey( +func (index *collectionSimpleIndex) getDocumentsIndexKey( doc *client.Document, ) (core.IndexDataStoreKey, error) { - key, err := i.collectionBaseIndex.getDocumentsIndexKey(doc) + key, err := index.collectionBaseIndex.getDocumentsIndexKey(doc) if err != nil { return core.IndexDataStoreKey{}, err } - key.FieldValues = append(key.FieldValues, []byte(doc.ID().String())) + key.Fields = append(key.Fields, core.IndexedField{Value: doc.ID().String()}) return key, nil }
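Worth noting in the non-unique getDocumentsIndexKey above: the document ID is appended as the final key field, so two documents sharing the same indexed value still produce distinct datastore keys, and the stored value can stay empty. A worked example of the resulting layout, with illustrative values only (collection id 5, index id 1, shortened docIDs):

/5/1/John/bae-aaaa...  ->  (empty value)
/5/1/John/bae-bbbb...  ->  (empty value)

Lookups recover the docID by parsing the last component of the key rather than reading the value.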
// Save indexes a document by storing the indexed field value. -func (i *collectionSimpleIndex) Save( +func (index *collectionSimpleIndex) Save( ctx context.Context, txn datastore.Txn, doc *client.Document, ) error { - key, err := i.getDocumentsIndexKey(doc) + key, err := index.getDocumentsIndexKey(doc) if err != nil { return err } err = txn.Datastore().Put(ctx, key.ToDS(), []byte{}) if err != nil { - return NewErrFailedToStoreIndexedField(key.ToDS().String(), err) + return NewErrFailedToStoreIndexedField(key.ToString(), err) } return nil } -func (i *collectionSimpleIndex) Update( +func (index *collectionSimpleIndex) Update( ctx context.Context, txn datastore.Txn, oldDoc *client.Document, newDoc *client.Document, ) error { - err := i.deleteDocIndex(ctx, txn, oldDoc) + err := index.deleteDocIndex(ctx, txn, oldDoc) if err != nil { return err } - return i.Save(ctx, txn, newDoc) + return index.Save(ctx, txn, newDoc) } -func (i *collectionSimpleIndex) deleteDocIndex( +func (index *collectionSimpleIndex) Delete( ctx context.Context, txn datastore.Txn, doc *client.Document, ) error { - key, err := i.getDocumentsIndexKey(doc) + return index.deleteDocIndex(ctx, txn, doc) +} + +func (index *collectionSimpleIndex) deleteDocIndex( + ctx context.Context, + txn datastore.Txn, + doc *client.Document, +) error { + key, err := index.getDocumentsIndexKey(doc) if err != nil { return err } - return i.deleteIndexKey(ctx, txn, key) + return index.deleteIndexKey(ctx, txn, key) +} + +// hasIndexKeyNilField returns true if the index key has a field with a nil value +func hasIndexKeyNilField(key *core.IndexDataStoreKey) bool { + for i := range key.Fields { + if key.Fields[i].Value == nil { + return true + } + } + return false } type collectionUniqueIndex struct { @@ -263,79 +277,121 @@ type collectionUniqueIndex struct { var _ CollectionIndex = (*collectionUniqueIndex)(nil) -func (i *collectionUniqueIndex) Save( +func (index *collectionUniqueIndex) save( ctx context.Context, txn datastore.Txn, - doc *client.Document, + key *core.IndexDataStoreKey, + val []byte, ) error { - key, err := i.getDocumentsIndexKey(doc) + err := txn.Datastore().Put(ctx, key.ToDS(), val) if err != nil { - return err + return NewErrFailedToStoreIndexedField(key.ToDS().String(), err) } - exists, err := txn.Datastore().Has(ctx, key.ToDS()) + return nil +} + +func (index *collectionUniqueIndex) Save( + ctx context.Context, + txn datastore.Txn, + doc *client.Document, +) error { + key, val, err := index.prepareIndexRecordToStore(ctx, txn, doc) if err != nil { return err } - if exists { - return i.newUniqueIndexError(doc) + return index.save(ctx, txn, &key, val) +} + +func (index *collectionUniqueIndex) newUniqueIndexError( + doc *client.Document, +) error { + kvs := make([]errors.KV, 0, len(index.fieldsDescs)) + for iter := range index.fieldsDescs { + fieldVal, err := doc.TryGetValue(index.fieldsDescs[iter].Name) + var val any + if err != nil { + return err + } + // If fieldVal is nil, we leave `val` as is (e.g. nil) + if fieldVal != nil { + val = fieldVal.Value() + } + kvs = append(kvs, errors.NewKV(index.fieldsDescs[iter].Name, val)) } - err = txn.Datastore().Put(ctx, key.ToDS(), []byte(doc.ID().String())) + + return NewErrCanNotIndexNonUniqueFields(doc.ID().String(), kvs...) +}
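The unique-index record helpers that follow choose between two layouts: when every indexed field has a value, the docID becomes the record's stored value and the key alone must be unique; when any field is nil, the docID is instead appended to the key and the value is left empty, so documents with missing values do not collide. A minimal, self-contained sketch of that decision, where buildUniqueRecord is a hypothetical stand-in for getDocumentsIndexRecord below:

// buildUniqueRecord mirrors the nil-field decision made by
// getDocumentsIndexRecord; a plain []any stands in for the real
// core.IndexedField slice.
func buildUniqueRecord(fieldValues []any, docID string) (keyFields []any, value []byte) {
	for _, v := range fieldValues {
		if v == nil {
			// a nil field cannot guarantee uniqueness, so the docID
			// moves into the key and the value stays empty
			return append(fieldValues, docID), []byte{}
		}
	}
	// all fields present: the key must be globally unique and the
	// docID is stored as the record's value
	return fieldValues, []byte(docID)
}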
+func (index *collectionUniqueIndex) getDocumentsIndexRecord( + doc *client.Document, +) (core.IndexDataStoreKey, []byte, error) { + key, err := index.getDocumentsIndexKey(doc) if err != nil { - return NewErrFailedToStoreIndexedField(key.ToDS().String(), err) + return core.IndexDataStoreKey{}, nil, err + } + if hasIndexKeyNilField(&key) { + key.Fields = append(key.Fields, core.IndexedField{Value: doc.ID().String()}) + return key, []byte{}, nil + } else { + return key, []byte(doc.ID().String()), nil } - return nil } -func (i *collectionUniqueIndex) newUniqueIndexError( +func (index *collectionUniqueIndex) prepareIndexRecordToStore( + ctx context.Context, + txn datastore.Txn, doc *client.Document, -) error { - fieldVal, err := doc.GetValue(i.fieldDesc.Name) - var val any +) (core.IndexDataStoreKey, []byte, error) { + key, val, err := index.getDocumentsIndexRecord(doc) if err != nil { - // If the error is ErrFieldNotExist, we leave `val` as is (e.g. nil) - // otherwise we return the error - if !errors.Is(err, client.ErrFieldNotExist) { - return err + return core.IndexDataStoreKey{}, nil, err + } + if len(val) != 0 { + var exists bool + exists, err = txn.Datastore().Has(ctx, key.ToDS()) + if err != nil { + return core.IndexDataStoreKey{}, nil, err + } + if exists { + return core.IndexDataStoreKey{}, nil, index.newUniqueIndexError(doc) } - } else { - val = fieldVal.Value() } + return key, val, nil +} - return NewErrCanNotIndexNonUniqueField(doc.ID().String(), i.fieldDesc.Name, val) +func (index *collectionUniqueIndex) Delete( + ctx context.Context, + txn datastore.Txn, + doc *client.Document, +) error { + return index.deleteDocIndex(ctx, txn, doc) } -func (i *collectionUniqueIndex) Update( +func (index *collectionUniqueIndex) Update( ctx context.Context, txn datastore.Txn, oldDoc *client.Document, newDoc *client.Document, ) error { - newKey, err := i.getDocumentsIndexKey(newDoc) + newKey, newVal, err := index.prepareIndexRecordToStore(ctx, txn, newDoc) if err != nil { return err } - exists, err := txn.Datastore().Has(ctx, newKey.ToDS()) - if err != nil { - return err - } - if exists { - return i.newUniqueIndexError(newDoc) - } - err = i.deleteDocIndex(ctx, txn, oldDoc) + err = index.deleteDocIndex(ctx, txn, oldDoc) if err != nil { return err } - return i.Save(ctx, txn, newDoc) + return index.save(ctx, txn, &newKey, newVal) } -func (i *collectionUniqueIndex) deleteDocIndex( +func (index *collectionUniqueIndex) deleteDocIndex( ctx context.Context, txn datastore.Txn, doc *client.Document, ) error { - key, err := i.getDocumentsIndexKey(doc) + key, _, err := index.getDocumentsIndexRecord(doc) if err != nil { return err } - return i.deleteIndexKey(ctx, txn, key) + return index.deleteIndexKey(ctx, txn, key) } diff --git a/db/index_test.go b/db/index_test.go index 911228e649..44c2e45f52 100644 --- a/db/index_test.go +++ b/db/index_test.go @@ -20,6 +20,7 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" + "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -59,6 +60,10 @@ type indexTestFixture struct { } func (f *indexTestFixture) addUsersCollection() client.Collection { + if f.users != nil { + return f.users + } + _, err := f.db.AddSchema( f.ctx, fmt.Sprintf( @@ -136,14 +141,14 @@ func newIndexTestFixture(t *testing.T) *indexTestFixture { func (f *indexTestFixture) createCollectionIndex( desc client.IndexDescription, ) (client.IndexDescription, error) { - return
f.createCollectionIndexFor(f.users.Name(), desc) + return f.createCollectionIndexFor(f.users.Name().Value(), desc) } func getUsersIndexDescOnName() client.IndexDescription { return client.IndexDescription{ Name: testUsersColIndexName, Fields: []client.IndexedFieldDescription{ - {Name: usersNameFieldName, Direction: client.Ascending}, + {Name: usersNameFieldName}, }, } } @@ -152,7 +157,7 @@ func getUsersIndexDescOnAge() client.IndexDescription { return client.IndexDescription{ Name: testUsersColIndexAge, Fields: []client.IndexedFieldDescription{ - {Name: usersAgeFieldName, Direction: client.Ascending}, + {Name: usersAgeFieldName}, }, } } @@ -161,7 +166,7 @@ func getUsersIndexDescOnWeight() client.IndexDescription { return client.IndexDescription{ Name: testUsersColIndexWeight, Fields: []client.IndexedFieldDescription{ - {Name: usersWeightFieldName, Direction: client.Ascending}, + {Name: usersWeightFieldName}, }, } } @@ -170,13 +175,13 @@ func getProductsIndexDescOnCategory() client.IndexDescription { return client.IndexDescription{ Name: testUsersColIndexAge, Fields: []client.IndexedFieldDescription{ - {Name: productsCategoryFieldName, Direction: client.Ascending}, + {Name: productsCategoryFieldName}, }, } } func (f *indexTestFixture) createUserCollectionIndexOnName() client.IndexDescription { - newDesc, err := f.createCollectionIndexFor(f.users.Name(), getUsersIndexDescOnName()) + newDesc, err := f.createCollectionIndexFor(f.users.Name().Value(), getUsersIndexDescOnName()) require.NoError(f.t, err) return newDesc } @@ -188,13 +193,27 @@ func makeUnique(indexDesc client.IndexDescription) client.IndexDescription { func (f *indexTestFixture) createUserCollectionUniqueIndexOnName() client.IndexDescription { indexDesc := makeUnique(getUsersIndexDescOnName()) - newDesc, err := f.createCollectionIndexFor(f.users.Name(), indexDesc) + newDesc, err := f.createCollectionIndexFor(f.users.Name().Value(), indexDesc) + require.NoError(f.t, err) + return newDesc +} + +func addFieldToIndex(indexDesc client.IndexDescription, fieldName string) client.IndexDescription { + indexDesc.Fields = append(indexDesc.Fields, client.IndexedFieldDescription{ + Name: fieldName, + }) + return indexDesc +} + +func (f *indexTestFixture) createUserCollectionIndexOnNameAndAge() client.IndexDescription { + indexDesc := addFieldToIndex(getUsersIndexDescOnName(), usersAgeFieldName) + newDesc, err := f.createCollectionIndexFor(f.users.Name().Value(), indexDesc) require.NoError(f.t, err) return newDesc } func (f *indexTestFixture) createUserCollectionIndexOnAge() client.IndexDescription { - newDesc, err := f.createCollectionIndexFor(f.users.Name(), getUsersIndexDescOnAge()) + newDesc, err := f.createCollectionIndexFor(f.users.Name().Value(), getUsersIndexDescOnAge()) require.NoError(f.t, err) return newDesc } @@ -203,8 +222,8 @@ func (f *indexTestFixture) dropIndex(colName, indexName string) error { return f.db.dropCollectionIndex(f.ctx, f.txn, colName, indexName) } -func (f *indexTestFixture) countIndexPrefixes(colName, indexName string) int { - prefix := core.NewCollectionIndexKey(usersColName, indexName) +func (f *indexTestFixture) countIndexPrefixes(indexName string) int { + prefix := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), indexName) q, err := f.txn.Systemstore().Query(f.ctx, query.Query{ Prefix: prefix.ToString(), }) @@ -244,11 +263,11 @@ func (f *indexTestFixture) createCollectionIndexFor( } func (f *indexTestFixture) getAllIndexes() (map[client.CollectionName][]client.IndexDescription, error) { - return 
f.db.getAllIndexes(f.ctx, f.txn) + return f.db.getAllIndexDescriptions(f.ctx, f.txn) } -func (f *indexTestFixture) getCollectionIndexes(colName string) ([]client.IndexDescription, error) { - return f.db.fetchCollectionIndexDescriptions(f.ctx, f.txn, colName) +func (f *indexTestFixture) getCollectionIndexes(colID uint32) ([]client.IndexDescription, error) { + return f.db.fetchCollectionIndexDescriptions(f.ctx, f.txn, colID) } func TestCreateIndex_IfFieldsIsEmpty_ReturnError(t *testing.T) { @@ -270,7 +289,7 @@ func TestCreateIndex_IfIndexDescriptionIDIsNotZero_ReturnError(t *testing.T) { Name: "some_index_name", ID: id, Fields: []client.IndexedFieldDescription{ - {Name: usersNameFieldName, Direction: client.Ascending}, + {Name: usersNameFieldName}, }, } _, err := f.createCollectionIndex(desc) @@ -285,7 +304,7 @@ func TestCreateIndex_IfValidInput_CreateIndex(t *testing.T) { desc := client.IndexDescription{ Name: "some_index_name", Fields: []client.IndexedFieldDescription{ - {Name: usersNameFieldName, Direction: client.Ascending}, + {Name: usersNameFieldName}, }, } resultDesc, err := f.createCollectionIndex(desc) @@ -302,7 +321,7 @@ func TestCreateIndex_IfFieldNameIsEmpty_ReturnError(t *testing.T) { desc := client.IndexDescription{ Name: "some_index_name", Fields: []client.IndexedFieldDescription{ - {Name: "", Direction: client.Ascending}, + {Name: ""}, }, } _, err := f.createCollectionIndex(desc) @@ -319,20 +338,7 @@ func TestCreateIndex_IfFieldHasNoDirection_DefaultToAsc(t *testing.T) { } newDesc, err := f.createCollectionIndex(desc) assert.NoError(t, err) - assert.Equal(t, client.Ascending, newDesc.Fields[0].Direction) -} - -func TestCreateIndex_IfSingleFieldInDescOrder_ReturnError(t *testing.T) { - f := newIndexTestFixture(t) - defer f.db.Close() - - desc := client.IndexDescription{ - Fields: []client.IndexedFieldDescription{ - {Name: usersNameFieldName, Direction: client.Descending}, - }, - } - _, err := f.createCollectionIndex(desc) - assert.EqualError(t, err, errIndexSingleFieldWrongDirection) + assert.False(t, newDesc.Fields[0].Descending) } func TestCreateIndex_IfIndexWithNameAlreadyExists_ReturnError(t *testing.T) { @@ -392,7 +398,7 @@ func TestCreateIndex_ShouldSaveToSystemStorage(t *testing.T) { _, err := f.createCollectionIndex(desc) assert.NoError(t, err) - key := core.NewCollectionIndexKey(f.users.Name(), name) + key := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), name) data, err := f.txn.Systemstore().Get(f.ctx, key.ToDS()) assert.NoError(t, err) var deserialized client.IndexDescription @@ -435,16 +441,16 @@ func TestCreateIndex_WithMultipleCollectionsAndIndexes_AssignIncrementedIDPerCol makeIndex := func(fieldName string) client.IndexDescription { return client.IndexDescription{ Fields: []client.IndexedFieldDescription{ - {Name: fieldName, Direction: client.Ascending}, + {Name: fieldName}, }, } } createIndexAndAssert := func(col client.Collection, fieldName string, expectedID uint32) { - desc, err := f.createCollectionIndexFor(col.Name(), makeIndex(fieldName)) + desc, err := f.createCollectionIndexFor(col.Name().Value(), makeIndex(fieldName)) require.NoError(t, err) assert.Equal(t, expectedID, desc.ID) - seqKey := core.NewSequenceKey(fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, col.ID())) + seqKey := core.NewIndexIDSequenceKey(col.ID()) storedSeqKey, err := f.txn.Systemstore().Get(f.ctx, seqKey.ToDS()) assert.NoError(t, err) storedSeqVal := binary.BigEndian.Uint64(storedSeqKey) @@ -517,14 +523,14 @@ func 
TestCreateIndex_IfAttemptToIndexOnUnsupportedType_ReturnError(t *testing.T) indexDesc := client.IndexDescription{ Fields: []client.IndexedFieldDescription{ - {Name: "field", Direction: client.Ascending}, + {Name: "field"}, }, } f.txn, err = f.db.NewTxn(f.ctx, false) require.NoError(f.t, err) - _, err = f.createCollectionIndexFor(collection.Name(), indexDesc) + _, err = f.createCollectionIndexFor(collection.Name().Value(), indexDesc) require.ErrorIs(f.t, err, NewErrUnsupportedIndexFieldType(unsupportedKind)) } @@ -562,7 +568,7 @@ func TestGetIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { f := newIndexTestFixture(t) defer f.db.Close() - indexKey := core.NewCollectionIndexKey(usersColName, "users_name_index") + indexKey := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), "users_name_index") err := f.txn.Systemstore().Put(f.ctx, indexKey.ToDS(), []byte("invalid")) assert.NoError(t, err) @@ -574,12 +580,12 @@ func TestGetIndexes_IfInvalidIndexKeyIsStored_ReturnError(t *testing.T) { f := newIndexTestFixture(t) defer f.db.Close() - indexKey := core.NewCollectionIndexKey(usersColName, "users_name_index") + indexKey := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), "users_name_index") key := ds.NewKey(indexKey.ToString() + "/invalid") desc := client.IndexDescription{ Name: "some_index_name", Fields: []client.IndexedFieldDescription{ - {Name: usersNameFieldName, Direction: client.Ascending}, + {Name: usersNameFieldName}, }, } descData, _ := json.Marshal(desc) @@ -663,7 +669,7 @@ func TestGetCollectionIndexes_ShouldReturnListOfCollectionIndexes(t *testing.T) _, err := f.createCollectionIndexFor(usersColName, usersIndexDesc) assert.NoError(t, err) - f.getProductsCollectionDesc() + products := f.getProductsCollectionDesc() productsIndexDesc := client.IndexDescription{ Name: "products_description_index", Fields: []client.IndexedFieldDescription{{Name: productsPriceFieldName}}, @@ -675,13 +681,13 @@ func TestGetCollectionIndexes_ShouldReturnListOfCollectionIndexes(t *testing.T) _, err = f.createCollectionIndexFor(productsColName, productsIndexDesc) assert.NoError(t, err) - userIndexes, err := f.getCollectionIndexes(usersColName) + userIndexes, err := f.getCollectionIndexes(f.users.ID()) assert.NoError(t, err) require.Equal(t, 1, len(userIndexes)) usersIndexDesc.ID = 1 assert.Equal(t, usersIndexDesc, userIndexes[0]) - productIndexes, err := f.getCollectionIndexes(productsColName) + productIndexes, err := f.getCollectionIndexes(products.ID()) assert.NoError(t, err) require.Equal(t, 1, len(productIndexes)) productsIndexDesc.ID = 1 @@ -700,7 +706,7 @@ func TestGetCollectionIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { mockedTxn.EXPECT().Systemstore().Unset() mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) - _, err := f.getCollectionIndexes(usersColName) + _, err := f.getCollectionIndexes(f.users.ID()) assert.ErrorIs(t, err, testErr) } @@ -716,7 +722,7 @@ func TestGetCollectionIndexes_IfSystemStoreFails_ShouldCloseIterator(t *testing. 
mockedTxn.EXPECT().Systemstore().Unset() mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) - _, _ = f.getCollectionIndexes(usersColName) + _, _ = f.getCollectionIndexes(f.users.ID()) } func TestGetCollectionIndexes_IfSystemStoreQueryIteratorFails_ReturnError(t *testing.T) { @@ -732,7 +738,7 @@ func TestGetCollectionIndexes_IfSystemStoreQueryIteratorFails_ReturnError(t *tes mockedTxn.EXPECT().Systemstore().Unset() mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) - _, err := f.getCollectionIndexes(usersColName) + _, err := f.getCollectionIndexes(f.users.ID()) assert.ErrorIs(t, err, testErr) } @@ -740,11 +746,11 @@ func TestGetCollectionIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { f := newIndexTestFixture(t) defer f.db.Close() - indexKey := core.NewCollectionIndexKey(usersColName, "users_name_index") + indexKey := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), "users_name_index") err := f.txn.Systemstore().Put(f.ctx, indexKey.ToDS(), []byte("invalid")) assert.NoError(t, err) - _, err = f.getCollectionIndexes(usersColName) + _, err = f.getCollectionIndexes(f.users.ID()) assert.ErrorIs(t, err, datastore.NewErrInvalidStoredValue(nil)) } @@ -866,7 +872,6 @@ func TestCollectionGetIndexes_IfFailsToCreateTxn_ShouldNotCache(t *testing.T) { func TestCollectionGetIndexes_IfStoredIndexWithUnsupportedType_ReturnError(t *testing.T) { f := newIndexTestFixtureBare(t) - f.addUsersCollection() const unsupportedKind = client.FieldKind_BOOL_ARRAY _, err := f.db.AddSchema( @@ -886,7 +891,7 @@ func TestCollectionGetIndexes_IfStoredIndexWithUnsupportedType_ReturnError(t *te indexDesc := client.IndexDescription{ Fields: []client.IndexedFieldDescription{ - {Name: "field", Direction: client.Ascending}, + {Name: "field"}, }, } indexDescData, err := json.Marshal(indexDesc) @@ -1007,11 +1012,11 @@ func TestCollectionGetIndexes_ShouldReturnIndexesInOrderedByName(t *testing.T) { indexDesc := client.IndexDescription{ Name: indexNamePrefix + iStr, Fields: []client.IndexedFieldDescription{ - {Name: fieldNamePrefix + iStr, Direction: client.Ascending}, + {Name: fieldNamePrefix + iStr}, }, } - _, err := f.createCollectionIndexFor(collection.Name(), indexDesc) + _, err := f.createCollectionIndexFor(collection.Name().Value(), indexDesc) require.NoError(t, err) } @@ -1032,7 +1037,7 @@ func TestDropIndex_ShouldDeleteIndex(t *testing.T) { err := f.dropIndex(usersColName, desc.Name) assert.NoError(t, err) - indexKey := core.NewCollectionIndexKey(usersColName, desc.Name) + indexKey := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), desc.Name) _, err = f.txn.Systemstore().Get(f.ctx, indexKey.ToDS()) assert.Error(t, err) } @@ -1148,24 +1153,24 @@ func TestDropAllIndexes_ShouldDeleteAllIndexes(t *testing.T) { defer f.db.Close() _, err := f.createCollectionIndexFor(usersColName, client.IndexDescription{ Fields: []client.IndexedFieldDescription{ - {Name: usersNameFieldName, Direction: client.Ascending}, + {Name: usersNameFieldName}, }, }) assert.NoError(f.t, err) _, err = f.createCollectionIndexFor(usersColName, client.IndexDescription{ Fields: []client.IndexedFieldDescription{ - {Name: usersAgeFieldName, Direction: client.Ascending}, + {Name: usersAgeFieldName}, }, }) assert.NoError(f.t, err) - assert.Equal(t, 2, f.countIndexPrefixes(usersColName, "")) + assert.Equal(t, 2, f.countIndexPrefixes("")) err = f.users.(*collection).dropAllIndexes(f.ctx, f.txn) assert.NoError(t, err) - assert.Equal(t, 0, f.countIndexPrefixes(usersColName, "")) + assert.Equal(t, 0, 
f.countIndexPrefixes("")) } func TestDropAllIndexes_IfStorageFails_ReturnError(t *testing.T) { @@ -1269,5 +1274,5 @@ func TestNewCollectionIndex_IfDescriptionHasNonExistingField_ReturnError(t *test desc := getUsersIndexDescOnName() desc.Fields[0].Name = "non_existing_field" _, err := NewCollectionIndex(f.users, desc) - require.ErrorIs(t, err, NewErrIndexDescHasNonExistingField(desc, desc.Fields[0].Name)) + require.ErrorIs(t, err, client.NewErrFieldNotExist(desc.Fields[0].Name)) } diff --git a/db/indexed_docs_test.go b/db/indexed_docs_test.go index b7c7abbf9d..d10ad8eb5b 100644 --- a/db/indexed_docs_test.go +++ b/db/indexed_docs_test.go @@ -14,11 +14,11 @@ import ( "context" "encoding/json" "errors" - "fmt" "testing" ipfsDatastore "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" + "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -77,12 +77,12 @@ func (f *indexTestFixture) newProdDoc(id int, price float64, cat string, col cli // The format of the non-unique index key is: "/<collection_id>/<index_id>/<field_value>/<doc_id>" // Example: "/5/1/12/bae-61cd6879-63ca-5ca9-8731-470a3c1dac69" type indexKeyBuilder struct { - f *indexTestFixture - colName string - fieldName string - doc *client.Document - values [][]byte - isUnique bool + f *indexTestFixture + colName string + fieldsNames []string + descendingFields []bool + doc *client.Document + isUnique bool } func newIndexKeyBuilder(f *indexTestFixture) *indexKeyBuilder { @@ -94,11 +94,17 @@ func (b *indexKeyBuilder) Col(colName string) *indexKeyBuilder { return b } -// Field sets the field name for the index key. +// Fields sets the field names for the index key. // If the field name is not set, the index key will contain only collection id. // When building a key, it will find the field id to use in the key. -func (b *indexKeyBuilder) Field(fieldName string) *indexKeyBuilder { - b.fieldName = fieldName +func (b *indexKeyBuilder) Fields(fieldsNames ...string) *indexKeyBuilder { + b.fieldsNames = fieldsNames + return b +} + +// DescendingFields sets which fields of the index key are sorted in descending order. +func (b *indexKeyBuilder) DescendingFields(descending ...bool) *indexKeyBuilder { + b.descendingFields = descending return b } @@ -111,13 +117,6 @@ func (b *indexKeyBuilder) Doc(doc *client.Document) *indexKeyBuilder { return b }
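With the variadic Fields builder above in place, the tests that follow assemble composite-index keys in a single chain. A usage sketch, with fixture names taken from the tests and the output form assumed for illustration:

key := newIndexKeyBuilder(f).
	Col(usersColName).
	Fields(usersNameFieldName, usersAgeFieldName).
	Doc(doc).
	Build()
// for a non-unique index this yields a key of the form
// "/<collection_id>/<index_id>/<name_value>/<age_value>/<doc_id>"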
-// Values sets the values for the index key. -// It will override the field values stored in the document. -func (b *indexKeyBuilder) Values(values ...[]byte) *indexKeyBuilder { - b.values = values - return b -} - func (b *indexKeyBuilder) Unique() *indexKeyBuilder { b.isUnique = true return b @@ -130,11 +129,11 @@ func (b *indexKeyBuilder) Build() core.IndexDataStoreKey { return key } - cols, err := b.f.db.getAllCollections(b.f.ctx, b.f.txn) + cols, err := b.f.db.getCollections(b.f.ctx, b.f.txn, client.CollectionFetchOptions{}) require.NoError(b.f.t, err) var collection client.Collection for _, col := range cols { - if col.Name() == b.colName { + if col.Name().Value() == b.colName { collection = col break } @@ -144,38 +143,50 @@ func (b *indexKeyBuilder) Build() core.IndexDataStoreKey { } key.CollectionID = collection.ID() - if b.fieldName == "" { + if len(b.fieldsNames) == 0 { return key } indexes, err := collection.GetIndexes(b.f.ctx) require.NoError(b.f.t, err) +indexLoop: for _, index := range indexes { - if index.Fields[0].Name == b.fieldName { + if len(index.Fields) == len(b.fieldsNames) { + for i := range index.Fields { + if index.Fields[i].Name != b.fieldsNames[i] { + continue indexLoop + } + } key.IndexID = index.ID - break + break indexLoop } } if b.doc != nil { - var fieldBytesVal []byte - var fieldValue *client.FieldValue - var err error - if len(b.values) == 0 { - fieldValue, err = b.doc.GetValue(b.fieldName) - require.NoError(b.f.t, err) - } else { - fieldValue = client.NewFieldValue(client.LWW_REGISTER, b.values[0]) + hasNilValue := false + for i, fieldName := range b.fieldsNames { + fieldValue, err := b.doc.GetValue(fieldName) + var val any + if err != nil { + if !errors.Is(err, client.ErrFieldNotExist) { + require.NoError(b.f.t, err) + } + } else if fieldValue != nil { + val = fieldValue.Value() + } + if val == nil { + hasNilValue = true + } + descending := false + if i < len(b.descendingFields) { + descending = b.descendingFields[i] + } + key.Fields = append(key.Fields, core.IndexedField{Value: val, Descending: descending}) } - fieldBytesVal, err = fieldValue.Bytes() - require.NoError(b.f.t, err) - key.FieldValues = [][]byte{fieldBytesVal} - if !b.isUnique { - key.FieldValues = append(key.FieldValues, []byte(b.doc.ID().String())) + if !b.isUnique || hasNilValue { + key.Fields = append(key.Fields, core.IndexedField{Value: b.doc.ID().String()}) } - } else if len(b.values) > 0 { - key.FieldValues = b.values } return key @@ -211,12 +222,15 @@ func (*indexTestFixture) resetSystemStoreStubs(systemStoreOn *mocks.DSReaderWrit } func (f *indexTestFixture) stubSystemStore(systemStoreOn *mocks.DSReaderWriter_Expecter) { + if f.users == nil { + f.users = f.addUsersCollection() + } desc := getUsersIndexDescOnName() desc.ID = 1 indexOnNameDescData, err := json.Marshal(desc) require.NoError(f.t, err) - colIndexKey := core.NewCollectionIndexKey(usersColName, "") + colIndexKey := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), "") matchPrefixFunc := func(q query.Query) bool { return q.Prefix == colIndexKey.ToDS().String() } @@ -230,11 +244,11 @@ func (f *indexTestFixture) stubSystemStore(systemStoreOn *mocks.DSReaderWriter_E systemStoreOn.Query(mock.Anything, mock.Anything).Maybe().
Return(mocks.NewQueryResultsWithValues(f.t), nil) - colIndexOnNameKey := core.NewCollectionIndexKey(usersColName, testUsersColIndexName) + colIndexOnNameKey := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), testUsersColIndexName) systemStoreOn.Get(mock.Anything, colIndexOnNameKey.ToDS()).Maybe().Return(indexOnNameDescData, nil) if f.users != nil { - sequenceKey := core.NewSequenceKey(fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, f.users.ID())) + sequenceKey := core.NewIndexIDSequenceKey(f.users.ID()) systemStoreOn.Get(mock.Anything, sequenceKey.ToDS()).Maybe().Return([]byte{0, 0, 0, 0, 0, 0, 0, 1}, nil) } @@ -255,7 +269,26 @@ func TestNonUnique_IfDocIsAdded_ShouldBeIndexed(t *testing.T) { doc := f.newUserDoc("John", 21, f.users) f.saveDocToCollection(doc, f.users) - key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + key := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc).Build() + + data, err := f.txn.Datastore().Get(f.ctx, key.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUnique_IfDocWithDescendingOrderIsAdded_ShouldBeIndexed(t *testing.T) { + f := newIndexTestFixture(t) + defer f.db.Close() + + indexDesc := getUsersIndexDescOnName() + indexDesc.Fields[0].Descending = true + _, err := f.createCollectionIndexFor(f.users.Name().Value(), indexDesc) + require.NoError(f.t, err) + + doc := f.newUserDoc("John", 21, f.users) + f.saveDocToCollection(doc, f.users) + + key := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).DescendingFields(true).Doc(doc).Build() data, err := f.txn.Datastore().Get(f.ctx, key.ToDS()) require.NoError(t, err) @@ -268,7 +301,7 @@ func TestNonUnique_IfFailsToStoredIndexedDoc_Error(t *testing.T) { f.createUserCollectionIndexOnName() doc := f.newUserDoc("John", 21, f.users) - key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + key := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc).Build() mockTxn := f.mockTxn() @@ -345,7 +378,7 @@ func TestNonUnique_IfIndexIntField_StoreIt(t *testing.T) { doc := f.newUserDoc("John", 21, f.users) f.saveDocToCollection(doc, f.users) - key := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc).Build() + key := newIndexKeyBuilder(f).Col(usersColName).Fields(usersAgeFieldName).Doc(doc).Build() data, err := f.txn.Datastore().Get(f.ctx, key.ToDS()) require.NoError(t, err) @@ -357,9 +390,9 @@ func TestNonUnique_IfMultipleCollectionsWithIndexes_StoreIndexWithCollectionID(t users := f.addUsersCollection() products := f.getProductsCollectionDesc() - _, err := f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnName()) + _, err := f.createCollectionIndexFor(users.Name().Value(), getUsersIndexDescOnName()) require.NoError(f.t, err) - _, err = f.createCollectionIndexFor(products.Name(), getProductsIndexDescOnCategory()) + _, err = f.createCollectionIndexFor(products.Name().Value(), getProductsIndexDescOnCategory()) require.NoError(f.t, err) f.commitTxn() @@ -372,8 +405,8 @@ func TestNonUnique_IfMultipleCollectionsWithIndexes_StoreIndexWithCollectionID(t require.NoError(f.t, err) f.commitTxn() - userDocID := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(userDoc).Build() - prodDocID := newIndexKeyBuilder(f).Col(productsColName).Field(productsCategoryFieldName).Doc(prodDoc).Build() + userDocID := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(userDoc).Build() + prodDocID := 
newIndexKeyBuilder(f).Col(productsColName).Fields(productsCategoryFieldName).Doc(prodDoc).Build() data, err := f.txn.Datastore().Get(f.ctx, userDocID.ToDS()) require.NoError(t, err) @@ -392,8 +425,8 @@ func TestNonUnique_IfMultipleIndexes_StoreIndexWithIndexID(t *testing.T) { doc := f.newUserDoc("John", 21, f.users) f.saveDocToCollection(doc, f.users) - nameKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() - ageKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc).Build() + nameKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc).Build() + ageKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersAgeFieldName).Doc(doc).Build() data, err := f.txn.Datastore().Get(f.ctx, nameKey.ToDS()) require.NoError(t, err) @@ -505,8 +538,7 @@ func TestNonUnique_IfIndexedFieldIsNil_StoreItAsNil(t *testing.T) { f.saveDocToCollection(doc, f.users) - key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc). - Values([]byte(nil)).Build() + key := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc).Build() data, err := f.txn.Datastore().Get(f.ctx, key.ToDS()) require.NoError(t, err) @@ -524,8 +556,8 @@ func TestNonUniqueCreate_ShouldIndexExistingDocs(t *testing.T) { f.createUserCollectionIndexOnName() - key1 := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc1).Build() - key2 := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc2).Build() + key1 := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc1).Build() + key2 := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc2).Build() data, err := f.txn.Datastore().Get(f.ctx, key1.ToDS()) require.NoError(t, err, key1.ToString()) @@ -596,7 +628,7 @@ func TestNonUniqueCreate_IfUponIndexingExistingDocsFetcherFails_ReturnError(t *t f.saveDocToCollection(doc, f.users) f.users.(*collection).fetcherFactory = tc.PrepareFetcher - key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + key := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc).Build() _, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) require.ErrorIs(t, err, testError, tc.Name) @@ -614,7 +646,7 @@ func TestNonUniqueCreate_IfDatastoreFailsToStoreIndex_ReturnError(t *testing.T) f.saveDocToCollection(doc, f.users) fieldKeyString := core.DataStoreKey{ - CollectionID: f.users.Description().IDString(), + CollectionRootID: f.users.Description().RootID, }.WithDocID(doc.ID().String()). WithFieldId("1"). WithValueFlag(). 
@@ -633,11 +665,11 @@ func TestNonUniqueCreate_IfDatastoreFailsToStoreIndex_ReturnError(t *testing.T) func TestNonUniqueDrop_ShouldDeleteStoredIndexedFields(t *testing.T) { f := newIndexTestFixtureBare(t) users := f.addUsersCollection() - _, err := f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnName()) + _, err := f.createCollectionIndexFor(users.Name().Value(), getUsersIndexDescOnName()) require.NoError(f.t, err) - _, err = f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnAge()) + _, err = f.createCollectionIndexFor(users.Name().Value(), getUsersIndexDescOnAge()) require.NoError(f.t, err) - _, err = f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnWeight()) + _, err = f.createCollectionIndexFor(users.Name().Value(), getUsersIndexDescOnWeight()) require.NoError(f.t, err) f.commitTxn() @@ -645,16 +677,16 @@ func TestNonUniqueDrop_ShouldDeleteStoredIndexedFields(t *testing.T) { f.saveDocToCollection(f.newUserDoc("Islam", 23, users), users) products := f.getProductsCollectionDesc() - _, err = f.createCollectionIndexFor(products.Name(), getProductsIndexDescOnCategory()) + _, err = f.createCollectionIndexFor(products.Name().Value(), getProductsIndexDescOnCategory()) require.NoError(f.t, err) f.commitTxn() f.saveDocToCollection(f.newProdDoc(1, 55, "games", products), products) - userNameKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Build() - userAgeKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Build() - userWeightKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersWeightFieldName).Build() - prodCatKey := newIndexKeyBuilder(f).Col(productsColName).Field(productsCategoryFieldName).Build() + userNameKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Build() + userAgeKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersAgeFieldName).Build() + userWeightKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersWeightFieldName).Build() + prodCatKey := newIndexKeyBuilder(f).Col(productsColName).Fields(productsCategoryFieldName).Build() err = f.dropIndex(usersColName, testUsersColIndexAge) require.NoError(f.t, err) @@ -695,7 +727,7 @@ func TestNonUniqueUpdate_ShouldDeleteOldValueAndStoreNewOne(t *testing.T) { f.saveDocToCollection(doc, f.users) for _, tc := range cases { - oldKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + oldKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc).Build() err := doc.Set(usersNameFieldName, tc.NewValue) require.NoError(t, err) @@ -703,7 +735,7 @@ func TestNonUniqueUpdate_ShouldDeleteOldValueAndStoreNewOne(t *testing.T) { require.NoError(t, err) f.commitTxn() - newKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + newKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc).Build() _, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS()) require.Error(t, err) @@ -810,14 +842,14 @@ func TestNonUniqueUpdate_IfFetcherFails_ReturnError(t *testing.T) { f.saveDocToCollection(doc, f.users) f.users.(*collection).fetcherFactory = tc.PrepareFetcher - oldKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + oldKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc).Build() err := doc.Set(usersNameFieldName, "Islam") require.NoError(t, err, tc.Name) err = f.users.Update(f.ctx, doc) require.Error(t, err, tc.Name) - newKey := 
newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + newKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc).Build() _, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS()) require.NoError(t, err, tc.Name) @@ -835,7 +867,7 @@ func TestNonUniqueUpdate_IfFailsToUpdateIndex_ReturnError(t *testing.T) { f.saveDocToCollection(doc, f.users) f.commitTxn() - validKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc).Build() + validKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersAgeFieldName).Doc(doc).Build() err := f.txn.Datastore().Delete(f.ctx, validKey.ToDS()) require.NoError(f.t, err) f.commitTxn() @@ -860,7 +892,7 @@ func TestNonUniqueUpdate_ShouldPassToFetcherOnlyRelevantFields(t *testing.T) { ctx context.Context, txn datastore.Txn, col client.Collection, - fields []client.FieldDescription, + fields []client.FieldDefinition, filter *mapper.Filter, mapping *core.DocumentMapping, reverse, showDeleted bool, @@ -956,8 +988,7 @@ func TestNonUpdate_IfIndexedFieldWasNil_ShouldDeleteIt(t *testing.T) { f.saveDocToCollection(doc, f.users) - oldKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc). - Values([]byte(nil)).Build() + oldKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc).Build() err = doc.Set(usersNameFieldName, "John") require.NoError(f.t, err) @@ -966,7 +997,7 @@ func TestNonUpdate_IfIndexedFieldWasNil_ShouldDeleteIt(t *testing.T) { require.NoError(f.t, err) f.commitTxn() - newKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + newKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc).Build() _, err = f.txn.Datastore().Get(f.ctx, newKey.ToDS()) require.NoError(t, err) @@ -978,7 +1009,7 @@ type shimEncodedDocument struct { key []byte schemaVersionID string status client.DocumentStatus - properties map[client.FieldDescription]any + properties map[client.FieldDefinition]any } var _ fetcher.EncodedDocument = (*shimEncodedDocument)(nil) @@ -995,7 +1026,7 @@ func (encdoc *shimEncodedDocument) Status() client.DocumentStatus { return encdoc.status } -func (encdoc *shimEncodedDocument) Properties(onlyFilterProps bool) (map[client.FieldDescription]any, error) { +func (encdoc *shimEncodedDocument) Properties(onlyFilterProps bool) (map[client.FieldDefinition]any, error) { return encdoc.properties, nil } @@ -1003,7 +1034,7 @@ func (encdoc *shimEncodedDocument) Reset() { encdoc.key = nil encdoc.schemaVersionID = "" encdoc.status = 0 - encdoc.properties = map[client.FieldDescription]any{} + encdoc.properties = map[client.FieldDefinition]any{} } func TestUniqueCreate_ShouldIndexExistingDocs(t *testing.T) { @@ -1017,8 +1048,8 @@ func TestUniqueCreate_ShouldIndexExistingDocs(t *testing.T) { f.createUserCollectionUniqueIndexOnName() - key1 := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Unique().Doc(doc1).Build() - key2 := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Unique().Doc(doc2).Build() + key1 := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Unique().Doc(doc1).Build() + key2 := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Unique().Doc(doc2).Build() data, err := f.txn.Datastore().Get(f.ctx, key1.ToDS()) require.NoError(t, err, key1.ToString()) @@ -1043,28 +1074,27 @@ func TestUnique_IfIndexedFieldIsNil_StoreItAsNil(t *testing.T) { f.saveDocToCollection(doc, f.users) - key := 
newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Unique().Doc(doc). - Values([]byte(nil)).Build() + key := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Unique().Doc(doc).Build() data, err := f.txn.Datastore().Get(f.ctx, key.ToDS()) require.NoError(t, err) - assert.Equal(t, data, []byte(doc.ID().String())) + assert.Len(t, data, 0) } func TestUniqueDrop_ShouldDeleteStoredIndexedFields(t *testing.T) { f := newIndexTestFixtureBare(t) users := f.addUsersCollection() - _, err := f.createCollectionIndexFor(users.Name(), makeUnique(getUsersIndexDescOnName())) + _, err := f.createCollectionIndexFor(users.Name().Value(), makeUnique(getUsersIndexDescOnName())) require.NoError(f.t, err) - _, err = f.createCollectionIndexFor(users.Name(), makeUnique(getUsersIndexDescOnAge())) + _, err = f.createCollectionIndexFor(users.Name().Value(), makeUnique(getUsersIndexDescOnAge())) require.NoError(f.t, err) f.commitTxn() f.saveDocToCollection(f.newUserDoc("John", 21, users), users) f.saveDocToCollection(f.newUserDoc("Islam", 23, users), users) - userNameKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Build() - userAgeKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Build() + userNameKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Build() + userAgeKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersAgeFieldName).Build() err = f.dropIndex(usersColName, testUsersColIndexAge) require.NoError(f.t, err) @@ -1103,7 +1133,7 @@ func TestUniqueUpdate_ShouldDeleteOldValueAndStoreNewOne(t *testing.T) { f.saveDocToCollection(doc, f.users) for _, tc := range cases { - oldKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Unique().Doc(doc).Build() + oldKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Unique().Doc(doc).Build() err := doc.Set(usersNameFieldName, tc.NewValue) require.NoError(t, err) @@ -1111,7 +1141,7 @@ func TestUniqueUpdate_ShouldDeleteOldValueAndStoreNewOne(t *testing.T) { require.NoError(t, err) f.commitTxn() - newKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Unique().Doc(doc).Build() + newKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Unique().Doc(doc).Build() _, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS()) require.Error(t, err) @@ -1119,3 +1149,211 @@ func TestUniqueUpdate_ShouldDeleteOldValueAndStoreNewOne(t *testing.T) { require.NoError(t, err) } } + +func TestCompositeCreate_ShouldIndexExistingDocs(t *testing.T) { + f := newIndexTestFixture(t) + defer f.db.Close() + + doc1 := f.newUserDoc("John", 21, f.users) + f.saveDocToCollection(doc1, f.users) + doc2 := f.newUserDoc("Islam", 18, f.users) + f.saveDocToCollection(doc2, f.users) + + f.createUserCollectionIndexOnNameAndAge() + + key1 := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName, usersAgeFieldName).Doc(doc1).Build() + key2 := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName, usersAgeFieldName).Doc(doc2).Build() + + ds := f.txn.Datastore() + data, err := ds.Get(f.ctx, key1.ToDS()) + require.NoError(t, err, key1.ToString()) + assert.Len(t, data, 0) + data, err = f.txn.Datastore().Get(f.ctx, key2.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestComposite_IfIndexedFieldIsNil_StoreItAsNil(t *testing.T) { + f := newIndexTestFixture(t) + defer f.db.Close() + f.createUserCollectionIndexOnNameAndAge() + + docJSON, err := json.Marshal(struct { + Age int `json:"age"` + }{Age: 
44}) + require.NoError(f.t, err) + + doc, err := client.NewDocFromJSON(docJSON, f.users.Schema()) + require.NoError(f.t, err) + + f.saveDocToCollection(doc, f.users) + + key := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName, usersAgeFieldName). + Doc(doc).Build() + + data, err := f.txn.Datastore().Get(f.ctx, key.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestComposite_IfNilUpdateToValue_ShouldUpdateIndexStored(t *testing.T) { + testCases := []struct { + Name string + Doc string + Update string + Action func(*client.Document) error + }{ + { + Name: "/nil/44/docID -> /John/44", + Doc: `{"age": 44}`, + Update: `{"name": "John", "age": 44}`, + Action: func(doc *client.Document) error { + return doc.Set(usersNameFieldName, "John") + }, + }, + { + Name: "/Islam/33 -> /Islam/nil/docID", + Doc: `{"name": "Islam", "age": 33}`, + Update: `{"name": "Islam", "age": null}`, + Action: func(doc *client.Document) error { + return doc.Set(usersAgeFieldName, nil) + }, + }, + { + Name: "/Andy/nil/docID -> /nil/22/docID", + Doc: `{"name": "Andy"}`, + Update: `{"name": null, "age": 22}`, + Action: func(doc *client.Document) error { + return errors.Join(doc.Set(usersNameFieldName, nil), doc.Set(usersAgeFieldName, 22)) + }, + }, + { + Name: "/nil/55/docID -> /nil/nil/docID", + Doc: `{"age": 55}`, + Update: `{"name": null, "age": null}`, + Action: func(doc *client.Document) error { + return doc.Set(usersAgeFieldName, nil) + }, + }, + } + + for _, tc := range testCases { + f := newIndexTestFixture(t) + defer f.db.Close() + + indexDesc := makeUnique(addFieldToIndex(getUsersIndexDescOnName(), usersAgeFieldName)) + _, err := f.createCollectionIndexFor(f.users.Name().Value(), indexDesc) + require.NoError(f.t, err) + f.commitTxn() + + doc, err := client.NewDocFromJSON([]byte(tc.Doc), f.users.Schema()) + require.NoError(f.t, err) + + f.saveDocToCollection(doc, f.users) + + oldKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName, usersAgeFieldName). + Doc(doc).Unique().Build() + + require.NoError(t, doc.SetWithJSON([]byte(tc.Update))) + + newKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName, usersAgeFieldName). 
+ Doc(doc).Unique().Build() + + require.NoError(t, f.users.Update(f.ctx, doc), tc.Name) + f.commitTxn() + + _, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS()) + require.Error(t, err, oldKey.ToString(), oldKey.ToDS(), tc.Name) + _, err = f.txn.Datastore().Get(f.ctx, newKey.ToDS()) + require.NoError(t, err, newKey.ToString(), newKey.ToDS(), tc.Name) + } +} + +func TestCompositeDrop_ShouldDeleteStoredIndexedFields(t *testing.T) { + f := newIndexTestFixtureBare(t) + users := f.addUsersCollection() + _, err := f.createCollectionIndexFor(users.Name().Value(), addFieldToIndex(getUsersIndexDescOnName(), usersAgeFieldName)) + require.NoError(f.t, err) + _, err = f.createCollectionIndexFor(users.Name().Value(), addFieldToIndex(getUsersIndexDescOnAge(), usersWeightFieldName)) + require.NoError(f.t, err) + f.commitTxn() + + f.saveDocToCollection(f.newUserDoc("John", 21, users), users) + f.saveDocToCollection(f.newUserDoc("Islam", 23, users), users) + + userNameAgeKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName, usersAgeFieldName).Build() + userAgeWeightKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersAgeFieldName, usersWeightFieldName).Build() + + err = f.dropIndex(usersColName, testUsersColIndexAge) + require.NoError(f.t, err) + + assert.Len(t, f.getPrefixFromDataStore(userNameAgeKey.ToString()), 2) + assert.Len(t, f.getPrefixFromDataStore(userAgeWeightKey.ToString()), 0) +} + +func TestCompositeUpdate_ShouldDeleteOldValueAndStoreNewOne(t *testing.T) { + f := newIndexTestFixture(t) + defer f.db.Close() + f.createUserCollectionIndexOnNameAndAge() + + cases := []struct { + Name string + Field string + NewValue any + Exec func(doc *client.Document) error + }{ + { + Name: "update first", + NewValue: "Islam", + Field: usersNameFieldName, + Exec: func(doc *client.Document) error { + return f.users.Update(f.ctx, doc) + }, + }, + { + Name: "save first", + NewValue: "Andy", + Field: usersNameFieldName, + Exec: func(doc *client.Document) error { + return f.users.Save(f.ctx, doc) + }, + }, + { + Name: "update second", + NewValue: 33, + Field: usersAgeFieldName, + Exec: func(doc *client.Document) error { + return f.users.Update(f.ctx, doc) + }, + }, + { + Name: "save second", + NewValue: 36, + Field: usersAgeFieldName, + Exec: func(doc *client.Document) error { + return f.users.Save(f.ctx, doc) + }, + }, + } + + doc := f.newUserDoc("John", 21, f.users) + f.saveDocToCollection(doc, f.users) + + for _, tc := range cases { + oldKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName, usersAgeFieldName).Doc(doc).Build() + + err := doc.Set(tc.Field, tc.NewValue) + require.NoError(t, err) + err = tc.Exec(doc) + require.NoError(t, err) + f.commitTxn() + + newKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName, usersAgeFieldName).Doc(doc).Build() + + _, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS()) + require.Error(t, err) + _, err = f.txn.Datastore().Get(f.ctx, newKey.ToDS()) + require.NoError(t, err) + f.commitTxn() + } +} diff --git a/db/lens.go b/db/lens.go new file mode 100644 index 0000000000..d5240dad83 --- /dev/null +++ b/db/lens.go @@ -0,0 +1,160 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package db + +import ( + "context" + + ds "github.com/ipfs/go-datastore" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/db/description" + "github.com/sourcenetwork/defradb/errors" +) + +func (db *db) setMigration(ctx context.Context, txn datastore.Txn, cfg client.LensConfig) error { + dstCols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, cfg.DestinationSchemaVersionID) + if err != nil { + return err + } + + sourceCols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, cfg.SourceSchemaVersionID) + if err != nil { + return err + } + + colSeq, err := db.getSequence(ctx, txn, core.CollectionIDSequenceKey{}) + if err != nil { + return err + } + + if len(sourceCols) == 0 { + // If no collections are found with the given [SourceSchemaVersionID], this migration must be from + // a collection/schema version that does not yet exist locally. We must now create it. + colID, err := colSeq.next(ctx, txn) + if err != nil { + return err + } + + desc := client.CollectionDescription{ + ID: uint32(colID), + RootID: client.OrphanRootID, + SchemaVersionID: cfg.SourceSchemaVersionID, + } + + col, err := description.SaveCollection(ctx, txn, desc) + if err != nil { + return err + } + + sourceCols = append(sourceCols, col) + } + + for _, sourceCol := range sourceCols { + isDstCollectionFound := false + dstColsLoop: + for i, dstCol := range dstCols { + if len(dstCol.Sources) == 0 { + // If the destination collection has no sources at all, it must have been added as an orphaned source + // by another migration. This can happen if the migrations are added in an unusual order, before + // their schemas have been defined locally. + dstCol.Sources = append(dstCol.Sources, &client.CollectionSource{ + SourceCollectionID: sourceCol.ID, + }) + dstCols[i] = dstCol + } + + for _, source := range dstCol.CollectionSources() { + if source.SourceCollectionID == sourceCol.ID { + isDstCollectionFound = true + break dstColsLoop + } + } + } + + if !isDstCollectionFound { + // If the destination collection was not found, we must create it. This can happen when setting a migration + // to a schema version that does not yet exist locally. + colID, err := colSeq.next(ctx, txn) + if err != nil { + return err + } + + desc := client.CollectionDescription{ + ID: uint32(colID), + RootID: sourceCol.RootID, + SchemaVersionID: cfg.DestinationSchemaVersionID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: sourceCol.ID, + // The transform will be set later, when updating all destination collections + // whether they are newly created or not.
+ }, + }, + } + + col, err := description.SaveCollection(ctx, txn, desc) + if err != nil { + return err + } + + if desc.RootID != client.OrphanRootID { + var schemaFound bool + // If the root schema id is known, we need to add it to the index, even if the schema is not known locally + schema, err := description.GetSchemaVersion(ctx, txn, cfg.SourceSchemaVersionID) + if err != nil { + if !errors.Is(err, ds.ErrNotFound) { + return err + } + } else { + schemaFound = true + } + + if schemaFound { + schemaRootKey := core.NewSchemaRootKey(schema.Root, cfg.DestinationSchemaVersionID) + err = txn.Systemstore().Put(ctx, schemaRootKey.ToDS(), []byte{}) + if err != nil { + return err + } + } + } + + dstCols = append(dstCols, col) + } + } + + for _, col := range dstCols { + collectionSources := col.CollectionSources() + + for _, source := range collectionSources { + // WARNING: Here we assume that the collection source points at a collection of the source schema version. + // This works currently, as collections only have a single source. If/when this changes we need to make + // sure we only update the correct source. + + source.Transform = immutable.Some(cfg.Lens) + + err = db.LensRegistry().SetMigration(ctx, col.ID, cfg.Lens) + if err != nil { + return err + } + } + + _, err = description.SaveCollection(ctx, txn, col) + if err != nil { + return err + } + } + + return nil +} diff --git a/db/schema.go b/db/schema.go index 988aea5e17..a4582158f3 100644 --- a/db/schema.go +++ b/db/schema.go @@ -18,6 +18,7 @@ import ( "unicode" jsonpatch "github.com/evanphx/json-patch/v5" + "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" @@ -39,16 +40,6 @@ func (db *db) addSchema( txn datastore.Txn, schemaString string, ) ([]client.CollectionDescription, error) { - existingCollections, err := db.getAllCollections(ctx, txn) - if err != nil { - return nil, err - } - - existingDefinitions := make([]client.CollectionDefinition, len(existingCollections)) - for i := range existingCollections { - existingDefinitions[i] = existingCollections[i].Definition() - } - newDefinitions, err := db.parser.ParseSDL(ctx, schemaString) if err != nil { return nil, err @@ -91,7 +82,13 @@ func (db *db) loadSchema(ctx context.Context, txn datastore.Txn) error { // The collections (including the schema version ID) will only be updated if any changes have actually // been made, if the net result of the patch matches the current persisted description then no changes // will be applied. 
-func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString string, setAsDefaultVersion bool) error { +func (db *db) patchSchema( + ctx context.Context, + txn datastore.Txn, + patchString string, + migration immutable.Option[model.Lens], + setAsDefaultVersion bool, +) error { patch, err := jsonpatch.DecodePatch([]byte(patchString)) if err != nil { return err } @@ -138,6 +135,7 @@ func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString st existingSchemaByName, newSchemaByName, schema, + migration, setAsDefaultVersion, ) if err != nil { @@ -274,35 +272,63 @@ func substituteSchemaPatch( return patch, nil } -func (db *db) getSchemasByName( - ctx context.Context, - txn datastore.Txn, - name string, -) ([]client.SchemaDescription, error) { - return description.GetSchemasByName(ctx, txn, name) -} - func (db *db) getSchemaByVersionID( ctx context.Context, txn datastore.Txn, versionID string, ) (client.SchemaDescription, error) { - return description.GetSchemaVersion(ctx, txn, versionID) -} + schemas, err := db.getSchemas(ctx, txn, client.SchemaFetchOptions{ID: immutable.Some(versionID)}) + if err != nil { + return client.SchemaDescription{}, err + } -func (db *db) getSchemasByRoot( - ctx context.Context, - txn datastore.Txn, - root string, -) ([]client.SchemaDescription, error) { - return description.GetSchemasByRoot(ctx, txn, root) + // schemas will always have length == 1 here + return schemas[0], nil } -func (db *db) getAllSchemas( +func (db *db) getSchemas( ctx context.Context, txn datastore.Txn, + options client.SchemaFetchOptions, ) ([]client.SchemaDescription, error) { - return description.GetAllSchemas(ctx, txn) + schemas := []client.SchemaDescription{} + + switch { + case options.ID.HasValue(): + schema, err := description.GetSchemaVersion(ctx, txn, options.ID.Value()) + if err != nil { + return nil, err + } + schemas = append(schemas, schema) + + case options.Root.HasValue(): + var err error + schemas, err = description.GetSchemasByRoot(ctx, txn, options.Root.Value()) + if err != nil { + return nil, err + } + case options.Name.HasValue(): + var err error + schemas, err = description.GetSchemasByName(ctx, txn, options.Name.Value()) + if err != nil { + return nil, err + } + default: + return description.GetAllSchemas(ctx, txn) + } + + result := []client.SchemaDescription{} + for _, schema := range schemas { + if options.Root.HasValue() && schema.Root != options.Root.Value() { + continue + } + if options.Name.HasValue() && schema.Name != options.Name.Value() { + continue + } + result = append(result, schema) + } + + return result, nil } // getSubstituteFieldKind checks and attempts to get the underlying integer value for the given string @@ -335,18 +361,18 @@ func getSubstituteFieldKind( } } -// isFieldOrInner returns true if the given path points to a FieldDescription or a property within it. +// isFieldOrInner returns true if the given path points to a SchemaFieldDescription or a property within it. func isFieldOrInner(path []string) bool { //nolint:goconst return len(path) >= 3 && path[fieldsPathIndex] == "Fields" } -// isField returns true if the given path points to a FieldDescription. +// isField returns true if the given path points to a SchemaFieldDescription. func isField(path []string) bool { return len(path) == 3 && path[fieldsPathIndex] == "Fields" } -// isField returns true if the given path points to a FieldDescription.Kind property. +// isFieldKind returns true if the given path points to a SchemaFieldDescription.Kind property.
func isFieldKind(path []string) bool { return len(path) == 4 && path[fieldIndexPathIndex+1] == "Kind" && diff --git a/db/sequence.go b/db/sequence.go index 1fcfbf7872..3c510ec78c 100644 --- a/db/sequence.go +++ b/db/sequence.go @@ -22,17 +22,13 @@ import ( ) type sequence struct { - key core.SequenceKey + key core.Key val uint64 } -func (db *db) getSequence(ctx context.Context, txn datastore.Txn, key string) (*sequence, error) { - if key == "" { - return nil, ErrKeyEmpty - } - seqKey := core.NewSequenceKey(key) +func (db *db) getSequence(ctx context.Context, txn datastore.Txn, key core.Key) (*sequence, error) { seq := &sequence{ - key: seqKey, + key: key, val: uint64(0), } diff --git a/db/txn_db.go b/db/txn_db.go index 92f9cde6c1..f2fbe7cea3 100644 --- a/db/txn_db.go +++ b/db/txn_db.go @@ -13,6 +13,9 @@ package db import ( "context" + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" ) @@ -76,50 +79,18 @@ func (db *implicitTxnDB) GetCollectionByName(ctx context.Context, name string) ( // GetCollectionByName returns an existing collection within the database. func (db *explicitTxnDB) GetCollectionByName(ctx context.Context, name string) (client.Collection, error) { - return db.getCollectionByName(ctx, db.txn, name) -} - -// GetCollectionsBySchemaRoot attempts to retrieve all collections using the given schema ID. -// -// If no matching collection is found an empty set will be returned. -func (db *implicitTxnDB) GetCollectionsBySchemaRoot( - ctx context.Context, - schemaRoot string, -) ([]client.Collection, error) { - txn, err := db.NewTxn(ctx, true) + col, err := db.getCollectionByName(ctx, db.txn, name) if err != nil { return nil, err } - defer txn.Discard(ctx) - cols, err := db.getCollectionsBySchemaRoot(ctx, txn, schemaRoot) - if err != nil { - return nil, err - } - - return cols, nil + return col.WithTxn(db.txn), nil } -// GetCollectionsBySchemaRoot attempts to retrieve all collections using the given schema ID. -// -// If no matching collection is found an empty set will be returned. -func (db *explicitTxnDB) GetCollectionsBySchemaRoot( +// GetCollections gets all the currently defined collections. +func (db *implicitTxnDB) GetCollections( ctx context.Context, - schemaRoot string, -) ([]client.Collection, error) { - cols, err := db.getCollectionsBySchemaRoot(ctx, db.txn, schemaRoot) - if err != nil { - return nil, err - } - - return cols, nil -} - -// GetCollectionsByVersionID attempts to retrieve all collections using the given schema version ID. -// -// If no matching collections are found an empty set will be returned. -func (db *implicitTxnDB) GetCollectionsByVersionID( - ctx context.Context, schemaVersionID string, + options client.CollectionFetchOptions, ) ([]client.Collection, error) { txn, err := db.NewTxn(ctx, true) if err != nil { @@ -127,68 +98,24 @@ func (db *implicitTxnDB) GetCollectionsByVersionID( } defer txn.Discard(ctx) - cols, err := db.getCollectionsByVersionID(ctx, txn, schemaVersionID) - if err != nil { - return nil, err - } - - collections := make([]client.Collection, len(cols)) - for i, col := range cols { - collections[i] = col - } - - return collections, nil + return db.getCollections(ctx, txn, options) } -// GetCollectionsByVersionID attempts to retrieve all collections using the given schema version ID. -// -// If no matching collections are found an empty set will be returned. 
-func (db *explicitTxnDB) GetCollectionsByVersionID( - ctx context.Context, schemaVersionID string, +// GetCollections gets all the currently defined collections. +func (db *explicitTxnDB) GetCollections( + ctx context.Context, + options client.CollectionFetchOptions, ) ([]client.Collection, error) { - cols, err := db.getCollectionsByVersionID(ctx, db.txn, schemaVersionID) - if err != nil { - return nil, err - } - - collections := make([]client.Collection, len(cols)) - for i, col := range cols { - collections[i] = col - } - - return collections, nil -} - -// GetAllCollections gets all the currently defined collections. -func (db *implicitTxnDB) GetAllCollections(ctx context.Context) ([]client.Collection, error) { - txn, err := db.NewTxn(ctx, true) + cols, err := db.getCollections(ctx, db.txn, options) if err != nil { return nil, err } - defer txn.Discard(ctx) - - return db.getAllCollections(ctx, txn) -} -// GetAllCollections gets all the currently defined collections. -func (db *explicitTxnDB) GetAllCollections(ctx context.Context) ([]client.Collection, error) { - return db.getAllCollections(ctx, db.txn) -} - -// GetSchemasByName returns the all schema versions with the given name. -func (db *implicitTxnDB) GetSchemasByName(ctx context.Context, name string) ([]client.SchemaDescription, error) { - txn, err := db.NewTxn(ctx, true) - if err != nil { - return nil, err + for i := range cols { + cols[i] = cols[i].WithTxn(db.txn) } - defer txn.Discard(ctx) - - return db.getSchemasByName(ctx, txn, name) -} -// GetSchemasByName returns the all schema versions with the given name. -func (db *explicitTxnDB) GetSchemasByName(ctx context.Context, name string) ([]client.SchemaDescription, error) { - return db.getSchemasByName(ctx, db.txn, name) + return cols, nil } // GetSchemaByVersionID returns the schema description for the schema version of the @@ -213,38 +140,28 @@ func (db *explicitTxnDB) GetSchemaByVersionID(ctx context.Context, versionID str return db.getSchemaByVersionID(ctx, db.txn, versionID) } -// GetSchemasByRoot returns the all schema versions for the given root. -func (db *implicitTxnDB) GetSchemasByRoot(ctx context.Context, root string) ([]client.SchemaDescription, error) { - txn, err := db.NewTxn(ctx, true) - if err != nil { - return nil, err - } - defer txn.Discard(ctx) - - return db.getSchemasByRoot(ctx, txn, root) -} - -// GetSchemasByRoot returns the all schema versions for the given root. -func (db *explicitTxnDB) GetSchemasByRoot(ctx context.Context, root string) ([]client.SchemaDescription, error) { - return db.getSchemasByRoot(ctx, db.txn, root) -} - -// GetAllSchemas returns all schema versions that currently exist within +// GetSchemas returns all schema versions that currently exist within // this [Store]. -func (db *implicitTxnDB) GetAllSchemas(ctx context.Context) ([]client.SchemaDescription, error) { +func (db *implicitTxnDB) GetSchemas( + ctx context.Context, + options client.SchemaFetchOptions, +) ([]client.SchemaDescription, error) { txn, err := db.NewTxn(ctx, true) if err != nil { return nil, err } defer txn.Discard(ctx) - return db.getAllSchemas(ctx, txn) + return db.getSchemas(ctx, txn, options) } -// GetAllSchemas returns all schema versions that currently exist within +// GetSchemas returns all schema versions that currently exist within // this [Store]. 
-func (db *explicitTxnDB) GetAllSchemas(ctx context.Context) ([]client.SchemaDescription, error) { - return db.getAllSchemas(ctx, db.txn) +func (db *explicitTxnDB) GetSchemas( + ctx context.Context, + options client.SchemaFetchOptions, +) ([]client.SchemaDescription, error) { + return db.getSchemas(ctx, db.txn, options) } // GetAllIndexes gets all the indexes in the database. @@ -257,14 +174,14 @@ func (db *implicitTxnDB) GetAllIndexes( } defer txn.Discard(ctx) - return db.getAllIndexes(ctx, txn) + return db.getAllIndexDescriptions(ctx, txn) } // GetAllIndexes gets all the indexes in the database. func (db *explicitTxnDB) GetAllIndexes( ctx context.Context, ) (map[client.CollectionName][]client.IndexDescription, error) { - return db.getAllIndexes(ctx, db.txn) + return db.getAllIndexDescriptions(ctx, db.txn) } // AddSchema takes the provided GQL schema in SDL format, and applies it to the database, @@ -310,14 +227,19 @@ func (db *explicitTxnDB) AddSchema(ctx context.Context, schemaString string) ([] // The collections (including the schema version ID) will only be updated if any changes have actually // been made, if the net result of the patch matches the current persisted description then no changes // will be applied. -func (db *implicitTxnDB) PatchSchema(ctx context.Context, patchString string, setAsDefaultVersion bool) error { +func (db *implicitTxnDB) PatchSchema( + ctx context.Context, + patchString string, + migration immutable.Option[model.Lens], + setAsDefaultVersion bool, +) error { txn, err := db.NewTxn(ctx, false) if err != nil { return err } defer txn.Discard(ctx) - err = db.patchSchema(ctx, txn, patchString, setAsDefaultVersion) + err = db.patchSchema(ctx, txn, patchString, migration, setAsDefaultVersion) if err != nil { return err } @@ -336,18 +258,23 @@ func (db *implicitTxnDB) PatchSchema(ctx context.Context, patchString string, se // The collections (including the schema version ID) will only be updated if any changes have actually // been made, if the net result of the patch matches the current persisted description then no changes // will be applied. 
-func (db *explicitTxnDB) PatchSchema(ctx context.Context, patchString string, setAsDefaultVersion bool) error { - return db.patchSchema(ctx, db.txn, patchString, setAsDefaultVersion) +func (db *explicitTxnDB) PatchSchema( + ctx context.Context, + patchString string, + migration immutable.Option[model.Lens], + setAsDefaultVersion bool, +) error { + return db.patchSchema(ctx, db.txn, patchString, migration, setAsDefaultVersion) } -func (db *implicitTxnDB) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { +func (db *implicitTxnDB) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error { txn, err := db.NewTxn(ctx, false) if err != nil { return err } defer txn.Discard(ctx) - err = db.setDefaultSchemaVersion(ctx, txn, schemaVersionID) + err = db.setActiveSchemaVersion(ctx, txn, schemaVersionID) if err != nil { return err } @@ -355,8 +282,8 @@ func (db *implicitTxnDB) SetDefaultSchemaVersion(ctx context.Context, schemaVers return txn.Commit(ctx) } -func (db *explicitTxnDB) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { - return db.setDefaultSchemaVersion(ctx, db.txn, schemaVersionID) +func (db *explicitTxnDB) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error { + return db.setActiveSchemaVersion(ctx, db.txn, schemaVersionID) } func (db *implicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { @@ -366,7 +293,7 @@ func (db *implicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig } defer txn.Discard(ctx) - err = db.lensRegistry.SetMigration(ctx, cfg) + err = db.setMigration(ctx, txn, cfg) if err != nil { return err } @@ -375,17 +302,22 @@ func (db *implicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig } func (db *explicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { - return db.lensRegistry.SetMigration(ctx, cfg) + return db.setMigration(ctx, db.txn, cfg) } -func (db *implicitTxnDB) AddView(ctx context.Context, query string, sdl string) ([]client.CollectionDefinition, error) { +func (db *implicitTxnDB) AddView( + ctx context.Context, + query string, + sdl string, + transform immutable.Option[model.Lens], +) ([]client.CollectionDefinition, error) { txn, err := db.NewTxn(ctx, false) if err != nil { return nil, err } defer txn.Discard(ctx) - defs, err := db.addView(ctx, txn, query, sdl) + defs, err := db.addView(ctx, txn, query, sdl, transform) if err != nil { return nil, err } @@ -398,8 +330,13 @@ func (db *implicitTxnDB) AddView(ctx context.Context, query string, sdl string) return defs, nil } -func (db *explicitTxnDB) AddView(ctx context.Context, query string, sdl string) ([]client.CollectionDefinition, error) { - return db.addView(ctx, db.txn, query, sdl) +func (db *explicitTxnDB) AddView( + ctx context.Context, + query string, + sdl string, + transform immutable.Option[model.Lens], +) ([]client.CollectionDefinition, error) { + return db.addView(ctx, db.txn, query, sdl, transform) } // BasicImport imports a json dataset. 
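A minimal caller-side sketch of the consolidated fetch API introduced in the db/txn_db.go hunks above. It assumes `client.DB` exposes the `GetSchemas(ctx, client.SchemaFetchOptions)` signature shown there; the helper name `fetchUserSchemas` and the `"User"` schema name are illustrative only, not part of this changeset.

```go
package example

import (
	"context"

	"github.com/sourcenetwork/immutable"

	"github.com/sourcenetwork/defradb/client"
)

// fetchUserSchemas is the options-based equivalent of the removed
// GetSchemasByName helper. An empty SchemaFetchOptions behaves like
// the removed GetAllSchemas, returning every schema version.
func fetchUserSchemas(ctx context.Context, db client.DB) ([]client.SchemaDescription, error) {
	return db.GetSchemas(ctx, client.SchemaFetchOptions{
		Name: immutable.Some("User"),
	})
}
```

The same pattern replaces the removed collection lookups: a single `GetCollections(ctx, client.CollectionFetchOptions{...})` stands in for the removed `GetCollectionsBySchemaRoot`, `GetCollectionsByVersionID`, and `GetAllCollections` methods.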
diff --git a/db/view.go b/db/view.go index 2b4666df22..ea57f94541 100644 --- a/db/view.go +++ b/db/view.go @@ -15,6 +15,9 @@ import ( "errors" "fmt" + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/datastore" @@ -26,6 +29,7 @@ func (db *db) addView( txn datastore.Txn, inputQuery string, sdl string, + transform immutable.Option[model.Lens], ) ([]client.CollectionDefinition, error) { // Wrap the given query as part of the GQL query object - this simplifies the syntax for users // and ensures that we can't be given mutations. In the future this line should disappear along @@ -57,12 +61,16 @@ func (db *db) addView( } for i := range newDefinitions { - newDefinitions[i].Description.BaseQuery = baseQuery + source := client.QuerySource{ + Query: *baseQuery, + Transform: transform, + } + newDefinitions[i].Description.Sources = append(newDefinitions[i].Description.Sources, &source) } returnDescriptions := make([]client.CollectionDefinition, len(newDefinitions)) for i, definition := range newDefinitions { - if definition.Description.Name == "" { + if !definition.Description.Name.HasValue() { schema, err := description.CreateSchemaVersion(ctx, txn, definition.Schema) if err != nil { return nil, err @@ -77,6 +85,15 @@ func (db *db) addView( return nil, err } returnDescriptions[i] = col.Definition() + + for _, source := range col.Description().QuerySources() { + if source.Transform.HasValue() { + err = db.LensRegistry().SetMigration(ctx, col.ID(), source.Transform.Value()) + if err != nil { + return nil, err + } + } + } } } diff --git a/docs/cli/defradb.md b/docs/cli/defradb.md index 459f43075d..c89ce0f1aa 100644 --- a/docs/cli/defradb.md +++ b/docs/cli/defradb.md @@ -12,21 +12,28 @@ Start a DefraDB node, interact with a local or remote node, and much more. ### Options ``` - -h, --help help for defradb - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + -h, --help help for defradb + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO * [defradb client](defradb_client.md) - Interact with a DefraDB node -* [defradb init](defradb_init.md) - Initialize DefraDB's root directory and configuration file * [defradb server-dump](defradb_server-dump.md) - Dumps the state of the entire database * [defradb start](defradb_start.md) - Start a DefraDB node * [defradb version](defradb_version.md) - Display the version information of DefraDB and its components diff --git a/docs/cli/defradb_client.md b/docs/cli/defradb_client.md index c3bd354bc7..30e8c804ee 100644 --- a/docs/cli/defradb_client.md +++ b/docs/cli/defradb_client.md @@ -17,14 +17,22 @@ Execute queries, add schema types, obtain node info, etc. ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_backup.md b/docs/cli/defradb_client_backup.md index 77e111795d..a7c7ae453b 100644 --- a/docs/cli/defradb_client_backup.md +++ b/docs/cli/defradb_client_backup.md @@ -16,15 +16,23 @@ Currently only supports JSON format. ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_backup_export.md b/docs/cli/defradb_client_backup_export.md index b7547ea641..6992b120c6 100644 --- a/docs/cli/defradb_client_backup_export.md +++ b/docs/cli/defradb_client_backup_export.md @@ -30,15 +30,23 @@ defradb client backup export [-c --collections | -p --pretty | -f --format] ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_backup_import.md b/docs/cli/defradb_client_backup_import.md index 73a9957fac..ad2d3a1117 100644 --- a/docs/cli/defradb_client_backup_import.md +++ b/docs/cli/defradb_client_backup_import.md @@ -22,15 +22,23 @@ defradb client backup import [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection.md b/docs/cli/defradb_client_collection.md index d164902b97..593e2d01ee 100644 --- a/docs/cli/defradb_client_collection.md +++ b/docs/cli/defradb_client_collection.md @@ -9,6 +9,7 @@ Create, read, update, and delete documents within a collection. ### Options ``` + --get-inactive Get inactive collections as well as active -h, --help help for collection --name string Collection name --schema string Collection schema Root @@ -19,14 +20,22 @@ Create, read, update, and delete documents within a collection. ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_create.md b/docs/cli/defradb_client_collection_create.md index 42b53b37af..7c2cba7487 100644 --- a/docs/cli/defradb_client_collection_create.md +++ b/docs/cli/defradb_client_collection_create.md @@ -33,18 +33,27 @@ defradb client collection create [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --name string Collection name - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --schema string Collection schema Root - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") - --version string Collection version ID + --allowed-origins stringArray List of origins to allow for CORS requests + --get-inactive Get inactive collections as well as active + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --name string Collection name + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --schema string Collection schema Root + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + --version string Collection version ID ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_delete.md b/docs/cli/defradb_client_collection_delete.md index 30676654d5..33a5af4809 100644 --- a/docs/cli/defradb_client_collection_delete.md +++ b/docs/cli/defradb_client_collection_delete.md @@ -28,18 +28,27 @@ defradb client collection delete [--filter --docID ] [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --name string Collection name - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --schema string Collection schema Root - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") - --version string Collection version ID + --allowed-origins stringArray List of origins to allow for CORS requests + --get-inactive Get inactive collections as well as active + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --name string Collection name + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --schema string Collection schema Root + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + --version string Collection version ID ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_describe.md b/docs/cli/defradb_client_collection_describe.md index 88e2427bed..46e8623d6a 100644 --- a/docs/cli/defradb_client_collection_describe.md +++ b/docs/cli/defradb_client_collection_describe.md @@ -12,10 +12,10 @@ Example: view all collections Example: view collection by name defradb client collection describe --name User -Example: view collection by schema id +Example: view collection by schema root id defradb client collection describe --schema bae123 -Example: view collection by version id +Example: view collection by version id. This will also return inactive collections defradb client collection describe --version bae123 @@ -26,24 +26,33 @@ defradb client collection describe [flags] ### Options ``` - -h, --help help for describe + --get-inactive Get inactive collections as well as active + -h, --help help for describe + --name string Collection name + --schema string Collection schema Root + --version string Collection version ID ``` ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --name string Collection name - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --schema string Collection schema Root - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") - --version string Collection version ID + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_docIDs.md b/docs/cli/defradb_client_collection_docIDs.md index 5ca8444e2e..c976d05417 100644 --- a/docs/cli/defradb_client_collection_docIDs.md +++ b/docs/cli/defradb_client_collection_docIDs.md @@ -23,18 +23,27 @@ defradb client collection docIDs [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --name string Collection name - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --schema string Collection schema Root - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") - --version string Collection version ID + --allowed-origins stringArray List of origins to allow for CORS requests + --get-inactive Get inactive collections as well as active + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --name string Collection name + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --schema string Collection schema Root + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) + --version string Collection version ID ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_get.md b/docs/cli/defradb_client_collection_get.md index 3f60490272..c2aeac17b3 100644 --- a/docs/cli/defradb_client_collection_get.md +++ b/docs/cli/defradb_client_collection_get.md @@ -24,18 +24,27 @@ defradb client collection get [--show-deleted] [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --name string Collection name - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --schema string Collection schema Root - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") - --version string Collection version ID + --allowed-origins stringArray List of origins to allow for CORS requests + --get-inactive Get inactive collections as well as active + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --name string Collection name + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --schema string Collection schema Root + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + --version string Collection version ID ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_update.md b/docs/cli/defradb_client_collection_update.md index 4ba111f025..1200cc5b3e 100644 --- a/docs/cli/defradb_client_collection_update.md +++ b/docs/cli/defradb_client_collection_update.md @@ -34,18 +34,27 @@ defradb client collection update [--filter --docID --updater ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --name string Collection name - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --schema string Collection schema Root - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") - --version string Collection version ID + --allowed-origins stringArray List of origins to allow for CORS requests + --get-inactive Get inactive collections as well as active + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --name string Collection name + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --schema string Collection schema Root + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + --version string Collection version ID ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_dump.md b/docs/cli/defradb_client_dump.md index 3ebd35343c..bc00e292b9 100644 --- a/docs/cli/defradb_client_dump.md +++ b/docs/cli/defradb_client_dump.md @@ -15,15 +15,23 @@ defradb client dump [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_index.md b/docs/cli/defradb_client_index.md index a876bbcc4f..0dab1de7fe 100644 --- a/docs/cli/defradb_client_index.md +++ b/docs/cli/defradb_client_index.md @@ -15,15 +15,23 @@ Manage (create, drop, or list) collection indexes on a DefraDB node. ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_index_create.md b/docs/cli/defradb_client_index_create.md index e434cb91cd..cbdbbe1d50 100644 --- a/docs/cli/defradb_client_index_create.md +++ b/docs/cli/defradb_client_index_create.md @@ -32,15 +32,23 @@ defradb client index create -c --collection --fields [-n - ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_index_drop.md b/docs/cli/defradb_client_index_drop.md index c5171b756e..bb9e6ec30a 100644 --- a/docs/cli/defradb_client_index_drop.md +++ b/docs/cli/defradb_client_index_drop.md @@ -24,15 +24,23 @@ defradb client index drop -c --collection -n --name [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_index_list.md b/docs/cli/defradb_client_index_list.md index c7e96d4e4f..a2d7ca8dd0 100644 --- a/docs/cli/defradb_client_index_list.md +++ b/docs/cli/defradb_client_index_list.md @@ -26,15 +26,23 @@ defradb client index list [-c --collection ] [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p.md b/docs/cli/defradb_client_p2p.md index 386929950d..171e2ab661 100644 --- a/docs/cli/defradb_client_p2p.md +++ b/docs/cli/defradb_client_p2p.md @@ -15,15 +15,23 @@ Interact with the DefraDB P2P system ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_collection.md b/docs/cli/defradb_client_p2p_collection.md index 6fec3171da..11ace67212 100644 --- a/docs/cli/defradb_client_p2p_collection.md +++ b/docs/cli/defradb_client_p2p_collection.md @@ -16,15 +16,23 @@ The selected collections synchronize their events on the pubsub network. ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_collection_add.md b/docs/cli/defradb_client_p2p_collection_add.md index c55c807404..c54f235a60 100644 --- a/docs/cli/defradb_client_p2p_collection_add.md +++ b/docs/cli/defradb_client_p2p_collection_add.md @@ -27,15 +27,23 @@ defradb client p2p collection add [collectionIDs] [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_collection_getall.md b/docs/cli/defradb_client_p2p_collection_getall.md index 46fcefc407..07c536d716 100644 --- a/docs/cli/defradb_client_p2p_collection_getall.md +++ b/docs/cli/defradb_client_p2p_collection_getall.md @@ -20,15 +20,23 @@ defradb client p2p collection getall [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_collection_remove.md b/docs/cli/defradb_client_p2p_collection_remove.md index 2aaa0901b4..5a8eb969b6 100644 --- a/docs/cli/defradb_client_p2p_collection_remove.md +++ b/docs/cli/defradb_client_p2p_collection_remove.md @@ -27,15 +27,23 @@ defradb client p2p collection remove [collectionIDs] [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_info.md b/docs/cli/defradb_client_p2p_info.md index 793179f3d7..27fdf7cb9b 100644 --- a/docs/cli/defradb_client_p2p_info.md +++ b/docs/cli/defradb_client_p2p_info.md @@ -19,15 +19,23 @@ defradb client p2p info [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_replicator.md b/docs/cli/defradb_client_p2p_replicator.md index 4aa4eb9996..725845a726 100644 --- a/docs/cli/defradb_client_p2p_replicator.md +++ b/docs/cli/defradb_client_p2p_replicator.md @@ -16,15 +16,23 @@ A replicator replicates one or all collection(s) from one node to another. ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_replicator_delete.md b/docs/cli/defradb_client_p2p_replicator_delete.md index fac7d4ce06..ef89979be6 100644 --- a/docs/cli/defradb_client_p2p_replicator_delete.md +++ b/docs/cli/defradb_client_p2p_replicator_delete.md @@ -25,15 +25,23 @@ defradb client p2p replicator delete [-c, --collection] [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. 
Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_replicator_getall.md b/docs/cli/defradb_client_p2p_replicator_getall.md index 4c0c81f6d1..4d33b5243f 100644 --- a/docs/cli/defradb_client_p2p_replicator_getall.md +++ b/docs/cli/defradb_client_p2p_replicator_getall.md @@ -24,15 +24,23 @@ defradb client p2p replicator getall [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_replicator_set.md b/docs/cli/defradb_client_p2p_replicator_set.md index 86f47fc30f..55654ded0f 100644 --- a/docs/cli/defradb_client_p2p_replicator_set.md +++ b/docs/cli/defradb_client_p2p_replicator_set.md @@ -25,15 +25,23 @@ defradb client p2p replicator set [-c, --collection] [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_query.md b/docs/cli/defradb_client_query.md index 5e748229e2..b23bf50553 100644 --- a/docs/cli/defradb_client_query.md +++ b/docs/cli/defradb_client_query.md @@ -34,15 +34,23 @@ defradb client query [query request] [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema.md b/docs/cli/defradb_client_schema.md index ee3422741f..d37251c8db 100644 --- a/docs/cli/defradb_client_schema.md +++ b/docs/cli/defradb_client_schema.md @@ -15,15 +15,23 @@ Make changes, updates, or look for existing schema types. ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO @@ -33,5 +41,5 @@ Make changes, updates, or look for existing schema types. * [defradb client schema describe](defradb_client_schema_describe.md) - View schema descriptions. 
* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance * [defradb client schema patch](defradb_client_schema_patch.md) - Patch an existing schema type -* [defradb client schema set-default](defradb_client_schema_set-default.md) - Set the default schema version +* [defradb client schema set-active](defradb_client_schema_set-active.md) - Set the active collection version diff --git a/docs/cli/defradb_client_schema_add.md b/docs/cli/defradb_client_schema_add.md index aa73039d0c..e0ad675241 100644 --- a/docs/cli/defradb_client_schema_add.md +++ b/docs/cli/defradb_client_schema_add.md @@ -31,15 +31,23 @@ defradb client schema add [schema] [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_describe.md b/docs/cli/defradb_client_schema_describe.md index 749ba24235..cd79cce3c1 100644 --- a/docs/cli/defradb_client_schema_describe.md +++ b/docs/cli/defradb_client_schema_describe.md @@ -35,15 +35,23 @@ defradb client schema describe [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_migration.md b/docs/cli/defradb_client_schema_migration.md index c0910ad746..b49420401c 100644 --- a/docs/cli/defradb_client_schema_migration.md +++ b/docs/cli/defradb_client_schema_migration.md @@ -15,23 +15,31 @@ Make set or look for existing schema migrations on a DefraDB node. ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO * [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node -* [defradb client schema migration down](defradb_client_schema_migration_down.md) - Reverses the migration from the specified schema version. -* [defradb client schema migration get](defradb_client_schema_migration_get.md) - Gets the schema migrations within DefraDB +* [defradb client schema migration down](defradb_client_schema_migration_down.md) - Reverses the migration to the specified collection version. * [defradb client schema migration reload](defradb_client_schema_migration_reload.md) - Reload the schema migrations within DefraDB * [defradb client schema migration set](defradb_client_schema_migration_set.md) - Set a schema migration within DefraDB -* [defradb client schema migration up](defradb_client_schema_migration_up.md) - Applies the migration to the specified schema version. +* [defradb client schema migration set-registry](defradb_client_schema_migration_set-registry.md) - Set a schema migration within the DefraDB LensRegistry +* [defradb client schema migration up](defradb_client_schema_migration_up.md) - Applies the migration to the specified collection version. diff --git a/docs/cli/defradb_client_schema_migration_down.md b/docs/cli/defradb_client_schema_migration_down.md index afb631e267..6172bf09b1 100644 --- a/docs/cli/defradb_client_schema_migration_down.md +++ b/docs/cli/defradb_client_schema_migration_down.md @@ -1,46 +1,54 @@ ## defradb client schema migration down -Reverses the migration from the specified schema version. +Reverses the migration to the specified collection version. ### Synopsis -Reverses the migration from the specified schema version. +Reverses the migration to the specified collection version. Documents is a list of documents to reverse the migration from. 
Example: migrate from string - defradb client schema migration down --version bae123 '[{"name": "Bob"}]' + defradb client schema migration down --collection 2 '[{"name": "Bob"}]' Example: migrate from file - defradb client schema migration down --version bae123 -f documents.json + defradb client schema migration down --collection 2 -f documents.json Example: migrate from stdin - cat documents.json | defradb client schema migration down --version bae123 - + cat documents.json | defradb client schema migration down --collection 2 - ``` -defradb client schema migration down --version [flags] +defradb client schema migration down --collection [flags] ``` ### Options ``` - -f, --file string File containing document(s) - -h, --help help for down - --version string Schema version id + --collection uint32 Collection id + -f, --file string File containing document(s) + -h, --help help for down ``` ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_migration_reload.md b/docs/cli/defradb_client_schema_migration_reload.md index f9acfd2d19..01051e419a 100644 --- a/docs/cli/defradb_client_schema_migration_reload.md +++ b/docs/cli/defradb_client_schema_migration_reload.md @@ -19,15 +19,23 @@ defradb client schema migration reload [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_migration_set-registry.md b/docs/cli/defradb_client_schema_migration_set-registry.md new file mode 100644 index 0000000000..8e80aa132d --- /dev/null +++ b/docs/cli/defradb_client_schema_migration_set-registry.md @@ -0,0 +1,50 @@ +## defradb client schema migration set-registry + +Set a schema migration within the DefraDB LensRegistry + +### Synopsis + +Set a migration to a collection within the LensRegistry of the local DefraDB node. +Does not persist the migration after restart. + +Example: set from an argument string: + defradb client schema migration set-registry 2 '{"lenses": [...' + +Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network. + +``` +defradb client schema migration set-registry [collectionID] [cfg] [flags] +``` + +### Options + +``` + -h, --help help for set-registry +``` + +### Options inherited from parent commands + +``` + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) +``` + +### SEE ALSO + +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance + diff --git a/docs/cli/defradb_client_schema_migration_set.md b/docs/cli/defradb_client_schema_migration_set.md index b9626bfeed..9e6bcfcfc4 100644 --- a/docs/cli/defradb_client_schema_migration_set.md +++ b/docs/cli/defradb_client_schema_migration_set.md @@ -4,7 +4,8 @@ Set a schema migration within DefraDB ### Synopsis -Set a migration between two schema versions within the local DefraDB node. +Set a migration from a source schema version to a destination schema version for +all collections that are on the given source schema version within the local DefraDB node. Example: set from an argument string: defradb client schema migration set bae123 bae456 '{"lenses": [...' @@ -31,15 +32,23 @@ defradb client schema migration set [src] [dst] [cfg] [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_migration_up.md b/docs/cli/defradb_client_schema_migration_up.md index 8f7688ccad..bcd28453cf 100644 --- a/docs/cli/defradb_client_schema_migration_up.md +++ b/docs/cli/defradb_client_schema_migration_up.md @@ -1,46 +1,54 @@ ## defradb client schema migration up -Applies the migration to the specified schema version. +Applies the migration to the specified collection version. ### Synopsis -Applies the migration to the specified schema version. +Applies the migration to the specified collection version. Documents is a list of documents to apply the migration to. Example: migrate from string - defradb client schema migration up --version bae123 '[{"name": "Bob"}]' + defradb client schema migration up --collection 2 '[{"name": "Bob"}]' Example: migrate from file - defradb client schema migration up --version bae123 -f documents.json + defradb client schema migration up --collection 2 -f documents.json Example: migrate from stdin - cat documents.json | defradb client schema migration up --version bae123 - + cat documents.json | defradb client schema migration up --collection 2 - ``` -defradb client schema migration up --version [flags] +defradb client schema migration up --collection [flags] ``` ### Options ``` - -f, --file string File containing document(s) - -h, --help help for up - --version string Schema version id + --collection uint32 Collection id + -f, --file string File containing document(s) + -h, --help help for up ``` ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info")
+ --lognocolor Disable colored log output
+ --logoutput string Log output path (default "stderr")
+ --logtrace Include stacktrace in error and fatal logs
+ --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
+ --no-p2p Disable the peer-to-peer network synchronization system
+ --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
+ --peers stringArray List of peers to connect to
+ --privkeypath string Path to the private key for tls
+ --pubkeypath string Path to the public key for tls
+ --rootdir string Directory for persistent data (default: $HOME/.defradb)
+ --store string Specify the datastore to use (supported: badger, memory) (default "badger")
+ --tx uint Transaction ID
+ --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
+ --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
```

### SEE ALSO

diff --git a/docs/cli/defradb_client_schema_patch.md b/docs/cli/defradb_client_schema_patch.md
index 357b43b2a8..f24670b945 100644
--- a/docs/cli/defradb_client_schema_patch.md
+++ b/docs/cli/defradb_client_schema_patch.md
@@ -9,7 +9,7 @@ Patch an existing schema. Uses JSON Patch to modify schema types.

Example: patch from an argument string:
- defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]'
+ defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' '{"lenses": [...'

Example: patch from file:
defradb client schema patch -f patch.json
@@ -20,29 +20,38 @@ Example: patch from stdin:
To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network.
```

-defradb client schema patch [schema] [flags]
+defradb client schema patch [schema] [migration] [flags]

### Options

```
- -f, --file string File to load a patch from
- -h, --help help for patch
- --set-default Set default schema version
+ -h, --help help for patch
+ -t, --lens-file string File to load a lens config from
+ -p, --patch-file string File to load a patch from
+ --set-active Set the active schema version for all collections using the root schema
```

### Options inherited from parent commands

```
- --logformat string Log format to use. Options are csv, json (default "csv")
- --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,...
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
- --rootdir string Directory for data and configuration to use (default: $HOME/.defradb)
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181")
+ --allowed-origins stringArray List of origins to allow for CORS requests
+ --logformat string Log format to use. Options are csv, json (default "csv")
+ --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_set-active.md b/docs/cli/defradb_client_schema_set-active.md new file mode 100644 index 0000000000..ff94ff88fe --- /dev/null +++ b/docs/cli/defradb_client_schema_set-active.md @@ -0,0 +1,45 @@ +## defradb client schema set-active + +Set the active collection version + +### Synopsis + +Activates all collection versions with the given schema version, and deactivates all +those without it (if they share the same schema root). + +``` +defradb client schema set-active [versionID] [flags] +``` + +### Options + +``` + -h, --help help for set-active +``` + +### Options inherited from parent commands + +``` + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) +``` + +### SEE ALSO + +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node + diff --git a/docs/cli/defradb_client_tx.md b/docs/cli/defradb_client_tx.md index 4feab4af7b..65f7740419 100644 --- a/docs/cli/defradb_client_tx.md +++ b/docs/cli/defradb_client_tx.md @@ -15,15 +15,23 @@ Create, commit, and discard DefraDB transactions ### Options inherited from parent commands ``` - --logformat string Log format to use. 
Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_tx_commit.md b/docs/cli/defradb_client_tx_commit.md index 21f0b50325..621459e134 100644 --- a/docs/cli/defradb_client_tx_commit.md +++ b/docs/cli/defradb_client_tx_commit.md @@ -19,15 +19,23 @@ defradb client tx commit [id] [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_tx_create.md b/docs/cli/defradb_client_tx_create.md index 8ba600b611..cf695da6c7 100644 --- a/docs/cli/defradb_client_tx_create.md +++ b/docs/cli/defradb_client_tx_create.md @@ -21,15 +21,23 @@ defradb client tx create [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_tx_discard.md b/docs/cli/defradb_client_tx_discard.md index d1f0bb6025..7340bedf2a 100644 --- a/docs/cli/defradb_client_tx_discard.md +++ b/docs/cli/defradb_client_tx_discard.md @@ -19,15 +19,23 @@ defradb client tx discard [id] [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_view.md b/docs/cli/defradb_client_view.md index c3aaf4a69f..9b93884430 100644 --- a/docs/cli/defradb_client_view.md +++ b/docs/cli/defradb_client_view.md @@ -15,15 +15,23 @@ Manage (add) views withing a running DefraDB instance ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_view_add.md b/docs/cli/defradb_client_view_add.md index caac7d862a..cdbab25a51 100644 --- a/docs/cli/defradb_client_view_add.md +++ b/docs/cli/defradb_client_view_add.md @@ -7,32 +7,41 @@ Add new view Add new database view. Example: add from an argument string: - defradb client view add 'Foo { name, ...}' 'type Foo { ... }' + defradb client view add 'Foo { name, ...}' 'type Foo { ... }' '{"lenses": [...' Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network. ``` -defradb client view add [query] [sdl] [flags] +defradb client view add [query] [sdl] [transform] [flags] ``` ### Options ``` - -h, --help help for add + -f, --file string Lens configuration file + -h, --help help for add ``` ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_server-dump.md b/docs/cli/defradb_server-dump.md index 0b91e10232..2b590da6fe 100644 --- a/docs/cli/defradb_server-dump.md +++ b/docs/cli/defradb_server-dump.md @@ -9,21 +9,28 @@ defradb server-dump [flags] ### Options ``` - -h, --help help for server-dump - --store string Datastore to use. Options are badger, memory (default "badger") + -h, --help help for server-dump ``` ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_start.md b/docs/cli/defradb_start.md index 174700674b..2591f9bc06 100644 --- a/docs/cli/defradb_start.md +++ b/docs/cli/defradb_start.md @@ -13,31 +13,28 @@ defradb start [flags] ### Options ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --email string Email address used by the CA for notifications (default "example@example.com") - -h, --help help for start - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr string Listener address for the p2p network (formatted as a libp2p MultiAddr) (default "/ip4/0.0.0.0/tcp/9171") - --peers string List of peers to connect to - --privkeypath string Path to the private key for tls (default "certs/server.crt") - --pubkeypath string Path to the public key for tls (default "certs/server.key") - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tls Enable serving the API over https - --valuelogfilesize ByteSize Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1GiB) + -h, --help help for start ``` ### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --allowed-origins stringArray List of origins to allow for CORS requests + --logformat string Log format to use. Options are csv, json (default "csv") + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` ### SEE ALSO diff --git a/docs/cli/defradb_version.md b/docs/cli/defradb_version.md index 3e5bf59018..ce43eb148c 100644 --- a/docs/cli/defradb_version.md +++ b/docs/cli/defradb_version.md @@ -17,14 +17,22 @@ defradb version [flags] ### Options inherited from parent commands ``` - --logformat string Log format to use. 
Options are csv, json (default "csv")
- --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,...
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
- --rootdir string Directory for data and configuration to use (default: $HOME/.defradb)
- --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181")
+ --allowed-origins stringArray List of origins to allow for CORS requests
+ --logformat string Log format to use. Options are csv, json (default "csv")
+ --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
+ --lognocolor Disable colored log output
+ --logoutput string Log output path (default "stderr")
+ --logtrace Include stacktrace in error and fatal logs
+ --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
+ --no-p2p Disable the peer-to-peer network synchronization system
+ --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
+ --peers stringArray List of peers to connect to
+ --privkeypath string Path to the private key for tls
+ --pubkeypath string Path to the public key for tls
+ --rootdir string Directory for persistent data (default: $HOME/.defradb)
+ --store string Specify the datastore to use (supported: badger, memory) (default "badger")
+ --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
+ --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
```

### SEE ALSO

diff --git a/docs/config.md b/docs/config.md
new file mode 100644
index 0000000000..5f8985f71c
--- /dev/null
+++ b/docs/config.md
@@ -0,0 +1,66 @@
+# DefraDB configuration (YAML)
+
+The default DefraDB directory is `$HOME/.defradb`. It can be changed via the `--rootdir` CLI flag.
+
+Relative paths are interpreted as being rooted in the DefraDB directory.
+
+## `datastore.store`
+
+Store can be badger or memory. Defaults to `badger`.
+
+- badger: fast pure Go key-value store optimized for SSDs (https://github.com/dgraph-io/badger)
+- memory: in-memory version of badger
+
+## `datastore.maxtxnretries`
+
+The number of retries to make in the event of a transaction conflict. Defaults to `5`.
+
+Currently this is only used within the P2P system and will not affect operations initiated by users.
+
+## `datastore.badger.path`
+
+The path to the database data file(s). Defaults to `data`.
+
+## `datastore.badger.valuelogfilesize`
+
+Maximum file size of the value log files.
+
+## `api.address`
+
+Address of the HTTP API to listen on or connect to. Defaults to `127.0.0.1:9181`.
+
+## `api.allowed-origins`
+
+The list of origins a cross-domain request can be executed from.
+
+## `api.pubkeypath`
+
+The path to the public key file for TLS / HTTPS.
+
+## `api.privkeypath`
+
+The path to the private key file for TLS / HTTPS.
+
+## `net.p2pdisabled`
+
+Whether P2P networking is disabled. Defaults to `false`.
+
+## `net.p2paddresses`
+
+List of addresses for the P2P network to listen on. Defaults to `/ip4/127.0.0.1/tcp/9171`.
+
+## `net.pubsubenabled`
+
+Whether PubSub is enabled. Defaults to `true`.
+
+## `net.peers`
+
+List of peers to bootstrap with, specified as multiaddresses. 
+
+https://docs.libp2p.io/concepts/addressing/
+
+## `net.relay`
+
+Enable libp2p's Circuit relay transport protocol. Defaults to `false`.
+
+https://docs.libp2p.io/concepts/circuit-relay/
\ No newline at end of file
diff --git a/docs/data_format_changes/i2161-document-strong-typing.md b/docs/data_format_changes/i2161-document-strong-typing.md
index 918798e020..e056184e6c 100644
--- a/docs/data_format_changes/i2161-document-strong-typing.md
+++ b/docs/data_format_changes/i2161-document-strong-typing.md
@@ -1,3 +1,3 @@
# Add strong typing to document creation

-Since we now inforce type safety in the document creation, some of the fields in our tests now marshal to a different types and this is causing CIDs and docIDs to change.
\ No newline at end of file
+Since we now enforce type safety in document creation, some of the fields in our tests now marshal to different types, causing CIDs and docIDs to change.
\ No newline at end of file
diff --git a/docs/data_format_changes/i2198-collection-remodel.md b/docs/data_format_changes/i2198-collection-remodel.md
new file mode 100644
index 0000000000..8fb6898393
--- /dev/null
+++ b/docs/data_format_changes/i2198-collection-remodel.md
@@ -0,0 +1,5 @@
+# Remodel Collection SchemaVersions and migrations on Collections
+
+Collection SchemaVersions and migrations are now modeled on Collections, instead of in the Lens Registry.
+
+Some test schema version IDs were also corrected.
diff --git a/docs/data_format_changes/i2198-sec-index-key-change.md b/docs/data_format_changes/i2198-sec-index-key-change.md
new file mode 100644
index 0000000000..8e372aa6ac
--- /dev/null
+++ b/docs/data_format_changes/i2198-sec-index-key-change.md
@@ -0,0 +1,3 @@
+# Index secondary indexes by collection id
+
+Secondary indexes are now indexed by collection ID instead of collection name.
\ No newline at end of file
diff --git a/docs/data_format_changes/i2229-order-direction-for-indexed-fields.md b/docs/data_format_changes/i2229-order-direction-for-indexed-fields.md
new file mode 100644
index 0000000000..b1260e77e5
--- /dev/null
+++ b/docs/data_format_changes/i2229-order-direction-for-indexed-fields.md
@@ -0,0 +1,3 @@
+# Order directions for indexed fields
+
+Secondary indexes now use an entirely different way of encoding fields.
\ No newline at end of file
diff --git a/docs/data_format_changes/i2276-multiple-nil-for-unique-index-change.md b/docs/data_format_changes/i2276-multiple-nil-for-unique-index-change.md
new file mode 100644
index 0000000000..210aed1f44
--- /dev/null
+++ b/docs/data_format_changes/i2276-multiple-nil-for-unique-index-change.md
@@ -0,0 +1,3 @@
+# Multiple nil values for unique index change
+
+Added the ability to have multiple docs with nil values on a unique-indexed field. This slightly changes the datastore.
\ No newline at end of file
diff --git a/docs/data_format_changes/i2288-relation-type-is-primary.md b/docs/data_format_changes/i2288-relation-type-is-primary.md
new file mode 100644
index 0000000000..ba6a95c653
--- /dev/null
+++ b/docs/data_format_changes/i2288-relation-type-is-primary.md
@@ -0,0 +1,3 @@
+# Replace RelationType with IsPrimary
+
+The FieldDescription struct has changed; this has affected schema version ids and the commit cids of documents, as they are based on the schema version id. 
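Stepping back to the new docs/config.md introduced above: to make those keys easier to apply, here is a minimal sketch of what a complete config file could look like. It only assembles the keys and defaults documented in that file, and it assumes the YAML nesting mirrors the dotted key paths; the exact file layout is not shown in this diff.

```yaml
# Hypothetical config file, using only keys documented in docs/config.md.
datastore:
  store: badger              # "badger" or "memory"
  maxtxnretries: 5           # retries on transaction conflict (currently P2P only)
  badger:
    path: data               # relative paths are rooted in the DefraDB directory
    valuelogfilesize: 1073741824
api:
  address: 127.0.0.1:9181
  allowed-origins: []        # origins allowed for CORS requests
  pubkeypath: ""             # TLS / HTTPS key paths (unset by default)
  privkeypath: ""
net:
  p2pdisabled: false
  p2paddresses:
    - /ip4/127.0.0.1/tcp/9171
  pubsubenabled: true
  peers: []                  # bootstrap peers as multiaddresses
  relay: false
```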
diff --git a/docs/data_format_changes/i2333-field-id-seq.md b/docs/data_format_changes/i2333-field-id-seq.md
new file mode 100644
index 0000000000..269685d5f9
--- /dev/null
+++ b/docs/data_format_changes/i2333-field-id-seq.md
@@ -0,0 +1,3 @@
+# Generate field ids using a sequence
+
+Field ids are now generated using a sequence; the index and collection id sequences were also moved (for consistency).
diff --git a/docs/data_format_changes/i2334-field-id-is-local.md b/docs/data_format_changes/i2334-field-id-is-local.md
new file mode 100644
index 0000000000..a38862fb10
--- /dev/null
+++ b/docs/data_format_changes/i2334-field-id-is-local.md
@@ -0,0 +1,3 @@
+# Move field id off of schema
+
+Field ID has been made local, and moved off of the schema and onto the collection. As a result, the schema root and schema version id are no longer dependent on it.
diff --git a/docs/data_format_changes/i2386-no-change-tests-updated.md b/docs/data_format_changes/i2386-no-change-tests-updated.md
new file mode 100644
index 0000000000..c7940e2e1c
--- /dev/null
+++ b/docs/data_format_changes/i2386-no-change-tests-updated.md
@@ -0,0 +1,3 @@
+# Convert old update tests to newer framework
+
+This is not a breaking change; some test definitions were changed.
diff --git a/encoding/bytes.go b/encoding/bytes.go
new file mode 100644
index 0000000000..ac390f1bd3
--- /dev/null
+++ b/encoding/bytes.go
@@ -0,0 +1,154 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+
+package encoding
+
+import (
+ "bytes"
+)
+
+const (
+ // All terminators are encoded as a \x00\x01 sequence.
+ // In order to distinguish the \x00 byte, it is escaped as \x00\xff.
+ escape byte = 0x00
+ escapedTerm byte = 0x01
+ escaped00 byte = 0xff
+ escapedFF byte = 0x00
+ escapeDesc byte = ^escape
+ escapedTermDesc byte = ^escapedTerm
+ escaped00Desc byte = ^escaped00
+ escapedFFDesc byte = ^escapedFF
+)
+
+type escapes struct {
+ escape byte
+ escapedTerm byte
+ escaped00 byte
+ escapedFF byte
+ marker byte
+}
+
+var (
+ ascendingBytesEscapes = escapes{escape, escapedTerm, escaped00, escapedFF, bytesMarker}
+ descendingBytesEscapes = escapes{escapeDesc, escapedTermDesc, escaped00Desc, escapedFFDesc, bytesDescMarker}
+)
+
+// EncodeBytesAscending encodes the []byte value using an escape-based
+// encoding. The encoded value is terminated with the sequence
+// "\x00\x01" which is guaranteed to not occur elsewhere in the
+// encoded value. The encoded bytes are appended to the supplied buffer
+// and the resulting buffer is returned.
+func EncodeBytesAscending(b []byte, data []byte) []byte {
+ return encodeBytesAscendingWithTerminatorAndPrefix(b, data, ascendingBytesEscapes.escapedTerm, bytesMarker)
+}
+
+// encodeBytesAscendingWithTerminatorAndPrefix encodes the []byte value using an escape-based
+// encoding. The encoded value is terminated with the sequence
+// "\x00\terminator". The encoded bytes are appended to the supplied buffer
+// and the resulting buffer is returned. The terminator allows us to pass
+// different terminators for things such as JSON key encoding. 
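+//
+// As a worked example (taken from the ascending test vectors below), the
+// input {0x00, 'a'} with the default terminator and the bytesMarker prefix
+// encodes to {bytesMarker, 0x00, 0xff, 'a', 0x00, 0x01}: the raw 0x00 is
+// escaped as 0x00 0xff, and the value is closed with the 0x00 0x01 terminator.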
+func encodeBytesAscendingWithTerminatorAndPrefix(
+ b []byte, data []byte, terminator byte, prefix byte,
+) []byte {
+ b = append(b, prefix)
+ return encodeBytesAscendingWithTerminator(b, data, terminator)
+}
+
+// encodeBytesAscendingWithTerminator encodes the []byte value using an escape-based
+// encoding. The encoded value is terminated with the sequence
+// "\x00\terminator". The encoded bytes are appended to the supplied buffer
+// and the resulting buffer is returned. The terminator allows us to pass
+// different terminators for things such as JSON key encoding.
+func encodeBytesAscendingWithTerminator(b []byte, data []byte, terminator byte) []byte {
+ bs := encodeBytesAscendingWithoutTerminatorOrPrefix(b, data)
+ return append(bs, escape, terminator)
+}
+
+// encodeBytesAscendingWithoutTerminatorOrPrefix encodes the []byte value using an escape-based
+// encoding.
+func encodeBytesAscendingWithoutTerminatorOrPrefix(b []byte, data []byte) []byte {
+ for {
+ // IndexByte is implemented by the go runtime in assembly and is
+ // much faster than looping over the bytes in the slice.
+ i := bytes.IndexByte(data, escape)
+ if i == -1 {
+ break
+ }
+ b = append(b, data[:i]...)
+ b = append(b, escape, escaped00)
+ data = data[i+1:]
+ }
+ return append(b, data...)
+}
+
+// EncodeBytesDescending encodes the []byte value using an
+// escape-based encoding and then inverts (ones complement) the result
+// so that it sorts in reverse order, from larger to smaller
+// lexicographically.
+func EncodeBytesDescending(b []byte, data []byte) []byte {
+ n := len(b)
+ b = EncodeBytesAscending(b, data)
+ b[n] = bytesDescMarker
+ onesComplement(b[n+1:])
+ return b
+}
+
+// DecodeBytesAscending decodes a []byte value from the input buffer
+// which was encoded using EncodeBytesAscending. The remainder of the
+// input buffer and the decoded []byte are returned.
+func DecodeBytesAscending(b []byte) ([]byte, []byte, error) {
+ return decodeBytesInternal(b, ascendingBytesEscapes, true /* expectMarker */)
+}
+
+// DecodeBytesDescending decodes a []byte value from the input buffer
+// which was encoded using EncodeBytesDescending. The remainder of the
+// input buffer and the decoded []byte are returned.
+func DecodeBytesDescending(b []byte) ([]byte, []byte, error) {
+ b, r, err := decodeBytesInternal(b, descendingBytesEscapes, true /* expectMarker */)
+ onesComplement(r)
+ return b, r, err
+}
+
+// decodeBytesInternal decodes an escape-encoded []byte value from the front of b
+// using the given escapes, optionally requiring a leading marker byte. It returns
+// the remainder of the input buffer, the decoded value, and any decode error.
+func decodeBytesInternal(b []byte, e escapes, expectMarker bool) ([]byte, []byte, error) {
+ if expectMarker {
+ if len(b) == 0 || b[0] != e.marker {
+ return nil, nil, NewErrMarkersNotFound(b, e.marker)
+ }
+ b = b[1:]
+ }
+
+ var r []byte
+ for {
+ i := bytes.IndexByte(b, e.escape)
+ if i == -1 {
+ return nil, nil, NewErrTerminatorNotFound(b, e.escape)
+ }
+ if i+1 >= len(b) {
+ return nil, nil, NewErrMalformedEscape(b)
+ }
+ v := b[i+1]
+ if v == e.escapedTerm {
+ r = append(r, b[:i]...)
+ return b[i+2:], r, nil
+ }
+
+ if v != e.escaped00 {
+ return nil, nil, NewErrUnknownEscapeSequence(b[i:i+2], e.escape)
+ }
+
+ r = append(r, b[:i]...)
+ r = append(r, e.escapedFF)
+ b = b[i+2:]
+ }
+}
diff --git a/encoding/bytes_test.go b/encoding/bytes_test.go
new file mode 100644
index 0000000000..ba29239530
--- /dev/null
+++ b/encoding/bytes_test.go
@@ -0,0 +1,205 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +package encoding + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEncodeDecodeBytes(t *testing.T) { + testCases := []struct { + value []byte + encoded []byte + }{ + {[]byte{0, 1, 'a'}, []byte{bytesMarker, 0x00, escaped00, 1, 'a', escape, escapedTerm}}, + {[]byte{0, 'a'}, []byte{bytesMarker, 0x00, escaped00, 'a', escape, escapedTerm}}, + {[]byte{0, 0xff, 'a'}, []byte{bytesMarker, 0x00, escaped00, 0xff, 'a', escape, escapedTerm}}, + {[]byte{'a'}, []byte{bytesMarker, 'a', escape, escapedTerm}}, + {[]byte{'b'}, []byte{bytesMarker, 'b', escape, escapedTerm}}, + {[]byte{'b', 0}, []byte{bytesMarker, 'b', 0x00, escaped00, escape, escapedTerm}}, + {[]byte{'b', 0, 0}, []byte{bytesMarker, 'b', 0x00, escaped00, 0x00, escaped00, escape, escapedTerm}}, + {[]byte{'b', 0, 0, 'a'}, []byte{bytesMarker, 'b', 0x00, escaped00, 0x00, escaped00, 'a', escape, escapedTerm}}, + {[]byte{'b', 0xff}, []byte{bytesMarker, 'b', 0xff, escape, escapedTerm}}, + {[]byte("hello"), []byte{bytesMarker, 'h', 'e', 'l', 'l', 'o', escape, escapedTerm}}, + } + for i, c := range testCases { + enc := EncodeBytesAscending(nil, c.value) + if !bytes.Equal(enc, c.encoded) { + t.Errorf("unexpected encoding mismatch for %v. expected [% x], got [% x]", + c.value, c.encoded, enc) + } + if i > 0 { + if bytes.Compare(testCases[i-1].encoded, enc) >= 0 { + t.Errorf("%v: expected [% x] to be less than [% x]", + c.value, testCases[i-1].encoded, enc) + } + } + remainder, dec, err := DecodeBytesAscending(enc) + if err != nil { + t.Error(err) + continue + } + if !bytes.Equal(c.value, dec) { + t.Errorf("unexpected decoding mismatch for %v. got %v", c.value, dec) + } + if len(remainder) != 0 { + t.Errorf("unexpected remaining bytes: %v", remainder) + } + + enc = append(enc, []byte("remainder")...) 
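+ // Decoding again after appending trailing bytes verifies that the
+ // 0x00 0x01 terminator cleanly separates the encoded value from any
+ // data that follows it in the buffer.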
+ remainder, _, err = DecodeBytesAscending(enc)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if string(remainder) != "remainder" {
+ t.Errorf("unexpected remaining bytes: %v", remainder)
+ }
+ }
+}
+
+func TestEncodeDecodeBytesDescending(t *testing.T) {
+ testCases := []struct {
+ value []byte
+ encoded []byte
+ }{
+ {[]byte("hello"), []byte{bytesDescMarker, ^byte('h'), ^byte('e'), ^byte('l'), ^byte('l'), ^byte('o'), escapeDesc, escapedTermDesc}},
+ {[]byte{'b', 0xff}, []byte{bytesDescMarker, ^byte('b'), 0x00, escapeDesc, escapedTermDesc}},
+ {[]byte{'b', 0, 0, 'a'}, []byte{bytesDescMarker, ^byte('b'), 0xff, escaped00Desc, 0xff, escaped00Desc, ^byte('a'), escapeDesc, escapedTermDesc}},
+ {[]byte{'b', 0, 0}, []byte{bytesDescMarker, ^byte('b'), 0xff, escaped00Desc, 0xff, escaped00Desc, escapeDesc, escapedTermDesc}},
+ {[]byte{'b', 0}, []byte{bytesDescMarker, ^byte('b'), 0xff, escaped00Desc, escapeDesc, escapedTermDesc}},
+ {[]byte{'b'}, []byte{bytesDescMarker, ^byte('b'), escapeDesc, escapedTermDesc}},
+ {[]byte{'a'}, []byte{bytesDescMarker, ^byte('a'), escapeDesc, escapedTermDesc}},
+ {[]byte{0, 0xff, 'a'}, []byte{bytesDescMarker, 0xff, escaped00Desc, 0x00, ^byte('a'), escapeDesc, escapedTermDesc}},
+ {[]byte{0, 'a'}, []byte{bytesDescMarker, 0xff, escaped00Desc, ^byte('a'), escapeDesc, escapedTermDesc}},
+ {[]byte{0, 1, 'a'}, []byte{bytesDescMarker, 0xff, escaped00Desc, ^byte(1), ^byte('a'), escapeDesc, escapedTermDesc}},
+ }
+ for i, c := range testCases {
+ enc := EncodeBytesDescending(nil, c.value)
+ if !bytes.Equal(enc, c.encoded) {
+ t.Errorf("%d: unexpected encoding mismatch for %v ([% x]). expected [% x], got [% x]",
+ i, c.value, c.value, c.encoded, enc)
+ }
+ if i > 0 {
+ if bytes.Compare(testCases[i-1].encoded, enc) >= 0 {
+ t.Errorf("%v: expected [% x] to be less than [% x]",
+ c.value, testCases[i-1].encoded, enc)
+ }
+ }
+ remainder, dec, err := DecodeBytesDescending(enc)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if !bytes.Equal(c.value, dec) {
+ t.Errorf("unexpected decoding mismatch for %v. got %v", c.value, dec)
+ }
+ if len(remainder) != 0 {
+ t.Errorf("unexpected remaining bytes: %v", remainder)
+ }
+
+ enc = append(enc, []byte("remainder")...)
+ remainder, _, err = DecodeBytesDescending(enc)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if string(remainder) != "remainder" {
+ t.Errorf("unexpected remaining bytes: %v", remainder)
+ }
+ }
+}
+
+// TestDecodeInvalid tests that decoding invalid bytes returns the expected errors.
+func TestDecodeInvalid(t *testing.T) {
+ tests := []struct {
+ name string // name printed with errors.
+ buf []byte // buf contains invalid encoded data to decode.
+ expectedErr error // expectedErr is the expected error.
+ decode func([]byte) error // decode is called with buf. 
+ }{
+ {
+ name: "DecodeVarint, overflows int64",
+ buf: []byte{IntMax, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ expectedErr: ErrVarintOverflow,
+ decode: func(b []byte) error { _, _, err := DecodeVarintAscending(b); return err },
+ },
+ {
+ name: "Bytes, no marker",
+ buf: []byte{'a'},
+ expectedErr: ErrMarkersNotFound,
+ decode: func(b []byte) error { _, _, err := DecodeBytesAscending(b); return err },
+ },
+ {
+ name: "Bytes, no terminator",
+ buf: []byte{bytesMarker, 'a'},
+ expectedErr: ErrTerminatorNotFound,
+ decode: func(b []byte) error { _, _, err := DecodeBytesAscending(b); return err },
+ },
+ {
+ name: "Bytes, malformed escape",
+ buf: []byte{bytesMarker, 'a', 0x00},
+ expectedErr: ErrMalformedEscape,
+ decode: func(b []byte) error { _, _, err := DecodeBytesAscending(b); return err },
+ },
+ {
+ name: "Bytes, invalid escape 1",
+ buf: []byte{bytesMarker, 'a', 0x00, 0x00},
+ expectedErr: ErrUnknownEscapeSequence,
+ decode: func(b []byte) error { _, _, err := DecodeBytesAscending(b); return err },
+ },
+ {
+ name: "Bytes, invalid escape 2",
+ buf: []byte{bytesMarker, 'a', 0x00, 0x02},
+ expectedErr: ErrUnknownEscapeSequence,
+ decode: func(b []byte) error { _, _, err := DecodeBytesAscending(b); return err },
+ },
+ {
+ name: "BytesDescending, no marker",
+ buf: []byte{'a'},
+ expectedErr: ErrMarkersNotFound,
+ decode: func(b []byte) error { _, _, err := DecodeBytesDescending(b); return err },
+ },
+ {
+ name: "BytesDescending, no terminator",
+ buf: []byte{bytesDescMarker, ^byte('a')},
+ expectedErr: ErrTerminatorNotFound,
+ decode: func(b []byte) error { _, _, err := DecodeBytesDescending(b); return err },
+ },
+ {
+ name: "BytesDescending, malformed escape",
+ buf: []byte{bytesDescMarker, ^byte('a'), 0xff},
+ expectedErr: ErrMalformedEscape,
+ decode: func(b []byte) error { _, _, err := DecodeBytesDescending(b); return err },
+ },
+ {
+ name: "BytesDescending, invalid escape 1",
+ buf: []byte{bytesDescMarker, ^byte('a'), 0xff, 0xff},
+ expectedErr: ErrUnknownEscapeSequence,
+ decode: func(b []byte) error { _, _, err := DecodeBytesDescending(b); return err },
+ },
+ {
+ name: "BytesDescending, invalid escape 2",
+ buf: []byte{bytesDescMarker, ^byte('a'), 0xff, 0xfd},
+ expectedErr: ErrUnknownEscapeSequence,
+ decode: func(b []byte) error { _, _, err := DecodeBytesDescending(b); return err },
+ },
+ }
+ for _, test := range tests {
+ err := test.decode(test.buf)
+ assert.ErrorIs(t, err, test.expectedErr)
+ }
+}
diff --git a/encoding/encoding.go b/encoding/encoding.go
new file mode 100644
index 0000000000..164706d922
--- /dev/null
+++ b/encoding/encoding.go
@@ -0,0 +1,55 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+
+// Portions of this code are adapted from basic functionality found in the CockroachDB project,
+// specifically within the encoding package at:
+// https://github.com/cockroachdb/cockroach/tree/v20.2.19/pkg/util/encoding
+//
+// Our use of this code is in compliance with the Apache License 2.0, under which it is shared. 
+ +package encoding + +const ( + encodedNull = iota + floatNaN + floatNeg + floatZero + floatPos + floatNaNDesc + bytesMarker + bytesDescMarker + + // These constants define a range of values and are used to determine how many bytes are + // needed to represent the given uint64 value. The constants IntMin and IntMax define the + // lower and upper bounds of the range, while intMaxWidth is the maximum width (in bytes) + // for encoding an integer. intZero is the starting point for encoding small integers, + // and intSmall represents the threshold below which a value can be encoded in a single byte. + + // IntMin is set to 0x80 (128) to avoid overlap with the ASCII range, enhancing testing clarity. + IntMin = 0x80 // 128 + // Maximum number of bytes to represent an integer, affecting encoding size. + intMaxWidth = 8 + // intZero is the base value for encoding non-negative integers, calculated to avoid ASCII conflicts. + intZero = IntMin + intMaxWidth // 136 + // intSmall defines the upper limit for integers that can be encoded in a single byte, considering offset. + intSmall = IntMax - intZero - intMaxWidth // 109 + // IntMax marks the upper bound for integer tag values, reserved for encoding use. + IntMax = 0xfd // 253 + + encodedNullDesc = 0xff +) + +func onesComplement(b []byte) { + for i := range b { + b[i] = ^b[i] + } +} diff --git a/encoding/errors.go b/encoding/errors.go new file mode 100644 index 0000000000..38b4671633 --- /dev/null +++ b/encoding/errors.go @@ -0,0 +1,91 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package encoding + +import ( + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/errors" +) + +const ( + errInsufficientBytesToDecode = "insufficient bytes to decode buffer into a target type" + errCanNotDecodeFieldValue = "can not decode field value" + errMarkersNotFound = "did not find any of required markers in buffer" + errTerminatorNotFound = "did not find required terminator in buffer" + errMalformedEscape = "malformed escape in buffer" + errUnknownEscapeSequence = "unknown escape sequence" + errInvalidUvarintLength = "invalid length for uvarint" + errVarintOverflow = "varint overflows a 64-bit integer" +) + +var ( + ErrInsufficientBytesToDecode = errors.New(errInsufficientBytesToDecode) + ErrCanNotDecodeFieldValue = errors.New(errCanNotDecodeFieldValue) + ErrMarkersNotFound = errors.New(errMarkersNotFound) + ErrTerminatorNotFound = errors.New(errTerminatorNotFound) + ErrMalformedEscape = errors.New(errMalformedEscape) + ErrUnknownEscapeSequence = errors.New(errUnknownEscapeSequence) + ErrInvalidUvarintLength = errors.New(errInvalidUvarintLength) + ErrVarintOverflow = errors.New(errVarintOverflow) +) + +// NewErrInsufficientBytesToDecode returns a new error indicating that the provided +// bytes are not sufficient to decode into a target type. 
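+// The buffer and the target type name are attached as key-value pairs; for
+// example (a sketch, not a call site from this change), decoding a uint64
+// from fewer than the 8 remaining bytes it requires could report
+// NewErrInsufficientBytesToDecode(b, "uint64").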
+func NewErrInsufficientBytesToDecode(b []byte, decodeTarget string) error { + return errors.New(errInsufficientBytesToDecode, + errors.NewKV("Buffer", b), errors.NewKV("Decode Target", decodeTarget)) +} + +// NewErrCanNotDecodeFieldValue returns a new error indicating that the encoded +// bytes could not be decoded into a client.FieldValue of a certain kind. +func NewErrCanNotDecodeFieldValue(b []byte, kind client.FieldKind, innerErr ...error) error { + kvs := []errors.KV{errors.NewKV("Buffer", b), errors.NewKV("Kind", kind)} + if len(innerErr) > 0 { + kvs = append(kvs, errors.NewKV("InnerErr", innerErr[0])) + } + return errors.New(errCanNotDecodeFieldValue, kvs...) +} + +// NewErrMarkersNotFound returns a new error indicating that the required +// marker was not found in the buffer. +func NewErrMarkersNotFound(b []byte, markers ...byte) error { + return errors.New(errMarkersNotFound, errors.NewKV("Markers", markers), errors.NewKV("Buffer", b)) +} + +// NewErrTerminatorNotFound returns a new error indicating that the required +// terminator was not found in the buffer. +func NewErrTerminatorNotFound(b []byte, terminator byte) error { + return errors.New(errTerminatorNotFound, errors.NewKV("Terminator", terminator), errors.NewKV("Buffer", b)) +} + +// NewErrMalformedEscape returns a new error indicating that the buffer +// contains a malformed escape sequence. +func NewErrMalformedEscape(b []byte) error { + return errors.New(errMalformedEscape, errors.NewKV("Buffer", b)) +} + +// NewErrUnknownEscapeSequence returns a new error indicating that the buffer +// contains an unknown escape sequence. +func NewErrUnknownEscapeSequence(b []byte, escape byte) error { + return errors.New(errUnknownEscapeSequence, errors.NewKV("Escape", escape), errors.NewKV("Buffer", b)) +} + +// NewErrInvalidUvarintLength returns a new error indicating that the buffer +// contains an invalid length for a uvarint. +func NewErrInvalidUvarintLength(b []byte, length int) error { + return errors.New(errInvalidUvarintLength, errors.NewKV("Buffer", b), errors.NewKV("Length", length)) +} + +// NewErrVarintOverflow returns a new error indicating that the buffer +// contains a varint that overflows a 64-bit integer. +func NewErrVarintOverflow(b []byte, value uint64) error { + return errors.New(errVarintOverflow, errors.NewKV("Buffer", b), errors.NewKV("Value", value)) +} diff --git a/encoding/field_value.go b/encoding/field_value.go new file mode 100644 index 0000000000..9c8cd5589f --- /dev/null +++ b/encoding/field_value.go @@ -0,0 +1,114 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package encoding + +import ( + "golang.org/x/exp/constraints" + + "github.com/sourcenetwork/defradb/client" +) + +func encodeIntFieldValue[T constraints.Integer](b []byte, val T, descending bool) []byte { + if descending { + return EncodeVarintDescending(b, int64(val)) + } + return EncodeVarintAscending(b, int64(val)) +} + +// EncodeFieldValue encodes a FieldValue into a byte slice. +// The encoded value is appended to the supplied buffer and the resulting buffer is returned. 
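+// As a summary of the mapping below: nil encodes as a null marker, bool as a
+// 0/1 varint, int/int32/int64 as varints, and float64 and string via their
+// dedicated encoders (ascending or descending); values of any other type
+// fall through and leave the buffer unchanged.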
+func EncodeFieldValue(b []byte, val any, descending bool) []byte { + if val == nil { + if descending { + return EncodeNullDescending(b) + } else { + return EncodeNullAscending(b) + } + } + switch v := val.(type) { + case bool: + var boolInt int64 = 0 + if v { + boolInt = 1 + } + if descending { + return EncodeVarintDescending(b, boolInt) + } + return EncodeVarintAscending(b, boolInt) + case int: + return encodeIntFieldValue(b, v, descending) + case int32: + return encodeIntFieldValue(b, v, descending) + case int64: + return encodeIntFieldValue(b, v, descending) + case float64: + if descending { + return EncodeFloatDescending(b, v) + } + return EncodeFloatAscending(b, v) + case string: + if descending { + return EncodeStringDescending(b, v) + } + return EncodeStringAscending(b, v) + } + + return b +} + +// DecodeFieldValue decodes a FieldValue from a byte slice. +// The decoded value is returned along with the remaining byte slice. +func DecodeFieldValue(b []byte, descending bool) ([]byte, any, error) { + typ := PeekType(b) + switch typ { + case Null: + b, _ = DecodeIfNull(b) + return b, nil, nil + case Int: + var v int64 + var err error + if descending { + b, v, err = DecodeVarintDescending(b) + } else { + b, v, err = DecodeVarintAscending(b) + } + if err != nil { + return nil, nil, NewErrCanNotDecodeFieldValue(b, client.FieldKind_NILLABLE_INT, err) + } + return b, v, nil + case Float: + var v float64 + var err error + if descending { + b, v, err = DecodeFloatDescending(b) + } else { + b, v, err = DecodeFloatAscending(b) + } + if err != nil { + return nil, nil, NewErrCanNotDecodeFieldValue(b, client.FieldKind_NILLABLE_FLOAT, err) + } + return b, v, nil + case Bytes, BytesDesc: + var v []byte + var err error + if descending { + b, v, err = DecodeBytesDescending(b) + } else { + b, v, err = DecodeBytesAscending(b) + } + if err != nil { + return nil, nil, NewErrCanNotDecodeFieldValue(b, client.FieldKind_NILLABLE_STRING, err) + } + return b, v, nil + } + + return nil, nil, NewErrCanNotDecodeFieldValue(b, client.FieldKind_NILLABLE_STRING) +} diff --git a/encoding/field_value_test.go b/encoding/field_value_test.go new file mode 100644 index 0000000000..a08446cb1f --- /dev/null +++ b/encoding/field_value_test.go @@ -0,0 +1,142 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package encoding + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEncodeDecodeFieldValue(t *testing.T) { + tests := []struct { + name string + inputVal any + expectedBytes []byte + expectedBytesDesc []byte + expectedDecodedVal any + }{ + { + name: "nil", + inputVal: nil, + expectedBytes: EncodeNullAscending(nil), + expectedBytesDesc: EncodeNullDescending(nil), + expectedDecodedVal: nil, + }, + { + name: "bool true", + inputVal: true, + expectedBytes: EncodeVarintAscending(nil, 1), + expectedBytesDesc: EncodeVarintDescending(nil, 1), + expectedDecodedVal: int64(1), + }, + { + name: "bool false", + inputVal: false, + expectedBytes: EncodeVarintAscending(nil, 0), + expectedBytesDesc: EncodeVarintDescending(nil, 0), + expectedDecodedVal: int64(0), + }, + { + name: "int", + inputVal: int64(55), + expectedBytes: EncodeVarintAscending(nil, 55), + expectedBytesDesc: EncodeVarintDescending(nil, 55), + expectedDecodedVal: int64(55), + }, + { + name: "float", + inputVal: 0.2, + expectedBytes: EncodeFloatAscending(nil, 0.2), + expectedBytesDesc: EncodeFloatDescending(nil, 0.2), + expectedDecodedVal: 0.2, + }, + { + name: "string", + inputVal: "str", + expectedBytes: EncodeBytesAscending(nil, []byte("str")), + expectedBytesDesc: EncodeBytesDescending(nil, []byte("str")), + expectedDecodedVal: []byte("str"), + }, + } + + for _, tt := range tests { + for _, descending := range []bool{false, true} { + label := " (ascending)" + if descending { + label = " (descending)" + } + t.Run(tt.name+label, func(t *testing.T) { + encoded := EncodeFieldValue(nil, tt.inputVal, descending) + expectedBytes := tt.expectedBytes + if descending { + expectedBytes = tt.expectedBytesDesc + } + if !reflect.DeepEqual(encoded, expectedBytes) { + t.Errorf("EncodeFieldValue() = %v, want %v", encoded, expectedBytes) + } + + _, decodedFieldVal, err := DecodeFieldValue(encoded, descending) + assert.NoError(t, err) + if !reflect.DeepEqual(decodedFieldVal, tt.expectedDecodedVal) { + t.Errorf("DecodeFieldValue() = %v, want %v", decodedFieldVal, tt.expectedDecodedVal) + } + }) + } + } +} + +func TestDecodeInvalidFieldValue(t *testing.T) { + tests := []struct { + name string + inputBytes []byte + inputBytesDesc []byte + }{ + { + name: "invalid int value", + inputBytes: []byte{IntMax, 2}, + inputBytesDesc: []byte{^byte(IntMax), 2}, + }, + { + name: "invalid float value", + inputBytes: []byte{floatPos, 2}, + inputBytesDesc: []byte{floatPos, 2}, + }, + { + name: "invalid bytes value", + inputBytes: []byte{bytesMarker, 2}, + inputBytesDesc: []byte{bytesMarker, 2}, + }, + { + name: "invalid data", + inputBytes: []byte{IntMin - 1, 2}, + inputBytesDesc: []byte{^byte(IntMin - 1), 2}, + }, + } + + for _, tt := range tests { + for _, descending := range []bool{false, true} { + label := " (ascending)" + if descending { + label = " (descending)" + } + t.Run(tt.name+label, func(t *testing.T) { + inputBytes := tt.inputBytes + if descending { + inputBytes = tt.inputBytesDesc + } + _, _, err := DecodeFieldValue(inputBytes, descending) + assert.ErrorIs(t, err, ErrCanNotDecodeFieldValue) + }) + } + } +} diff --git a/encoding/float.go b/encoding/float.go new file mode 100644 index 0000000000..322ea9f9b8 --- /dev/null +++ b/encoding/float.go @@ -0,0 +1,97 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+
+package encoding
+
+import (
+	"math"
+)
+
+// EncodeFloatAscending returns the resulting byte slice with the encoded float64
+// appended to b. The encoded format for a float64 value f is, for positive f, the
+// encoding of the 64 bits (in IEEE 754 format) re-interpreted as an int64 and
+// encoded using EncodeUint64Ascending. For negative f, we keep the sign bit and
+// invert all other bits, encoding this value using EncodeUint64Descending. This
+// approach was inspired by github.com/google/orderedcode/orderedcode.go.
+//
+// One of five single-byte prefix tags is appended to the front of the encoding.
+// These tags enforce logical ordering of keys for both ascending and descending
+// encoding directions. The tags split the encoded floats into five categories:
+// - NaN for an ascending encoding direction
+// - Negative valued floats
+// - Zero (positive and negative)
+// - Positive valued floats
+// - NaN for a descending encoding direction
+// This ordering ensures that NaNs are always sorted first in either encoding
+// direction, and that a logical ordering follows them for all other values.
+func EncodeFloatAscending(b []byte, f float64) []byte {
+	// Handle the simplistic cases first.
+	switch {
+	case math.IsNaN(f):
+		return append(b, floatNaN)
+	case f == 0:
+		// This encodes both positive and negative zero the same. Negative zero uses
+		// composite indexes to decode itself correctly.
+		return append(b, floatZero)
+	}
+	u := math.Float64bits(f)
+	if u&(1<<63) != 0 {
+		u = ^u
+		b = append(b, floatNeg)
+	} else {
+		b = append(b, floatPos)
+	}
+	return EncodeUint64Ascending(b, u)
+}
+
+// EncodeFloatDescending is the descending version of EncodeFloatAscending.
+func EncodeFloatDescending(b []byte, f float64) []byte {
+	if math.IsNaN(f) {
+		return append(b, floatNaNDesc)
+	}
+	return EncodeFloatAscending(b, -f)
+}
+
+// DecodeFloatAscending returns the remaining byte slice after decoding and the decoded
+// float64 from buf.
+func DecodeFloatAscending(buf []byte) ([]byte, float64, error) {
+	if PeekType(buf) != Float {
+		return buf, 0, NewErrMarkersNotFound(buf, floatNaN, floatNeg, floatZero, floatPos, floatNaNDesc)
+	}
+	switch buf[0] {
+	case floatNaN, floatNaNDesc:
+		return buf[1:], math.NaN(), nil
+	case floatNeg:
+		b, u, err := DecodeUint64Ascending(buf[1:])
+		if err != nil {
+			return b, 0, err
+		}
+		u = ^u
+		return b, math.Float64frombits(u), nil
+	case floatZero:
+		return buf[1:], 0, nil
+	case floatPos:
+		b, u, err := DecodeUint64Ascending(buf[1:])
+		if err != nil {
+			return b, 0, err
+		}
+		return b, math.Float64frombits(u), nil
+	default:
+		return nil, 0, NewErrMarkersNotFound(buf, floatNaN, floatNeg, floatZero, floatPos, floatNaNDesc)
+	}
+}
+
+// DecodeFloatDescending decodes floats encoded with EncodeFloatDescending.
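+//
+// A small round-trip sketch (mirroring the 1.23 case in the float tests):
+//
+//	enc := EncodeFloatDescending(nil, 1.23)
+//	_, v, err := DecodeFloatDescending(enc) // v == 1.23, err == nil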
+func DecodeFloatDescending(buf []byte) ([]byte, float64, error) { + b, r, err := DecodeFloatAscending(buf) + return b, -r, err +} diff --git a/encoding/float_test.go b/encoding/float_test.go new file mode 100644 index 0000000000..6fc610db24 --- /dev/null +++ b/encoding/float_test.go @@ -0,0 +1,127 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +package encoding + +import ( + "bytes" + "math" + "testing" +) + +func TestEncodeFloatOrdered(t *testing.T) { + testCases := []struct { + Value float64 + Encoding []byte + }{ + {math.NaN(), []byte{floatNaN}}, + {math.Inf(-1), []byte{floatNeg, 0x00, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + {-math.MaxFloat64, []byte{floatNeg, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + {-1e308, []byte{floatNeg, 0x00, 0x1e, 0x33, 0x0c, 0x7a, 0x14, 0x37, 0x5f}}, + {-10000.0, []byte{floatNeg, 0x3f, 0x3c, 0x77, 0xff, 0xff, 0xff, 0xff, 0xff}}, + {-9999.0, []byte{floatNeg, 0x3f, 0x3c, 0x78, 0x7f, 0xff, 0xff, 0xff, 0xff}}, + {-100.0, []byte{floatNeg, 0x3f, 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + {-99.0, []byte{floatNeg, 0x3f, 0xa7, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff}}, + {-1.0, []byte{floatNeg, 0x40, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + {-0.00123, []byte{floatNeg, 0x40, 0xab, 0xd9, 0x01, 0x8e, 0x75, 0x79, 0x28}}, + {-1e-307, []byte{floatNeg, 0x7f, 0xce, 0x05, 0xe7, 0xd3, 0xbf, 0x39, 0xf2}}, + {-math.SmallestNonzeroFloat64, []byte{floatNeg, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}}, + {math.Copysign(0, -1), []byte{floatZero}}, + {0, []byte{floatZero}}, + {math.SmallestNonzeroFloat64, []byte{floatPos, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}}, + {1e-307, []byte{floatPos, 0x00, 0x31, 0xfa, 0x18, 0x2c, 0x40, 0xc6, 0x0d}}, + {0.00123, []byte{floatPos, 0x3f, 0x54, 0x26, 0xfe, 0x71, 0x8a, 0x86, 0xd7}}, + {0.0123, []byte{floatPos, 0x3f, 0x89, 0x30, 0xbe, 0x0d, 0xed, 0x28, 0x8d}}, + {0.123, []byte{floatPos, 0x3f, 0xbf, 0x7c, 0xed, 0x91, 0x68, 0x72, 0xb0}}, + {1.0, []byte{floatPos, 0x3f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + {10.0, []byte{floatPos, 0x40, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + {12.345, []byte{floatPos, 0x40, 0x28, 0xb0, 0xa3, 0xd7, 0x0a, 0x3d, 0x71}}, + {99.0, []byte{floatPos, 0x40, 0x58, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00}}, + {99.0001, []byte{floatPos, 0x40, 0x58, 0xc0, 0x01, 0xa3, 0x6e, 0x2e, 0xb2}}, + {99.01, []byte{floatPos, 0x40, 0x58, 0xc0, 0xa3, 0xd7, 0x0a, 0x3d, 0x71}}, + {100.0, []byte{floatPos, 0x40, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + {100.01, []byte{floatPos, 0x40, 0x59, 0x00, 0xa3, 0xd7, 0x0a, 0x3d, 0x71}}, + {100.1, []byte{floatPos, 0x40, 0x59, 0x06, 0x66, 0x66, 0x66, 0x66, 0x66}}, + {1234, []byte{floatPos, 0x40, 0x93, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00}}, + {1234.5, []byte{floatPos, 0x40, 0x93, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00}}, + {9999, []byte{floatPos, 0x40, 0xc3, 0x87, 0x80, 0x00, 0x00, 0x00, 0x00}}, + {9999.000001, []byte{floatPos, 0x40, 0xc3, 0x87, 0x80, 0x00, 0x08, 0x63, 0x7c}}, + {9999.000009, []byte{floatPos, 0x40, 0xc3, 0x87, 0x80, 0x00, 0x4b, 0x7f, 0x5a}}, + 
{9999.00001, []byte{floatPos, 0x40, 0xc3, 0x87, 0x80, 0x00, 0x53, 0xe2, 0xd6}}, + {9999.00009, []byte{floatPos, 0x40, 0xc3, 0x87, 0x80, 0x02, 0xf2, 0xf9, 0x87}}, + {9999.000099, []byte{floatPos, 0x40, 0xc3, 0x87, 0x80, 0x03, 0x3e, 0x78, 0xe2}}, + {9999.0001, []byte{floatPos, 0x40, 0xc3, 0x87, 0x80, 0x03, 0x46, 0xdc, 0x5d}}, + {9999.001, []byte{floatPos, 0x40, 0xc3, 0x87, 0x80, 0x20, 0xc4, 0x9b, 0xa6}}, + {9999.01, []byte{floatPos, 0x40, 0xc3, 0x87, 0x81, 0x47, 0xae, 0x14, 0x7b}}, + {9999.1, []byte{floatPos, 0x40, 0xc3, 0x87, 0x8c, 0xcc, 0xcc, 0xcc, 0xcd}}, + {10000, []byte{floatPos, 0x40, 0xc3, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00}}, + {10001, []byte{floatPos, 0x40, 0xc3, 0x88, 0x80, 0x00, 0x00, 0x00, 0x00}}, + {12345, []byte{floatPos, 0x40, 0xc8, 0x1c, 0x80, 0x00, 0x00, 0x00, 0x00}}, + {123450, []byte{floatPos, 0x40, 0xfe, 0x23, 0xa0, 0x00, 0x00, 0x00, 0x00}}, + {1e308, []byte{floatPos, 0x7f, 0xe1, 0xcc, 0xf3, 0x85, 0xeb, 0xc8, 0xa0}}, + {math.MaxFloat64, []byte{floatPos, 0x7f, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + {math.Inf(1), []byte{floatPos, 0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + } + + var lastEncoded []byte + for _, isAscending := range []bool{true, false} { + for i, c := range testCases { + var enc []byte + var err error + var dec float64 + if isAscending { + enc = EncodeFloatAscending(nil, c.Value) + _, dec, err = DecodeFloatAscending(enc) + } else { + enc = EncodeFloatDescending(nil, c.Value) + _, dec, err = DecodeFloatDescending(enc) + } + if isAscending && !bytes.Equal(enc, c.Encoding) { + t.Errorf("unexpected mismatch for %v. expected [% x], got [% x]", + c.Value, c.Encoding, enc) + } + if i > 0 { + if (bytes.Compare(lastEncoded, enc) > 0 && isAscending) || + (bytes.Compare(lastEncoded, enc) < 0 && !isAscending) { + t.Errorf("%v: expected [% x] to be less than or equal to [% x]", + c.Value, testCases[i-1].Encoding, enc) + } + } + if err != nil { + t.Error(err) + continue + } + if math.IsNaN(c.Value) { + if !math.IsNaN(dec) { + t.Errorf("unexpected mismatch for %v. got %v", c.Value, dec) + } + } else if dec != c.Value { + t.Errorf("unexpected mismatch for %v. got %v", c.Value, dec) + } + lastEncoded = enc + } + + // Test that appending the float to an existing buffer works. + var enc []byte + var dec float64 + if isAscending { + enc = EncodeFloatAscending([]byte("hello"), 1.23) + _, dec, _ = DecodeFloatAscending(enc[5:]) + } else { + enc = EncodeFloatDescending([]byte("hello"), 1.23) + _, dec, _ = DecodeFloatDescending(enc[5:]) + } + if dec != 1.23 { + t.Errorf("unexpected mismatch for %v. got %v", 1.23, dec) + } + } +} diff --git a/encoding/int.go b/encoding/int.go new file mode 100644 index 0000000000..733ed94b12 --- /dev/null +++ b/encoding/int.go @@ -0,0 +1,246 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +package encoding + +import ( + "encoding/binary" + "math" +) + +// EncodeUint64Ascending encodes the uint64 value using a big-endian 8 byte +// representation. The bytes are appended to the supplied buffer and +// the final buffer is returned. 
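+//
+// For example (layouts as in the uint64 tests in this package):
+//
+//	EncodeUint64Ascending(nil, 1)    // {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
+//	EncodeUint64Ascending(nil, 1<<8) // {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00}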
+func EncodeUint64Ascending(b []byte, v uint64) []byte { + return append(b, + byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), + byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// EncodeUint64Descending encodes the uint64 value so that it sorts in +// reverse order, from largest to smallest. +func EncodeUint64Descending(b []byte, v uint64) []byte { + return EncodeUint64Ascending(b, ^v) +} + +// DecodeUint64Ascending decodes a uint64 from the input buffer, treating +// the input as a big-endian 8 byte uint64 representation. The remainder +// of the input buffer and the decoded uint64 are returned. +func DecodeUint64Ascending(b []byte) ([]byte, uint64, error) { + if len(b) < 8 { + return nil, 0, NewErrInsufficientBytesToDecode(b, "uint64") + } + v := binary.BigEndian.Uint64(b) + return b[8:], v, nil +} + +// DecodeUint64Descending decodes a uint64 value which was encoded +// using EncodeUint64Descending. +func DecodeUint64Descending(b []byte) ([]byte, uint64, error) { + leftover, v, err := DecodeUint64Ascending(b) + return leftover, ^v, err +} + +// EncodeVarintAscending encodes the int64 value using a variable length +// (length-prefixed) representation. The length is encoded as a single +// byte. If the value to be encoded is negative the length is encoded +// as 8-numBytes. If the value is positive it is encoded as +// 8+numBytes. The encoded bytes are appended to the supplied buffer +// and the final buffer is returned. +func EncodeVarintAscending(b []byte, v int64) []byte { + if v < 0 { + switch { + case v >= -0xff: + return append(b, IntMin+7, byte(v)) + case v >= -0xffff: + return append(b, IntMin+6, byte(v>>8), byte(v)) + case v >= -0xffffff: + return append(b, IntMin+5, byte(v>>16), byte(v>>8), byte(v)) + case v >= -0xffffffff: + return append(b, IntMin+4, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) + case v >= -0xffffffffff: + return append(b, IntMin+3, byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), + byte(v)) + case v >= -0xffffffffffff: + return append(b, IntMin+2, byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), + byte(v>>8), byte(v)) + case v >= -0xffffffffffffff: + return append(b, IntMin+1, byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), + byte(v>>16), byte(v>>8), byte(v)) + default: + return append(b, IntMin, byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), + byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) + } + } + return EncodeUvarintAscending(b, uint64(v)) +} + +// EncodeVarintDescending encodes the int64 value so that it sorts in reverse +// order, from largest to smallest. +func EncodeVarintDescending(b []byte, v int64) []byte { + return EncodeVarintAscending(b, ^v) +} + +// DecodeVarintAscending decodes a value encoded by EncodeVarintAscending. +func DecodeVarintAscending(b []byte) ([]byte, int64, error) { + if len(b) == 0 { + return nil, 0, NewErrInsufficientBytesToDecode(b, "varint") + } + length := int(b[0]) - intZero + if length < 0 { + length = -length + remB := b[1:] + if len(remB) < length { + return nil, 0, NewErrInsufficientBytesToDecode(b, "varint") + } + var v int64 + // Use the ones-complement of each encoded byte in order to build + // up a positive number, then take the ones-complement again to + // arrive at our negative value. 
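+		// For example, -1 is encoded as {0x87, 0xff} (length byte IntMin+7,
+		// then the low byte of the value): inverting the payload byte 0xff
+		// gives 0, and the final ^v yields -1 (see the varint tests).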
+		for _, t := range remB[:length] {
+			v = (v << 8) | int64(^t)
+		}
+		return remB[length:], ^v, nil
+	}
+
+	remB, v, err := DecodeUvarintAscending(b)
+	if err != nil {
+		return remB, 0, err
+	}
+	if v > math.MaxInt64 {
+		return nil, 0, NewErrVarintOverflow(b, v)
+	}
+	return remB, int64(v), nil
+}
+
+// DecodeVarintDescending decodes an int64 value which was encoded
+// using EncodeVarintDescending.
+func DecodeVarintDescending(b []byte) ([]byte, int64, error) {
+	leftover, v, err := DecodeVarintAscending(b)
+	return leftover, ^v, err
+}
+
+// EncodeUvarintAscending encodes the uint64 value using a variable length
+// (length-prefixed) representation. The length is encoded as a single
+// byte indicating the number of encoded bytes (-8) to follow. See
+// EncodeVarintAscending for rationale. The encoded bytes are appended to the
+// supplied buffer and the final buffer is returned.
+func EncodeUvarintAscending(b []byte, v uint64) []byte {
+	switch {
+	case v <= intSmall:
+		return append(b, intZero+byte(v))
+	case v <= 0xff:
+		return append(b, IntMax-7, byte(v))
+	case v <= 0xffff:
+		return append(b, IntMax-6, byte(v>>8), byte(v))
+	case v <= 0xffffff:
+		return append(b, IntMax-5, byte(v>>16), byte(v>>8), byte(v))
+	case v <= 0xffffffff:
+		return append(b, IntMax-4, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+	case v <= 0xffffffffff:
+		return append(b, IntMax-3, byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8),
+			byte(v))
+	case v <= 0xffffffffffff:
+		return append(b, IntMax-2, byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16),
+			byte(v>>8), byte(v))
+	case v <= 0xffffffffffffff:
+		return append(b, IntMax-1, byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24),
+			byte(v>>16), byte(v>>8), byte(v))
+	default:
+		return append(b, IntMax, byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32),
+			byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+	}
+}
+
+// EncodeUvarintDescending encodes the uint64 value so that it sorts in
+// reverse order, from largest to smallest.
+func EncodeUvarintDescending(b []byte, v uint64) []byte {
+	switch {
+	case v == 0:
+		return append(b, IntMin+8)
+	case v <= 0xff:
+		v = ^v
+		return append(b, IntMin+7, byte(v))
+	case v <= 0xffff:
+		v = ^v
+		return append(b, IntMin+6, byte(v>>8), byte(v))
+	case v <= 0xffffff:
+		v = ^v
+		return append(b, IntMin+5, byte(v>>16), byte(v>>8), byte(v))
+	case v <= 0xffffffff:
+		v = ^v
+		return append(b, IntMin+4, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+	case v <= 0xffffffffff:
+		v = ^v
+		return append(b, IntMin+3, byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8),
+			byte(v))
+	case v <= 0xffffffffffff:
+		v = ^v
+		return append(b, IntMin+2, byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16),
+			byte(v>>8), byte(v))
+	case v <= 0xffffffffffffff:
+		v = ^v
+		return append(b, IntMin+1, byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24),
+			byte(v>>16), byte(v>>8), byte(v))
+	default:
+		v = ^v
+		return append(b, IntMin, byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32),
+			byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+	}
+}
+
+// DecodeUvarintAscending decodes a uvarint encoded uint64 from the input
+// buffer. The remainder of the input buffer and the decoded uint64
+// are returned.
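+//
+// For example (byte layouts per the uvarint tests in this package):
+//
+//	DecodeUvarintAscending([]byte{0x89})             // -> 1, empty remainder
+//	DecodeUvarintAscending([]byte{0xf7, 0x01, 0x00}) // -> 256, empty remainder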
+func DecodeUvarintAscending(b []byte) ([]byte, uint64, error) { + if len(b) == 0 { + return nil, 0, NewErrInsufficientBytesToDecode(b, "uvarint") + } + length := int(b[0]) - intZero + b = b[1:] // skip length byte + if length <= intSmall { + return b, uint64(length), nil + } + length -= intSmall + if length < 0 || length > 8 { + return nil, 0, NewErrInvalidUvarintLength(b, length) + } else if len(b) < length { + return nil, 0, NewErrInsufficientBytesToDecode(b, "uvarint") + } + var v uint64 + // It is faster to range over the elements in a slice than to index + // into the slice on each loop iteration. + for _, t := range b[:length] { + v = (v << 8) | uint64(t) + } + return b[length:], v, nil +} + +// DecodeUvarintDescending decodes a uint64 value which was encoded +// using EncodeUvarintDescending. +func DecodeUvarintDescending(b []byte) ([]byte, uint64, error) { + if len(b) == 0 { + return nil, 0, NewErrInsufficientBytesToDecode(b, "uvarint") + } + length := intZero - int(b[0]) + b = b[1:] // skip length byte + if length < 0 || length > 8 { + return nil, 0, NewErrInvalidUvarintLength(b, length) + } else if len(b) < length { + return nil, 0, NewErrInsufficientBytesToDecode(b, "uvarint") + } + var x uint64 + for _, t := range b[:length] { + x = (x << 8) | uint64(^t) + } + return b[length:], x, nil +} diff --git a/encoding/int_test.go b/encoding/int_test.go new file mode 100644 index 0000000000..80c3f502c4 --- /dev/null +++ b/encoding/int_test.go @@ -0,0 +1,223 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +package encoding + +import ( + "bytes" + "math" + "testing" +) + +func testBasicEncodeDecodeUint64( + encFunc func([]byte, uint64) []byte, + decFunc func([]byte) ([]byte, uint64, error), + descending bool, + t *testing.T, +) { + testCases := []uint64{ + 0, 1, + 1<<8 - 1, 1 << 8, + 1<<16 - 1, 1 << 16, + 1<<24 - 1, 1 << 24, + 1<<32 - 1, 1 << 32, + 1<<40 - 1, 1 << 40, + 1<<48 - 1, 1 << 48, + 1<<56 - 1, 1 << 56, + math.MaxUint64 - 1, math.MaxUint64, + } + + var lastEnc []byte + for i, v := range testCases { + enc := encFunc(nil, v) + if i > 0 { + if (descending && bytes.Compare(enc, lastEnc) >= 0) || + (!descending && bytes.Compare(enc, lastEnc) < 0) { + t.Errorf("ordered constraint violated for %d: [% x] vs. [% x]", v, enc, lastEnc) + } + } + b, decode, err := decFunc(enc) + if err != nil { + t.Error(err) + continue + } + if len(b) != 0 { + t.Errorf("leftover bytes: [% x]", b) + } + if decode != v { + t.Errorf("decode yielded different value than input: %d vs. 
%d", decode, v) + } + lastEnc = enc + } +} + +var int64TestCases = [...]int64{ + math.MinInt64, math.MinInt64 + 1, + -1<<56 - 1, -1 << 56, + -1<<48 - 1, -1 << 48, + -1<<40 - 1, -1 << 40, + -1<<32 - 1, -1 << 32, + -1<<24 - 1, -1 << 24, + -1<<16 - 1, -1 << 16, + -1<<8 - 1, -1 << 8, + -1, 0, 1, + 1<<8 - 1, 1 << 8, + 1<<16 - 1, 1 << 16, + 1<<24 - 1, 1 << 24, + 1<<32 - 1, 1 << 32, + 1<<40 - 1, 1 << 40, + 1<<48 - 1, 1 << 48, + 1<<56 - 1, 1 << 56, + math.MaxInt64 - 1, math.MaxInt64, +} + +func testBasicEncodeDecodeInt64( + encFunc func([]byte, int64) []byte, + decFunc func([]byte) ([]byte, int64, error), + descending bool, + t *testing.T, +) { + var lastEnc []byte + for i, v := range int64TestCases { + enc := encFunc(nil, v) + if i > 0 { + if (descending && bytes.Compare(enc, lastEnc) >= 0) || + (!descending && bytes.Compare(enc, lastEnc) < 0) { + t.Errorf("ordered constraint violated for %d: [% x] vs. [% x]", v, enc, lastEnc) + } + } + b, decode, err := decFunc(enc) + if err != nil { + t.Errorf("%v: %d [%x]", err, v, enc) + continue + } + if len(b) != 0 { + t.Errorf("leftover bytes: [% x]", b) + } + if decode != v { + t.Errorf("decode yielded different value than input: %d vs. %d [%x]", decode, v, enc) + } + lastEnc = enc + } +} + +type testCaseInt64 struct { + value int64 + expEnc []byte +} + +func testCustomEncodeInt64( + testCases []testCaseInt64, encFunc func([]byte, int64) []byte, t *testing.T, +) { + for _, test := range testCases { + enc := encFunc(nil, test.value) + if !bytes.Equal(enc, test.expEnc) { + t.Errorf("expected [% x]; got [% x] (value: %d)", test.expEnc, enc, test.value) + } + } +} + +type testCaseUint64 struct { + value uint64 + expEnc []byte +} + +func testCustomEncodeUint64( + testCases []testCaseUint64, encFunc func([]byte, uint64) []byte, t *testing.T, +) { + for _, test := range testCases { + enc := encFunc(nil, test.value) + if !bytes.Equal(enc, test.expEnc) { + t.Errorf("expected [% x]; got [% x] (value: %d)", test.expEnc, enc, test.value) + } + } +} + +func TestEncodeDecodeUint64(t *testing.T) { + testBasicEncodeDecodeUint64(EncodeUint64Ascending, DecodeUint64Ascending, false, t) + testCases := []testCaseUint64{ + {0, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + {1, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}}, + {1 << 8, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00}}, + {math.MaxUint64, []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + } + testCustomEncodeUint64(testCases, EncodeUint64Ascending, t) +} + +func TestEncodeDecodeUint64Descending(t *testing.T) { + testBasicEncodeDecodeUint64(EncodeUint64Descending, DecodeUint64Descending, true, t) + testCases := []testCaseUint64{ + {0, []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + {1, []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}}, + {1 << 8, []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff}}, + {math.MaxUint64, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + } + testCustomEncodeUint64(testCases, EncodeUint64Descending, t) +} + +func TestEncodeDecodeVarint(t *testing.T) { + testBasicEncodeDecodeInt64(EncodeVarintAscending, DecodeVarintAscending, false, t) + testCases := []testCaseInt64{ + {math.MinInt64, []byte{0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + {math.MinInt64 + 1, []byte{0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}}, + {-1 << 8, []byte{0x86, 0xff, 0x00}}, + {-1, []byte{0x87, 0xff}}, + {0, []byte{0x88}}, + {1, []byte{0x89}}, + {109, []byte{0xf5}}, + {112, []byte{0xf6, 0x70}}, + {1 << 8, []byte{0xf7, 0x01, 
0x00}},
+		{math.MaxInt64, []byte{0xfd, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+	}
+	testCustomEncodeInt64(testCases, EncodeVarintAscending, t)
+}
+
+func TestEncodeDecodeVarintDescending(t *testing.T) {
+	testBasicEncodeDecodeInt64(EncodeVarintDescending, DecodeVarintDescending, true, t)
+	testCases := []testCaseInt64{
+		{math.MinInt64, []byte{0xfd, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+		{math.MinInt64 + 1, []byte{0xfd, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}},
+		{-1 << 8, []byte{0xf6, 0xff}},
+		{-110, []byte{0xf5}},
+		{-1, []byte{0x88}},
+		{0, []byte{0x87, 0xff}},
+		{1, []byte{0x87, 0xfe}},
+		{1 << 8, []byte{0x86, 0xfe, 0xff}},
+		{math.MaxInt64, []byte{0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+	}
+	testCustomEncodeInt64(testCases, EncodeVarintDescending, t)
+}
+
+func TestEncodeDecodeUvarint(t *testing.T) {
+	testBasicEncodeDecodeUint64(EncodeUvarintAscending, DecodeUvarintAscending, false, t)
+	testCases := []testCaseUint64{
+		{0, []byte{0x88}},
+		{1, []byte{0x89}},
+		{109, []byte{0xf5}},
+		{110, []byte{0xf6, 0x6e}},
+		{1 << 8, []byte{0xf7, 0x01, 0x00}},
+		{math.MaxUint64, []byte{0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+	}
+	testCustomEncodeUint64(testCases, EncodeUvarintAscending, t)
+}
+
+func TestEncodeDecodeUvarintDescending(t *testing.T) {
+	testBasicEncodeDecodeUint64(EncodeUvarintDescending, DecodeUvarintDescending, true, t)
+	testCases := []testCaseUint64{
+		{0, []byte{0x88}},
+		{1, []byte{0x87, 0xfe}},
+		{1 << 8, []byte{0x86, 0xfe, 0xff}},
+		{math.MaxUint64 - 1, []byte{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}},
+		{math.MaxUint64, []byte{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+	}
+	testCustomEncodeUint64(testCases, EncodeUvarintDescending, t)
+}
diff --git a/encoding/null.go b/encoding/null.go
new file mode 100644
index 0000000000..067c348122
--- /dev/null
+++ b/encoding/null.go
@@ -0,0 +1,41 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+
+package encoding
+
+// EncodeNullAscending encodes a NULL value. The encoded bytes are appended to the
+// supplied buffer and the final buffer is returned. The encoded value for a
+// NULL is guaranteed to not be a prefix for the EncodeVarint, EncodeFloat,
+// EncodeBytes and EncodeString encodings.
+func EncodeNullAscending(b []byte) []byte {
+	return append(b, encodedNull)
+}
+
+// EncodeNullDescending is the descending equivalent of EncodeNullAscending.
+func EncodeNullDescending(b []byte) []byte {
+	return append(b, encodedNullDesc)
+}
+
+// DecodeIfNull decodes a NULL value from the input buffer. If the input buffer
+// contains a null at the start of the buffer then it is removed from the
+// buffer and true is returned for the second result. Otherwise, the buffer is
+// returned unchanged and false is returned for the second result. Since the
+// NULL value encoding is guaranteed to never occur as the prefix for the
+// EncodeVarint, EncodeFloat, EncodeBytes and EncodeString encodings, it is
+// safe to call DecodeIfNull on their encoded values.
+// This function handles NULLs encoded in either the ascending or descending direction.
+func DecodeIfNull(b []byte) ([]byte, bool) {
+	if PeekType(b) == Null {
+		return b[1:], true
+	}
+	return b, false
+}
diff --git a/encoding/null_test.go b/encoding/null_test.go
new file mode 100644
index 0000000000..fb18d2ac64
--- /dev/null
+++ b/encoding/null_test.go
@@ -0,0 +1,40 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+
+package encoding
+
+import (
+	"bytes"
+	"testing"
+)
+
+func TestEncodeDecodeNull(t *testing.T) {
+	const hello = "hello"
+
+	buf := EncodeNullAscending([]byte(hello))
+	expected := []byte(hello + "\x00")
+	if !bytes.Equal(expected, buf) {
+		t.Fatalf("expected %q, but found %q", expected, buf)
+	}
+
+	if remaining, isNull := DecodeIfNull([]byte(hello)); isNull {
+		t.Fatalf("expected isNull=false, but found isNull=%v", isNull)
+	} else if hello != string(remaining) {
+		t.Fatalf("expected %q, but found %q", hello, remaining)
+	}
+
+	if remaining, isNull := DecodeIfNull([]byte("\x00" + hello)); !isNull {
+		t.Fatalf("expected isNull=true, but found isNull=%v", isNull)
+	} else if hello != string(remaining) {
+		t.Fatalf("expected %q, but found %q", hello, remaining)
+	}
+}
diff --git a/encoding/string.go b/encoding/string.go
new file mode 100644
index 0000000000..23b6d379ae
--- /dev/null
+++ b/encoding/string.go
@@ -0,0 +1,52 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+
+package encoding
+
+import (
+	"unsafe"
+)
+
+// unsafeConvertStringToBytes converts a string to a byte slice to be used with
+// string encoding functions. Note that the output byte slice should not be
+// modified if the input string is expected to be used again - doing so could
+// violate Go semantics.
+func unsafeConvertStringToBytes(s string) []byte {
+	if len(s) == 0 {
+		return nil
+	}
+	return unsafe.Slice(unsafe.StringData(s), len(s))
+}
+
+// EncodeStringAscending encodes the string value using an escape-based encoding. See
+// EncodeBytes for details. The encoded bytes are appended to the supplied buffer
+// and the resulting buffer is returned.
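+//
+// A rough sketch (escape constants as in the string tests in this package):
+//
+//	enc := EncodeStringAscending(nil, "hello")  // {bytesMarker, 'h', 'e', 'l', 'l', 'o', escape, escapedTerm}
+//	rest, dec, err := DecodeBytesAscending(enc) // string(dec) == "hello", len(rest) == 0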
+func EncodeStringAscending(b []byte, s string) []byte {
+	return encodeStringAscendingWithTerminatorAndPrefix(b, s, ascendingBytesEscapes.escapedTerm, bytesMarker)
+}
+
+// encodeStringAscendingWithTerminatorAndPrefix encodes the string value using an escape-based encoding. See
+// EncodeBytes for details. The encoded bytes are appended to the supplied buffer
+// and the resulting buffer is returned. We can also pass a terminator byte to be used with
+// JSON key encoding.
+func encodeStringAscendingWithTerminatorAndPrefix(
+	b []byte, s string, terminator byte, prefix byte,
+) []byte {
+	unsafeString := unsafeConvertStringToBytes(s)
+	return encodeBytesAscendingWithTerminatorAndPrefix(b, unsafeString, terminator, prefix)
+}
+
+// EncodeStringDescending is the descending version of EncodeStringAscending.
+func EncodeStringDescending(b []byte, s string) []byte {
+	unsafeString := unsafeConvertStringToBytes(s)
+	return EncodeBytesDescending(b, unsafeString)
+}
diff --git a/encoding/string_test.go b/encoding/string_test.go
new file mode 100644
index 0000000000..9304b8303e
--- /dev/null
+++ b/encoding/string_test.go
@@ -0,0 +1,122 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+
+package encoding
+
+import (
+	"bytes"
+	"testing"
+)
+
+func TestEncodeDecodeUnsafeString(t *testing.T) {
+	testCases := []struct {
+		value   string
+		encoded []byte
+	}{
+		{"\x00\x01a", []byte{bytesMarker, 0x00, escaped00, 1, 'a', escape, escapedTerm}},
+		{"\x00a", []byte{bytesMarker, 0x00, escaped00, 'a', escape, escapedTerm}},
+		{"\x00\xffa", []byte{bytesMarker, 0x00, escaped00, 0xff, 'a', escape, escapedTerm}},
+		{"a", []byte{bytesMarker, 'a', escape, escapedTerm}},
+		{"b", []byte{bytesMarker, 'b', escape, escapedTerm}},
+		{"b\x00", []byte{bytesMarker, 'b', 0x00, escaped00, escape, escapedTerm}},
+		{"b\x00\x00", []byte{bytesMarker, 'b', 0x00, escaped00, 0x00, escaped00, escape, escapedTerm}},
+		{"b\x00\x00a", []byte{bytesMarker, 'b', 0x00, escaped00, 0x00, escaped00, 'a', escape, escapedTerm}},
+		{"b\xff", []byte{bytesMarker, 'b', 0xff, escape, escapedTerm}},
+		{"hello", []byte{bytesMarker, 'h', 'e', 'l', 'l', 'o', escape, escapedTerm}},
+	}
+	for i, c := range testCases {
+		enc := EncodeStringAscending(nil, c.value)
+		if !bytes.Equal(enc, c.encoded) {
+			t.Errorf("unexpected encoding mismatch for %v. expected [% x], got [% x]",
+				c.value, c.encoded, enc)
+		}
+		if i > 0 {
+			if bytes.Compare(testCases[i-1].encoded, enc) >= 0 {
+				t.Errorf("%v: expected [% x] to be less than [% x]",
+					c.value, testCases[i-1].encoded, enc)
+			}
+		}
+		remainder, dec, err := DecodeBytesAscending(enc)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+		if c.value != string(dec) {
+			t.Errorf("unexpected decoding mismatch for %v. got %v", c.value, string(dec))
+		}
+		if len(remainder) != 0 {
+			t.Errorf("unexpected remaining bytes: %v", remainder)
+		}
+
+		enc = append(enc, "remainder"...)
+ remainder, _, err = DecodeBytesAscending(enc) + if err != nil { + t.Error(err) + continue + } + if string(remainder) != "remainder" { + t.Errorf("unexpected remaining bytes: %v", remainder) + } + } +} + +func TestEncodeDecodeUnsafeStringDescending(t *testing.T) { + testCases := []struct { + value string + encoded []byte + }{ + {"hello", []byte{bytesDescMarker, ^byte('h'), ^byte('e'), ^byte('l'), ^byte('l'), ^byte('o'), escapeDesc, escapedTermDesc}}, + {"b\xff", []byte{bytesDescMarker, ^byte('b'), ^byte(0xff), escapeDesc, escapedTermDesc}}, + {"b\x00\x00a", []byte{bytesDescMarker, ^byte('b'), ^byte(0), escaped00Desc, ^byte(0), escaped00Desc, ^byte('a'), escapeDesc, escapedTermDesc}}, + {"b\x00\x00", []byte{bytesDescMarker, ^byte('b'), ^byte(0), escaped00Desc, ^byte(0), escaped00Desc, escapeDesc, escapedTermDesc}}, + {"b\x00", []byte{bytesDescMarker, ^byte('b'), ^byte(0), escaped00Desc, escapeDesc, escapedTermDesc}}, + {"b", []byte{bytesDescMarker, ^byte('b'), escapeDesc, escapedTermDesc}}, + {"a", []byte{bytesDescMarker, ^byte('a'), escapeDesc, escapedTermDesc}}, + {"\x00\xffa", []byte{bytesDescMarker, ^byte(0), escaped00Desc, ^byte(0xff), ^byte('a'), escapeDesc, escapedTermDesc}}, + {"\x00a", []byte{bytesDescMarker, ^byte(0), escaped00Desc, ^byte('a'), escapeDesc, escapedTermDesc}}, + {"\x00\x01a", []byte{bytesDescMarker, ^byte(0), escaped00Desc, ^byte(1), ^byte('a'), escapeDesc, escapedTermDesc}}, + } + for i, c := range testCases { + enc := EncodeStringDescending(nil, c.value) + if !bytes.Equal(enc, c.encoded) { + t.Errorf("unexpected encoding mismatch for %v. expected [% x], got [% x]", + c.value, c.encoded, enc) + } + if i > 0 { + if bytes.Compare(testCases[i-1].encoded, enc) >= 0 { + t.Errorf("%v: expected [% x] to be less than [% x]", + c.value, testCases[i-1].encoded, enc) + } + } + remainder, dec, err := DecodeBytesDescending(enc) + if err != nil { + t.Error(err) + continue + } + if c.value != string(dec) { + t.Errorf("unexpected decoding mismatch for %v. got [% x]", c.value, string(dec)) + } + if len(remainder) != 0 { + t.Errorf("unexpected remaining bytes: %v", remainder) + } + + enc = append(enc, "remainder"...) + remainder, _, err = DecodeBytesDescending(enc) + if err != nil { + t.Error(err) + continue + } + if string(remainder) != "remainder" { + t.Errorf("unexpected remaining bytes: %v", remainder) + } + } +} diff --git a/encoding/type.go b/encoding/type.go new file mode 100644 index 0000000000..b4b85cf7bf --- /dev/null +++ b/encoding/type.go @@ -0,0 +1,46 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +package encoding + +// Type represents the type of a value encoded by +// Encode{Null,Varint,Uvarint,Float,Bytes}. +type Type int + +const ( + Unknown Type = 0 + Null Type = 1 + Int Type = 3 + Float Type = 4 + Bytes Type = 6 + BytesDesc Type = 7 +) + +// PeekType peeks at the type of the value encoded at the start of b. 
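+//
+// For example (mirroring the cases in TestPeekType):
+//
+//	PeekType(EncodeNullAscending(nil))              // Null
+//	PeekType(EncodeVarintAscending(nil, 0))         // Int
+//	PeekType(EncodeFloatAscending(nil, 0))          // Float
+//	PeekType(EncodeBytesAscending(nil, []byte(""))) // Bytes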
+func PeekType(b []byte) Type { + if len(b) >= 1 { + m := b[0] + switch { + case m == encodedNull, m == encodedNullDesc: + return Null + case m == bytesMarker: + return Bytes + case m == bytesDescMarker: + return BytesDesc + case m >= IntMin && m <= IntMax: + return Int + case m >= floatNaN && m <= floatNaNDesc: + return Float + } + } + return Unknown +} diff --git a/encoding/type_test.go b/encoding/type_test.go new file mode 100644 index 0000000000..f3114858bd --- /dev/null +++ b/encoding/type_test.go @@ -0,0 +1,41 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +package encoding + +import ( + "testing" +) + +func TestPeekType(t *testing.T) { + testCases := []struct { + enc []byte + typ Type + }{ + {EncodeNullAscending(nil), Null}, + {EncodeNullDescending(nil), Null}, + {EncodeVarintAscending(nil, 0), Int}, + {EncodeVarintDescending(nil, 0), Int}, + {EncodeUvarintAscending(nil, 0), Int}, + {EncodeUvarintDescending(nil, 0), Int}, + {EncodeFloatAscending(nil, 0), Float}, + {EncodeFloatDescending(nil, 0), Float}, + {EncodeBytesAscending(nil, []byte("")), Bytes}, + {EncodeBytesDescending(nil, []byte("")), BytesDesc}, + } + for i, c := range testCases { + typ := PeekType(c.enc) + if c.typ != typ { + t.Fatalf("%d: expected %d, but found %d", i, c.typ, typ) + } + } +} diff --git a/go.mod b/go.mod index f9de929232..3d114d62ca 100644 --- a/go.mod +++ b/go.mod @@ -1,19 +1,19 @@ module github.com/sourcenetwork/defradb -go 1.20 +go 1.21 require ( github.com/bits-and-blooms/bitset v1.13.0 github.com/bxcodec/faker v2.0.1+incompatible - github.com/evanphx/json-patch/v5 v5.7.0 - github.com/fxamacker/cbor/v2 v2.5.0 - github.com/getkin/kin-openapi v0.122.0 - github.com/go-chi/chi/v5 v5.0.11 + github.com/evanphx/json-patch/v5 v5.9.0 + github.com/fxamacker/cbor/v2 v2.6.0 + github.com/getkin/kin-openapi v0.123.0 + github.com/go-chi/chi/v5 v5.0.12 github.com/go-chi/cors v1.2.1 github.com/go-errors/errors v1.5.1 github.com/gofrs/uuid/v5 v5.0.0 github.com/iancoleman/strcase v0.3.0 - github.com/ipfs/boxo v0.17.0 + github.com/ipfs/boxo v0.18.0 github.com/ipfs/go-block-format v0.2.0 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 @@ -27,8 +27,7 @@ require ( github.com/libp2p/go-libp2p-kad-dht v0.25.2 github.com/libp2p/go-libp2p-pubsub v0.10.0 github.com/libp2p/go-libp2p-record v0.2.0 - github.com/mitchellh/mapstructure v1.5.0 - github.com/multiformats/go-multiaddr v0.12.1 + github.com/multiformats/go-multiaddr v0.12.2 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multihash v0.2.3 github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 @@ -38,18 +37,16 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.18.2 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/tidwall/btree v1.7.0 github.com/ugorji/go/codec v1.2.12 github.com/valyala/fastjson v1.6.4 github.com/vito/go-sse v1.0.0 - go.opentelemetry.io/otel/metric v1.21.0 - go.opentelemetry.io/otel/sdk/metric v1.21.0 - 
go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.18.0 + go.opentelemetry.io/otel/metric v1.24.0 + go.opentelemetry.io/otel/sdk/metric v1.24.0 + go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc - golang.org/x/net v0.20.0 - google.golang.org/grpc v1.60.1 + google.golang.org/grpc v1.62.0 google.golang.org/protobuf v1.32.0 ) @@ -75,18 +72,18 @@ require ( github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/swag v0.22.8 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.1.2 // indirect + github.com/golang/glog v1.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/flatbuffers v2.0.6+incompatible // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect @@ -133,6 +130,7 @@ require ( github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect @@ -168,27 +166,29 @@ require ( github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/textileio/go-log/v2 v2.1.3-gke-2 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/x448/float16 v0.8.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/sdk v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/sdk v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/dig v1.17.1 // indirect go.uber.org/fx v1.20.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.19.0 // indirect golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.16.1 // indirect gonum.org/v1/gonum v0.14.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect 
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect diff --git a/go.sum b/go.sum index f70ba2aaaf..deb2f24823 100644 --- a/go.sum +++ b/go.sum @@ -8,11 +8,14 @@ dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1 dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -31,6 +34,7 @@ github.com/bytecodealliance/wasmtime-go/v15 v15.0.0 h1:4R2MpSPPbtSxqdsOTvsMn1pnw github.com/bytecodealliance/wasmtime-go/v15 v15.0.0/go.mod h1:m6vB/SsM+pnJkVHmO1wzHYUeYtciltTKuxuvkR8pYcY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -49,7 +53,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -59,10 +63,13 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c 
h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
+github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8=
+github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE=
github.com/dgraph-io/badger/v3 v3.2011.1 h1:Hmyof0WMEF/QtutX5SQHzIMnJQxb/IrSzhjckV2SD6g=
+github.com/dgraph-io/badger/v3 v3.2011.1/go.mod h1:0rLLrQpKVQAL0or/lBLMQznhr6dWWX7h5AKnmnqx268=
github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
@@ -80,26 +87,27 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
-github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.0.1 h1:vPp/jdQLXC6ppsXSj/pM3W1BIJ5FEHE2TulSJBpb43Y=
github.com/flynn/noise v1.0.1/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE=
-github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
-github.com/getkin/kin-openapi v0.122.0 h1:WB9Jbl0Hp/T79/JF9xlSW5Kl9uYdk/AWD0yAd9HOM10=
-github.com/getkin/kin-openapi v0.122.0/go.mod h1:PCWw/lfBrJY4HcdqE3jj+QFkaFK8ABoqo7PvqVhXXqw=
+github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA=
+github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/getkin/kin-openapi v0.123.0 h1:zIik0mRwFNLyvtXK274Q6ut+dPh6nlxBp0x7mNrPhs8=
+github.com/getkin/kin-openapi v0.123.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/go-chi/chi/v5 v5.0.11 h1:BnpYbFZ3T3S1WMpD79r7R5ThWX40TaFB7L31Y8xqSwA=
-github.com/go-chi/chi/v5 v5.0.11/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s=
+github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4=
github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
@@ -110,15 +118,15 @@ github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
-github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
+github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
+github.com/go-openapi/swag v0.22.8 h1:/9RjDSQ0vbFR+NyjGMkFTsA1IA0fmhKSThmfGZjicbw=
+github.com/go-openapi/swag v0.22.8/go.mod h1:6QT22icPLEqAM/z/TChgb4WAveCHF92+2gF0CNjHpPI=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
+github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -131,8 +139,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
-github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
+github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
+github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -167,6 +175,7 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
@@ -177,12 +186,13 @@ github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42 h1:dHLYa5D8/Ta0aLR2Xc
github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
-github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk=
+github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
@@ -202,6 +212,7 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hsanjuan/ipfs-lite v1.8.1 h1:Rpd9bTXYgkmnt8M5QsZnWwtW6ebxAB7HlU/d0zE4BmA=
+github.com/hsanjuan/ipfs-lite v1.8.1/go.mod h1:oGCaHBi+I73UFjc6wPAQ75hr4FjJhoqy6YPZjtghDIc=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
@@ -212,21 +223,26 @@ github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY=
github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
-github.com/ipfs/boxo v0.17.0 h1:fVXAb12dNbraCX1Cdid5BB6Kl62gVLNVA+e0EYMqAU0=
-github.com/ipfs/boxo v0.17.0/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80=
+github.com/ipfs/boxo v0.18.0 h1:MOL9/AgoV3e7jlVMInicaSdbgralfqSsbkc31dZ9tmw=
+github.com/ipfs/boxo v0.18.0/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
+github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs=
github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM=
github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q=
+github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA=
github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=
github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro=
+github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek=
github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo=
+github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q=
github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=
+github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE=
@@ -278,6 +294,7 @@ github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoK
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -308,6 +325,7 @@ github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvN
github.com/libp2p/go-libp2p-routing-helpers v0.7.3 h1:u1LGzAMVRK9Nqq5aYDVOiq/HaB93U9WWczBzGyAC5ZY=
github.com/libp2p/go-libp2p-routing-helpers v0.7.3/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
+github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
@@ -319,6 +337,7 @@ github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8S
github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q=
+github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
@@ -348,6 +367,7 @@ github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -363,8 +383,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
-github.com/multiformats/go-multiaddr v0.12.1 h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk=
-github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE=
+github.com/multiformats/go-multiaddr v0.12.2 h1:9G9sTY/wCYajKa9lyfWPmpZAwe6oV+Wb1zcmMS1HG24=
+github.com/multiformats/go-multiaddr v0.12.2/go.mod h1:GKyaTYjZRdcUhyOetrxTk9z0cW+jA/YrnqTOvKgi44M=
github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
@@ -398,6 +418,7 @@ github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
+github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -442,7 +463,8 @@ github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1ab
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -452,6 +474,7 @@ github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgY
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
+github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
@@ -507,8 +530,9 @@ github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -518,15 +542,18 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/textileio/go-datastore-extensions v1.0.1 h1:qIJGqJaigQ1wD4TdwS/hf73u0HChhXvvUSJuxBEKS+c=
+github.com/textileio/go-datastore-extensions v1.0.1/go.mod h1:Pzj9FDRkb55910dr/FX8M7WywvnS26gBgEDez1ZBuLE=
github.com/textileio/go-ds-badger3 v0.1.0 h1:q0kBuBmAcRUR3ClMSYlyw0224XeuzjjGinU53Qz1uXI=
+github.com/textileio/go-ds-badger3 v0.1.0/go.mod h1:z8LuXcihtZ91spEaqhEiNGIWx3E59iFq1HZj4gwwGrU=
github.com/textileio/go-log/v2 v2.1.3-gke-2 h1:YkMA5ua0Cf/X6CkbexInsoJ/HdaHQBlgiv9Yy9hddNM=
github.com/textileio/go-log/v2 v2.1.3-gke-2/go.mod h1:DwACkjFS3kjZZR/4Spx3aPfSsciyslwUe5bxV8CEU2w=
github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
@@ -542,10 +569,13 @@ github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMI
github.com/vito/go-sse v1.0.0 h1:e6/iTrrvy8BRrOwJwmQmlndlil+TLdxXvHi55ZDzH6M=
github.com/vito/go-sse v1.0.0/go.mod h1:2wkcaQ+jtlZ94Uve8gYZjFpL68luAjssTINA2hpgcZs=
github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s=
+github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y=
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4=
+github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM=
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=
+github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
@@ -556,25 +586,27 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
-go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
-go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
-go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
-go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
-go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
-go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0=
-go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q=
-go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
-go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8=
+go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk=
go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
@@ -584,8 +616,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
-go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
-go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -597,8 +629,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
-golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM=
golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
@@ -634,8 +666,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -677,8 +709,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
-golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -728,8 +760,8 @@ google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@@ -738,8 +770,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
-google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
+google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk=
+google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -770,6 +802,7 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/http/client.go b/http/client.go
index dc289ceb39..142a359c5b 100644
--- a/http/client.go
+++ b/http/client.go
@@ -17,9 +17,12 @@ import (
 	"io"
 	"net/http"
 	"net/url"
+	"strconv"
 	"strings"

 	blockstore "github.com/ipfs/boxo/blockstore"
+	"github.com/lens-vm/lens/host-go/config/model"
+	"github.com/sourcenetwork/immutable"
 	sse "github.com/vito/go-sse/sse"

 	"github.com/sourcenetwork/defradb/client"
@@ -134,12 +137,18 @@ func (c *Client) AddSchema(ctx context.Context, schema string) ([]client.Collect
 type patchSchemaRequest struct {
 	Patch               string
 	SetAsDefaultVersion bool
+	Migration           immutable.Option[model.Lens]
 }

-func (c *Client) PatchSchema(ctx context.Context, patch string, setAsDefaultVersion bool) error {
+func (c *Client) PatchSchema(
+	ctx context.Context,
+	patch string,
+	migration immutable.Option[model.Lens],
+	setAsDefaultVersion bool,
+) error {
 	methodURL := c.http.baseURL.JoinPath("schema")

-	body, err := json.Marshal(patchSchemaRequest{patch, setAsDefaultVersion})
+	body, err := json.Marshal(patchSchemaRequest{patch, setAsDefaultVersion, migration})
 	if err != nil {
 		return err
 	}
@@ -152,7 +161,7 @@ func (c *Client) PatchSchema(ctx context.Context, patch string, setAsDefaultVers
 	return err
 }

-func (c *Client) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error {
+func (c *Client) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error {
 	methodURL := c.http.baseURL.JoinPath("schema", "default")

 	req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), strings.NewReader(schemaVersionID))
@@ -164,14 +173,20 @@ func (c *Client) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID st
 }

 type addViewRequest struct {
-	Query string
-	SDL   string
+	Query     string
+	SDL       string
+	Transform immutable.Option[model.Lens]
 }

-func (c *Client) AddView(ctx context.Context, query string, sdl string) ([]client.CollectionDefinition, error) {
+func (c *Client) AddView(
+	ctx context.Context,
+	query string,
+	sdl string,
+	transform immutable.Option[model.Lens],
+) ([]client.CollectionDefinition, error) {
 	methodURL := c.http.baseURL.JoinPath("view")

-	body, err := json.Marshal(addViewRequest{query, sdl})
+	body, err := json.Marshal(addViewRequest{query, sdl, transform})
 	if err != nil {
 		return nil, err
 	}
@@ -190,7 +205,20 @@ func (c *Client) AddView(ctx context.Context, query string, sdl string) ([]clien
 }

 func (c *Client) SetMigration(ctx context.Context, config client.LensConfig) error {
-	return c.LensRegistry().SetMigration(ctx, config)
+	methodURL := c.http.baseURL.JoinPath("lens")
+
+	body, err := json.Marshal(config)
+	if err != nil {
+		return err
+	}
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body))
+	if err != nil {
+		return err
+	}
+
+	_, err = c.http.request(req)
+	return err
 }

 func (c *Client) LensRegistry() client.LensRegistry {
@@ -198,60 +226,34 @@ func (c *Client) LensRegistry() client.LensRegistry {
 }

 func (c *Client) GetCollectionByName(ctx context.Context, name client.CollectionName) (client.Collection, error) {
-	methodURL := c.http.baseURL.JoinPath("collections")
-	methodURL.RawQuery = url.Values{"name": []string{name}}.Encode()
-
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
+	cols, err := c.GetCollections(ctx, client.CollectionFetchOptions{Name: immutable.Some(name)})
 	if err != nil {
 		return nil, err
 	}
-	var definition client.CollectionDefinition
-	if err := c.http.requestJson(req, &definition); err != nil {
-		return nil, err
-	}
-	return &Collection{c.http, definition}, nil
-}
-
-func (c *Client) GetCollectionsBySchemaRoot(ctx context.Context, schemaRoot string) ([]client.Collection, error) {
-	methodURL := c.http.baseURL.JoinPath("collections")
-	methodURL.RawQuery = url.Values{"schema_root": []string{schemaRoot}}.Encode()

-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-	var descriptions []client.CollectionDefinition
-	if err := c.http.requestJson(req, &descriptions); err != nil {
-		return nil, err
-	}
-	collections := make([]client.Collection, len(descriptions))
-	for i, d := range descriptions {
-		collections[i] = &Collection{c.http, d}
-	}
-	return collections, nil
+	// cols will always have length == 1 here
+	return cols[0], nil
 }

-func (c *Client) GetCollectionsByVersionID(ctx context.Context, versionId string) ([]client.Collection, error) {
+func (c *Client) GetCollections(
+	ctx context.Context,
+	options client.CollectionFetchOptions,
+) ([]client.Collection, error) {
 	methodURL := c.http.baseURL.JoinPath("collections")
-	methodURL.RawQuery = url.Values{"version_id": []string{versionId}}.Encode()
-
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
-	if err != nil {
-		return nil, err
+	params := url.Values{}
+	if options.Name.HasValue() {
+		params.Add("name", options.Name.Value())
 	}
-	var descriptions []client.CollectionDefinition
-	if err := c.http.requestJson(req, &descriptions); err != nil {
-		return nil, err
+	if options.SchemaVersionID.HasValue() {
+		params.Add("version_id", options.SchemaVersionID.Value())
 	}
-	collections := make([]client.Collection, len(descriptions))
-	for i, d := range descriptions {
-		collections[i] = &Collection{c.http, d}
+	if options.SchemaRoot.HasValue() {
+		params.Add("schema_root", options.SchemaRoot.Value())
 	}
-	return collections, nil
-}
-
-func (c *Client) GetAllCollections(ctx context.Context) ([]client.Collection, error) {
-	methodURL := c.http.baseURL.JoinPath("collections")
+	if options.IncludeInactive.HasValue() {
+		params.Add("get_inactive", strconv.FormatBool(options.IncludeInactive.Value()))
+	}
+	methodURL.RawQuery = params.Encode()

 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
 	if err != nil {
@@ -268,53 +270,32 @@ func (c *Client) GetAllCollections(ctx context.Context) ([]client.Collection, er
 	return collections, nil
 }

-func (c *Client) GetSchemasByName(ctx context.Context, name string) ([]client.SchemaDescription, error) {
-	methodURL := c.http.baseURL.JoinPath("schema")
-	methodURL.RawQuery = url.Values{"name": []string{name}}.Encode()
-
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-	var schema []client.SchemaDescription
-	if err := c.http.requestJson(req, &schema); err != nil {
-		return nil, err
-	}
-	return schema, nil
-}
-
 func (c *Client) GetSchemaByVersionID(ctx context.Context, versionID string) (client.SchemaDescription, error) {
-	methodURL := c.http.baseURL.JoinPath("schema")
-	methodURL.RawQuery = url.Values{"version_id": []string{versionID}}.Encode()
-
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
+	schemas, err := c.GetSchemas(ctx, client.SchemaFetchOptions{ID: immutable.Some(versionID)})
 	if err != nil {
 		return client.SchemaDescription{}, err
 	}
-	var schema client.SchemaDescription
-	if err := c.http.requestJson(req, &schema); err != nil {
-		return client.SchemaDescription{}, err
-	}
-	return schema, nil
+
+	// schemas will always have length == 1 here
+	return schemas[0], nil
 }

-func (c *Client) GetSchemasByRoot(ctx context.Context, root string) ([]client.SchemaDescription, error) {
+func (c *Client) GetSchemas(
+	ctx context.Context,
+	options client.SchemaFetchOptions,
+) ([]client.SchemaDescription, error) {
 	methodURL := c.http.baseURL.JoinPath("schema")
-	methodURL.RawQuery = url.Values{"root": []string{root}}.Encode()
-
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
-	if err != nil {
-		return nil, err
+	params := url.Values{}
+	if options.ID.HasValue() {
+		params.Add("version_id", options.ID.Value())
 	}
-	var schema []client.SchemaDescription
-	if err := c.http.requestJson(req, &schema); err != nil {
-		return nil, err
+	if options.Root.HasValue() {
+		params.Add("root", options.Root.Value())
 	}
-	return schema, nil
-}
-
-func (c *Client) GetAllSchemas(ctx context.Context) ([]client.SchemaDescription, error) {
-	methodURL := c.http.baseURL.JoinPath("schema")
+	if options.Name.HasValue() {
+		params.Add("name", options.Name.Value())
+	}
+	methodURL.RawQuery = params.Encode()

 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
 	if err != nil {
@@ -363,7 +344,7 @@ func (c *Client) ExecRequest(ctx context.Context, query string) *client.RequestR
 		return result
 	}
 	if res.Header.Get("Content-Type") == "text/event-stream" {
-		result.Pub = c.execRequestSubscription(ctx, res.Body)
+		result.Pub = c.execRequestSubscription(res.Body)
 		return result
 	}
 	// ignore close errors because they have
@@ -386,7 +367,7 @@ func (c *Client) ExecRequest(ctx context.Context, query string) *client.RequestR
 	return result
 }

-func (c *Client) execRequestSubscription(ctx context.Context, r io.ReadCloser) *events.Publisher[events.Update] {
+func (c *Client) execRequestSubscription(r io.ReadCloser) *events.Publisher[events.Update] {
 	pubCh := events.New[events.Update](0, 0)
 	pub, err := events.NewPublisher[events.Update](pubCh, 0)
 	if err != nil {
diff --git a/http/client_collection.go b/http/client_collection.go
index 95a81df84f..876c175338 100644
--- a/http/client_collection.go
+++ b/http/client_collection.go
@@ -20,6 +20,7 @@ import (
 	"net/url"
 	"strings"

+	"github.com/sourcenetwork/immutable"
 	sse "github.com/vito/go-sse/sse"

 	"github.com/sourcenetwork/defradb/client"
@@ -39,7 +40,7 @@ func (c *Collection) Description() client.CollectionDescription {
 	return c.def.Description
 }

-func (c *Collection) Name() string {
+func (c *Collection) Name() immutable.Option[string] {
 	return c.Description().Name
 }

@@ -60,7 +61,11 @@ func (c *Collection) Definition() client.CollectionDefinition {
 }

 func (c *Collection) Create(ctx context.Context, doc *client.Document) error {
-	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name)
+	if !c.Description().Name.HasValue() {
+		return client.ErrOperationNotPermittedOnNamelessCols
+	}
+
+	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name.Value())

 	body, err := doc.String()
 	if err != nil {
@@ -79,7 +84,10 @@ func (c *Collection) Create(ctx context.Context, doc *client.Document) error {
 }

 func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) error {
-	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name)
+	if !c.Description().Name.HasValue() {
+		return client.ErrOperationNotPermittedOnNamelessCols
+	}
+	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name.Value())

 	var docMapList []json.RawMessage
 	for _, doc := range docs {
@@ -108,7 +116,11 @@ func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) er
 }

 func (c *Collection) Update(ctx context.Context, doc *client.Document) error {
-	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, doc.ID().String())
+	if !c.Description().Name.HasValue() {
+		return client.ErrOperationNotPermittedOnNamelessCols
+	}
+
+	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name.Value(), doc.ID().String())

 	body, err := doc.ToJSONPatch()
 	if err != nil {
@@ -138,7 +150,11 @@ func (c *Collection) Save(ctx context.Context, doc *client.Document) error {
 }

 func (c *Collection) Delete(ctx context.Context, docID client.DocID) (bool, error) {
-	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, docID.String())
+	if !c.Description().Name.HasValue() {
+		return false, client.ErrOperationNotPermittedOnNamelessCols
+	}
+
+	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name.Value(), docID.String())

 	req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), nil)
 	if err != nil {
@@ -176,7 +192,11 @@ func (c *Collection) updateWith(
 	ctx context.Context,
 	request CollectionUpdateRequest,
 ) (*client.UpdateResult, error) {
-	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name)
+	if !c.Description().Name.HasValue() {
+		return nil, client.ErrOperationNotPermittedOnNamelessCols
+	}
+
+	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name.Value())

 	body, err := json.Marshal(request)
 	if err != nil {
@@ -247,7 +267,11 @@ func (c *Collection) deleteWith(
 	ctx context.Context,
 	request CollectionDeleteRequest,
 ) (*client.DeleteResult, error) {
-	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name)
+	if !c.Description().Name.HasValue() {
+		return nil, client.ErrOperationNotPermittedOnNamelessCols
+	}
+
+	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name.Value())

 	body, err := json.Marshal(request)
 	if err != nil {
@@ -287,12 +311,16 @@ func (c *Collection) DeleteWithDocIDs(ctx context.Context, docIDs []client.DocID
 }

 func (c *Collection) Get(ctx context.Context, docID client.DocID, showDeleted bool) (*client.Document, error) {
+	if !c.Description().Name.HasValue() {
+		return nil, client.ErrOperationNotPermittedOnNamelessCols
+	}
+
 	query := url.Values{}
 	if showDeleted {
 		query.Add("show_deleted", "true")
 	}

-	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, docID.String())
+	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name.Value(), docID.String())
 	methodURL.RawQuery = query.Encode()

 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
@@ -320,7 +348,11 @@ func (c *Collection) WithTxn(tx datastore.Txn) client.Collection {
 }

 func (c *Collection) GetAllDocIDs(ctx context.Context) (<-chan client.DocIDResult, error) {
-	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name)
+	if !c.Description().Name.HasValue() {
+		return nil, client.ErrOperationNotPermittedOnNamelessCols
+	}
+
+	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name.Value())

 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
 	if err != nil {
@@ -372,7 +404,11 @@ func (c *Collection) CreateIndex(
 	ctx context.Context,
 	indexDesc client.IndexDescription,
 ) (client.IndexDescription, error) {
-	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, "indexes")
+	if !c.Description().Name.HasValue() {
+		return client.IndexDescription{}, client.ErrOperationNotPermittedOnNamelessCols
+	}
+
+	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name.Value(), "indexes")

 	body, err := json.Marshal(&indexDesc)
 	if err != nil {
@@ -390,7 +426,11 @@ func (c *Collection) CreateIndex(
 }

 func (c *Collection) DropIndex(ctx context.Context, indexName string) error {
-	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, "indexes", indexName)
+	if !c.Description().Name.HasValue() {
+		return client.ErrOperationNotPermittedOnNamelessCols
+	}
+
+	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name.Value(), "indexes", indexName)

 	req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), nil)
 	if err != nil {
@@ -401,7 +441,11 @@ func (c *Collection) DropIndex(ctx context.Context, indexName string) error {
 }

 func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) {
-	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, "indexes")
+	if !c.Description().Name.HasValue() {
+		return nil, client.ErrOperationNotPermittedOnNamelessCols
+	}
+
+	methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name.Value(), "indexes")

 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
 	if err != nil {
@@ -413,3 +457,15 @@ func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription,
 	}
 	return indexes, nil
 }
+
+func (c *Collection) CreateDocIndex(context.Context, *client.Document) error {
+	return ErrMethodIsNotImplemented
+}
+
+func (c *Collection) UpdateDocIndex(ctx context.Context, oldDoc, newDoc *client.Document) error {
+	return ErrMethodIsNotImplemented
+}
+
+func (c *Collection) DeleteDocIndex(context.Context, *client.Document) error {
+	return ErrMethodIsNotImplemented
+}
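Note: every name-addressed route above applies the same guard, since collection names are now optional. A condensed sketch of the pattern, using a hypothetical in-package helper that is not part of this diff:

// Hypothetical helper inside package http, not in this PR: it centralizes
// the guard the name-addressed routes above repeat inline.
func collectionURL(c *Collection) (*url.URL, error) {
	name := c.Description().Name
	if !name.HasValue() {
		// Nameless collections (e.g. inline view definitions) have no REST path.
		return nil, client.ErrOperationNotPermittedOnNamelessCols
	}
	return c.http.baseURL.JoinPath("collections", name.Value()), nil
}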
diff --git a/http/client_lens.go b/http/client_lens.go
index 3c8c2fc903..9021aa31d6 100644
--- a/http/client_lens.go
+++ b/http/client_lens.go
@@ -14,8 +14,10 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"fmt"
 	"net/http"

+	"github.com/lens-vm/lens/host-go/config/model"
 	"github.com/sourcenetwork/immutable/enumerable"

 	"github.com/sourcenetwork/defradb/client"
@@ -34,10 +36,18 @@ func (c *LensRegistry) WithTxn(tx datastore.Txn) client.LensRegistry {
 	return &LensRegistry{http}
 }

-func (c *LensRegistry) SetMigration(ctx context.Context, config client.LensConfig) error {
-	methodURL := c.http.baseURL.JoinPath("lens")
+type setMigrationRequest struct {
+	CollectionID uint32
+	Config       model.Lens
+}
+
+func (c *LensRegistry) SetMigration(ctx context.Context, collectionID uint32, config model.Lens) error {
+	methodURL := c.http.baseURL.JoinPath("lens", "registry")

-	body, err := json.Marshal(config)
+	body, err := json.Marshal(setMigrationRequest{
+		CollectionID: collectionID,
+		Config:       config,
+	})
 	if err != nil {
 		return err
 	}
@@ -50,7 +60,7 @@ func (c *LensRegistry) SetMigration(ctx context.Context, config client.LensConfi
 }

 func (c *LensRegistry) ReloadLenses(ctx context.Context) error {
-	methodURL := c.http.baseURL.JoinPath("lens", "reload")
+	methodURL := c.http.baseURL.JoinPath("lens", "registry", "reload")

 	req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), nil)
 	if err != nil {
@@ -60,12 +70,17 @@ func (c *LensRegistry) ReloadLenses(ctx context.Context) error {
 	return err
 }

+type migrateRequest struct {
+	CollectionID uint32
+	Data         []map[string]any
+}
+
 func (c *LensRegistry) MigrateUp(
 	ctx context.Context,
 	src enumerable.Enumerable[map[string]any],
-	schemaVersionID string,
+	collectionID uint32,
 ) (enumerable.Enumerable[map[string]any], error) {
-	methodURL := c.http.baseURL.JoinPath("lens", schemaVersionID, "up")
+	methodURL := c.http.baseURL.JoinPath("lens", "registry", fmt.Sprint(collectionID), "up")

 	var data []map[string]any
 	err := enumerable.ForEach(src, func(item map[string]any) {
@@ -74,7 +89,13 @@ func (c *LensRegistry) MigrateUp(
 	if err != nil {
 		return nil, err
 	}
-	body, err := json.Marshal(data)
+
+	request := migrateRequest{
+		CollectionID: collectionID,
+		Data:         data,
+	}
+
+	body, err := json.Marshal(request)
 	if err != nil {
 		return nil, err
 	}
@@ -92,9 +113,9 @@ func (c *LensRegistry) MigrateUp(
 func (c *LensRegistry) MigrateDown(
 	ctx context.Context,
 	src enumerable.Enumerable[map[string]any],
-	schemaVersionID string,
+	collectionID uint32,
 ) (enumerable.Enumerable[map[string]any], error) {
-	methodURL := c.http.baseURL.JoinPath("lens", schemaVersionID, "down")
+	methodURL := c.http.baseURL.JoinPath("lens", "registry", fmt.Sprint(collectionID), "down")

 	var data []map[string]any
 	err := enumerable.ForEach(src, func(item map[string]any) {
@@ -103,7 +124,13 @@ func (c *LensRegistry) MigrateDown(
 	if err != nil {
 		return nil, err
 	}
-	body, err := json.Marshal(data)
+
+	request := migrateRequest{
+		CollectionID: collectionID,
+		Data:         data,
+	}
+
+	body, err := json.Marshal(request)
 	if err != nil {
 		return nil, err
 	}
@@ -117,31 +144,3 @@ func (c *LensRegistry) MigrateDown(
 	}
 	return enumerable.New(result), nil
 }
-
-func (c *LensRegistry) Config(ctx context.Context) ([]client.LensConfig, error) {
-	methodURL := c.http.baseURL.JoinPath("lens")
-
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-	var cfgs []client.LensConfig
-	if err := c.http.requestJson(req, &cfgs); err != nil {
-		return nil, err
-	}
-	return cfgs, nil
-}
-
-func (c *LensRegistry) HasMigration(ctx context.Context, schemaVersionID string) (bool, error) {
-	methodURL := c.http.baseURL.JoinPath("lens", schemaVersionID)
-
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
-	if err != nil {
-		return false, err
-	}
-	_, err = c.http.request(req)
-	if err != nil {
-		return false, err
-	}
-	return true, nil
-}
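Note: LensRegistry.SetMigration and the migrate calls are now keyed by collection ID rather than schema version ID. A sketch of registering a migration through the new signature; the wasm module path and field arguments are illustrative placeholders, not values from this PR:

package example

import (
	"context"

	"github.com/lens-vm/lens/host-go/config/model"

	"github.com/sourcenetwork/defradb/client"
)

// setRenameMigration registers a lens against a collection ID using the
// reshaped registry API shown above.
func setRenameMigration(ctx context.Context, registry client.LensRegistry, collectionID uint32) error {
	lens := model.Lens{
		Lenses: []model.LensModule{
			{
				Path: "file:///lenses/rename.wasm", // illustrative module location
				Arguments: map[string]any{
					"src": "name",
					"dst": "fullName",
				},
			},
		},
	}
	return registry.SetMigration(ctx, collectionID, lens)
}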
diff --git a/http/errors.go b/http/errors.go
index b78771723f..1510c2e520 100644
--- a/http/errors.go
+++ b/http/errors.go
@@ -17,7 +17,8 @@ import (
 )

 const (
-	errFailedToLoadKeys string = "failed to load given keys"
+	errFailedToLoadKeys       string = "failed to load given keys"
+	errMethodIsNotImplemented string = "the method is not implemented"
 )

 // Errors returnable from this package.
@@ -25,14 +26,15 @@ const (
 // This list is incomplete. Undefined errors may also be returned.
 // Errors returned from this package may be tested against these errors with errors.Is.
 var (
-	ErrNoListener            = errors.New("cannot serve with no listener")
-	ErrNoEmail               = errors.New("email address must be specified for tls with autocert")
-	ErrInvalidRequestBody    = errors.New("invalid request body")
-	ErrStreamingNotSupported = errors.New("streaming not supported")
-	ErrMigrationNotFound     = errors.New("migration not found")
-	ErrMissingRequest        = errors.New("missing request")
-	ErrInvalidTransactionId  = errors.New("invalid transaction id")
-	ErrP2PDisabled           = errors.New("p2p network is disabled")
+	ErrNoListener             = errors.New("cannot serve with no listener")
+	ErrNoEmail                = errors.New("email address must be specified for tls with autocert")
+	ErrInvalidRequestBody     = errors.New("invalid request body")
+	ErrStreamingNotSupported  = errors.New("streaming not supported")
+	ErrMigrationNotFound      = errors.New("migration not found")
+	ErrMissingRequest         = errors.New("missing request")
+	ErrInvalidTransactionId   = errors.New("invalid transaction id")
+	ErrP2PDisabled            = errors.New("p2p network is disabled")
+	ErrMethodIsNotImplemented = errors.New(errMethodIsNotImplemented)
 )

 type errorResponse struct {
diff --git a/http/handler.go b/http/handler.go
index 1df8987964..b06ef06cb6 100644
--- a/http/handler.go
+++ b/http/handler.go
@@ -20,7 +20,6 @@ import (
 	"github.com/sourcenetwork/defradb/datastore"

 	"github.com/go-chi/chi/v5"
-	"github.com/go-chi/chi/v5/middleware"
 )

 // Version is the identifier for the current API version.
@@ -29,15 +28,7 @@ var Version string = "v0"
 // playgroundHandler is set when building with the playground build tag
 var playgroundHandler http.Handler = http.HandlerFunc(http.NotFound)

-type Handler struct {
-	db  client.DB
-	mux *chi.Mux
-	txs *sync.Map
-}
-
-func NewHandler(db client.DB, opts ServerOptions) (*Handler, error) {
-	txs := &sync.Map{}
-
+func NewApiRouter() (*Router, error) {
 	tx_handler := &txHandler{}
 	store_handler := &storeHandler{}
 	collection_handler := &collectionHandler{}
@@ -50,12 +41,6 @@ func NewHandler(db client.DB, opts ServerOptions) (*Handler, error) {
 		return nil, err
 	}

-	router.AddMiddleware(
-		ApiMiddleware(db, txs, opts),
-		TransactionMiddleware,
-		StoreMiddleware,
-	)
-
 	tx_handler.bindRoutes(router)
 	store_handler.bindRoutes(router)
 	p2p_handler.bindRoutes(router)
@@ -74,14 +59,31 @@ func NewHandler(db client.DB, opts ServerOptions) (*Handler, error) {
 	if err := router.Validate(context.Background()); err != nil {
 		return nil, err
 	}
+	return router, nil
+}
+
+type Handler struct {
+	db  client.DB
+	mux *chi.Mux
+	txs *sync.Map
+}
+
+func NewHandler(db client.DB) (*Handler, error) {
+	router, err := NewApiRouter()
+	if err != nil {
+		return nil, err
+	}
+	txs := &sync.Map{}

 	mux := chi.NewMux()
-	mux.Use(
-		middleware.RequestLogger(&logFormatter{}),
-		middleware.Recoverer,
-		CorsMiddleware(opts),
-	)
-	mux.Mount("/api/"+Version, router)
+	mux.Route("/api/"+Version, func(r chi.Router) {
+		r.Use(
+			ApiMiddleware(db, txs),
+			TransactionMiddleware,
+			StoreMiddleware,
+		)
+		r.Handle("/*", router)
+	})
 	mux.Get("/openapi.json", func(rw http.ResponseWriter, req *http.Request) {
 		responseJSON(rw, http.StatusOK, router.OpenAPI())
 	})
diff --git a/http/handler_ccip_test.go b/http/handler_ccip_test.go
index c0df7e6a26..2a2cc4f077 100644
--- a/http/handler_ccip_test.go
+++ b/http/handler_ccip_test.go
@@ -49,7 +49,7 @@ func TestCCIPGet_WithValidData(t *testing.T) {
 	req := httptest.NewRequest(http.MethodGet, url, nil)
 	rec := httptest.NewRecorder()

-	handler, err := NewHandler(cdb, ServerOptions{})
+	handler, err := NewHandler(cdb)
 	require.NoError(t, err)
 	handler.ServeHTTP(rec, req)

@@ -88,7 +88,7 @@ func TestCCIPGet_WithSubscription(t *testing.T) {
 	req := httptest.NewRequest(http.MethodGet, url, nil)
 	rec := httptest.NewRecorder()

-	handler, err := NewHandler(cdb, ServerOptions{})
+	handler, err := NewHandler(cdb)
 	require.NoError(t, err)
 	handler.ServeHTTP(rec, req)

@@ -106,7 +106,7 @@ func TestCCIPGet_WithInvalidData(t *testing.T) {
 	req := httptest.NewRequest(http.MethodGet, url, nil)
 	rec := httptest.NewRecorder()

-	handler, err := NewHandler(cdb, ServerOptions{})
+	handler, err := NewHandler(cdb)
 	require.NoError(t, err)
 	handler.ServeHTTP(rec, req)

@@ -135,7 +135,7 @@ func TestCCIPPost_WithValidData(t *testing.T) {
 	req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", bytes.NewBuffer(body))
 	rec := httptest.NewRecorder()

-	handler, err := NewHandler(cdb, ServerOptions{})
+	handler, err := NewHandler(cdb)
 	require.NoError(t, err)
 	handler.ServeHTTP(rec, req)

@@ -167,7 +167,7 @@ func TestCCIPPost_WithInvalidGraphQLRequest(t *testing.T) {
 	req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", bytes.NewBuffer(body))
 	rec := httptest.NewRecorder()

-	handler, err := NewHandler(cdb, ServerOptions{})
+	handler, err := NewHandler(cdb)
 	require.NoError(t, err)
 	handler.ServeHTTP(rec, req)

@@ -181,7 +181,7 @@ func TestCCIPPost_WithInvalidBody(t *testing.T) {
 	req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", nil)
 	rec := httptest.NewRecorder()

-	handler, err := NewHandler(cdb, ServerOptions{})
+	handler, err := NewHandler(cdb)
 	require.NoError(t, err)
 	handler.ServeHTTP(rec, req)
diff --git a/http/handler_lens.go b/http/handler_lens.go
index 5d0838b76a..532eaacefc 100644
--- a/http/handler_lens.go
+++ b/http/handler_lens.go
@@ -14,7 +14,6 @@ import (
 	"net/http"

 	"github.com/getkin/kin-openapi/openapi3"
-	"github.com/go-chi/chi/v5"
 	"github.com/sourcenetwork/immutable/enumerable"

 	"github.com/sourcenetwork/defradb/client"
@@ -36,12 +35,13 @@ func (s *lensHandler) ReloadLenses(rw http.ResponseWriter, req *http.Request) {
 func (s *lensHandler) SetMigration(rw http.ResponseWriter, req *http.Request) {
 	lens := req.Context().Value(lensContextKey).(client.LensRegistry)

-	var cfg client.LensConfig
-	if err := requestJSON(req, &cfg); err != nil {
+	var request setMigrationRequest
+	if err := requestJSON(req, &request); err != nil {
 		responseJSON(rw, http.StatusBadRequest, errorResponse{err})
 		return
 	}
-	err := lens.SetMigration(req.Context(), cfg)
+
+	err := lens.SetMigration(req.Context(), request.CollectionID, request.Config)
 	if err != nil {
 		responseJSON(rw, http.StatusBadRequest, errorResponse{err})
 		return
@@ -52,12 +52,13 @@ func (s *lensHandler) SetMigration(rw http.ResponseWriter, req *http.Request) {
 func (s *lensHandler) MigrateUp(rw http.ResponseWriter, req *http.Request) {
 	lens := req.Context().Value(lensContextKey).(client.LensRegistry)

-	var src []map[string]any
-	if err := requestJSON(req, &src); err != nil {
+	var request migrateRequest
+	if err := requestJSON(req, &request); err != nil {
 		responseJSON(rw, http.StatusBadRequest, errorResponse{err})
 		return
 	}
-	result, err := lens.MigrateUp(req.Context(), enumerable.New(src), chi.URLParam(req, "version"))
+
+	result, err := lens.MigrateUp(req.Context(), enumerable.New(request.Data), request.CollectionID)
 	if err != nil {
 		responseJSON(rw, http.StatusBadRequest, errorResponse{err})
 		return
@@ -76,12 +77,13 @@ func (s *lensHandler) MigrateUp(rw http.ResponseWriter, req *http.Request) {
 func (s *lensHandler) MigrateDown(rw http.ResponseWriter, req *http.Request) {
 	lens := req.Context().Value(lensContextKey).(client.LensRegistry)

-	var src []map[string]any
-	if err := requestJSON(req, &src); err != nil {
+	var request migrateRequest
+	if err := requestJSON(req, &request); err != nil {
 		responseJSON(rw, http.StatusBadRequest, errorResponse{err})
 		return
 	}
-	result, err := lens.MigrateDown(req.Context(), enumerable.New(src), chi.URLParam(req, "version"))
+
+	result, err := lens.MigrateDown(req.Context(), enumerable.New(request.Data), request.CollectionID)
 	if err != nil {
 		responseJSON(rw, http.StatusBadRequest, errorResponse{err})
 		return
@@ -97,32 +99,6 @@ func (s *lensHandler) MigrateDown(rw http.ResponseWriter, req *http.Request) {
 	responseJSON(rw, http.StatusOK, value)
 }

-func (s *lensHandler) Config(rw http.ResponseWriter, req *http.Request) {
-	lens := req.Context().Value(lensContextKey).(client.LensRegistry)
-
-	cfgs, err := lens.Config(req.Context())
-	if err != nil {
-		responseJSON(rw, http.StatusBadRequest, errorResponse{err})
-		return
-	}
-	responseJSON(rw, http.StatusOK, cfgs)
-}
-
-func (s *lensHandler) HasMigration(rw http.ResponseWriter, req *http.Request) {
-	lens := req.Context().Value(lensContextKey).(client.LensRegistry)
-
-	exists, err := lens.HasMigration(req.Context(), chi.URLParam(req, "version"))
-	if err != nil {
-		responseJSON(rw, http.StatusBadRequest, errorResponse{err})
-		return
-	}
-	if !exists {
-		responseJSON(rw, http.StatusNotFound, errorResponse{ErrMigrationNotFound})
-		return
-	}
-	rw.WriteHeader(http.StatusOK)
-}
-
 func (h *lensHandler) bindRoutes(router *Router) {
 	errorResponse := &openapi3.ResponseRef{
 		Ref: "#/components/responses/error",
@@ -130,32 +106,20 @@ func (h *lensHandler) bindRoutes(router *Router) {
 	successResponse := &openapi3.ResponseRef{
 		Ref: "#/components/responses/success",
 	}
-	documentSchema := &openapi3.SchemaRef{
-		Ref: "#/components/schemas/document",
+	migrateSchema := &openapi3.SchemaRef{
+		Ref: "#/components/schemas/migrate_request",
+	}
+	setMigrationSchema := &openapi3.SchemaRef{
+		Ref: "#/components/schemas/set_migration_request",
 	}
-
-	lensConfigSchema := openapi3.NewSchemaRef("#/components/schemas/lens_config", nil)
-	lensConfigArraySchema := openapi3.NewArraySchema()
-	lensConfigArraySchema.Items = lensConfigSchema
-
-	lensConfigResponse := openapi3.NewResponse().
-		WithDescription("Lens configurations").
-		WithJSONSchema(lensConfigArraySchema)
-
-	lensConfig := openapi3.NewOperation()
-	lensConfig.OperationID = "lens_config"
-	lensConfig.Description = "List lens migrations"
-	lensConfig.Tags = []string{"lens"}
-	lensConfig.AddResponse(200, lensConfigResponse)
-	lensConfig.Responses.Set("400", errorResponse)

 	setMigrationRequest := openapi3.NewRequestBody().
 		WithRequired(true).
-		WithJSONSchemaRef(lensConfigSchema)
+		WithJSONSchemaRef(setMigrationSchema)

 	setMigration := openapi3.NewOperation()
-	setMigration.OperationID = "lens_set_migration"
-	setMigration.Description = "Add a new lens migration"
+	setMigration.OperationID = "lens_registry_set_migration"
+	setMigration.Description = "Add a new lens migration to registry"
 	setMigration.Tags = []string{"lens"}
 	setMigration.RequestBody = &openapi3.RequestBodyRef{
 		Value: setMigrationRequest,
@@ -165,7 +129,7 @@ func (h *lensHandler) bindRoutes(router *Router) {
 	setMigration.Responses.Set("400", errorResponse)

 	reloadLenses := openapi3.NewOperation()
-	reloadLenses.OperationID = "lens_reload"
+	reloadLenses.OperationID = "lens_registry_reload"
 	reloadLenses.Description = "Reload lens migrations"
 	reloadLenses.Tags = []string{"lens"}
 	reloadLenses.Responses = openapi3.NewResponses()
@@ -176,24 +140,13 @@ func (h *lensHandler) bindRoutes(router *Router) {
 		WithRequired(true).
 		WithSchema(openapi3.NewStringSchema())

-	hasMigration := openapi3.NewOperation()
-	hasMigration.OperationID = "lens_has_migration"
-	hasMigration.Description = "Check if a migration exists"
-	hasMigration.Tags = []string{"lens"}
-	hasMigration.AddParameter(versionPathParam)
-	hasMigration.Responses = openapi3.NewResponses()
-	hasMigration.Responses.Set("200", successResponse)
-	hasMigration.Responses.Set("400", errorResponse)
-
-	migrateSchema := openapi3.NewArraySchema()
-	migrateSchema.Items = documentSchema
 	migrateRequest := openapi3.NewRequestBody().
 		WithRequired(true).
-		WithContent(openapi3.NewContentWithJSONSchema(migrateSchema))
+		WithJSONSchemaRef(migrateSchema)

 	migrateUp := openapi3.NewOperation()
-	migrateUp.OperationID = "lens_migrate_up"
-	migrateUp.Description = "Migrate documents to a schema version"
+	migrateUp.OperationID = "lens_registry_migrate_up"
+	migrateUp.Description = "Migrate documents to a collection"
 	migrateUp.Tags = []string{"lens"}
 	migrateUp.RequestBody = &openapi3.RequestBodyRef{
 		Value: migrateRequest,
@@ -204,8 +157,8 @@ func (h *lensHandler) bindRoutes(router *Router) {
 	migrateUp.Responses.Set("400", errorResponse)

 	migrateDown := openapi3.NewOperation()
-	migrateDown.OperationID = "lens_migrate_down"
-	migrateDown.Description = "Migrate documents from a schema version"
+	migrateDown.OperationID = "lens_registry_migrate_down"
+	migrateDown.Description = "Migrate documents from a collection"
 	migrateDown.Tags = []string{"lens"}
 	migrateDown.RequestBody = &openapi3.RequestBodyRef{
 		Value: migrateRequest,
@@ -215,10 +168,8 @@ func (h *lensHandler) bindRoutes(router *Router) {
 	migrateDown.Responses.Set("200", successResponse)
 	migrateDown.Responses.Set("400", errorResponse)

-	router.AddRoute("/lens", http.MethodGet, lensConfig, h.Config)
-	router.AddRoute("/lens", http.MethodPost, setMigration, h.SetMigration)
-	router.AddRoute("/lens/reload", http.MethodPost, reloadLenses, h.ReloadLenses)
-	router.AddRoute("/lens/{version}", http.MethodGet, hasMigration, h.HasMigration)
-	router.AddRoute("/lens/{version}/up", http.MethodPost, migrateUp, h.MigrateUp)
-	router.AddRoute("/lens/{version}/down", http.MethodPost, migrateDown, h.MigrateDown)
+	router.AddRoute("/lens/registry", http.MethodPost, setMigration, h.SetMigration)
+	router.AddRoute("/lens/registry/reload", http.MethodPost, reloadLenses, h.ReloadLenses)
+	router.AddRoute("/lens/registry/{version}/up", http.MethodPost, migrateUp, h.MigrateUp)
+	router.AddRoute("/lens/registry/{version}/down", http.MethodPost, migrateDown, h.MigrateDown)
 }
*storeHandler) AddView(rw http.ResponseWriter, req *http.Request) { return } - defs, err := store.AddView(req.Context(), message.Query, message.SDL) + defs, err := store.AddView(req.Context(), message.Query, message.SDL, message.Transform) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -125,86 +127,79 @@ func (s *storeHandler) AddView(rw http.ResponseWriter, req *http.Request) { responseJSON(rw, http.StatusOK, defs) } +func (s *storeHandler) SetMigration(rw http.ResponseWriter, req *http.Request) { + store := req.Context().Value(storeContextKey).(client.Store) + + var cfg client.LensConfig + if err := requestJSON(req, &cfg); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + + err := store.SetMigration(req.Context(), cfg) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + rw.WriteHeader(http.StatusOK) +} + func (s *storeHandler) GetCollection(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) - switch { - case req.URL.Query().Has("name"): - col, err := store.GetCollectionByName(req.Context(), req.URL.Query().Get("name")) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - responseJSON(rw, http.StatusOK, col.Definition()) - case req.URL.Query().Has("schema_root"): - cols, err := store.GetCollectionsBySchemaRoot(req.Context(), req.URL.Query().Get("schema_root")) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - colDesc := make([]client.CollectionDefinition, len(cols)) - for i, col := range cols { - colDesc[i] = col.Definition() - } - responseJSON(rw, http.StatusOK, colDesc) - case req.URL.Query().Has("version_id"): - cols, err := store.GetCollectionsByVersionID(req.Context(), req.URL.Query().Get("version_id")) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - colDesc := make([]client.CollectionDefinition, len(cols)) - for i, col := range cols { - colDesc[i] = col.Definition() - } - responseJSON(rw, http.StatusOK, colDesc) - default: - cols, err := store.GetAllCollections(req.Context()) + options := client.CollectionFetchOptions{} + if req.URL.Query().Has("name") { + options.Name = immutable.Some(req.URL.Query().Get("name")) + } + if req.URL.Query().Has("version_id") { + options.SchemaVersionID = immutable.Some(req.URL.Query().Get("version_id")) + } + if req.URL.Query().Has("schema_root") { + options.SchemaRoot = immutable.Some(req.URL.Query().Get("schema_root")) + } + if req.URL.Query().Has("get_inactive") { + getInactiveStr := req.URL.Query().Get("get_inactive") + var err error + getInactive, err := strconv.ParseBool(getInactiveStr) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - colDesc := make([]client.CollectionDefinition, len(cols)) - for i, col := range cols { - colDesc[i] = col.Definition() - } - responseJSON(rw, http.StatusOK, colDesc) + options.IncludeInactive = immutable.Some(getInactive) } + + cols, err := store.GetCollections(req.Context(), options) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + colDesc := make([]client.CollectionDefinition, len(cols)) + for i, col := range cols { + colDesc[i] = col.Definition() + } + responseJSON(rw, http.StatusOK, colDesc) } func (s *storeHandler) GetSchema(rw http.ResponseWriter, req *http.Request) { store := 
req.Context().Value(storeContextKey).(client.Store) - switch { - case req.URL.Query().Has("version_id"): - schema, err := store.GetSchemaByVersionID(req.Context(), req.URL.Query().Get("version_id")) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - responseJSON(rw, http.StatusOK, schema) - case req.URL.Query().Has("root"): - schema, err := store.GetSchemasByRoot(req.Context(), req.URL.Query().Get("root")) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - responseJSON(rw, http.StatusOK, schema) - case req.URL.Query().Has("name"): - schema, err := store.GetSchemasByName(req.Context(), req.URL.Query().Get("name")) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - responseJSON(rw, http.StatusOK, schema) - default: - schema, err := store.GetAllSchemas(req.Context()) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - responseJSON(rw, http.StatusOK, schema) + options := client.SchemaFetchOptions{} + if req.URL.Query().Has("version_id") { + options.ID = immutable.Some(req.URL.Query().Get("version_id")) + } + if req.URL.Query().Has("root") { + options.Root = immutable.Some(req.URL.Query().Get("root")) + } + if req.URL.Query().Has("name") { + options.Name = immutable.Some(req.URL.Query().Get("name")) } + + schema, err := store.GetSchemas(req.Context(), options) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, schema) } func (s *storeHandler) GetAllIndexes(rw http.ResponseWriter, req *http.Request) { @@ -363,6 +358,9 @@ func (h *storeHandler) bindRoutes(router *Router) { addViewSchema := &openapi3.SchemaRef{ Ref: "#/components/schemas/add_view_request", } + lensConfigSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/lens_config", + } patchSchemaRequestSchema := &openapi3.SchemaRef{ Ref: "#/components/schemas/patch_schema_request", } @@ -401,19 +399,19 @@ func (h *storeHandler) bindRoutes(router *Router) { patchSchema.Responses.Set("200", successResponse) patchSchema.Responses.Set("400", errorResponse) - setDefaultSchemaVersionRequest := openapi3.NewRequestBody(). + setActiveSchemaVersionRequest := openapi3.NewRequestBody(). 
WithContent(openapi3.NewContentWithSchema(openapi3.NewStringSchema(), []string{"text/plain"})) - setDefaultSchemaVersion := openapi3.NewOperation() - setDefaultSchemaVersion.OperationID = "set_default_schema_version" - setDefaultSchemaVersion.Description = "Set the default schema version for a collection" - setDefaultSchemaVersion.Tags = []string{"schema"} - setDefaultSchemaVersion.RequestBody = &openapi3.RequestBodyRef{ - Value: setDefaultSchemaVersionRequest, + setActiveSchemaVersion := openapi3.NewOperation() + setActiveSchemaVersion.OperationID = "set_default_schema_version" + setActiveSchemaVersion.Description = "Set the default schema version for a collection" + setActiveSchemaVersion.Tags = []string{"schema"} + setActiveSchemaVersion.RequestBody = &openapi3.RequestBodyRef{ + Value: setActiveSchemaVersionRequest, } - setDefaultSchemaVersion.Responses = openapi3.NewResponses() - setDefaultSchemaVersion.Responses.Set("200", successResponse) - setDefaultSchemaVersion.Responses.Set("400", errorResponse) + setActiveSchemaVersion.Responses = openapi3.NewResponses() + setActiveSchemaVersion.Responses.Set("200", successResponse) + setActiveSchemaVersion.Responses.Set("400", errorResponse) backupRequest := openapi3.NewRequestBody(). WithRequired(true). @@ -450,6 +448,9 @@ func (h *storeHandler) bindRoutes(router *Router) { collectionVersionIdQueryParam := openapi3.NewQueryParameter("version_id"). WithDescription("Collection schema version id"). WithSchema(openapi3.NewStringSchema()) + collectionGetInactiveQueryParam := openapi3.NewQueryParameter("get_inactive"). + WithDescription("If true, inactive collections will be returned in addition to active ones"). + WithSchema(openapi3.NewStringSchema()) collectionsSchema := openapi3.NewArraySchema() collectionsSchema.Items = collectionSchema @@ -471,6 +472,7 @@ func (h *storeHandler) bindRoutes(router *Router) { collectionDescribe.AddParameter(collectionNameQueryParam) collectionDescribe.AddParameter(collectionSchemaRootQueryParam) collectionDescribe.AddParameter(collectionVersionIdQueryParam) + collectionDescribe.AddParameter(collectionGetInactiveQueryParam) collectionDescribe.AddResponse(200, collectionsResponse) collectionDescribe.Responses.Set("400", errorResponse) @@ -501,6 +503,21 @@ func (h *storeHandler) bindRoutes(router *Router) { views.AddResponse(200, addViewResponse) views.Responses.Set("400", errorResponse) + setMigrationRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithJSONSchemaRef(lensConfigSchema) + + setMigration := openapi3.NewOperation() + setMigration.OperationID = "lens_set_migration" + setMigration.Description = "Add a new lens migration" + setMigration.Tags = []string{"lens"} + setMigration.RequestBody = &openapi3.RequestBodyRef{ + Value: setMigrationRequest, + } + setMigration.Responses = openapi3.NewResponses() + setMigration.Responses.Set("200", successResponse) + setMigration.Responses.Set("400", errorResponse) + schemaNameQueryParam := openapi3.NewQueryParameter("name"). WithDescription("Schema name"). 
 		WithSchema(openapi3.NewStringSchema())
@@ -574,11 +591,12 @@ func (h *storeHandler) bindRoutes(router *Router) {
 	router.AddRoute("/backup/import", http.MethodPost, backupImport, h.BasicImport)
 	router.AddRoute("/collections", http.MethodGet, collectionDescribe, h.GetCollection)
 	router.AddRoute("/view", http.MethodPost, views, h.AddView)
 	router.AddRoute("/graphql", http.MethodGet, graphQLGet, h.ExecRequest)
 	router.AddRoute("/graphql", http.MethodPost, graphQLPost, h.ExecRequest)
 	router.AddRoute("/debug/dump", http.MethodGet, debugDump, h.PrintDump)
 	router.AddRoute("/schema", http.MethodPost, addSchema, h.AddSchema)
 	router.AddRoute("/schema", http.MethodPatch, patchSchema, h.PatchSchema)
 	router.AddRoute("/schema", http.MethodGet, schemaDescribe, h.GetSchema)
-	router.AddRoute("/schema/default", http.MethodPost, setDefaultSchemaVersion, h.SetDefaultSchemaVersion)
+	router.AddRoute("/schema/default", http.MethodPost, setActiveSchemaVersion, h.SetActiveSchemaVersion)
+	router.AddRoute("/lens", http.MethodPost, setMigration, h.SetMigration)
 }
diff --git a/http/middleware.go b/http/middleware.go
index d33cbfb5ff..f18ba8bf60 100644
--- a/http/middleware.go
+++ b/http/middleware.go
@@ -56,13 +56,13 @@ var (
 )
 
 // CorsMiddleware handles cross origin request
-func CorsMiddleware(opts ServerOptions) func(http.Handler) http.Handler {
+func CorsMiddleware(allowedOrigins []string) func(http.Handler) http.Handler {
 	return cors.Handler(cors.Options{
 		AllowOriginFunc: func(r *http.Request, origin string) bool {
-			if slices.Contains(opts.AllowedOrigins, "*") {
+			if slices.Contains(allowedOrigins, "*") {
 				return true
 			}
-			return slices.Contains(opts.AllowedOrigins, strings.ToLower(origin))
+			return slices.Contains(allowedOrigins, strings.ToLower(origin))
 		},
 		AllowedMethods: []string{"GET", "HEAD", "POST", "PATCH", "DELETE"},
 		AllowedHeaders: []string{"Content-Type"},
@@ -71,13 +71,9 @@ func CorsMiddleware(opts ServerOptions) func(http.Handler) http.Handler {
 }
 
 // ApiMiddleware sets the required context values for all API requests.
-func ApiMiddleware(db client.DB, txs *sync.Map, opts ServerOptions) func(http.Handler) http.Handler { +func ApiMiddleware(db client.DB, txs *sync.Map) func(http.Handler) http.Handler { return func(next http.Handler) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - if opts.TLS.HasValue() { - rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains") - } - ctx := req.Context() ctx = context.WithValue(ctx, dbContextKey, db) ctx = context.WithValue(ctx, txsContextKey, txs) diff --git a/http/openapi.go b/http/openapi.go index fc10881f5b..12a832c704 100644 --- a/http/openapi.go +++ b/http/openapi.go @@ -40,6 +40,8 @@ var openApiSchemas = map[string]any{ "ccip_response": &CCIPResponse{}, "patch_schema_request": &patchSchemaRequest{}, "add_view_request": &addViewRequest{}, + "migrate_request": &migrateRequest{}, + "set_migration_request": &setMigrationRequest{}, } func NewOpenAPISpec() (*openapi3.T, error) { diff --git a/http/server.go b/http/server.go index 768542c68d..f975e200ad 100644 --- a/http/server.go +++ b/http/server.go @@ -13,304 +13,187 @@ package http import ( "context" "crypto/tls" - "fmt" "net" "net/http" - "path" - "strings" + "time" - "github.com/sourcenetwork/immutable" - "golang.org/x/crypto/acme/autocert" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" -) - -const ( - // These constants are best effort durations that fit our current API - // and possibly prevent from running out of file descriptors. - // readTimeout = 5 * time.Second - // writeTimeout = 10 * time.Second - // idleTimeout = 120 * time.Second - - // Temparily disabling timeouts until [this proposal](https://github.com/golang/go/issues/54136) is merged. - // https://github.com/sourcenetwork/defradb/issues/927 - readTimeout = 0 - writeTimeout = 0 - idleTimeout = 0 + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" ) -const ( - httpPort = ":80" - httpsPort = ":443" -) - -// Server struct holds the Handler for the HTTP API. -type Server struct { - options ServerOptions - listener net.Listener - certManager *autocert.Manager - // address that is assigned to the server on listen - address string - - http.Server +// We only allow cipher suites that are marked secure +// by ssllabs +var tlsCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, } type ServerOptions struct { + // Address is the bind address the server listens on. + Address string // AllowedOrigins is the list of allowed origins for CORS. AllowedOrigins []string - // TLS enables https when the value is present. - TLS immutable.Option[TLSOptions] - // RootDirectory is the directory for the node config. - RootDir string - // Domain is the domain for the API (optional). - Domain immutable.Option[string] -} - -type TLSOptions struct { - // PublicKey is the public key for TLS. Ignored if domain is set. - PublicKey string - // PrivateKey is the private key for TLS. Ignored if domain is set. - PrivateKey string - // Email is the address for the CA to send problem notifications (optional) - Email string - // Port is the tls port - Port string + // TLSCertPath is the path to the TLS certificate. 
+	TLSCertPath string
+	// TLSKeyPath is the path to the TLS key.
+	TLSKeyPath string
+	// ReadTimeout is the read timeout for connections.
+	ReadTimeout time.Duration
+	// WriteTimeout is the write timeout for connections.
+	WriteTimeout time.Duration
+	// IdleTimeout is the idle timeout for connections.
+	IdleTimeout time.Duration
 }
 
-// NewServer instantiates a new server with the given http.Handler.
-func NewServer(db client.DB, options ...func(*Server)) (*Server, error) {
-	srv := &Server{
-		Server: http.Server{
-			ReadTimeout:  readTimeout,
-			WriteTimeout: writeTimeout,
-			IdleTimeout:  idleTimeout,
-		},
-	}
-
-	for _, opt := range append(options, DefaultOpts()) {
-		opt(srv)
-	}
-
-	handler, err := NewHandler(db, srv.options)
-	if err != nil {
-		return nil, err
+// DefaultServerOptions returns the default options for the server.
+func DefaultServerOptions() *ServerOptions {
+	return &ServerOptions{
+		Address: "127.0.0.1:9181",
 	}
-	srv.Handler = handler
-	return srv, nil
 }
 
-func newHTTPRedirServer(m *autocert.Manager) *Server {
-	srv := &Server{
-		Server: http.Server{
-			ReadTimeout:  readTimeout,
-			WriteTimeout: writeTimeout,
-			IdleTimeout:  idleTimeout,
-		},
-	}
-
-	srv.Addr = httpPort
-	srv.Handler = m.HTTPHandler(nil)
+// ServerOpt is a function that configures server options.
+type ServerOpt func(*ServerOptions)
 
-	return srv
-}
-
-// DefaultOpts returns the default options for the server.
-func DefaultOpts() func(*Server) {
-	return func(s *Server) {
-		if s.Addr == "" {
-			s.Addr = "localhost:9181"
-		}
+// WithAllowedOrigins sets the allowed origins for CORS.
+func WithAllowedOrigins(origins ...string) ServerOpt {
+	return func(opts *ServerOptions) {
+		opts.AllowedOrigins = origins
 	}
 }
 
-// WithAllowedOrigins returns an option to set the allowed origins for CORS.
-func WithAllowedOrigins(origins ...string) func(*Server) {
-	return func(s *Server) {
-		s.options.AllowedOrigins = append(s.options.AllowedOrigins, origins...)
+// WithAddress sets the bind address for the server.
+func WithAddress(addr string) ServerOpt {
+	return func(opts *ServerOptions) {
+		opts.Address = addr
 	}
 }
 
-// WithAddress returns an option to set the address for the server.
-func WithAddress(addr string) func(*Server) {
-	return func(s *Server) {
-		s.Addr = addr
-
-		// If the address is not localhost, we check to see if it's a valid IP address.
-		// If it's not a valid IP, we assume that it's a domain name to be used with TLS.
-		if !strings.HasPrefix(addr, "localhost:") && !strings.HasPrefix(addr, ":") {
-			host, _, err := net.SplitHostPort(addr)
-			if err != nil {
-				host = addr
-			}
-			ip := net.ParseIP(host)
-			if ip == nil {
-				s.Addr = httpPort
-				s.options.Domain = immutable.Some(host)
-			}
-		}
+// WithReadTimeout sets the server read timeout.
+func WithReadTimeout(timeout time.Duration) ServerOpt {
+	return func(opts *ServerOptions) {
+		opts.ReadTimeout = timeout
 	}
 }
 
-// WithCAEmail returns an option to set the email address for the CA to send problem notifications.
-func WithCAEmail(email string) func(*Server) {
-	return func(s *Server) {
-		tlsOpt := s.options.TLS.Value()
-		tlsOpt.Email = email
-		s.options.TLS = immutable.Some(tlsOpt)
+// WithWriteTimeout sets the server write timeout.
+func WithWriteTimeout(timeout time.Duration) ServerOpt {
+	return func(opts *ServerOptions) {
+		opts.WriteTimeout = timeout
 	}
 }
 
-// WithRootDir returns an option to set the root directory for the node config.
-func WithRootDir(rootDir string) func(*Server) { - return func(s *Server) { - s.options.RootDir = rootDir +// WithIdleTimeout sets the server idle timeout. +func WithIdleTimeout(timeout time.Duration) ServerOpt { + return func(opts *ServerOptions) { + opts.IdleTimeout = timeout } } -// WithSelfSignedCert returns an option to set the public and private keys for TLS. -func WithSelfSignedCert(pubKey, privKey string) func(*Server) { - return func(s *Server) { - tlsOpt := s.options.TLS.Value() - tlsOpt.PublicKey = pubKey - tlsOpt.PrivateKey = privKey - s.options.TLS = immutable.Some(tlsOpt) +// WithTLSCertPath sets the server TLS certificate path. +func WithTLSCertPath(path string) ServerOpt { + return func(opts *ServerOptions) { + opts.TLSCertPath = path } } -// WithTLS returns an option to enable TLS. -func WithTLS() func(*Server) { - return func(s *Server) { - tlsOpt := s.options.TLS.Value() - tlsOpt.Port = httpsPort - s.options.TLS = immutable.Some(tlsOpt) +// WithTLSKeyPath sets the server TLS private key path. +func WithTLSKeyPath(path string) ServerOpt { + return func(opts *ServerOptions) { + opts.TLSKeyPath = path } } -// WithTLSPort returns an option to set the port for TLS. -func WithTLSPort(port int) func(*Server) { - return func(s *Server) { - tlsOpt := s.options.TLS.Value() - tlsOpt.Port = fmt.Sprintf(":%d", port) - s.options.TLS = immutable.Some(tlsOpt) - } +// Server struct holds the Handler for the HTTP API. +type Server struct { + options *ServerOptions + server *http.Server + listener net.Listener + isTLS bool } -// Listen creates a new net.Listener and saves it on the receiver. -func (s *Server) Listen(ctx context.Context) error { - var err error - if s.options.TLS.HasValue() { - return s.listenWithTLS(ctx) - } - - lc := net.ListenConfig{} - s.listener, err = lc.Listen(ctx, "tcp", s.Addr) - if err != nil { - return errors.WithStack(err) +// NewServer instantiates a new server with the given http.Handler. +func NewServer(handler http.Handler, opts ...ServerOpt) (*Server, error) { + options := DefaultServerOptions() + for _, opt := range opts { + opt(options) } - // Save the address on the server in case the port was set to random - // and that we want to see what was assigned. 
- s.address = s.listener.Addr().String() - - return nil -} + // setup a mux with the default middleware stack + mux := chi.NewMux() + mux.Use( + middleware.RequestLogger(&logFormatter{}), + middleware.Recoverer, + CorsMiddleware(options.AllowedOrigins), + ) + mux.Handle("/*", handler) -func (s *Server) listenWithTLS(ctx context.Context) error { - cfg := &tls.Config{ - MinVersion: tls.VersionTLS12, - // We only allow cipher suites that are marked secure - // by ssllabs - CipherSuites: []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - }, - ServerName: "DefraDB", + server := &http.Server{ + ReadTimeout: options.ReadTimeout, + WriteTimeout: options.WriteTimeout, + IdleTimeout: options.IdleTimeout, + Handler: mux, } - if s.options.Domain.HasValue() && s.options.Domain.Value() != "" { - s.Addr = s.options.TLS.Value().Port - - if s.options.TLS.Value().Email == "" || s.options.TLS.Value().Email == config.DefaultAPIEmail { - return ErrNoEmail - } - - certCache := path.Join(s.options.RootDir, "autocerts") - - log.FeedbackInfo( - ctx, - "Generating auto certificate", - logging.NewKV("Domain", s.options.Domain.Value()), - logging.NewKV("Certificate cache", certCache), - ) - - m := &autocert.Manager{ - Cache: autocert.DirCache(certCache), - Prompt: autocert.AcceptTOS, - Email: s.options.TLS.Value().Email, - HostPolicy: autocert.HostWhitelist(s.options.Domain.Value()), - } - - cfg.GetCertificate = m.GetCertificate + return &Server{ + options: options, + server: server, + }, nil +} - // We set manager on the server instance to later start - // a redirection server. - s.certManager = m - } else { - // When not using auto cert, we create a self signed certificate - // with the provided public and prive keys. - log.FeedbackInfo(ctx, "Generating self signed certificate") +// Shutdown gracefully shuts down the server without interrupting any active connections. +func (s *Server) Shutdown(ctx context.Context) error { + return s.server.Shutdown(ctx) +} - cert, err := tls.LoadX509KeyPair( - s.options.TLS.Value().PrivateKey, - s.options.TLS.Value().PublicKey, - ) - if err != nil { - return NewErrFailedToLoadKeys(err, s.options.TLS.Value().PublicKey, s.options.TLS.Value().PrivateKey) - } +// SetListener sets a new listener on the Server. +func (s *Server) SetListener() (err error) { + s.listener, err = net.Listen("tcp", s.options.Address) + return err +} - cfg.Certificates = []tls.Certificate{cert} +// Serve serves incoming connections. +func (s *Server) Serve() error { + if s.options.TLSCertPath == "" && s.options.TLSKeyPath == "" { + return s.serve() } + s.isTLS = true + return s.serveTLS() +} - var err error - s.listener, err = tls.Listen("tcp", s.Addr, cfg) - if err != nil { - return errors.WithStack(err) +// serve serves http connections. +func (s *Server) serve() error { + if s.listener == nil { + return ErrNoListener } - - // Save the address on the server in case the port was set to random - // and that we want to see what was assigned. - s.address = s.listener.Addr().String() - - return nil + return s.server.Serve(s.listener) } -// Run calls Serve with the receiver's listener. -func (s *Server) Run(ctx context.Context) error { +// serveTLS serves https connections. 
+func (s *Server) serveTLS() error { if s.listener == nil { return ErrNoListener } - - if s.certManager != nil { - // When using TLS it's important to redirect http requests to https - go func() { - srv := newHTTPRedirServer(s.certManager) - err := srv.ListenAndServe() - if err != nil && !errors.Is(err, http.ErrServerClosed) { - log.Info(ctx, "Something went wrong with the redirection server", logging.NewKV("Error", err)) - } - }() + cert, err := tls.LoadX509KeyPair(s.options.TLSCertPath, s.options.TLSKeyPath) + if err != nil { + return err + } + config := &tls.Config{ + ServerName: "DefraDB", + MinVersion: tls.VersionTLS12, + CipherSuites: tlsCipherSuites, + Certificates: []tls.Certificate{cert}, } - return s.Serve(s.listener) + return s.server.Serve(tls.NewListener(s.listener, config)) } -// AssignedAddr returns the address that was assigned to the server on calls to listen. -func (s *Server) AssignedAddr() string { - return s.address +func (s *Server) Address() string { + if s.isTLS { + return "https://" + s.listener.Addr().String() + } + return "http://" + s.listener.Addr().String() } diff --git a/http/server_test.go b/http/server_test.go index 04095b7c15..ec9ab8ab75 100644 --- a/http/server_test.go +++ b/http/server_test.go @@ -12,111 +12,19 @@ package http import ( "context" + "crypto/tls" "net/http" "os" + "path/filepath" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/crypto/acme/autocert" ) -func TestNewServerAndRunWithoutListener(t *testing.T) { - ctx := context.Background() - s, err := NewServer(nil, WithAddress(":0")) - require.NoError(t, err) - if ok := assert.NotNil(t, s); ok { - assert.Equal(t, ErrNoListener, s.Run(ctx)) - } -} - -func TestNewServerAndRunWithListenerAndInvalidPort(t *testing.T) { - ctx := context.Background() - s, err := NewServer(nil, WithAddress(":303000")) - require.NoError(t, err) - if ok := assert.NotNil(t, s); ok { - assert.Error(t, s.Listen(ctx)) - } -} - -func TestNewServerAndRunWithListenerAndValidPort(t *testing.T) { - ctx := context.Background() - serverRunning := make(chan struct{}) - serverDone := make(chan struct{}) - s, err := NewServer(nil, WithAddress(":0")) - require.NoError(t, err) - go func() { - close(serverRunning) - err := s.Listen(ctx) - assert.NoError(t, err) - err = s.Run(ctx) - assert.ErrorIs(t, http.ErrServerClosed, err) - defer close(serverDone) - }() - - <-serverRunning - - s.Shutdown(context.Background()) - - <-serverDone -} - -func TestNewServerAndRunWithAutocertWithoutEmail(t *testing.T) { - ctx := context.Background() - dir := t.TempDir() - s, err := NewServer(nil, WithAddress("example.com"), WithRootDir(dir), WithTLSPort(0)) - require.NoError(t, err) - err = s.Listen(ctx) - assert.ErrorIs(t, err, ErrNoEmail) - - s.Shutdown(context.Background()) -} - -func TestNewServerAndRunWithAutocert(t *testing.T) { - ctx := context.Background() - serverRunning := make(chan struct{}) - serverDone := make(chan struct{}) - dir := t.TempDir() - s, err := NewServer(nil, WithAddress("example.com"), WithRootDir(dir), WithTLSPort(0), WithCAEmail("dev@defradb.net")) - require.NoError(t, err) - go func() { - close(serverRunning) - err := s.Listen(ctx) - assert.NoError(t, err) - err = s.Run(ctx) - assert.ErrorIs(t, http.ErrServerClosed, err) - defer close(serverDone) - }() - - <-serverRunning - - s.Shutdown(context.Background()) - - <-serverDone -} - -func TestNewServerAndRunWithSelfSignedCertAndNoKeyFiles(t *testing.T) { - ctx := context.Background() - serverRunning := make(chan 
struct{})
-	serverDone := make(chan struct{})
-	dir := t.TempDir()
-	s, err := NewServer(nil, WithAddress("localhost:0"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key"))
-	require.NoError(t, err)
-	go func() {
-		close(serverRunning)
-		err := s.Listen(ctx)
-		assert.Contains(t, err.Error(), "failed to load given keys")
-		defer close(serverDone)
-	}()
-
-	<-serverRunning
-
-	s.Shutdown(context.Background())
-
-	<-serverDone
-}
-
-const pubKey = `-----BEGIN EC PARAMETERS-----
+// tlsKey is the TLS private key in PEM format
+const tlsKey = `-----BEGIN EC PARAMETERS-----
 BgUrgQQAIg==
 -----END EC PARAMETERS-----
 -----BEGIN EC PRIVATE KEY-----
@@ -126,7 +34,8 @@
 pS0gW/SYpAncHhRuz18RQ2ycuXlSN1S/PAryRZ5PK2xORKfwpguEDEMdVwbHorZO
 K44P/h3dhyNyAyf8rcRoqKXcl/K/uew=
 -----END EC PRIVATE KEY-----`
 
-const privKey = `-----BEGIN CERTIFICATE-----
+// tlsCert is the TLS certificate in PEM format
+const tlsCert = `-----BEGIN CERTIFICATE-----
 MIICQDCCAcUCCQDpMnN1gQ4fGTAKBggqhkjOPQQDAjCBiDELMAkGA1UEBhMCY2Ex
 DzANBgNVBAgMBlF1ZWJlYzEQMA4GA1UEBwwHQ2hlbHNlYTEPMA0GA1UECgwGU291
 cmNlMRAwDgYDVQQLDAdEZWZyYURCMQ8wDQYDVQQDDAZzb3VyY2UxIjAgBgkqhkiG
@@ -142,121 +51,177 @@
 kgIxAKaEGC+lqp0aaN+yubYLRiTDxOlNpyiHox3nZiL4bG/CCdPDvbX63QcdI2yq
 XPKczg==
 -----END CERTIFICATE-----`
 
-func TestNewServerAndRunWithSelfSignedCertAndInvalidPort(t *testing.T) {
-	ctx := context.Background()
-	serverRunning := make(chan struct{})
-	serverDone := make(chan struct{})
-	dir := t.TempDir()
-	err := os.WriteFile(dir+"/server.key", []byte(privKey), 0644)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = os.WriteFile(dir+"/server.crt", []byte(pubKey), 0644)
-	if err != nil {
-		t.Fatal(err)
-	}
-	s, err := NewServer(nil, WithAddress(":303000"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key"))
+// insecureClient is an http client that trusts all tls certificates
+var insecureClient = &http.Client{
+	Transport: &http.Transport{
+		TLSClientConfig: &tls.Config{
+			InsecureSkipVerify: true,
+		},
+	},
+}
+
+// testHandler returns an empty body and 200 status code
+var testHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusOK)
+})
+
+func TestServerServeWithNoListener(t *testing.T) {
+	srv, err := NewServer(testHandler)
 	require.NoError(t, err)
+
+	err = srv.Serve()
+	require.ErrorIs(t, err, ErrNoListener)
+}
+
+func TestServerServeWithTLSAndNoListener(t *testing.T) {
+	certPath, keyPath := writeTestCerts(t)
+	srv, err := NewServer(testHandler, WithTLSCertPath(certPath), WithTLSKeyPath(keyPath))
+	require.NoError(t, err)
+
+	err = srv.Serve()
+	require.ErrorIs(t, err, ErrNoListener)
+}
+
+func TestServerListenAndServeWithInvalidAddress(t *testing.T) {
+	srv, err := NewServer(testHandler, WithAddress("invalid"))
+	require.NoError(t, err)
+
+	err = srv.SetListener()
+	require.ErrorContains(t, err, "address invalid")
+}
+
+func TestServerListenAndServeWithTLSAndInvalidAddress(t *testing.T) {
+	certPath, keyPath := writeTestCerts(t)
+	srv, err := NewServer(testHandler, WithAddress("invalid"), WithTLSCertPath(certPath), WithTLSKeyPath(keyPath))
+	require.NoError(t, err)
+
+	err = srv.SetListener()
+	require.ErrorContains(t, err, "address invalid")
+}
+
+func TestServerListenAndServeWithTLSAndInvalidCerts(t *testing.T) {
+	srv, err := NewServer(
+		testHandler,
+		WithTLSCertPath("invalid.crt"),
+		WithTLSKeyPath("invalid.key"),
+		WithAddress("127.0.0.1:30001"),
+	)
+	require.NoError(t, err)
+
+	err = srv.SetListener()
+	require.NoError(t, err)
+	err = srv.Serve()
require.ErrorContains(t, err, "no such file or directory") + err = srv.listener.Close() + require.NoError(t, err) +} + +func TestServerListenAndServeWithAddress(t *testing.T) { + srv, err := NewServer(testHandler, WithAddress("127.0.0.1:30001")) + require.NoError(t, err) + go func() { - close(serverRunning) - err := s.Listen(ctx) - assert.Contains(t, err.Error(), "invalid port") - defer close(serverDone) + err := srv.SetListener() + require.NoError(t, err) + err = srv.Serve() + require.ErrorIs(t, http.ErrServerClosed, err) }() - <-serverRunning + // wait for server to start + <-time.After(time.Second * 1) - s.Shutdown(context.Background()) + res, err := http.Get("http://127.0.0.1:30001") + require.NoError(t, err) + + defer res.Body.Close() + assert.Equal(t, 200, res.StatusCode) - <-serverDone + err = srv.Shutdown(context.Background()) + require.NoError(t, err) } -func TestNewServerAndRunWithSelfSignedCert(t *testing.T) { - ctx := context.Background() - serverRunning := make(chan struct{}) - serverDone := make(chan struct{}) - dir := t.TempDir() - err := os.WriteFile(dir+"/server.key", []byte(privKey), 0644) - if err != nil { - t.Fatal(err) - } - err = os.WriteFile(dir+"/server.crt", []byte(pubKey), 0644) - if err != nil { - t.Fatal(err) - } - s, err := NewServer(nil, WithAddress("localhost:0"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) +func TestServerListenAndServeWithTLS(t *testing.T) { + certPath, keyPath := writeTestCerts(t) + srv, err := NewServer(testHandler, WithAddress("127.0.0.1:8443"), WithTLSCertPath(certPath), WithTLSKeyPath(keyPath)) require.NoError(t, err) + go func() { - close(serverRunning) - err := s.Listen(ctx) - assert.NoError(t, err) - err = s.Run(ctx) - assert.ErrorIs(t, http.ErrServerClosed, err) - defer close(serverDone) + err := srv.SetListener() + require.NoError(t, err) + err = srv.Serve() + require.ErrorIs(t, http.ErrServerClosed, err) }() - <-serverRunning + // wait for server to start + <-time.After(time.Second * 1) - s.Shutdown(context.Background()) + res, err := insecureClient.Get("https://127.0.0.1:8443") + require.NoError(t, err) - <-serverDone -} + defer res.Body.Close() + assert.Equal(t, 200, res.StatusCode) -func TestNewServerWithoutOptions(t *testing.T) { - s, err := NewServer(nil) + err = srv.Shutdown(context.Background()) require.NoError(t, err) - assert.Equal(t, "localhost:9181", s.Addr) - assert.Equal(t, []string(nil), s.options.AllowedOrigins) } -func TestNewServerWithAddress(t *testing.T) { - s, err := NewServer(nil, WithAddress("localhost:9999")) +func TestServerListenAndServeWithAllowedOrigins(t *testing.T) { + srv, err := NewServer(testHandler, WithAllowedOrigins("localhost"), WithAddress("127.0.0.1:30001")) require.NoError(t, err) - assert.Equal(t, "localhost:9999", s.Addr) -} -func TestNewServerWithDomainAddress(t *testing.T) { - s, err := NewServer(nil, WithAddress("example.com")) + go func() { + err := srv.SetListener() + require.NoError(t, err) + err = srv.Serve() + require.ErrorIs(t, http.ErrServerClosed, err) + }() + + // wait for server to start + <-time.After(time.Second * 1) + + req, err := http.NewRequest(http.MethodOptions, "http://127.0.0.1:30001", nil) require.NoError(t, err) - assert.Equal(t, "example.com", s.options.Domain.Value()) - assert.NotNil(t, s.options.TLS) -} + req.Header.Add("origin", "localhost") -func TestNewServerWithAllowedOrigins(t *testing.T) { - s, err := NewServer(nil, WithAllowedOrigins("https://source.network", "https://app.source.network")) + res, err := http.DefaultClient.Do(req) 
require.NoError(t, err) - assert.Equal(t, []string{"https://source.network", "https://app.source.network"}, s.options.AllowedOrigins) -} -func TestNewServerWithCAEmail(t *testing.T) { - s, err := NewServer(nil, WithCAEmail("me@example.com")) + defer res.Body.Close() + assert.Equal(t, 200, res.StatusCode) + assert.Equal(t, "localhost", res.Header.Get("Access-Control-Allow-Origin")) + + err = srv.Shutdown(context.Background()) require.NoError(t, err) - assert.Equal(t, "me@example.com", s.options.TLS.Value().Email) } -func TestNewServerWithRootDir(t *testing.T) { - dir := t.TempDir() - s, err := NewServer(nil, WithRootDir(dir)) +func TestServerWithReadTimeout(t *testing.T) { + srv, err := NewServer(testHandler, WithReadTimeout(time.Second)) require.NoError(t, err) - assert.Equal(t, dir, s.options.RootDir) + assert.Equal(t, time.Second, srv.server.ReadTimeout) } -func TestNewServerWithTLSPort(t *testing.T) { - s, err := NewServer(nil, WithTLSPort(44343)) +func TestServerWithWriteTimeout(t *testing.T) { + srv, err := NewServer(testHandler, WithWriteTimeout(time.Second)) require.NoError(t, err) - assert.Equal(t, ":44343", s.options.TLS.Value().Port) + assert.Equal(t, time.Second, srv.server.WriteTimeout) } -func TestNewServerWithSelfSignedCert(t *testing.T) { - s, err := NewServer(nil, WithSelfSignedCert("pub.key", "priv.key")) +func TestServerWithIdleTimeout(t *testing.T) { + srv, err := NewServer(testHandler, WithIdleTimeout(time.Second)) require.NoError(t, err) - assert.Equal(t, "pub.key", s.options.TLS.Value().PublicKey) - assert.Equal(t, "priv.key", s.options.TLS.Value().PrivateKey) - assert.NotNil(t, s.options.TLS) + assert.Equal(t, time.Second, srv.server.IdleTimeout) } -func TestNewHTTPRedirServer(t *testing.T) { - m := &autocert.Manager{} - s := newHTTPRedirServer(m) - assert.Equal(t, ":80", s.Addr) +func writeTestCerts(t *testing.T) (string, string) { + tempDir := t.TempDir() + certPath := filepath.Join(tempDir, "cert.pub") + keyPath := filepath.Join(tempDir, "cert.key") + + err := os.WriteFile(certPath, []byte(tlsCert), 0644) + require.NoError(t, err) + + err = os.WriteFile(keyPath, []byte(tlsKey), 0644) + require.NoError(t, err) + + return certPath, keyPath } diff --git a/lens/fetcher.go b/lens/fetcher.go index 71f5b6243a..1e093f3966 100644 --- a/lens/fetcher.go +++ b/lens/fetcher.go @@ -36,7 +36,7 @@ type lensedFetcher struct { col client.Collection // Cache the fieldDescriptions mapped by name to allow for cheaper access within the fetcher loop - fieldDescriptionsByName map[string]client.FieldDescription + fieldDescriptionsByName map[string]client.FieldDefinition targetVersionID string @@ -59,7 +59,7 @@ func (f *lensedFetcher) Init( ctx context.Context, txn datastore.Txn, col client.Collection, - fields []client.FieldDescription, + fields []client.FieldDefinition, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, @@ -67,42 +67,36 @@ func (f *lensedFetcher) Init( ) error { f.col = col - f.fieldDescriptionsByName = make(map[string]client.FieldDescription, len(col.Schema().Fields)) + f.fieldDescriptionsByName = make(map[string]client.FieldDefinition, len(col.Schema().Fields)) // Add cache the field descriptions in reverse, allowing smaller-index fields to overwrite any later // ones. This should never really happen here, but it ensures the result is consistent with col.GetField // which returns the first one it finds with a matching name. 
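The reverse loop that follows relies on a small map idiom: writing entries from the highest index down to the lowest makes the lowest-index field the final write, so it wins any name collision, which is equivalent to a forward scan that keeps the first match. A minimal, self-contained sketch of just that idiom (toy field names, not DefraDB types):

	package main

	import "fmt"

	func main() {
		// Index 0 should win for the duplicated name, exactly as a forward
		// "first match wins" lookup would behave.
		names := []string{"points", "points", "rank"}
		kinds := []string{"Int", "Float", "Int"}
		byName := map[string]string{}
		for i := len(names) - 1; i >= 0; i-- {
			byName[names[i]] = kinds[i] // smaller indices are written last, so they win
		}
		fmt.Println(byName["points"]) // Int
	}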
- for i := len(col.Schema().Fields) - 1; i >= 0; i-- { - field := col.Schema().Fields[i] - f.fieldDescriptionsByName[field.Name] = field + defFields := col.Definition().GetFields() + for i := len(defFields) - 1; i >= 0; i-- { + f.fieldDescriptionsByName[defFields[i].Name] = defFields[i] } - cfg, err := f.registry.Config(ctx) - if err != nil { - return err - } - - history, err := getTargetedSchemaHistory(ctx, txn, cfg, f.col.Schema().Root, f.col.Schema().VersionID) + history, err := getTargetedSchemaHistory(ctx, txn, f.col.Schema().Root, f.col.Schema().VersionID) if err != nil { return err } f.lens = new(ctx, f.registry, f.col.Schema().VersionID, history) f.txn = txn - for schemaVersionID := range history { - hasMigration, err := f.registry.HasMigration(ctx, schemaVersionID) - if err != nil { - return err - } - - if hasMigration { - f.hasMigrations = true - break +historyLoop: + for _, historyItem := range history { + sources := historyItem.collection.CollectionSources() + for _, source := range sources { + if source.Transform.HasValue() { + f.hasMigrations = true + break historyLoop + } } } f.targetVersionID = col.Schema().VersionID - var innerFetcherFields []client.FieldDescription + var innerFetcherFields []client.FieldDefinition if f.hasMigrations { // If there are migrations present, they may require fields that are not otherwise // requested. At the moment this means we need to pass in nil so that the underlying @@ -204,7 +198,7 @@ func encodedDocToLensDoc(doc fetcher.EncodedDocument) (LensDoc, error) { func (f *lensedFetcher) lensDocToEncodedDoc(docAsMap LensDoc) (fetcher.EncodedDocument, error) { var key string status := client.Active - properties := map[client.FieldDescription]any{} + properties := map[client.FieldDefinition]any{} for fieldName, fieldByteValue := range docAsMap { if fieldName == request.DocIDFieldName { @@ -228,7 +222,7 @@ func (f *lensedFetcher) lensDocToEncodedDoc(docAsMap LensDoc) (fetcher.EncodedDo continue } - fieldValue, err := core.DecodeFieldValue(fieldDesc, fieldByteValue) + fieldValue, err := core.NormalizeFieldValue(fieldDesc, fieldByteValue) if err != nil { return nil, err } @@ -283,9 +277,9 @@ func (f *lensedFetcher) updateDataStore(ctx context.Context, original map[string } datastoreKeyBase := core.DataStoreKey{ - CollectionID: f.col.Description().IDString(), - DocID: docID, - InstanceType: core.ValueKey, + CollectionRootID: f.col.Description().RootID, + DocID: docID, + InstanceType: core.ValueKey, } for fieldName, value := range modifiedFieldValuesByName { @@ -321,7 +315,7 @@ type lensEncodedDocument struct { key []byte schemaVersionID string status client.DocumentStatus - properties map[client.FieldDescription]any + properties map[client.FieldDefinition]any } var _ fetcher.EncodedDocument = (*lensEncodedDocument)(nil) @@ -338,7 +332,7 @@ func (encdoc *lensEncodedDocument) Status() client.DocumentStatus { return encdoc.status } -func (encdoc *lensEncodedDocument) Properties(onlyFilterProps bool) (map[client.FieldDescription]any, error) { +func (encdoc *lensEncodedDocument) Properties(onlyFilterProps bool) (map[client.FieldDefinition]any, error) { return encdoc.properties, nil } @@ -346,5 +340,5 @@ func (encdoc *lensEncodedDocument) Reset() { encdoc.key = nil encdoc.schemaVersionID = "" encdoc.status = 0 - encdoc.properties = map[client.FieldDescription]any{} + encdoc.properties = map[client.FieldDefinition]any{} } diff --git a/lens/history.go b/lens/history.go index 56b43a9d5b..a7a5ee57d8 100644 --- a/lens/history.go +++ b/lens/history.go @@ -13,27 
+13,26 @@ package lens
 
 import (
 	"context"
 
-	"github.com/ipfs/go-datastore/query"
 	"github.com/sourcenetwork/immutable"
 
 	"github.com/sourcenetwork/defradb/client"
-	"github.com/sourcenetwork/defradb/core"
 	"github.com/sourcenetwork/defradb/datastore"
+	"github.com/sourcenetwork/defradb/db/description"
)
 
 // schemaHistoryLink represents an item in a particular schema's history, it
 // links to the previous and next version items if they exist.
 type schemaHistoryLink struct {
-	// The schema version id of this history item.
-	schemaVersionID string
+	// The collection at this point in history.
+	collection *client.CollectionDescription
 
-	// The history link to the next schema version, if there is one
-	// (for the most recent schema version this will be None).
-	next immutable.Option[*schemaHistoryLink]
+	// The history links to the next schema versions, if there are any
+	// (for the most recent schema version this will be empty).
+	next []*schemaHistoryLink
 
-	// The history link to the previous schema version, if there is
-	// one (for the initial schema version this will be None).
-	previous immutable.Option[*schemaHistoryLink]
+	// The history links to the previous schema versions, if there are
+	// any (for the initial schema version this will be empty).
+	previous []*schemaHistoryLink
 }
 
 // targetedSchemaHistoryLink represents an item in a particular schema's history, it
@@ -42,8 +41,8 @@ type schemaHistoryLink struct {
 // It also contains a vector which describes the distance and direction to the
 // target schema version (given as an input param on construction).
 type targetedSchemaHistoryLink struct {
-	// The schema version id of this history item.
-	schemaVersionID string
+	// The collection at this point in history.
+	collection *client.CollectionDescription
 
 	// The link to next schema version, if there is one
 	// (for the most recent schema version this will be None).
@@ -69,11 +68,10 @@ type targetedSchemaHistoryLink struct {
 func getTargetedSchemaHistory(
 	ctx context.Context,
 	txn datastore.Txn,
-	lensConfigs []client.LensConfig,
 	schemaRoot string,
 	targetSchemaVersionID string,
 ) (map[schemaVersionID]*targetedSchemaHistoryLink, error) {
-	history, err := getSchemaHistory(ctx, txn, lensConfigs, schemaRoot)
+	history, err := getSchemaHistory(ctx, txn, schemaRoot)
 	if err != nil {
 		return nil, err
 	}
@@ -81,18 +79,22 @@ func getTargetedSchemaHistory(
 	result := map[schemaVersionID]*targetedSchemaHistoryLink{}
 
 	for _, item := range history {
-		result[item.schemaVersionID] = &targetedSchemaHistoryLink{
-			schemaVersionID: item.schemaVersionID,
+		result[item.collection.SchemaVersionID] = &targetedSchemaHistoryLink{
+			collection: item.collection,
 		}
 	}
 
 	for _, item := range result {
-		schemaHistoryLink := history[item.schemaVersionID]
-		nextHistoryItem := schemaHistoryLink.next
-		if !nextHistoryItem.HasValue() {
+		schemaHistoryLink := history[item.collection.ID]
+		nextHistoryItems := schemaHistoryLink.next
+		if len(nextHistoryItems) == 0 {
 			continue
 		}
-		nextItem := result[nextHistoryItem.Value().schemaVersionID]
+
+		// WARNING: This line assumes that each collection can only have a single source, and so
+		// just takes the first item. If/when collections can have multiple sources we will need to change
+		// this slightly.
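To make that single-source caveat concrete: with one source per collection, the next/previous slices each hold at most one entry and the links form a simple chain, so indexing [0] walks the whole history. A tiny stand-alone sketch of that shape (stand-in types, not DefraDB code):

	package main

	import "fmt"

	// link is an editor's stand-in for schemaHistoryLink: next/previous are
	// slices, but with single-source collections each holds at most one entry.
	type link struct {
		id       string
		next     []*link
		previous []*link
	}

	func main() {
		v1, v2, v3 := &link{id: "V1"}, &link{id: "V2"}, &link{id: "V3"}
		for _, pair := range [][2]*link{{v1, v2}, {v2, v3}} {
			src, dst := pair[0], pair[1]
			dst.previous = append(dst.previous, src)
			src.next = append(src.next, dst)
		}
		// V1 <-> V2 <-> V3: taking next[0]/previous[0] is only safe while
		// every link has at most one source.
		fmt.Println(v1.next[0].id, v3.previous[0].id) // V2 V2
	}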
+ nextItem := result[nextHistoryItems[0].collection.SchemaVersionID] item.next = immutable.Some(nextItem) nextItem.previous = immutable.Some(item) } @@ -100,7 +102,7 @@ func getTargetedSchemaHistory( orphanSchemaVersions := map[string]struct{}{} for schemaVersion, item := range result { - if item.schemaVersionID == targetSchemaVersionID { + if item.collection.SchemaVersionID == targetSchemaVersionID { continue } if item.targetVector != 0 { @@ -122,7 +124,7 @@ func getTargetedSchemaHistory( wasFound = true break } - if currentItem.schemaVersionID == targetSchemaVersionID { + if currentItem.collection.SchemaVersionID == targetSchemaVersionID { wasFound = true break } @@ -143,7 +145,7 @@ func getTargetedSchemaHistory( wasFound = true break } - if currentItem.schemaVersionID == targetSchemaVersionID { + if currentItem.collection.SchemaVersionID == targetSchemaVersionID { wasFound = true break } @@ -169,11 +171,6 @@ func getTargetedSchemaHistory( return result, nil } -type schemaHistoryPairing struct { - schemaVersionID string - nextSchemaVersionID string -} - // getSchemaHistory returns the history of the schema of the given id as linked list // with each item mapped by schema version id. // @@ -182,96 +179,35 @@ type schemaHistoryPairing struct { func getSchemaHistory( ctx context.Context, txn datastore.Txn, - lensConfigs []client.LensConfig, schemaRoot string, -) (map[schemaVersionID]*schemaHistoryLink, error) { - pairings := map[string]*schemaHistoryPairing{} - - for _, config := range lensConfigs { - pairings[config.SourceSchemaVersionID] = &schemaHistoryPairing{ - schemaVersionID: config.SourceSchemaVersionID, - nextSchemaVersionID: config.DestinationSchemaVersionID, - } - - if _, ok := pairings[config.DestinationSchemaVersionID]; !ok { - pairings[config.DestinationSchemaVersionID] = &schemaHistoryPairing{ - schemaVersionID: config.DestinationSchemaVersionID, - } - } - } - - prefix := core.NewSchemaHistoryKey(schemaRoot, "") - q, err := txn.Systemstore().Query(ctx, query.Query{ - Prefix: prefix.ToString(), - }) +) (map[collectionID]*schemaHistoryLink, error) { + cols, err := description.GetCollectionsBySchemaRoot(ctx, txn, schemaRoot) if err != nil { return nil, err } - for res := range q.Next() { - // check for Done on context first - select { - case <-ctx.Done(): - // we've been cancelled! 
;)
-			return nil, q.Close()
-		default:
-			// noop, just continue on the with the for loop
-		}
-
-		if res.Error != nil {
-			err = q.Close()
-			if err != nil {
-				return nil, err
-			}
-			return nil, res.Error
-		}
+	history := map[collectionID]*schemaHistoryLink{}
 
-		key, err := core.NewSchemaHistoryKeyFromString(res.Key)
-		if err != nil {
-			err = q.Close()
-			if err != nil {
-				return nil, err
-			}
-			return nil, err
-		}
-
-		// The local schema version history takes priority over and migration-defined history
-		// and overwrites whatever already exists in the pairings (if any)
-		pairings[key.PreviousSchemaVersionID] = &schemaHistoryPairing{
-			schemaVersionID:     key.PreviousSchemaVersionID,
-			nextSchemaVersionID: string(res.Value),
-		}
-
-		if _, ok := pairings[string(res.Value)]; !ok {
-			pairings[string(res.Value)] = &schemaHistoryPairing{
-				schemaVersionID: string(res.Value),
-			}
-		}
-	}
-
-	err = q.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	history := map[schemaVersionID]*schemaHistoryLink{}
-
-	for _, pairing := range pairings {
+	for _, c := range cols {
+		col := c
 		// Convert the temporary types to the cleaner return type:
-		history[pairing.schemaVersionID] = &schemaHistoryLink{
-			schemaVersionID: pairing.schemaVersionID,
+		history[col.ID] = &schemaHistoryLink{
+			collection: &col,
 		}
 	}
 
-	for _, pairing := range pairings {
-		src := history[pairing.schemaVersionID]
-
-		// Use the internal pairings to set the next/previous links. This must be
-		// done after the `history` map has been fully populated, else `src` and
-		// `next` may not yet have been added to the map.
-		if next, hasNext := history[pairing.nextSchemaVersionID]; hasNext {
-			src.next = immutable.Some(next)
-			next.previous = immutable.Some(src)
+	for _, historyItem := range history {
+		for _, source := range historyItem.collection.CollectionSources() {
+			src := history[source.SourceCollectionID]
+			historyItem.previous = append(
+				historyItem.previous,
+				src,
+			)
+
+			src.next = append(
+				src.next,
+				historyItem,
+			)
 		}
 	}
diff --git a/lens/lens.go b/lens/lens.go
index 86fcb0876f..4e700d7324 100644
--- a/lens/lens.go
+++ b/lens/lens.go
@@ -19,6 +19,7 @@ import (
 )
 
 type schemaVersionID = string
+type collectionID = uint32
 
 // LensDoc represents a document that will be sent to/from a Lens.
 type LensDoc = map[string]any
@@ -151,10 +152,10 @@ func (l *lens) Next() (bool, error) {
 	var pipeHead enumerable.Enumerable[LensDoc]
 
 	for {
-		junctionPipe, junctionPreviouslyExisted := l.lensPipesBySchemaVersionIDs[historyLocation.schemaVersionID]
+		junctionPipe, junctionPreviouslyExisted := l.lensPipesBySchemaVersionIDs[historyLocation.collection.SchemaVersionID]
 		if !junctionPreviouslyExisted {
 			versionInputPipe := enumerable.NewQueue[LensDoc]()
-			l.lensInputPipesBySchemaVersionIDs[historyLocation.schemaVersionID] = versionInputPipe
+			l.lensInputPipesBySchemaVersionIDs[historyLocation.collection.SchemaVersionID] = versionInputPipe
			if inputPipe == nil {
 				// The input pipe will be fed documents which are currently at this schema version
 				inputPipe = versionInputPipe
@@ -162,7 +163,7 @@ func (l *lens) Next() (bool, error) {
 			// It is a source of the schemaVersion junction pipe, other schema versions
 			// may also join as sources to this junction pipe
 			junctionPipe = enumerable.Concat[LensDoc](versionInputPipe)
-			l.lensPipesBySchemaVersionIDs[historyLocation.schemaVersionID] = junctionPipe
+			l.lensPipesBySchemaVersionIDs[historyLocation.collection.SchemaVersionID] = junctionPipe
 		}
 
 		// If we have previously laid pipe, we need to connect it to the current junction.
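As a rough mental model of the pipe-laying in Next(): each schema version gets a junction that collects its input sources, and every loop iteration stages one migration between the current junction and the next, so a document flows through every lens between its version and the target. A toy, dependency-free rendering of that chaining (not the enumerable-based implementation):

	package main

	import "fmt"

	type doc = map[string]any

	// migrate applies one stage to every document, standing in for a lens
	// placed between two junctions.
	func migrate(in []doc, f func(doc) doc) []doc {
		out := make([]doc, 0, len(in))
		for _, d := range in {
			out = append(out, f(d))
		}
		return out
	}

	func main() {
		pipe := []doc{{"name": "alice"}}
		// Stage 1 adds a field; stage 2 renames it, analogous to two chained lenses.
		pipe = migrate(pipe, func(d doc) doc {
			d["points"] = 1
			return d
		})
		pipe = migrate(pipe, func(d doc) doc {
			d["score"] = d["points"]
			delete(d, "points")
			return d
		})
		fmt.Println(pipe[0]) // map[name:alice score:1]
	}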
@@ -181,7 +182,7 @@ func (l *lens) Next() (bool, error) { // Aquire a lens migration from the registery, using the junctionPipe as its source. // The new pipeHead will then be connected as a source to the next migration-stage on // the next loop. - pipeHead, err = l.lensRegistry.MigrateUp(l.ctx, junctionPipe, historyLocation.schemaVersionID) + pipeHead, err = l.lensRegistry.MigrateUp(l.ctx, junctionPipe, historyLocation.next.Value().collection.ID) if err != nil { return false, err } @@ -191,7 +192,7 @@ func (l *lens) Next() (bool, error) { // Aquire a lens migration from the registery, using the junctionPipe as its source. // The new pipeHead will then be connected as a source to the next migration-stage on // the next loop. - pipeHead, err = l.lensRegistry.MigrateDown(l.ctx, junctionPipe, historyLocation.previous.Value().schemaVersionID) + pipeHead, err = l.lensRegistry.MigrateDown(l.ctx, junctionPipe, historyLocation.collection.ID) if err != nil { return false, err } diff --git a/lens/registry.go b/lens/registry.go index 20b125a498..ba24779611 100644 --- a/lens/registry.go +++ b/lens/registry.go @@ -12,10 +12,8 @@ package lens import ( "context" - "encoding/json" "sync" - "github.com/ipfs/go-datastore/query" "github.com/lens-vm/lens/host-go/config" "github.com/lens-vm/lens/host-go/config/model" "github.com/lens-vm/lens/host-go/engine/module" @@ -24,8 +22,8 @@ import ( "github.com/sourcenetwork/immutable/enumerable" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/db/description" "github.com/sourcenetwork/defradb/errors" ) @@ -46,13 +44,9 @@ type lensRegistry struct { modulesByPath map[string]module.Module moduleLock sync.Mutex - lensPoolsBySchemaVersionID map[string]*lensPool - reversedPoolsBySchemaVersionID map[string]*lensPool - poolLock sync.RWMutex - - // lens configurations by source schema version ID - configs map[string]client.LensConfig - configLock sync.RWMutex + lensPoolsByCollectionID map[uint32]*lensPool + reversedPoolsByCollectionID map[uint32]*lensPool + poolLock sync.RWMutex // Writable transaction contexts by transaction ID. // @@ -65,18 +59,16 @@ type lensRegistry struct { // stuff within here should be accessible from within this transaction but not // from outside. 
type txnContext struct { - txn datastore.Txn - lensPoolsBySchemaVersionID map[string]*lensPool - reversedPoolsBySchemaVersionID map[string]*lensPool - configs map[string]client.LensConfig + txn datastore.Txn + lensPoolsByCollectionID map[uint32]*lensPool + reversedPoolsByCollectionID map[uint32]*lensPool } func newTxnCtx(txn datastore.Txn) *txnContext { return &txnContext{ - txn: txn, - lensPoolsBySchemaVersionID: map[string]*lensPool{}, - reversedPoolsBySchemaVersionID: map[string]*lensPool{}, - configs: map[string]client.LensConfig{}, + txn: txn, + lensPoolsByCollectionID: map[uint32]*lensPool{}, + reversedPoolsByCollectionID: map[uint32]*lensPool{}, } } @@ -103,13 +95,12 @@ func NewRegistry(lensPoolSize immutable.Option[int], db TxnSource) client.LensRe return &implicitTxnLensRegistry{ db: db, registry: &lensRegistry{ - poolSize: size, - runtime: wasmtime.New(), - modulesByPath: map[string]module.Module{}, - lensPoolsBySchemaVersionID: map[string]*lensPool{}, - reversedPoolsBySchemaVersionID: map[string]*lensPool{}, - configs: map[string]client.LensConfig{}, - txnCtxs: map[uint64]*txnContext{}, + poolSize: size, + runtime: wasmtime.New(), + modulesByPath: map[string]module.Module{}, + lensPoolsByCollectionID: map[uint32]*lensPool{}, + reversedPoolsByCollectionID: map[uint32]*lensPool{}, + txnCtxs: map[uint64]*txnContext{}, }, } } @@ -133,20 +124,14 @@ func (r *lensRegistry) getCtx(txn datastore.Txn, readonly bool) *txnContext { txnCtx.txn.OnSuccess(func() { r.poolLock.Lock() - for schemaVersionID, locker := range txnCtx.lensPoolsBySchemaVersionID { - r.lensPoolsBySchemaVersionID[schemaVersionID] = locker + for collectionID, locker := range txnCtx.lensPoolsByCollectionID { + r.lensPoolsByCollectionID[collectionID] = locker } - for schemaVersionID, locker := range txnCtx.reversedPoolsBySchemaVersionID { - r.reversedPoolsBySchemaVersionID[schemaVersionID] = locker + for collectionID, locker := range txnCtx.reversedPoolsByCollectionID { + r.reversedPoolsByCollectionID[collectionID] = locker } r.poolLock.Unlock() - r.configLock.Lock() - for schemaVersionID, cfg := range txnCtx.configs { - r.configs[schemaVersionID] = cfg - } - r.configLock.Unlock() - r.txnLock.Lock() delete(r.txnCtxs, txn.ID()) r.txnLock.Unlock() @@ -169,28 +154,12 @@ func (r *lensRegistry) getCtx(txn datastore.Txn, readonly bool) *txnContext { return txnCtx } -func (r *lensRegistry) setMigration(ctx context.Context, txnCtx *txnContext, cfg client.LensConfig) error { - key := core.NewSchemaVersionMigrationKey(cfg.SourceSchemaVersionID) - - json, err := json.Marshal(cfg) - if err != nil { - return err - } - - err = txnCtx.txn.Systemstore().Put(ctx, key.ToDS(), json) - if err != nil { - return err - } - - err = r.cacheLens(txnCtx, cfg) - if err != nil { - return err - } - - return nil -} - -func (r *lensRegistry) cacheLens(txnCtx *txnContext, cfg client.LensConfig) error { +func (r *lensRegistry) setMigration( + ctx context.Context, + txnCtx *txnContext, + collectionID uint32, + cfg model.Lens, +) error { inversedModuleCfgs := make([]model.LensModule, len(cfg.Lenses)) for i, moduleCfg := range cfg.Lenses { // Reverse the order of the lenses for the inverse migration. 
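The commit hook in getCtx above amounts to an overlay pattern: pools registered inside a transaction land in per-transaction maps and are merged into the shared registry maps only when the transaction commits, so other callers never observe uncommitted state. A compact, runnable sketch of that pattern with plain maps (hypothetical names, not the registry's real types):

	package main

	import "fmt"

	type registry struct {
		committed map[uint32]string            // shared state, visible to all
		overlays  map[uint64]map[uint32]string // per-transaction staging, keyed by txn ID
	}

	func (r *registry) get(txnID uint64, col uint32) (string, bool) {
		if v, ok := r.overlays[txnID][col]; ok {
			return v, true // a txn sees its own writes first
		}
		v, ok := r.committed[col]
		return v, ok
	}

	// commit merges a transaction's staged writes into shared state,
	// mirroring the OnSuccess callback above.
	func (r *registry) commit(txnID uint64) {
		for col, v := range r.overlays[txnID] {
			r.committed[col] = v
		}
		delete(r.overlays, txnID)
	}

	func main() {
		r := &registry{
			committed: map[uint32]string{},
			overlays:  map[uint64]map[uint32]string{1: {7: "migration-A"}},
		}
		_, visible := r.get(2, 7)
		fmt.Println(visible) // false: txn 2 cannot see txn 1's staged write
		r.commit(1)
		v, _ := r.get(2, 7)
		fmt.Println(v) // migration-A
	}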
@@ -204,19 +173,15 @@ func (r *lensRegistry) cacheLens(txnCtx *txnContext, cfg client.LensConfig) erro } } - reversedCfg := client.LensConfig{ - SourceSchemaVersionID: cfg.SourceSchemaVersionID, - DestinationSchemaVersionID: cfg.DestinationSchemaVersionID, - Lens: model.Lens{ - Lenses: inversedModuleCfgs, - }, + reversedCfg := model.Lens{ + Lenses: inversedModuleCfgs, } - err := r.cachePool(txnCtx.txn, txnCtx.lensPoolsBySchemaVersionID, cfg) + err := r.cachePool(txnCtx.txn, txnCtx.lensPoolsByCollectionID, cfg, collectionID) if err != nil { return err } - err = r.cachePool(txnCtx.txn, txnCtx.reversedPoolsBySchemaVersionID, reversedCfg) + err = r.cachePool(txnCtx.txn, txnCtx.reversedPoolsByCollectionID, reversedCfg, collectionID) // For now, checking this error is the best way of determining if a migration has an inverse. // Inverses are optional. //nolint:revive @@ -224,12 +189,15 @@ func (r *lensRegistry) cacheLens(txnCtx *txnContext, cfg client.LensConfig) erro return err } - txnCtx.configs[cfg.SourceSchemaVersionID] = cfg - return nil } -func (r *lensRegistry) cachePool(txn datastore.Txn, target map[string]*lensPool, cfg client.LensConfig) error { +func (r *lensRegistry) cachePool( + txn datastore.Txn, + target map[uint32]*lensPool, + cfg model.Lens, + collectionID uint32, +) error { pool := r.newPool(r.poolSize, cfg) for i := 0; i < r.poolSize; i++ { @@ -240,94 +208,64 @@ func (r *lensRegistry) cachePool(txn datastore.Txn, target map[string]*lensPool, pool.returnLens(lensPipe) } - target[cfg.SourceSchemaVersionID] = pool + target[collectionID] = pool return nil } func (r *lensRegistry) reloadLenses(ctx context.Context, txnCtx *txnContext) error { - prefix := core.NewSchemaVersionMigrationKey("") - q, err := txnCtx.txn.Systemstore().Query(ctx, query.Query{ - Prefix: prefix.ToString(), - }) + cols, err := description.GetCollections(ctx, txnCtx.txn) if err != nil { return err } - for res := range q.Next() { - // check for Done on context first - select { - case <-ctx.Done(): - // we've been cancelled! ;) - err = q.Close() - if err != nil { - return err - } - - return nil - default: - // noop, just continue on the with the for loop - } + for _, col := range cols { + sources := col.CollectionSources() - if res.Error != nil { - err = q.Close() - if err != nil { - return errors.Wrap(err.Error(), res.Error) - } - return res.Error + if len(sources) == 0 { + continue } - var cfg client.LensConfig - err = json.Unmarshal(res.Value, &cfg) - if err != nil { - err = q.Close() - if err != nil { - return err - } - return err + // WARNING: Here we are only dealing with the first source in the set. This is fine for now, as + // collections can currently only have one source, but this code will need to change if/when + // collections support multiple sources.
+ + if !sources[0].Transform.HasValue() { + continue } - err = r.cacheLens(txnCtx, cfg) + err = r.setMigration(ctx, txnCtx, col.ID, sources[0].Transform.Value()) if err != nil { - err = q.Close() - if err != nil { - return errors.Wrap(err.Error(), res.Error) - } return err } } - err = q.Close() - if err != nil { - return err - } - return nil } func (r *lensRegistry) migrateUp( txnCtx *txnContext, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[LensDoc], error) { - return r.migrate(r.lensPoolsBySchemaVersionID, txnCtx.lensPoolsBySchemaVersionID, src, schemaVersionID) + return r.migrate(r.lensPoolsByCollectionID, txnCtx.lensPoolsByCollectionID, src, collectionID) } func (r *lensRegistry) migrateDown( txnCtx *txnContext, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[LensDoc], error) { - return r.migrate(r.reversedPoolsBySchemaVersionID, txnCtx.reversedPoolsBySchemaVersionID, src, schemaVersionID) + return r.migrate(r.reversedPoolsByCollectionID, txnCtx.reversedPoolsByCollectionID, src, collectionID) } func (r *lensRegistry) migrate( - pools map[string]*lensPool, - txnPools map[string]*lensPool, + pools map[uint32]*lensPool, + txnPools map[uint32]*lensPool, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[LensDoc], error) { - lensPool, ok := r.getPool(pools, txnPools, schemaVersionID) + lensPool, ok := r.getPool(pools, txnPools, collectionID) if !ok { // If there are no migrations for this schema version, just return the given source. return src, nil @@ -343,44 +281,17 @@ func (r *lensRegistry) migrate( return lens, nil } -func (r *lensRegistry) config(txnCtx *txnContext) []client.LensConfig { - configs := map[string]client.LensConfig{} - r.configLock.RLock() - for schemaVersionID, cfg := range r.configs { - configs[schemaVersionID] = cfg - } - r.configLock.RUnlock() - - // If within a txn actively writing to this registry overwrite - // values from the (commited) registry. - // Note: Config cannot be removed, only replaced at the moment. - for schemaVersionID, cfg := range txnCtx.configs { - configs[schemaVersionID] = cfg - } - - result := []client.LensConfig{} - for _, cfg := range configs { - result = append(result, cfg) - } - return result -} - -func (r *lensRegistry) hasMigration(txnCtx *txnContext, schemaVersionID string) bool { - _, hasMigration := r.getPool(r.lensPoolsBySchemaVersionID, txnCtx.lensPoolsBySchemaVersionID, schemaVersionID) - return hasMigration -} - func (r *lensRegistry) getPool( - pools map[string]*lensPool, - txnPools map[string]*lensPool, - schemaVersionID string, + pools map[uint32]*lensPool, + txnPools map[uint32]*lensPool, + collectionID uint32, ) (*lensPool, bool) { - if pool, ok := txnPools[schemaVersionID]; ok { + if pool, ok := txnPools[collectionID]; ok { return pool, true } r.poolLock.RLock() - pool, ok := pools[schemaVersionID] + pool, ok := pools[collectionID] r.poolLock.RUnlock() return pool, ok } @@ -392,7 +303,7 @@ func (r *lensRegistry) getPool( // so we need to limit how frequently we do this. type lensPool struct { // The config used to create the lenses within this locker. 
- cfg client.LensConfig + cfg model.Lens registry *lensRegistry @@ -405,7 +316,7 @@ type lensPool struct { pipes chan *lensPipe } -func (r *lensRegistry) newPool(lensPoolSize int, cfg client.LensConfig) *lensPool { +func (r *lensRegistry) newPool(lensPoolSize int, cfg model.Lens) *lensPool { return &lensPool{ cfg: cfg, registry: r, @@ -472,11 +383,11 @@ type lensPipe struct { var _ enumerable.Socket[LensDoc] = (*lensPipe)(nil) -func (r *lensRegistry) newLensPipe(cfg client.LensConfig) (*lensPipe, error) { +func (r *lensRegistry) newLensPipe(cfg model.Lens) (*lensPipe, error) { socket := enumerable.NewSocket[LensDoc]() r.moduleLock.Lock() - enumerable, err := config.LoadInto[LensDoc, LensDoc](r.runtime, r.modulesByPath, cfg.Lens, socket) + enumerable, err := config.LoadInto[LensDoc, LensDoc](r.runtime, r.modulesByPath, cfg, socket) r.moduleLock.Unlock() if err != nil { diff --git a/lens/txn_registry.go b/lens/txn_registry.go index 954db01e0c..8093dedbdd 100644 --- a/lens/txn_registry.go +++ b/lens/txn_registry.go @@ -13,6 +13,7 @@ package lens import ( "context" + "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable/enumerable" "github.com/sourcenetwork/defradb/client" @@ -46,7 +47,7 @@ func (r *explicitTxnLensRegistry) WithTxn(txn datastore.Txn) client.LensRegistry } } -func (r *implicitTxnLensRegistry) SetMigration(ctx context.Context, cfg client.LensConfig) error { +func (r *implicitTxnLensRegistry) SetMigration(ctx context.Context, collectionID uint32, cfg model.Lens) error { txn, err := r.db.NewTxn(ctx, false) if err != nil { return err @@ -54,7 +55,7 @@ func (r *implicitTxnLensRegistry) SetMigration(ctx context.Context, cfg client.L defer txn.Discard(ctx) txnCtx := r.registry.getCtx(txn, false) - err = r.registry.setMigration(ctx, txnCtx, cfg) + err = r.registry.setMigration(ctx, txnCtx, collectionID, cfg) if err != nil { return err } @@ -62,8 +63,8 @@ func (r *implicitTxnLensRegistry) SetMigration(ctx context.Context, cfg client.L return txn.Commit(ctx) } -func (r *explicitTxnLensRegistry) SetMigration(ctx context.Context, cfg client.LensConfig) error { - return r.registry.setMigration(ctx, r.registry.getCtx(r.txn, false), cfg) +func (r *explicitTxnLensRegistry) SetMigration(ctx context.Context, collectionID uint32, cfg model.Lens) error { + return r.registry.setMigration(ctx, r.registry.getCtx(r.txn, false), collectionID, cfg) } func (r *implicitTxnLensRegistry) ReloadLenses(ctx context.Context) error { @@ -89,7 +90,7 @@ func (r *explicitTxnLensRegistry) ReloadLenses(ctx context.Context) error { func (r *implicitTxnLensRegistry) MigrateUp( ctx context.Context, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[map[string]any], error) { txn, err := r.db.NewTxn(ctx, true) if err != nil { @@ -98,21 +99,21 @@ func (r *implicitTxnLensRegistry) MigrateUp( defer txn.Discard(ctx) txnCtx := newTxnCtx(txn) - return r.registry.migrateUp(txnCtx, src, schemaVersionID) + return r.registry.migrateUp(txnCtx, src, collectionID) } func (r *explicitTxnLensRegistry) MigrateUp( ctx context.Context, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[map[string]any], error) { - return r.registry.migrateUp(r.registry.getCtx(r.txn, true), src, schemaVersionID) + return r.registry.migrateUp(r.registry.getCtx(r.txn, true), src, collectionID) } func (r *implicitTxnLensRegistry) MigrateDown( ctx context.Context, src enumerable.Enumerable[LensDoc], - schemaVersionID 
string, + collectionID uint32, ) (enumerable.Enumerable[map[string]any], error) { txn, err := r.db.NewTxn(ctx, true) if err != nil { @@ -121,43 +122,13 @@ func (r *implicitTxnLensRegistry) MigrateDown( defer txn.Discard(ctx) txnCtx := newTxnCtx(txn) - return r.registry.migrateDown(txnCtx, src, schemaVersionID) + return r.registry.migrateDown(txnCtx, src, collectionID) } func (r *explicitTxnLensRegistry) MigrateDown( ctx context.Context, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[map[string]any], error) { - return r.registry.migrateDown(r.registry.getCtx(r.txn, true), src, schemaVersionID) -} - -func (r *implicitTxnLensRegistry) Config(ctx context.Context) ([]client.LensConfig, error) { - txn, err := r.db.NewTxn(ctx, true) - if err != nil { - return nil, err - } - defer txn.Discard(ctx) - txnCtx := newTxnCtx(txn) - - return r.registry.config(txnCtx), nil -} - -func (r *explicitTxnLensRegistry) Config(ctx context.Context) ([]client.LensConfig, error) { - return r.registry.config(r.registry.getCtx(r.txn, true)), nil -} - -func (r *implicitTxnLensRegistry) HasMigration(ctx context.Context, schemaVersionID string) (bool, error) { - txn, err := r.db.NewTxn(ctx, true) - if err != nil { - return false, err - } - defer txn.Discard(ctx) - txnCtx := newTxnCtx(txn) - - return r.registry.hasMigration(txnCtx, schemaVersionID), nil -} - -func (r *explicitTxnLensRegistry) HasMigration(ctx context.Context, schemaVersionID string) (bool, error) { - return r.registry.hasMigration(r.registry.getCtx(r.txn, true), schemaVersionID), nil + return r.registry.migrateDown(r.registry.getCtx(r.txn, true), src, collectionID) } diff --git a/licenses/BSL.txt b/licenses/BSL.txt index 9aef00b6d8..38cf309ebc 100644 --- a/licenses/BSL.txt +++ b/licenses/BSL.txt @@ -7,7 +7,7 @@ Parameters Licensor: Democratized Data (D2) Foundation -Licensed Work: DefraDB v0.9.0 +Licensed Work: DefraDB v0.10.0 The Licensed Work is (c) 2023 D2 Foundation. 
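Taken together, the lens hunks above re-key migrations by collection ID (uint32) instead of source schema version ID, drop the registry's separate config cache, and source transforms from the collection description. A minimal sketch of the resulting SetMigration call shape follows; it assumes model.LensModule exposes Path and Arguments as in DefraDB's lens configuration examples, and the module path and argument names are purely illustrative:

```go
package main

import (
	"context"

	"github.com/lens-vm/lens/host-go/config/model"

	"github.com/sourcenetwork/defradb/client"
)

// registerRename registers a migration against a collection ID using the
// new SetMigration signature. The WASM module path and the lens arguments
// are hypothetical placeholders, not values taken from this changeset.
func registerRename(ctx context.Context, registry client.LensRegistry, collectionID uint32) error {
	lens := model.Lens{
		Lenses: []model.LensModule{
			{
				Path: "/lenses/rename.wasm", // hypothetical module path
				Arguments: map[string]any{
					"src": "fullName", // hypothetical argument names
					"dst": "name",
				},
			},
		},
	}
	return registry.SetMigration(ctx, collectionID, lens)
}
```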
@@ -28,7 +28,7 @@ Additional Use Grant: You may only use the Licensed Work for the -Change Date: 2028-01-16 +Change Date: 2028-03-08 Change License: Apache License, Version 2.0 diff --git a/merkle/crdt/merklecrdt.go b/merkle/crdt/merklecrdt.go index ba7fd5648d..b52fb7cf6d 100644 --- a/merkle/crdt/merklecrdt.go +++ b/merkle/crdt/merklecrdt.go @@ -86,14 +86,14 @@ func InstanceWithStore( ), nil case client.PN_COUNTER: switch kind { - case client.FieldKind_INT: + case client.FieldKind_NILLABLE_INT: return NewMerklePNCounter[int64]( store, schemaVersionKey, key, fieldName, ), nil - case client.FieldKind_FLOAT: + case client.FieldKind_NILLABLE_FLOAT: return NewMerklePNCounter[float64]( store, schemaVersionKey, diff --git a/net/client_test.go b/net/client_test.go index 5db18c4a07..89c26e06b5 100644 --- a/net/client_test.go +++ b/net/client_test.go @@ -24,10 +24,10 @@ import ( var sd = client.SchemaDescription{ Name: "test", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "test", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, }, diff --git a/net/config.go b/net/config.go index 4c33dc02a6..040689bbe9 100644 --- a/net/config.go +++ b/net/config.go @@ -13,113 +13,55 @@ package net import ( - "time" - - cconnmgr "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/crypto" - "github.com/libp2p/go-libp2p/p2p/net/connmgr" - ma "github.com/multiformats/go-multiaddr" "google.golang.org/grpc" - - "github.com/sourcenetwork/defradb/config" ) // Options is the node options. type Options struct { - ListenAddrs []ma.Multiaddr + ListenAddresses []string PrivateKey crypto.PrivKey EnablePubSub bool EnableRelay bool GRPCServerOptions []grpc.ServerOption GRPCDialOptions []grpc.DialOption - ConnManager cconnmgr.ConnManager } -type NodeOpt func(*Options) error - -// NewMergedOptions obtains Options by applying given NodeOpts. -func NewMergedOptions(opts ...NodeOpt) (*Options, error) { - var options Options - for _, opt := range opts { - if opt == nil { - continue - } - if err := opt(&options); err != nil { - return nil, err - } +// DefaultOptions returns the default net options. +func DefaultOptions() *Options { + return &Options{ + ListenAddresses: []string{"/ip4/0.0.0.0/tcp/9171"}, + EnablePubSub: true, + EnableRelay: false, } - return &options, nil } -// NewConnManager gives a new ConnManager. -func NewConnManager(low int, high int, grace time.Duration) (cconnmgr.ConnManager, error) { - c, err := connmgr.NewConnManager(low, high, connmgr.WithGracePeriod(grace)) - if err != nil { - return nil, err - } - return c, nil -} - -// WithConfig provides the Node-specific configuration, from the top-level Net config. -func WithConfig(cfg *config.Config) NodeOpt { - return func(opt *Options) error { - var err error - err = WithListenP2PAddrStrings(cfg.Net.P2PAddress)(opt) - if err != nil { - return err - } - opt.EnableRelay = cfg.Net.RelayEnabled - opt.EnablePubSub = cfg.Net.PubSubEnabled - opt.ConnManager, err = NewConnManager(100, 400, time.Second*20) - if err != nil { - return err - } - return nil - } -} +type NodeOpt func(*Options) // WithPrivateKey sets the p2p host private key. func WithPrivateKey(priv crypto.PrivKey) NodeOpt { - return func(opt *Options) error { + return func(opt *Options) { opt.PrivateKey = priv - return nil } } -// WithPubSub enables the pubsub feature. -func WithPubSub(enable bool) NodeOpt { - return func(opt *Options) error { +// WithEnablePubSub enables the pubsub feature. 
+func WithEnablePubSub(enable bool) NodeOpt { + return func(opt *Options) { opt.EnablePubSub = enable - return nil } } // WithEnableRelay enables the relay feature. func WithEnableRelay(enable bool) NodeOpt { - return func(opt *Options) error { + return func(opt *Options) { opt.EnableRelay = enable - return nil - } -} - -// ListenP2PAddrStrings sets the address to listen on given as strings. -func WithListenP2PAddrStrings(addrs ...string) NodeOpt { - return func(opt *Options) error { - for _, addrstr := range addrs { - a, err := ma.NewMultiaddr(addrstr) - if err != nil { - return err - } - opt.ListenAddrs = append(opt.ListenAddrs, a) - } - return nil } } -// ListenAddrs sets the address to listen on given as MultiAddr(s). -func WithListenAddrs(addrs ...ma.Multiaddr) NodeOpt { - return func(opt *Options) error { - opt.ListenAddrs = addrs - return nil +// WithListenAddresses sets the addresses to listen on, given as multiaddress strings. +func WithListenAddresses(addresses ...string) NodeOpt { + return func(opt *Options) { + opt.ListenAddresses = addresses } } diff --git a/net/config_test.go b/net/config_test.go index 6f306c29ed..869c820788 100644 --- a/net/config_test.go +++ b/net/config_test.go @@ -12,94 +12,25 @@ package net import ( "testing" - "time" - "github.com/libp2p/go-libp2p/core/crypto" - ma "github.com/multiformats/go-multiaddr" - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/config" + "github.com/stretchr/testify/assert" ) -func TestNewMergedOptionsSimple(t *testing.T) { - opt, err := NewMergedOptions() - require.NoError(t, err) - require.NotNil(t, opt) -} - -func TestNewMergedOptionsWithNilOption(t *testing.T) { - opt, err := NewMergedOptions(nil) - require.NoError(t, err) - require.NotNil(t, opt) -} - -func TestNewConnManagerSimple(t *testing.T) { - conMngr, err := NewConnManager(1, 10, time.Second) - require.NoError(t, err) - err = conMngr.Close() - require.NoError(t, err) -} - -func TestNewConnManagerWithError(t *testing.T) { - _, err := NewConnManager(1, 10, -time.Second) - require.Contains(t, err.Error(), "grace period must be non-negative") -} - -func TestWithConfigWithP2PAddressError(t *testing.T) { - cfg := config.Config{ - Net: &config.NetConfig{ - P2PAddress: "/willerror/0.0.0.0/tcp/9999", - }, - } - err := WithConfig(&cfg)(&Options{}) - require.Contains(t, err.Error(), "failed to parse multiaddr") -} - -func TestWithPrivateKey(t *testing.T) { - key, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) - require.NoError(t, err) - - opt, err := NewMergedOptions(WithPrivateKey(key)) - require.NoError(t, err) - require.NotNil(t, opt) - require.Equal(t, key, opt.PrivateKey) -} - -func TestWithPubSub(t *testing.T) { - opt, err := NewMergedOptions(WithPubSub(true)) - require.NoError(t, err) - require.NotNil(t, opt) - require.True(t, opt.EnablePubSub) +func TestWithListenAddresses(t *testing.T) { + opts := &Options{} + addresses := []string{"/ip4/127.0.0.1/tcp/6666", "/ip4/0.0.0.0/tcp/6666"} + WithListenAddresses(addresses...)(opts) + assert.Equal(t, addresses, opts.ListenAddresses) } func TestWithEnableRelay(t *testing.T) { - opt, err := NewMergedOptions(WithEnableRelay(true)) - require.NoError(t, err) - require.NotNil(t, opt) - require.True(t, opt.EnableRelay) + opts := &Options{} + WithEnableRelay(true)(opts) + assert.Equal(t, true, opts.EnableRelay) } -func TestWithListenP2PAddrStringsWithError(t *testing.T) { - addr := "/willerror/0.0.0.0/tcp/9999" - _, err := NewMergedOptions(WithListenP2PAddrStrings(addr)) - require.Contains(t, err.Error(),
"failed to parse multiaddr") -} - -func TestWithListenP2PAddrStrings(t *testing.T) { - addr := "/ip4/0.0.0.0/tcp/9999" - opt, err := NewMergedOptions(WithListenP2PAddrStrings(addr)) - require.NoError(t, err) - require.NotNil(t, opt) - require.Equal(t, addr, opt.ListenAddrs[0].String()) -} - -func TestWithListenAddrs(t *testing.T) { - addr := "/ip4/0.0.0.0/tcp/9999" - a, err := ma.NewMultiaddr(addr) - require.NoError(t, err) - - opt, err := NewMergedOptions(WithListenAddrs(a)) - require.NoError(t, err) - require.NotNil(t, opt) - require.Equal(t, addr, opt.ListenAddrs[0].String()) +func TestWithEnablePubSub(t *testing.T) { + opts := &Options{} + WithEnablePubSub(true)(opts) + assert.Equal(t, true, opts.EnablePubSub) } diff --git a/net/dialer_test.go b/net/dialer_test.go index d14ed5823e..7f37611ec3 100644 --- a/net/dialer_test.go +++ b/net/dialer_test.go @@ -27,14 +27,14 @@ func TestDial_WithConnectedPeer_NoError(t *testing.T) { n1, err := NewNode( ctx, db1, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) assert.NoError(t, err) defer n1.Close() n2, err := NewNode( ctx, db2, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) assert.NoError(t, err) defer n2.Close() @@ -54,14 +54,14 @@ func TestDial_WithConnectedPeerAndSecondConnection_NoError(t *testing.T) { n1, err := NewNode( ctx, db1, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) assert.NoError(t, err) defer n1.Close() n2, err := NewNode( ctx, db2, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) assert.NoError(t, err) defer n2.Close() @@ -84,14 +84,14 @@ func TestDial_WithConnectedPeerAndSecondConnectionWithConnectionShutdown_Closing n1, err := NewNode( ctx, db1, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) assert.NoError(t, err) defer n1.Close() n2, err := NewNode( ctx, db2, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) assert.NoError(t, err) defer n2.Close() diff --git a/net/node.go b/net/node.go index 71e9a10d55..9245f78772 100644 --- a/net/node.go +++ b/net/node.go @@ -43,6 +43,7 @@ import ( // @TODO: https://github.com/sourcenetwork/defradb/issues/1902 //nolint:staticcheck "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds" + "github.com/libp2p/go-libp2p/p2p/net/connmgr" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/logging" @@ -78,11 +79,25 @@ func NewNode( db client.DB, opts ...NodeOpt, ) (*Node, error) { - options, err := NewMergedOptions(opts...) 
+ options := DefaultOptions() + for _, opt := range opts { + opt(options) + } + + connManager, err := connmgr.NewConnManager(100, 400, connmgr.WithGracePeriod(time.Second*20)) if err != nil { return nil, err } + var listenAddresses []multiaddr.Multiaddr + for _, addr := range options.ListenAddresses { + listenAddress, err := multiaddr.NewMultiaddr(addr) + if err != nil { + return nil, err + } + listenAddresses = append(listenAddresses, listenAddress) + } + fin := finalizer.NewFinalizer() peerstore, err := pstoreds.NewPeerstore(ctx, db.Peerstore(), pstoreds.DefaultOpts()) @@ -103,10 +118,10 @@ func NewNode( var ddht *dualdht.DHT libp2pOpts := []libp2p.Option{ - libp2p.ConnectionManager(options.ConnManager), + libp2p.ConnectionManager(connManager), libp2p.DefaultTransports, libp2p.Identity(options.PrivateKey), - libp2p.ListenAddrs(options.ListenAddrs...), + libp2p.ListenAddrs(listenAddresses...), libp2p.Peerstore(peerstore), libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) { // Delete this line and uncomment the next 6 lines once we remove batchable datastore support. @@ -133,7 +148,7 @@ func NewNode( ctx, "Created LibP2P host", logging.NewKV("PeerId", h.ID()), - logging.NewKV("Address", options.ListenAddrs), + logging.NewKV("Address", options.ListenAddresses), ) var ps *pubsub.PubSub diff --git a/net/node_test.go b/net/node_test.go index fcceeb00a3..3b7f28d017 100644 --- a/net/node_test.go +++ b/net/node_test.go @@ -17,12 +17,10 @@ import ( "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" badger "github.com/sourcenetwork/badger/v4" "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/db" @@ -81,14 +79,14 @@ func TestNewNode_NoPubSub_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - WithPubSub(false), + WithEnablePubSub(false), ) require.NoError(t, err) defer n.Close() require.Nil(t, n.ps) } -func TestNewNode_WithPubSub_NoError(t *testing.T) { +func TestNewNode_WithEnablePubSub_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) @@ -97,7 +95,7 @@ func TestNewNode_WithPubSub_NoError(t *testing.T) { n, err := NewNode( ctx, db, - WithPubSub(true), + WithEnablePubSub(true), ) require.NoError(t, err) @@ -128,7 +126,7 @@ func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { n1, err := NewNode( ctx, db, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) defer n1.Close() @@ -144,14 +142,14 @@ func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { n1, err := NewNode( ctx, db, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) defer n1.Close() n2, err := NewNode( ctx, db, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) defer n2.Close() @@ -171,14 +169,14 @@ func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing n1, err := NewNode( ctx, db, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) defer n1.Close() n2, err := NewNode( ctx, db, - 
WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) defer n2.Close() @@ -192,7 +190,7 @@ func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing n2.Bootstrap(addrs) } -func TestListenAddrs_WithListenP2PAddrStrings_NoError(t *testing.T) { +func TestListenAddrs_WithListenAddresses_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) @@ -200,7 +198,7 @@ func TestListenAddrs_WithListenP2PAddrStrings_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) defer n.Close() @@ -208,35 +206,6 @@ func TestListenAddrs_WithListenP2PAddrStrings_NoError(t *testing.T) { require.Contains(t, n.ListenAddrs()[0].String(), "/tcp/") } -func TestNodeConfig_NoError(t *testing.T) { - cfg := config.DefaultConfig() - cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/9179" - cfg.Net.RelayEnabled = true - cfg.Net.PubSubEnabled = true - - configOpt := WithConfig(cfg) - options, err := NewMergedOptions(configOpt) - require.NoError(t, err) - - // confirming it provides the same config as a manually constructed node.Options - p2pAddr, err := ma.NewMultiaddr(cfg.Net.P2PAddress) - require.NoError(t, err) - connManager, err := NewConnManager(100, 400, time.Second*20) - require.NoError(t, err) - expectedOptions := Options{ - ListenAddrs: []ma.Multiaddr{p2pAddr}, - EnablePubSub: true, - EnableRelay: true, - ConnManager: connManager, - } - - for k, v := range options.ListenAddrs { - require.Equal(t, expectedOptions.ListenAddrs[k], v) - } - require.Equal(t, expectedOptions.EnablePubSub, options.EnablePubSub) - require.Equal(t, expectedOptions.EnableRelay, options.EnableRelay) -} - func TestPeerConnectionEventEmitter_MultiEvent_NoError(t *testing.T) { db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( diff --git a/net/peer.go b/net/peer.go index acdba2e9c8..0c456d5b18 100644 --- a/net/peer.go +++ b/net/peer.go @@ -177,6 +177,7 @@ func (p *Peer) Start() error { go p.handleBroadcastLoop() } + log.FeedbackInfo(p.ctx, "Starting P2P node", logging.NewKV("P2P addresses", p.host.Addrs())) // register the P2P gRPC server go func() { pb.RegisterServiceServer(p.p2pRPC, p.server) diff --git a/net/peer_collection.go b/net/peer_collection.go index 02bbb6e9a6..6f4f4d8ba8 100644 --- a/net/peer_collection.go +++ b/net/peer_collection.go @@ -14,6 +14,7 @@ import ( "context" dsq "github.com/ipfs/go-datastore/query" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" @@ -31,7 +32,12 @@ func (p *Peer) AddP2PCollections(ctx context.Context, collectionIDs []string) er // first let's make sure the collections actually exists storeCollections := []client.Collection{} for _, col := range collectionIDs { - storeCol, err := p.db.WithTxn(txn).GetCollectionsBySchemaRoot(p.ctx, col) + storeCol, err := p.db.WithTxn(txn).GetCollections( + p.ctx, + client.CollectionFetchOptions{ + SchemaRoot: immutable.Some(col), + }, + ) if err != nil { return err } @@ -96,7 +102,12 @@ func (p *Peer) RemoveP2PCollections(ctx context.Context, collectionIDs []string) // first let's make sure the collections actually exists storeCollections := []client.Collection{} for _, col := range collectionIDs { - storeCol, err := p.db.WithTxn(txn).GetCollectionsBySchemaRoot(p.ctx, col) + storeCol, err := 
p.db.WithTxn(txn).GetCollections( + p.ctx, + client.CollectionFetchOptions{ + SchemaRoot: immutable.Some(col), + }, + ) if err != nil { return err } diff --git a/net/peer_replicator.go b/net/peer_replicator.go index 0506e018c4..3638122a2a 100644 --- a/net/peer_replicator.go +++ b/net/peer_replicator.go @@ -53,7 +53,7 @@ func (p *Peer) SetReplicator(ctx context.Context, rep client.Replicator) error { default: // default to all collections - collections, err = p.db.WithTxn(txn).GetAllCollections(ctx) + collections, err = p.db.WithTxn(txn).GetCollections(ctx, client.CollectionFetchOptions{}) if err != nil { return NewErrReplicatorCollections(err) } @@ -94,7 +94,7 @@ func (p *Peer) SetReplicator(ctx context.Context, rep client.Replicator) error { for _, col := range added { keysCh, err := col.WithTxn(txn).GetAllDocIDs(ctx) if err != nil { - return NewErrReplicatorDocID(err, col.Name(), rep.Info.ID) + return NewErrReplicatorDocID(err, col.Name().Value(), rep.Info.ID) } p.pushToReplicator(ctx, txn, col, keysCh, rep.Info.ID) } @@ -139,7 +139,7 @@ func (p *Peer) DeleteReplicator(ctx context.Context, rep client.Replicator) erro default: // default to all collections - collections, err = p.db.WithTxn(txn).GetAllCollections(ctx) + collections, err = p.db.WithTxn(txn).GetCollections(ctx, client.CollectionFetchOptions{}) if err != nil { return NewErrReplicatorCollections(err) } diff --git a/net/peer_test.go b/net/peer_test.go index 139e160155..0a863b8112 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -26,7 +26,6 @@ import ( "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/core/crdt" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/db" @@ -119,13 +118,10 @@ func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) require.NoError(t, err) - cfg := config.DefaultConfig() - cfg.Net.P2PAddress = randomMultiaddr - n, err := NewNode( ctx, db, - WithConfig(cfg), + WithListenAddresses(randomMultiaddr), ) require.NoError(t, err) @@ -218,13 +214,13 @@ func TestStart_WithKnownPeer_NoError(t *testing.T) { n1, err := NewNode( ctx, db1, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) n2, err := NewNode( ctx, db2, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) @@ -254,13 +250,13 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { n1, err := NewNode( ctx, db1, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) n2, err := NewNode( ctx, db2, - WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) @@ -290,7 +286,7 @@ func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { n, err := NewNode( ctx, db, - WithPubSub(true), + WithEnablePubSub(true), ) require.NoError(t, err) @@ -309,7 +305,7 @@ func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) { n, err := NewNode( ctx, db, - WithPubSub(true), + WithEnablePubSub(true), ) require.NoError(t, err) diff --git a/net/process.go b/net/process.go index 4e6ecee19e..5eec8a6efd 100644 --- a/net/process.go +++ b/net/process.go @@ -149,9 +149,9 @@ func initCRDTForType( ), nil } - fd, ok := col.Schema().GetField(field) + fd, ok := 
col.Definition().GetFieldByName(field) if !ok { - return nil, errors.New(fmt.Sprintf("Couldn't find field %s for doc %s", field, dsKey)) + return nil, errors.New(fmt.Sprintf("Couldn't find field %s for doc %s", field, dsKey.ToString())) } ctype = fd.Typ fieldID := fd.ID.String() diff --git a/net/server.go b/net/server.go index e93000d1b9..206ccb3b53 100644 --- a/net/server.go +++ b/net/server.go @@ -22,6 +22,7 @@ import ( "github.com/libp2p/go-libp2p/core/event" libpeer "github.com/libp2p/go-libp2p/core/peer" rpc "github.com/sourcenetwork/go-libp2p-pubsub-rpc" + "github.com/sourcenetwork/immutable" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" grpcpeer "google.golang.org/grpc/peer" @@ -96,7 +97,7 @@ func newServer(p *Peer, db client.DB, opts ...grpc.DialOption) (*server, error) // Get all DocIDs across all collections in the DB log.Debug(p.ctx, "Getting all existing DocIDs...") - cols, err := s.db.GetAllCollections(s.peer.ctx) + cols, err := s.db.GetCollections(s.peer.ctx, client.CollectionFetchOptions{}) if err != nil { return nil, err } @@ -246,7 +247,6 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL return &pb.PushLogReply{}, nil } - schemaRoot := string(req.Body.SchemaRoot) dsKey := core.DataStoreKeyFromDocID(docID) var txnErr error @@ -262,14 +262,10 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL // Currently a schema is the best way we have to link a push log request to a collection, // this will change with https://github.com/sourcenetwork/defradb/issues/1085 - cols, err := store.GetCollectionsBySchemaRoot(ctx, schemaRoot) + col, err := s.getActiveCollection(ctx, store, string(req.Body.SchemaRoot)) if err != nil { - return nil, errors.Wrap(fmt.Sprintf("Failed to get collection from schemaRoot %s", schemaRoot), err) - } - if len(cols) == 0 { - return nil, client.NewErrCollectionNotFoundForSchema(schemaRoot) + return nil, err } - col := cols[0] // Create a new DAG service with the current transaction var getter format.NodeGetter = s.peer.newDAGSyncerTxn(txn) @@ -299,6 +295,11 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL session.Wait() bp.mergeBlocks(ctx) + err = s.syncIndexedDocs(ctx, col.WithTxn(txn), docID) + if err != nil { + return nil, err + } + // dagWorkers specific to the DocID will have been spawned within handleChildBlocks. // Once we are done with the dag syncing process, we can get rid of those workers. 
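A change that recurs throughout these net hunks is the consolidation of GetAllCollections, GetCollectionsBySchemaRoot, and GetCollectionsByVersionID into a single GetCollections call driven by CollectionFetchOptions. A hedged sketch of the new lookup shape (the PushLog hunk resumes below):

```go
package main

import (
	"context"

	"github.com/sourcenetwork/immutable"

	"github.com/sourcenetwork/defradb/client"
)

// collectionsBySchemaRoot sketches the consolidated lookup: leaving an
// option unset fetches everything, while immutable.Some values act as filters.
func collectionsBySchemaRoot(ctx context.Context, store client.Store, root string) ([]client.Collection, error) {
	return store.GetCollections(ctx, client.CollectionFetchOptions{
		SchemaRoot: immutable.Some(root),
	})
}
```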
if s.peer.closeJob != nil { @@ -326,6 +327,64 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL return &pb.PushLogReply{}, client.NewErrMaxTxnRetries(txnErr) } +func (*server) getActiveCollection( + ctx context.Context, + store client.Store, + schemaRoot string, +) (client.Collection, error) { + cols, err := store.GetCollections( + ctx, + client.CollectionFetchOptions{ + SchemaRoot: immutable.Some(schemaRoot), + }, + ) + if err != nil { + return nil, errors.Wrap(fmt.Sprintf("Failed to get collection from schemaRoot %s", schemaRoot), err) + } + if len(cols) == 0 { + return nil, client.NewErrCollectionNotFoundForSchema(schemaRoot) + } + var col client.Collection + for _, c := range cols { + if col != nil && col.Name().HasValue() && !c.Name().HasValue() { + continue + } + col = c + } + return col, nil +} + +func (s *server) syncIndexedDocs( + ctx context.Context, + col client.Collection, + docID client.DocID, +) error { + preTxnCol, err := s.db.GetCollectionByName(ctx, col.Name().Value()) + if err != nil { + return err + } + + oldDoc, err := preTxnCol.Get(ctx, docID, false) + isNewDoc := errors.Is(err, client.ErrDocumentNotFound) + if !isNewDoc && err != nil { + return err + } + + doc, err := col.Get(ctx, docID, false) + isDeletedDoc := errors.Is(err, client.ErrDocumentNotFound) + if !isDeletedDoc && err != nil { + return err + } + + if isDeletedDoc { + return preTxnCol.DeleteDocIndex(ctx, oldDoc) + } else if isNewDoc { + return col.CreateDocIndex(ctx, doc) + } else { + return col.UpdateDocIndex(ctx, oldDoc, doc) + } +} + // GetHeadLog receives a get head log request func (s *server) GetHeadLog( ctx context.Context, diff --git a/net/server_test.go b/net/server_test.go index 5606dc3dc7..099f426887 100644 --- a/net/server_test.go +++ b/net/server_test.go @@ -49,7 +49,7 @@ type mockDBColError struct { client.DB } -func (mDB *mockDBColError) GetAllCollections(context.Context) ([]client.Collection, error) { +func (mDB *mockDBColError) GetCollections(context.Context, client.CollectionFetchOptions) ([]client.Collection, error) { return nil, mockError } @@ -85,7 +85,7 @@ type mockDBDocIDsError struct { client.DB } -func (mDB *mockDBDocIDsError) GetAllCollections(context.Context) ([]client.Collection, error) { +func (mDB *mockDBDocIDsError) GetCollections(context.Context, client.CollectionFetchOptions) ([]client.Collection, error) { return []client.Collection{ &mockCollection{}, }, nil diff --git a/node/node.go b/node/node.go new file mode 100644 index 0000000000..89bedd56ff --- /dev/null +++ b/node/node.go @@ -0,0 +1,191 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package node + +import ( + "context" + "errors" + "fmt" + gohttp "net/http" + + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/http" + "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/defradb/net" +) + +var log = logging.MustNewLogger("node") + +// Options contains start configuration values. 
+type Options struct { + storeOpts []StoreOpt + dbOpts []db.Option + netOpts []net.NodeOpt + serverOpts []http.ServerOpt + peers []peer.AddrInfo + disableP2P bool + disableAPI bool +} + +// DefaultOptions returns options with default settings. +func DefaultOptions() *Options { + return &Options{} +} + +// NodeOpt is a function for setting configuration values. +type NodeOpt func(*Options) + +// WithStoreOpts sets the store options. +func WithStoreOpts(opts ...StoreOpt) NodeOpt { + return func(o *Options) { + o.storeOpts = opts + } +} + +// WithDatabaseOpts sets the database options. +func WithDatabaseOpts(opts ...db.Option) NodeOpt { + return func(o *Options) { + o.dbOpts = opts + } +} + +// WithNetOpts sets the net / p2p options. +func WithNetOpts(opts ...net.NodeOpt) NodeOpt { + return func(o *Options) { + o.netOpts = opts + } +} + +// WithServerOpts sets the api server options. +func WithServerOpts(opts ...http.ServerOpt) NodeOpt { + return func(o *Options) { + o.serverOpts = opts + } +} + +// WithDisableP2P sets the disable p2p flag. +func WithDisableP2P(disable bool) NodeOpt { + return func(o *Options) { + o.disableP2P = disable + } +} + +// WithDisableAPI sets the disable api flag. +func WithDisableAPI(disable bool) NodeOpt { + return func(o *Options) { + o.disableAPI = disable + } +} + +// WithPeers sets the bootstrap peers. +func WithPeers(peers ...peer.AddrInfo) NodeOpt { + return func(o *Options) { + o.peers = peers + } +} + +// Node is a DefraDB instance with optional sub-systems. +type Node struct { + DB client.DB + Node *net.Node + Server *http.Server +} + +// NewNode returns a new node instance configured with the given options. +func NewNode(ctx context.Context, opts ...NodeOpt) (*Node, error) { + options := DefaultOptions() + for _, opt := range opts { + opt(options) + } + rootstore, err := NewStore(options.storeOpts...) + if err != nil { + return nil, err + } + db, err := db.NewDB(ctx, rootstore, options.dbOpts...) + if err != nil { + return nil, err + } + + var node *net.Node + if !options.disableP2P { + // setup net node + node, err = net.NewNode(ctx, db, options.netOpts...) + if err != nil { + return nil, err + } + if len(options.peers) > 0 { + node.Bootstrap(options.peers) + } + } + + var server *http.Server + if !options.disableAPI { + // setup http server + var handler *http.Handler + if node != nil { + handler, err = http.NewHandler(node) + } else { + handler, err = http.NewHandler(db) + } + if err != nil { + return nil, err + } + server, err = http.NewServer(handler, options.serverOpts...) + if err != nil { + return nil, err + } + } + + return &Node{ + DB: db, + Node: node, + Server: server, + }, nil +} + +// Start starts the node sub-systems. +func (n *Node) Start(ctx context.Context) error { + if n.Node != nil { + if err := n.Node.Start(); err != nil { + return err + } + } + if n.Server != nil { + err := n.Server.SetListener() + if err != nil { + return err + } + log.FeedbackInfo(ctx, fmt.Sprintf("Providing HTTP API at %s.", n.Server.Address())) + go func() { + if err := n.Server.Serve(); err != nil && !errors.Is(err, gohttp.ErrServerClosed) { + log.FeedbackErrorE(ctx, "HTTP server stopped", err) + } + }() + } + return nil +} + +// Close stops the node sub-systems. 
+func (n *Node) Close(ctx context.Context) error { + var err error + if n.Server != nil { + err = n.Server.Shutdown(ctx) + } + if n.Node != nil { + n.Node.Close() + } else { + n.DB.Close() + } + return err +} diff --git a/node/node_test.go b/node/node_test.go new file mode 100644 index 0000000000..3f3c7c854f --- /dev/null +++ b/node/node_test.go @@ -0,0 +1,101 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package node + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/http" + "github.com/sourcenetwork/defradb/net" +) + +func TestWithStoreOpts(t *testing.T) { + storeOpts := []StoreOpt{WithPath("test")} + + options := &Options{} + WithStoreOpts(storeOpts...)(options) + assert.Equal(t, storeOpts, options.storeOpts) +} + +func TestWithDatabaseOpts(t *testing.T) { + dbOpts := []db.Option{db.WithMaxRetries(10)} + + options := &Options{} + WithDatabaseOpts(dbOpts...)(options) + assert.Equal(t, dbOpts, options.dbOpts) +} + +func TestWithNetOpts(t *testing.T) { + netOpts := []net.NodeOpt{net.WithEnablePubSub(true)} + + options := &Options{} + WithNetOpts(netOpts...)(options) + assert.Equal(t, netOpts, options.netOpts) +} + +func TestWithServerOpts(t *testing.T) { + serverOpts := []http.ServerOpt{http.WithAddress("127.0.0.1:8080")} + + options := &Options{} + WithServerOpts(serverOpts...)(options) + assert.Equal(t, serverOpts, options.serverOpts) +} + +func TestWithDisableP2P(t *testing.T) { + options := &Options{} + WithDisableP2P(true)(options) + assert.Equal(t, true, options.disableP2P) +} + +func TestWithDisableAPI(t *testing.T) { + options := &Options{} + WithDisableAPI(true)(options) + assert.Equal(t, true, options.disableAPI) +} + +func TestWithPeers(t *testing.T) { + peer, err := peer.AddrInfoFromString("/ip4/127.0.0.1/tcp/9000/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + require.NoError(t, err) + + options := &Options{} + WithPeers(*peer)(options) + + require.Len(t, options.peers, 1) + assert.Equal(t, *peer, options.peers[0]) +} + +func TestNodeStart(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := []NodeOpt{ + WithStoreOpts(WithPath(t.TempDir())), + WithDatabaseOpts(db.WithUpdateEvents()), + } + + node, err := NewNode(ctx, opts...) + require.NoError(t, err) + + err = node.Start(ctx) + require.NoError(t, err) + + <-time.After(5 * time.Second) + + err = node.Close(ctx) + require.NoError(t, err) +} diff --git a/node/store.go b/node/store.go new file mode 100644 index 0000000000..6d05954662 --- /dev/null +++ b/node/store.go @@ -0,0 +1,78 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
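Before store.go continues below, here is a rough lifecycle sketch of the node package introduced above, wiring store, database, p2p, and HTTP API together; the data path and listen address are illustrative values, not defaults mandated by this changeset:

```go
package main

import (
	"context"

	"github.com/sourcenetwork/defradb/db"
	"github.com/sourcenetwork/defradb/net"
	"github.com/sourcenetwork/defradb/node"
)

func main() {
	ctx := context.Background()

	// Omitting WithDisableP2P and WithDisableAPI keeps both the libp2p
	// host and the HTTP API enabled.
	n, err := node.NewNode(ctx,
		node.WithStoreOpts(node.WithPath("/tmp/defradb")), // illustrative path
		node.WithDatabaseOpts(db.WithUpdateEvents()),
		node.WithNetOpts(net.WithListenAddresses("/ip4/0.0.0.0/tcp/9171")),
	)
	if err != nil {
		panic(err)
	}

	if err := n.Start(ctx); err != nil { // starts p2p, then serves the HTTP API
		panic(err)
	}
	defer n.Close(ctx)
}
```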
+ +package node + +import ( + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/datastore/badger/v4" +) + +// StoreOptions contains store configuration values. +type StoreOptions struct { + path string + inMemory bool + valueLogFileSize int64 + encryptionKey []byte +} + +// DefaultStoreOptions returns new options with default values. +func DefaultStoreOptions() *StoreOptions { + return &StoreOptions{ + inMemory: false, + valueLogFileSize: 1 << 30, + } +} + +// StoreOpt is a function for setting configuration values. +type StoreOpt func(*StoreOptions) + +// WithInMemory sets the in memory flag. +func WithInMemory(inMemory bool) StoreOpt { + return func(o *StoreOptions) { + o.inMemory = inMemory + } +} + +// WithPath sets the datastore path. +func WithPath(path string) StoreOpt { + return func(o *StoreOptions) { + o.path = path + } +} + +// WithValueLogFileSize sets the badger value log file size. +func WithValueLogFileSize(size int64) StoreOpt { + return func(o *StoreOptions) { + o.valueLogFileSize = size + } +} + +// WithEncryptionKey sets the badger encryption key. +func WithEncryptionKey(encryptionKey []byte) StoreOpt { + return func(o *StoreOptions) { + o.encryptionKey = encryptionKey + } +} + +// NewStore returns a new store with the given options. +func NewStore(opts ...StoreOpt) (datastore.RootStore, error) { + options := DefaultStoreOptions() + for _, opt := range opts { + opt(options) + } + + badgerOpts := badger.DefaultOptions + badgerOpts.InMemory = options.inMemory + badgerOpts.ValueLogFileSize = options.valueLogFileSize + badgerOpts.EncryptionKey = options.encryptionKey + + return badger.NewDatastore(options.path, &badgerOpts) +} diff --git a/node/store_test.go b/node/store_test.go new file mode 100644 index 0000000000..69ce98e952 --- /dev/null +++ b/node/store_test.go @@ -0,0 +1,47 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
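Before the store tests continue below, a minimal sketch of composing the store options from store.go above into an encrypted on-disk Badger store; the key size and value log size are illustrative choices, not requirements of this changeset:

```go
package main

import (
	"crypto/rand"

	"github.com/sourcenetwork/defradb/datastore"
	"github.com/sourcenetwork/defradb/node"
)

// newEncryptedStore builds an on-disk Badger store with a random 256-bit
// encryption key and a 2 GiB value log file.
func newEncryptedStore(path string) (datastore.RootStore, error) {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		return nil, err
	}
	return node.NewStore(
		node.WithPath(path),
		node.WithValueLogFileSize(2<<30),
		node.WithEncryptionKey(key),
	)
}
```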
+ +package node + +import ( + "crypto/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWithInMemory(t *testing.T) { + options := &StoreOptions{} + WithInMemory(true)(options) + assert.Equal(t, true, options.inMemory) +} + +func TestWithPath(t *testing.T) { + options := &StoreOptions{} + WithPath("tmp")(options) + assert.Equal(t, "tmp", options.path) +} + +func TestWithValueLogFileSize(t *testing.T) { + options := &StoreOptions{} + WithValueLogFileSize(int64(5 << 30))(options) + assert.Equal(t, int64(5<<30), options.valueLogFileSize) +} + +func TestWithEncryptionKey(t *testing.T) { + encryptionKey := make([]byte, 32) + _, err := rand.Read(encryptionKey) + require.NoError(t, err) + + options := &StoreOptions{} + WithEncryptionKey(encryptionKey)(options) + assert.Equal(t, encryptionKey, options.encryptionKey) +} diff --git a/planner/commit.go b/planner/commit.go index 3caf6d2f4a..8508ab9980 100644 --- a/planner/commit.go +++ b/planner/commit.go @@ -16,6 +16,7 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" @@ -328,7 +329,13 @@ func (n *dagScanNode) dagBlockToNodeDoc(block blocks.Block) (core.Doc, []*ipld.L fieldName = nil default: - cols, err := n.planner.db.GetCollectionsByVersionID(n.planner.ctx, schemaVersionId) + cols, err := n.planner.db.GetCollections( + n.planner.ctx, + client.CollectionFetchOptions{ + IncludeInactive: immutable.Some(true), + SchemaVersionID: immutable.Some(schemaVersionId), + }, + ) if err != nil { return core.Doc{}, nil, err } @@ -338,7 +345,7 @@ func (n *dagScanNode) dagBlockToNodeDoc(block blocks.Block) (core.Doc, []*ipld.L // Because we only care about the schema, we can safely take the first - the schema is the same // for all in the set. 
- field, ok := cols[0].Schema().GetField(fieldName.(string)) + field, ok := cols[0].Definition().GetFieldByName(fieldName.(string)) if !ok { return core.Doc{}, nil, client.NewErrFieldNotExist(fieldName.(string)) } @@ -346,7 +353,7 @@ } n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.HeightFieldName, int64(prio)) - n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.DeltaFieldName, request.DeltaArgData) + n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.DeltaFieldName, delta[request.DeltaArgData]) n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.FieldNameFieldName, fieldName) n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.FieldIDFieldName, fieldID) @@ -358,7 +365,13 @@ n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.DocIDArgName, string(docID)) - cols, err := n.planner.db.GetCollectionsByVersionID(n.planner.ctx, schemaVersionId) + cols, err := n.planner.db.GetCollections( + n.planner.ctx, + client.CollectionFetchOptions{ + IncludeInactive: immutable.Some(true), + SchemaVersionID: immutable.Some(schemaVersionId), + }, + ) if err != nil { return core.Doc{}, nil, err } @@ -400,5 +413,3 @@ func (n *dagScanNode) dagBlockToNodeDoc(block blocks.Block) (core.Doc, []*ipld.L return commit, heads, nil } - -func (n *dagScanNode) Append() bool { return true } diff --git a/planner/datasource.go b/planner/datasource.go index 6cfb8cf728..526621d9d4 100644 --- a/planner/datasource.go +++ b/planner/datasource.go @@ -32,9 +32,9 @@ func (p *Planner) getCollectionScanPlan(mapperSelect *mapper.Select) (planSource } var plan planNode - if col.Description().BaseQuery != nil { + if len(col.Description().QuerySources()) > 0 { var err error - plan, err = p.View(mapperSelect, col.Description()) + plan, err = p.View(mapperSelect, col) if err != nil { return planSource{}, err } diff --git a/planner/delete.go b/planner/delete.go index b1096ffdb6..63cdec9a6f 100644 --- a/planner/delete.go +++ b/planner/delete.go @@ -57,6 +57,10 @@ func (n *deleteNode) Next() (bool, error) { if err != nil { return false, err } + + n.currentValue.Status = client.Deleted + n.documentMapping.TrySetFirstOfName(&n.currentValue, request.DeletedFieldName, true) + return true, nil } diff --git a/planner/filter/copy_field.go b/planner/filter/copy_field.go index 70b5dc2956..fff974da06 100644 --- a/planner/filter/copy_field.go +++ b/planner/filter/copy_field.go @@ -16,6 +16,7 @@ import ( // CopyField copies the given field from the provided filter. // Multiple fields can be passed to copy related objects with a certain field. +// In this case every subsequent field is a subfield of the previous one, e.g. book.author.name. // The result filter preserves the structure of the original filter.
func CopyField(filter *mapper.Filter, fields ...mapper.Field) *mapper.Filter { if filter == nil || len(fields) == 0 { diff --git a/planner/filter/copy_field_test.go b/planner/filter/copy_field_test.go index 1714db55b6..611f1d1fd8 100644 --- a/planner/filter/copy_field_test.go +++ b/planner/filter/copy_field_test.go @@ -120,12 +120,12 @@ func TestCopyField(t *testing.T) { } } -func TestCopyFieldOfNullFilter(t *testing.T) { +func TestCopyField_IfFilterIsNil_NoOp(t *testing.T) { actualFilter := CopyField(nil, mapper.Field{Index: 1}) assert.Nil(t, actualFilter) } -func TestCopyFieldWithNoFieldGiven(t *testing.T) { +func TestCopyField_IfNoFieldGiven_NoOp(t *testing.T) { filter := mapper.NewFilter() filter.Conditions = map[connor.FilterKey]any{ &mapper.PropertyIndex{Index: 0}: &mapper.Operator{Operation: "_eq"}, @@ -133,3 +133,18 @@ func TestCopyFieldWithNoFieldGiven(t *testing.T) { actualFilter := CopyField(filter) assert.Nil(t, actualFilter) } + +func TestCopyField_IfSecondFieldIsNotSubField_NoOp(t *testing.T) { + mapping := getDocMapping() + inputFilter := mapper.ToFilter(request.Filter{Conditions: map[string]any{ + "name": m("_eq", "John"), + "age": m("_gt", 55), + }}, mapping) + + var actualFilter *mapper.Filter + assert.NotPanics(t, func() { + actualFilter = CopyField(inputFilter, mapper.Field{Index: authorNameInd}, mapper.Field{Index: 666}) + }) + + assert.Nil(t, actualFilter) +} diff --git a/planner/filter/normalize.go b/planner/filter/normalize.go index 181b1f8485..65317f2170 100644 --- a/planner/filter/normalize.go +++ b/planner/filter/normalize.go @@ -185,7 +185,12 @@ func normalizeProperties(parentKey connor.FilterKey, conditions []any) []any { // if canMergeAnd is true, all _and groups will be merged props := make(map[int][]any) for _, c := range conditions { - for key, val := range c.(map[connor.FilterKey]any) { + cMap, ok := c.(map[connor.FilterKey]any) + if !ok { + result = append(result, c) + continue + } + for key, val := range cMap { op, ok := key.(*mapper.Operator) if canMergeAnd && ok && op.Operation == request.FilterOpAnd { merge = append(merge, val.([]any)...) diff --git a/planner/filter/split.go b/planner/filter/split.go index 1ef153746b..e562c8165a 100644 --- a/planner/filter/split.go +++ b/planner/filter/split.go @@ -13,7 +13,9 @@ import ( "github.com/sourcenetwork/defradb/planner/mapper" ) -// SplitByField splits the provided filter into 2 filters based on field. +// SplitByFields splits the provided filter into 2 filters based on fields. +// It extracts the conditions that apply to the provided fields and returns them +// as the second returned filter. // It can be used for extracting a subType // Eg. (filter: {age: 10, name: "bob", author: {birthday: "June 26, 1990", ...}, ...}) // // In this case the filter is the conditions that apply to the main type // ie: {age: 10, name: "bob", ...}, // // And the subType filter is the conditions that apply to the queried sub type // ie: {birthday: "June 26, 1990", ...}.
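To make the SplitByFields doc comment above concrete, a small sketch of the two-way split; the index values are illustrative, coming from whatever document mapping is in play, and note that SplitByFields mutates the input filter by removing the extracted conditions:

```go
package main

import (
	"github.com/sourcenetwork/defradb/planner/filter"
	"github.com/sourcenetwork/defradb/planner/mapper"
)

// splitOffSubType pulls the conditions on the given fields out of f and
// returns them as the second filter; the first return value holds whatever
// conditions remain (or nil if nothing is left).
func splitOffSubType(f *mapper.Filter, fieldIndexes ...int) (remainder, sub *mapper.Filter) {
	fields := make([]mapper.Field, len(fieldIndexes))
	for i, idx := range fieldIndexes {
		fields[i] = mapper.Field{Index: idx}
	}
	return filter.SplitByFields(f, fields...)
}
```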
-func SplitByField(filter *mapper.Filter, field mapper.Field) (*mapper.Filter, *mapper.Filter) { +func SplitByFields(filter *mapper.Filter, fields ...mapper.Field) (*mapper.Filter, *mapper.Filter) { if filter == nil { return nil, nil } - splitF := CopyField(filter, field) - RemoveField(filter, field) + if len(fields) == 0 { + return filter, nil + } + + splitF := CopyField(filter, fields[0]) + RemoveField(filter, fields[0]) + + for _, field := range fields[1:] { + newSplitF := CopyField(filter, field) + if newSplitF == nil { + continue + } + splitF.Conditions = Merge(splitF.Conditions, newSplitF.Conditions) + RemoveField(filter, field) + } if len(filter.Conditions) == 0 { filter = nil diff --git a/planner/filter/split_test.go b/planner/filter/split_test.go index 86fbb0b44a..221bd31527 100644 --- a/planner/filter/split_test.go +++ b/planner/filter/split_test.go @@ -21,7 +21,7 @@ import ( func TestSplitFilter(t *testing.T) { tests := []struct { name string - inputField mapper.Field + inputFields []mapper.Field inputFilter map[string]any expectedFilter1 map[string]any expectedFilter2 map[string]any @@ -32,7 +32,7 @@ func TestSplitFilter(t *testing.T) { "name": m("_eq", "John"), "age": m("_gt", 55), }, - inputField: mapper.Field{Index: authorAgeInd}, + inputFields: []mapper.Field{{Index: authorAgeInd}}, expectedFilter1: m("name", m("_eq", "John")), expectedFilter2: m("age", m("_gt", 55)), }, @@ -41,7 +41,7 @@ func TestSplitFilter(t *testing.T) { inputFilter: map[string]any{ "age": m("_gt", 55), }, - inputField: mapper.Field{Index: authorAgeInd}, + inputFields: []mapper.Field{{Index: authorAgeInd}}, expectedFilter1: nil, expectedFilter2: m("age", m("_gt", 55)), }, @@ -50,17 +50,68 @@ func TestSplitFilter(t *testing.T) { inputFilter: map[string]any{ "name": m("_eq", "John"), }, - inputField: mapper.Field{Index: authorAgeInd}, + inputFields: []mapper.Field{{Index: authorAgeInd}}, expectedFilter1: m("name", m("_eq", "John")), expectedFilter2: nil, }, + { + name: "split by 2 fields", + inputFilter: map[string]any{ + "name": m("_eq", "John"), + "age": m("_gt", 55), + "published": m("_eq", true), + "verified": m("_eq", false), + }, + inputFields: []mapper.Field{{Index: authorNameInd}, {Index: authorAgeInd}, {Index: authorVerifiedInd}}, + expectedFilter1: m("published", m("_eq", true)), + expectedFilter2: map[string]any{ + "name": m("_eq", "John"), + "age": m("_gt", 55), + "verified": m("_eq", false), + }, + }, + { + name: "split by fields that are not present", + inputFilter: map[string]any{ + "name": m("_eq", "John"), + "age": m("_gt", 55), + "verified": m("_eq", false), + }, + inputFields: []mapper.Field{ + {Index: authorNameInd}, + {Index: 100}, + {Index: authorAgeInd}, + {Index: 200}, + }, + expectedFilter1: m("verified", m("_eq", false)), + expectedFilter2: map[string]any{ + "name": m("_eq", "John"), + "age": m("_gt", 55), + }, + }, + { + name: "filter with two []any slices", + inputFilter: map[string]any{ + "age": m("_in", []any{10, 20, 30}), + "name": m("_in", []any{"John", "Bob"}), + }, + inputFields: []mapper.Field{ + {Index: authorNameInd}, + {Index: authorAgeInd}, + }, + expectedFilter1: nil, + expectedFilter2: map[string]any{ + "age": m("_in", []any{10, 20, 30}), + "name": m("_in", []any{"John", "Bob"}), + }, + }, } mapping := getDocMapping() for _, test := range tests { t.Run(test.name, func(t *testing.T) { inputFilter := mapper.ToFilter(request.Filter{Conditions: test.inputFilter}, mapping) - actualFilter1, actualFilter2 := SplitByField(inputFilter, test.inputField) + actualFilter1, 
actualFilter2 := SplitByFields(inputFilter, test.inputFields...) expectedFilter1 := mapper.ToFilter(request.Filter{Conditions: test.expectedFilter1}, mapping) expectedFilter2 := mapper.ToFilter(request.Filter{Conditions: test.expectedFilter2}, mapping) if expectedFilter1 != nil || actualFilter1 != nil { @@ -73,8 +124,20 @@ func TestSplitFilter(t *testing.T) { } } +func TestSplitFilter_WithNoFields_ReturnsInputFilter(t *testing.T) { + mapping := getDocMapping() + inputFilterConditions := map[string]any{ + "name": m("_eq", "John"), + "age": m("_gt", 55), + } + inputFilter := mapper.ToFilter(request.Filter{Conditions: inputFilterConditions}, mapping) + actualFilter1, actualFilter2 := SplitByFields(inputFilter) + AssertEqualFilterMap(t, inputFilter.Conditions, actualFilter1.Conditions) + assert.Nil(t, actualFilter2) +} + func TestSplitNullFilter(t *testing.T) { - actualFilter1, actualFilter2 := SplitByField(nil, mapper.Field{Index: authorAgeInd}) + actualFilter1, actualFilter2 := SplitByFields(nil, mapper.Field{Index: authorAgeInd}) assert.Nil(t, actualFilter1) assert.Nil(t, actualFilter2) } diff --git a/planner/lens.go b/planner/lens.go new file mode 100644 index 0000000000..eba0edd587 --- /dev/null +++ b/planner/lens.go @@ -0,0 +1,174 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package planner + +import ( + "github.com/sourcenetwork/immutable/enumerable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/core" +) + +// lensNode applies a lens transform to data yielded from the source node. +// +// It may return a different number of documents to that yielded by its source, +// and there is no guarantee that those documents will actually exist as documents +// in Defra (they may be created by the transform).
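A minimal, self-contained sketch of the pull loop that lensNode.Next implements below: transformed documents are drained from the output first, and only when it is empty is one more source document fed in, so one source document may surface as zero or several results. The toyLensNode and fanOut names here are illustrative stand-ins, not the real LensRegistry API:

    package main

    import "fmt"

    type doc = map[string]any

    type toyLensNode struct {
        source  []doc // stand-in for the source plan node
        pending []doc // stand-in for the transform's output queue
        current doc
    }

    // fanOut is a toy migration: one document in, two documents out.
    func fanOut(d doc) []doc {
        return []doc{d, {"derivedFrom": d["name"]}}
    }

    func (n *toyLensNode) Next() bool {
        // Drain the transform's output first.
        if len(n.pending) > 0 {
            n.current, n.pending = n.pending[0], n.pending[1:]
            return true
        }
        // Otherwise pull one more document from the source and feed it in.
        if len(n.source) == 0 {
            return false
        }
        n.pending = append(n.pending, fanOut(n.source[0])...)
        n.source = n.source[1:]
        return n.Next()
    }

    func main() {
        n := &toyLensNode{source: []doc{{"name": "Alice"}, {"name": "Bob"}}}
        for n.Next() {
            fmt.Println(n.current) // two outputs per source document
        }
    }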
+type lensNode struct { + docMapper + documentIterator + + p *Planner + source planNode + collection client.CollectionDescription + + input enumerable.Queue[map[string]any] + output enumerable.Enumerable[map[string]any] +} + +func (p *Planner) Lens(source planNode, docMap *core.DocumentMapping, col client.Collection) *lensNode { + return &lensNode{ + docMapper: docMapper{docMap}, + p: p, + source: source, + collection: col.Description(), + } +} + +func (n *lensNode) Init() error { + n.input = enumerable.NewQueue[map[string]any]() + + pipe, err := n.p.db.LensRegistry().MigrateUp(n.p.ctx, n.input, n.collection.ID) + if err != nil { + return err + } + + n.output = pipe + + return n.source.Init() +} + +func (n *lensNode) Start() error { + return n.source.Start() +} + +func (n *lensNode) Spans(spans core.Spans) { + n.source.Spans(spans) +} + +func (n *lensNode) Next() (bool, error) { + hasNext, err := n.output.Next() + if err != nil { + return false, err + } + + if hasNext { + lensDoc, err := n.output.Value() + if err != nil { + return false, err + } + + nextValue, err := n.toDoc(lensDoc) + if err != nil { + return false, err + } + + n.currentValue = nextValue + return true, nil + } + + sourceHasNext, err := n.source.Next() + if err != nil { + return false, err + } + + if !sourceHasNext { + return false, nil + } + + sourceDoc := n.source.Value() + sourceLensDoc := n.source.Source().DocumentMap().ToMap(sourceDoc) + + err = n.input.Put(sourceLensDoc) + if err != nil { + return false, err + } + + return n.Next() +} + +func (n *lensNode) toDoc(mapDoc map[string]any) (core.Doc, error) { + status := client.Active + properties := make([]any, len(mapDoc)) + + for fieldName, fieldValue := range mapDoc { + if fieldName == request.DocIDFieldName && fieldValue != nil { + properties[core.DocIDFieldIndex] = fieldValue.(string) + continue + } + + if fieldName == request.DeletedFieldName { + if wasDeleted, ok := fieldValue.(bool); ok { + if wasDeleted { + status = client.Deleted + } + } + continue + } + + indexes := n.documentMapping.IndexesByName[fieldName] + if len(indexes) == 0 { + // Note: This can happen if a migration returns a field that + // we do not know about, in which case we have to skip it. + continue + } + // Take the last index of this name; this is in order to be consistent with other + // similar logic, for example when converting a core.Doc to a map before passing it + // into a lens transform. + fieldIndex := indexes[len(indexes)-1] + + if len(properties) <= fieldIndex { + // Because the document is sourced from another mapping, we may still need to grow + // the resultant field set. We cannot use [append] because the index of each field + // must still correspond to its field ID.
+ originalProps := properties + properties = make([]any, fieldIndex+1) + copy(properties, originalProps) + } + properties[fieldIndex] = fieldValue + } + + return core.Doc{ + Fields: properties, + SchemaVersionID: n.collection.SchemaVersionID, + Status: status, + }, nil +} + +func (n *lensNode) Source() planNode { + return n.source +} + +func (n *lensNode) Kind() string { + return "lensNode" +} + +func (n *lensNode) Close() error { + if n.source != nil { + err := n.source.Close() + if err != nil { + return err + } + } + + return nil +} diff --git a/planner/mapper/mapper.go b/planner/mapper/mapper.go index 06772be487..953d21ce17 100644 --- a/planner/mapper/mapper.go +++ b/planner/mapper/mapper.go @@ -58,7 +58,7 @@ func toSelect( return nil, err } - mapping, schema, err := getTopLevelInfo(ctx, store, selectRequest, collectionName) + mapping, definition, err := getTopLevelInfo(ctx, store, selectRequest, collectionName) if err != nil { return nil, err } @@ -91,7 +91,7 @@ func toSelect( fields, mapping, collectionName, - schema, + definition, store, ) @@ -99,8 +99,8 @@ func toSelect( return nil, err } - if len(schema.Fields) != 0 { - fields, err = resolveSecondaryRelationIDs(ctx, store, collectionName, schema, mapping, fields) + if len(definition.Schema.Fields) != 0 { + fields, err = resolveSecondaryRelationIDs(ctx, store, collectionName, definition.Schema, mapping, fields) if err != nil { return nil, err } @@ -111,10 +111,10 @@ func toSelect( groupByFields := selectRequest.GroupBy.Value().Fields // Remap all alias field names to use their internal field name mappings. for index, groupByField := range groupByFields { - fieldDesc, ok := schema.GetField(groupByField) - if ok && fieldDesc.IsObject() && !fieldDesc.IsObjectArray() { + fieldDesc, ok := definition.GetFieldByName(groupByField) + if ok && fieldDesc.Kind.IsObject() && !fieldDesc.Kind.IsObjectArray() { groupByFields[index] = groupByField + request.RelatedObjectID - } else if ok && fieldDesc.IsObjectArray() { + } else if ok && fieldDesc.Kind.IsObjectArray() { return nil, NewErrInvalidFieldToGroupBy(groupByField) } } @@ -267,7 +267,7 @@ func resolveAggregates( inputFields []Requestable, mapping *core.DocumentMapping, collectionName string, - schema client.SchemaDescription, + def client.CollectionDefinition, store client.Store, ) ([]Requestable, error) { fields := inputFields @@ -287,9 +287,9 @@ func resolveAggregates( var hasHost bool var convertedFilter *Filter if childIsMapped { - fieldDesc, isField := schema.GetField(target.hostExternalName) + fieldDesc, isField := def.GetFieldByName(target.hostExternalName) - if isField && !fieldDesc.IsObject() { + if isField && !fieldDesc.Kind.IsObject() { var order *OrderBy if target.order.HasValue() && len(target.order.Value().Conditions) > 0 { // For inline arrays the order element will consist of just a direction @@ -370,6 +370,19 @@ func resolveAggregates( mapping.SetChildAt(index, childMapping) if !childIsMapped { + filterDependencies, err := resolveFilterDependencies( + ctx, + store, + childCollectionName, + target.filter, + mapping.ChildMappings[index], + childFields, + ) + if err != nil { + return nil, err + } + childFields = append(childFields, filterDependencies...) + // If the child was not mapped, the filter will not have been converted yet // so we must do that now. 
convertedFilter = ToFilter(target.filter.Value(), mapping.ChildMappings[index]) @@ -716,8 +729,8 @@ func getCollectionName( return "", err } - hostFieldDesc, parentHasField := parentCollection.Schema().GetField(selectRequest.Name) - if parentHasField && hostFieldDesc.RelationType != 0 { + hostFieldDesc, parentHasField := parentCollection.Definition().GetFieldByName(selectRequest.Name) + if parentHasField && hostFieldDesc.RelationName != "" { // If this field exists on the parent, and it is a child object // then this collection name is the collection name of the child. return hostFieldDesc.Schema, nil @@ -733,17 +746,17 @@ func getTopLevelInfo( store client.Store, selectRequest *request.Select, collectionName string, -) (*core.DocumentMapping, client.SchemaDescription, error) { +) (*core.DocumentMapping, client.CollectionDefinition, error) { mapping := core.NewDocumentMapping() if _, isAggregate := request.Aggregates[selectRequest.Name]; isAggregate { // If this is a (top-level) aggregate, then it will have no collection // description, and no top-level fields, so we return an empty mapping only - return mapping, client.SchemaDescription{}, nil + return mapping, client.CollectionDefinition{}, nil } if selectRequest.Root == request.ObjectSelection { - var schema client.SchemaDescription + var definition client.CollectionDefinition collection, err := store.GetCollectionByName(ctx, collectionName) if err != nil { // If the collection is not found, check to see if a schema of that name exists, // // Note: This is a poor way to check if a collection exists or not, see // https://github.com/sourcenetwork/defradb/issues/2146 - schemas, err := store.GetSchemasByName(ctx, collectionName) + schemas, err := store.GetSchemas( + ctx, + client.SchemaFetchOptions{ + Name: immutable.Some(collectionName), + }, + ) if err != nil { - return nil, client.SchemaDescription{}, err + return nil, client.CollectionDefinition{}, err } if len(schemas) == 0 { - return nil, client.SchemaDescription{}, NewErrTypeNotFound(collectionName) + return nil, client.CollectionDefinition{}, NewErrTypeNotFound(collectionName) + } + + for i, f := range schemas[0].Fields { + // As embedded objects do not have collections/field-ids, we just take the index + mapping.Add(int(i), f.Name) + } + + definition = client.CollectionDefinition{ + // `schemas` will contain all versions of that name; as views cannot be updated at the moment, + // this should be fine for now + Schema: schemas[0], } - // `schemas` will contain all versions of that name, as views cannot be updated atm this should - // be fine for now - schema = schemas[0] } else { mapping.Add(core.DocIDFieldIndex, request.DocIDFieldName) - schema = collection.Schema() - } - - // Map all fields from schema into the map as they are fetched automatically - for _, f := range schema.Fields { - if f.IsObject() { - // Objects are skipped, as they are not fetched by default and - // have to be requested via selects.
+ continue + } + mapping.Add(int(f.ID), f.Name) } - mapping.Add(int(f.ID), f.Name) } // Setting the type name must be done after adding the fields, as @@ -782,7 +807,7 @@ func getTopLevelInfo( mapping.Add(mapping.GetNextIndex(), request.DeletedFieldName) - return mapping, schema, nil + return mapping, definition, nil } if selectRequest.Name == request.LinksFieldName { @@ -803,7 +828,7 @@ func getTopLevelInfo( mapping.SetTypeName(request.CommitTypeName) } - return mapping, client.SchemaDescription{}, nil + return mapping, client.CollectionDefinition{}, nil } func resolveFilterDependencies( @@ -1013,12 +1038,12 @@ func resolveSecondaryRelationIDs( continue } - fieldDesc, descFound := schema.GetField(existingField.Name) + fieldDesc, descFound := schema.GetFieldByName(existingField.Name) if !descFound { continue } - if !fieldDesc.RelationType.IsSet(client.Relation_Type_INTERNAL_ID) { + if fieldDesc.Kind != client.FieldKind_DocID { continue } diff --git a/planner/multi.go b/planner/multi.go index 30bbc8338c..1b5fc14bbc 100644 --- a/planner/multi.go +++ b/planner/multi.go @@ -32,23 +32,6 @@ type MultiNode interface { Children() []planNode } -// mergeNode is a special interface for the MultiNode -// system. A mergeNode provides an entire document -// in its Values() func, with all the specific and -// necessary fields and subfields already merged -// into the doc -type mergeNode interface { - planNode - Merge() bool -} - -// appendNode is a special interface for the MultiNode -// system. -type appendNode interface { - planNode - Append() bool -} - // parallelNode implements the MultiNode interface. It // enables parallel execution of planNodes. This is needed // if a single request has multiple Select statements at the @@ -132,10 +115,10 @@ func (p *parallelNode) Next() (bool, error) { var err error // isMerge := false switch n := plan.(type) { - case mergeNode: + case *scanNode, *typeIndexJoin: // isMerge = true next, err = p.nextMerge(i, n) - case appendNode: + case *dagScanNode: next, err = p.nextAppend(i, n) } if err != nil { @@ -148,7 +131,7 @@ func (p *parallelNode) Next() (bool, error) { return orNext, nil } -func (p *parallelNode) nextMerge(index int, plan mergeNode) (bool, error) { +func (p *parallelNode) nextMerge(index int, plan planNode) (bool, error) { if next, err := plan.Next(); !next { return false, err } @@ -159,52 +142,7 @@ func (p *parallelNode) nextMerge(index int, plan mergeNode) (bool, error) { return true, nil } -/* - -scan node -========= -{ - _docID: bae-ALICE, - name: Alice, - points: 124, - verified: false -} - -typeJoin node(merge) -============= -{ - friends: [ - { - _docID: bae-BOB, - name: bob, - points: 99.9, - verified: true, - } - ] -} - -output -====== - -{ - _docID: bae-ALICE, - name: Alice, - points: 124, - verified: false, - - friends: [ - { - _docID: bae-BOB, - name: bob, - points: 99.9, - verified: true, - } - ] -} - -*/ - -func (p *parallelNode) nextAppend(index int, plan appendNode) (bool, error) { +func (p *parallelNode) nextAppend(index int, plan planNode) (bool, error) { key := p.currentValue.GetID() if key == "" { return false, nil @@ -235,43 +173,6 @@ func (p *parallelNode) nextAppend(index int, plan appendNode) (bool, error) { return true, nil } -/* - -query { - user { - _docID - name - points - verified - - _version { - cid - } - } -} - -scan node -========= -{ - _docID: bae-ALICE, - name: Alice, - points: 124, - verified: false -} - -_version: commitSelectTopNode(append) -=================== -[ - { - cid: QmABC - }, - { - cid: QmDEF - } - ... 
-] -*/ - func (p *parallelNode) Source() planNode { return p.multiscan } func (p *parallelNode) Children() []planNode { @@ -283,171 +184,66 @@ func (p *parallelNode) addChild(fieldIndex int, node planNode) { p.childIndexes = append(p.childIndexes, fieldIndex) } -/* -user { - friends { - name - } - - addresses { - street_name - } -} - -Select { - source: scanNode(user) -} - - |||||| - \/\/\/ - -Select { - source: TypeJoin(friends, user) { - joinPlan { - typeJoinMany { - root: scanNode(user) - subType: Select { - source: scanNode(friends) - } - } - } - }, -} - - |||||| - \/\/\/ - -Select { - source: MultiNode[ - { - TypeJoin(friends, user) { - joinPlan { - typeJoinMany { - root: multiscan(scanNode(user)) - subType: Select { - source: scanNode(friends) - } - } - } - } - }, - { - TypeJoin(addresses, user) { - joinPlan { - typeJoinMany { - root: multiscan(scanNode(user)) - subType: Select { - source: scanNode(addresses) - } - } - } - } - }] - } -} - -select addSubPlan { - check if source is MultiNode - yes => - get multiScan node - create new plan with multi scan node - append - no = > - create new multinode - get scan node from existing source - create multiscan - replace existing source scannode with multiScan - add existing source to new MultiNode - add new plan to multNode - -} - -Select { - source: Parallel {[ - TypeJoin { - - }, - commitScan { - - } - ]} -} -*/ - -// @todo: Document AddSubPlan method -func (s *selectNode) addSubPlan(fieldIndex int, plan planNode) error { - src := s.source - switch node := src.(type) { +func (s *selectNode) addSubPlan(fieldIndex int, newPlan planNode) error { + switch sourceNode := s.source.(type) { // if its a scan node, we either replace or create a multinode case *scanNode, *pipeNode: - switch plan.(type) { - case mergeNode: - s.source = plan - case appendNode: + switch newPlan.(type) { + case *scanNode, *typeIndexJoin: + s.source = newPlan + case *dagScanNode: m := ¶llelNode{ p: s.planner, - docMapper: docMapper{src.DocumentMap()}, + docMapper: docMapper{s.source.DocumentMap()}, } - m.addChild(-1, src) - m.addChild(fieldIndex, plan) + m.addChild(-1, s.source) + m.addChild(fieldIndex, newPlan) s.source = m default: - return client.NewErrUnhandledType("sub plan", plan) + return client.NewErrUnhandledType("sub plan", newPlan) } - // source is a mergeNode, like a TypeJoin - case mergeNode: - origScan, _ := walkAndFindPlanType[*scanNode](plan) + case *typeIndexJoin: + origScan, _ := walkAndFindPlanType[*scanNode](newPlan) if origScan == nil { return ErrFailedToFindScanNode } // create our new multiscanner multiscan := &multiScanNode{scanNode: origScan} // replace our current source internal scanNode with our new multiscanner - if err := s.planner.walkAndReplacePlan(src, origScan, multiscan); err != nil { + if err := s.planner.walkAndReplacePlan(s.source, origScan, multiscan); err != nil { return err } // create multinode multinode := ¶llelNode{ p: s.planner, multiscan: multiscan, - docMapper: docMapper{src.DocumentMap()}, + docMapper: docMapper{s.source.DocumentMap()}, } - multinode.addChild(-1, src) + multinode.addChild(-1, s.source) multiscan.addReader() // replace our new node internal scanNode with our new multiscanner - if err := s.planner.walkAndReplacePlan(plan, origScan, multiscan); err != nil { + if err := s.planner.walkAndReplacePlan(newPlan, origScan, multiscan); err != nil { return err } // add our newly updated plan to the multinode - multinode.addChild(fieldIndex, plan) + multinode.addChild(fieldIndex, newPlan) multiscan.addReader() s.source = 
multinode // we already have an existing parallelNode as our source case *parallelNode: - switch plan.(type) { - // easy, just append, since append doest need any internal relaced scannode - case appendNode: - node.addChild(fieldIndex, plan) - + switch newPlan.(type) { // We have a internal multiscanNode on our MultiNode - case mergeNode: - multiscan, sourceIsMultiscan := node.Source().(*multiScanNode) - if !sourceIsMultiscan { - return client.NewErrUnexpectedType[*multiScanNode]("mergeNode", node.Source()) - } - + case *scanNode, *typeIndexJoin: // replace our new node internal scanNode with our existing multiscanner - if err := s.planner.walkAndReplacePlan(plan, multiscan.Source(), multiscan); err != nil { + if err := s.planner.walkAndReplacePlan(newPlan, sourceNode.multiscan.Source(), sourceNode.multiscan); err != nil { return err } - multiscan.addReader() - // add our newly updated plan to the multinode - node.addChild(fieldIndex, plan) - default: - return client.NewErrUnhandledType("sub plan", plan) + sourceNode.multiscan.addReader() } + + sourceNode.addChild(fieldIndex, newPlan) } return nil } diff --git a/planner/operations.go b/planner/operations.go index 75d70dcdaf..59aa00a3c3 100644 --- a/planner/operations.go +++ b/planner/operations.go @@ -33,6 +33,7 @@ var ( _ planNode = (*updateNode)(nil) _ planNode = (*valuesNode)(nil) _ planNode = (*viewNode)(nil) + _ planNode = (*lensNode)(nil) _ MultiNode = (*parallelNode)(nil) _ MultiNode = (*topLevelNode)(nil) diff --git a/planner/planner.go b/planner/planner.go index 5a87983947..0629076924 100644 --- a/planner/planner.go +++ b/planner/planner.go @@ -240,6 +240,9 @@ func (p *Planner) expandPlan(planNode planNode, parentPlan *selectTopNode) error case *viewNode: return p.expandPlan(n.source, parentPlan) + case *lensNode: + return p.expandPlan(n.source, parentPlan) + default: return nil } @@ -342,18 +345,17 @@ func (p *Planner) tryOptimizeJoinDirection(node *invertibleTypeJoin, parentPlan ) slct := node.subType.(*selectTopNode).selectNode desc := slct.collection.Description() - schema := slct.collection.Schema() - indexedFields := desc.CollectIndexedFields(&schema) - for _, indField := range indexedFields { - if ind, ok := filteredSubFields[indField.Name]; ok { + for subFieldName, subFieldInd := range filteredSubFields { + indexes := desc.GetIndexesOnField(subFieldName) + if len(indexes) > 0 { subInd := node.documentMapping.FirstIndexOfName(node.subTypeName) relatedField := mapper.Field{Name: node.subTypeName, Index: subInd} fieldFilter := filter.UnwrapRelation(filter.CopyField( parentPlan.selectNode.filter, relatedField, - mapper.Field{Name: indField.Name, Index: ind}, + mapper.Field{Name: subFieldName, Index: subFieldInd}, ), relatedField) - err := node.invertJoinDirectionWithIndex(fieldFilter, indField) + err := node.invertJoinDirectionWithIndex(fieldFilter, indexes[0]) if err != nil { return err } diff --git a/planner/scan.go b/planner/scan.go index 19ae079f5f..3ba0dd03cb 100644 --- a/planner/scan.go +++ b/planner/scan.go @@ -41,7 +41,7 @@ type scanNode struct { p *Planner col client.Collection - fields []client.FieldDescription + fields []client.FieldDefinition showDeleted bool @@ -104,7 +104,7 @@ func (n *scanNode) initFields(fields []mapper.Requestable) error { if target.Filter != nil { fieldDescs, err := parser.ParseFilterFieldsForDescription( target.Filter.ExternalConditions, - n.col.Schema(), + n.col.Definition(), ) if err != nil { return err @@ -125,7 +125,7 @@ func (n *scanNode) initFields(fields []mapper.Requestable) error { 
} func (n *scanNode) tryAddField(fieldName string) bool { - fd, ok := n.col.Schema().GetField(fieldName) + fd, ok := n.col.Definition().GetFieldByName(fieldName) if !ok { // skip fields that are not part of the // schema description. The scanner (and fetcher) @@ -138,7 +138,7 @@ func (n *scanNode) tryAddField(fieldName string) bool { func (scan *scanNode) initFetcher( cid immutable.Option[string], - indexedField immutable.Option[client.FieldDescription], + index immutable.Option[client.IndexDescription], ) { var f fetcher.Fetcher if cid.HasValue() { @@ -146,14 +146,17 @@ func (scan *scanNode) initFetcher( } else { f = new(fetcher.DocumentFetcher) - if indexedField.HasValue() { - typeIndex := scan.documentMapping.FirstIndexOfName(indexedField.Value().Name) - field := mapper.Field{Index: typeIndex, Name: indexedField.Value().Name} + if index.HasValue() { + fields := make([]mapper.Field, 0, len(index.Value().Fields)) + for _, field := range index.Value().Fields { + fieldName := field.Name + typeIndex := scan.documentMapping.FirstIndexOfName(fieldName) + fields = append(fields, mapper.Field{Index: typeIndex, Name: fieldName}) + } var indexFilter *mapper.Filter - scan.filter, indexFilter = filter.SplitByField(scan.filter, field) + scan.filter, indexFilter = filter.SplitByFields(scan.filter, fields...) if indexFilter != nil { - fieldDesc, _ := scan.col.Schema().GetField(indexedField.Value().Name) - f = fetcher.NewIndexFetcher(f, fieldDesc, indexFilter) + f = fetcher.NewIndexFetcher(f, index.Value(), indexFilter) } } @@ -252,7 +255,7 @@ func (n *scanNode) simpleExplain() (map[string]any, error) { } // Add the collection attributes. - simpleExplainMap[collectionNameLabel] = n.col.Name() + simpleExplainMap[collectionNameLabel] = n.col.Name().Value() simpleExplainMap[collectionIDLabel] = n.col.Description().IDString() // Add the spans attribute. 
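To make the initFetcher change concrete: conditions covered by the index's fields become the index filter, and the remainder is evaluated during the regular document scan. Below is a minimal sketch of that two-stage evaluation, with a hard-coded indexLookup standing in for fetcher.NewIndexFetcher (illustrative names only, not the real fetcher API):

    package main

    import "fmt"

    type doc = map[string]any

    // indexLookup stands in for the index fetcher: it answers only the
    // index-covered part of the filter (here, an index on "name").
    func indexLookup(docs []doc, name string) []doc {
        var out []doc
        for _, d := range docs {
            if d["name"] == name {
                out = append(out, d)
            }
        }
        return out
    }

    func main() {
        docs := []doc{
            {"name": "John", "verified": true},
            {"name": "John", "verified": false},
            {"name": "Bob", "verified": true},
        }
        // Filter {name: {_eq: "John"}, verified: {_eq: true}} is split:
        // the index answers the "name" condition...
        candidates := indexLookup(docs, "John")
        // ...and the residual filter is applied by the scan.
        for _, d := range candidates {
            if d["verified"] == true {
                fmt.Println(d)
            }
        }
    }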
@@ -285,9 +288,6 @@ func (n *scanNode) Explain(explainType request.ExplainType) (map[string]any, err } } -// Merge implements mergeNode -func (n *scanNode) Merge() bool { return true } - func (p *Planner) Scan( mapperSelect *mapper.Select, colDesc client.CollectionDescription, diff --git a/planner/select.go b/planner/select.go index f1d85de9f3..ce7ff19030 100644 --- a/planner/select.go +++ b/planner/select.go @@ -290,26 +290,29 @@ func (n *selectNode) initSource() ([]aggregateNode, error) { } if isScanNode { - origScan.initFetcher(n.selectReq.Cid, findFilteredByIndexedField(origScan)) + origScan.initFetcher(n.selectReq.Cid, findIndexByFilteringField(origScan)) } return aggregates, nil } -func findFilteredByIndexedField(scanNode *scanNode) immutable.Option[client.FieldDescription] { - if scanNode.filter != nil { - schema := scanNode.col.Schema() - indexedFields := scanNode.col.Description().CollectIndexedFields(&schema) - for i := range indexedFields { - typeIndex := scanNode.documentMapping.FirstIndexOfName(indexedFields[i].Name) - if scanNode.filter.HasIndex(typeIndex) { - // we return the first found indexed field to keep it simple for now - // more sophisticated optimization logic can be added later - return immutable.Some(indexedFields[i]) - } +func findIndexByFilteringField(scanNode *scanNode) immutable.Option[client.IndexDescription] { + if scanNode.filter == nil { + return immutable.None[client.IndexDescription]() + } + colDesc := scanNode.col.Description() + + for _, field := range scanNode.col.Schema().Fields { + if _, isFiltered := scanNode.filter.ExternalConditions[field.Name]; !isFiltered { + continue + } + indexes := colDesc.GetIndexesOnField(field.Name) + if len(indexes) > 0 { + // we return the first found index. We will optimize it later. 
+ return immutable.Some(indexes[0]) } } - return immutable.None[client.FieldDescription]() + return immutable.None[client.IndexDescription]() } func (n *selectNode) initFields(selectReq *mapper.Select) ([]aggregateNode, error) { @@ -373,8 +376,9 @@ func (n *selectNode) initFields(selectReq *mapper.Select) ([]aggregateNode, erro // commit query link fields are always added and need no special treatment here // WARNING: It is important to check collection name is nil and the parent select name // here else we risk falsely identifying user defined fields with the name `links` as a commit links field - } else if n.collection.Description().BaseQuery == nil { - // Views only contain embedded objects and don't require a traditional join here + } else if !(n.collection != nil && len(n.collection.Description().QuerySources()) > 0) { + // Collections sourcing data from queries only contain embedded objects and don't require + // a traditional join here err := n.addTypeIndexJoin(f) if err != nil { return nil, err diff --git a/planner/sum.go b/planner/sum.go index 85371e5a30..fafd0cc4b5 100644 --- a/planner/sum.go +++ b/planner/sum.go @@ -82,12 +82,12 @@ func (p *Planner) isValueFloat( return false, err } - fieldDescription, fieldDescriptionFound := parentCol.Schema().GetField(source.Name) + fieldDescription, fieldDescriptionFound := parentCol.Schema().GetFieldByName(source.Name) if !fieldDescriptionFound { return false, client.NewErrFieldNotExist(source.Name) } return fieldDescription.Kind == client.FieldKind_FLOAT_ARRAY || - fieldDescription.Kind == client.FieldKind_FLOAT || + fieldDescription.Kind == client.FieldKind_NILLABLE_FLOAT || fieldDescription.Kind == client.FieldKind_NILLABLE_FLOAT_ARRAY, nil } @@ -130,13 +130,13 @@ func (p *Planner) isValueFloat( return false, err } - fieldDescription, fieldDescriptionFound := childCol.Schema().GetField(source.ChildTarget.Name) + fieldDescription, fieldDescriptionFound := childCol.Schema().GetFieldByName(source.ChildTarget.Name) if !fieldDescriptionFound { return false, client.NewErrFieldNotExist(source.ChildTarget.Name) } return fieldDescription.Kind == client.FieldKind_FLOAT_ARRAY || - fieldDescription.Kind == client.FieldKind_FLOAT || + fieldDescription.Kind == client.FieldKind_NILLABLE_FLOAT || fieldDescription.Kind == client.FieldKind_NILLABLE_FLOAT_ARRAY, nil } diff --git a/planner/type_join.go b/planner/type_join.go index fc4e6009cf..915a2d128f 100644 --- a/planner/type_join.go +++ b/planner/type_join.go @@ -20,7 +20,6 @@ import ( "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/planner/filter" "github.com/sourcenetwork/defradb/planner/mapper" - "github.com/sourcenetwork/defradb/request/graphql/schema" ) /* @@ -81,15 +80,14 @@ func (p *Planner) makeTypeIndexJoin( var joinPlan planNode var err error - typeFieldDesc, ok := parent.collection.Schema().GetField(subType.Name) + typeFieldDesc, ok := parent.collection.Schema().GetFieldByName(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } - meta := typeFieldDesc.RelationType - if schema.IsOne(meta) { // One-to-One, or One side of One-to-Many + if typeFieldDesc.Kind == client.FieldKind_FOREIGN_OBJECT { // One-to-One, or One side of One-to-Many joinPlan, err = p.makeTypeJoinOne(parent, source, subType) - } else if schema.IsOneToMany(meta) { // Many side of One-to-Many + } else if typeFieldDesc.Kind == client.FieldKind_FOREIGN_OBJECT_ARRAY { // Many side of One-to-Many joinPlan, err = p.makeTypeJoinMany(parent, source, subType) } else { // more to 
come, Many-to-Many, Embedded? return nil, ErrUnknownRelationType @@ -222,9 +220,6 @@ func (n *typeIndexJoin) Explain(explainType request.ExplainType) (map[string]any } } -// Merge implements mergeNode -func (n *typeIndexJoin) Merge() bool { return true } - // typeJoinOne is the plan node for a type index join // where the root type is the primary in a one-to-one relation request. type typeJoinOne struct { @@ -244,15 +239,11 @@ func (p *Planner) makeTypeJoinOne( } // get the correct sub field schema type (collection) - subTypeFieldDesc, ok := parent.collection.Schema().GetField(subType.Name) + subTypeFieldDesc, ok := parent.collection.Schema().GetFieldByName(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } - // determine relation direction (primary or secondary?) - // check if the field we're querying is the primary side of the relation - isPrimary := subTypeFieldDesc.RelationType.IsSet(client.Relation_Type_Primary) - subTypeCol, err := p.db.GetCollectionByName(p.ctx, subType.CollectionName) if err != nil { return nil, err @@ -261,7 +252,7 @@ func (p *Planner) makeTypeJoinOne( subTypeField, subTypeFieldNameFound := subTypeCol.Description().GetFieldByRelation( subTypeFieldDesc.RelationName, - parent.collection.Name(), + parent.collection.Name().Value(), subTypeFieldDesc.Name, &subTypeSchema, ) @@ -270,7 +261,7 @@ func (p *Planner) makeTypeJoinOne( } var secondaryFieldIndex immutable.Option[int] - if !isPrimary { + if !subTypeFieldDesc.IsPrimaryRelation { idFieldName := subTypeFieldDesc.Name + request.RelatedObjectID secondaryFieldIndex = immutable.Some( parent.documentMapping.FirstIndexOfName(idFieldName), @@ -292,7 +283,7 @@ func (p *Planner) makeTypeJoinOne( subSelect: subType, rootName: subTypeField.Name, subTypeName: subType.Name, - isSecondary: !isPrimary, + isSecondary: !subTypeFieldDesc.IsPrimaryRelation, secondaryFieldIndex: secondaryFieldIndex, secondaryFetchLimit: 1, dir: dir, @@ -359,7 +350,7 @@ func prepareScanNodeFilterForTypeJoin( filter.RemoveField(scan.filter, subType.Field) } else { var parentFilter *mapper.Filter - scan.filter, parentFilter = filter.SplitByField(scan.filter, subType.Field) + scan.filter, parentFilter = filter.SplitByFields(scan.filter, subType.Field) if parentFilter != nil { if parent.filter == nil { parent.filter = parentFilter @@ -383,7 +374,7 @@ func (p *Planner) makeTypeJoinMany( return nil, err } - subTypeFieldDesc, ok := parent.collection.Schema().GetField(subType.Name) + subTypeFieldDesc, ok := parent.collection.Schema().GetFieldByName(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } @@ -396,7 +387,7 @@ func (p *Planner) makeTypeJoinMany( rootField, rootNameFound := subTypeCol.Description().GetFieldByRelation( subTypeFieldDesc.RelationName, - parent.collection.Name(), + parent.collection.Name().Value(), subTypeFieldDesc.Name, &subTypeSchema, ) @@ -606,12 +597,12 @@ func (join *invertibleTypeJoin) Next() (bool, error) { func (join *invertibleTypeJoin) invertJoinDirectionWithIndex( fieldFilter *mapper.Filter, - field client.FieldDescription, + index client.IndexDescription, ) error { subScan := getScanNode(join.subType) subScan.tryAddField(join.rootName + request.RelatedObjectID) subScan.filter = fieldFilter - subScan.initFetcher(immutable.Option[string]{}, immutable.Some(field)) + subScan.initFetcher(immutable.Option[string]{}, immutable.Some(index)) join.invert() diff --git a/planner/view.go b/planner/view.go index 48a026f306..f02de06d27 100644 --- a/planner/view.go +++ b/planner/view.go @@ 
-23,10 +23,17 @@ type viewNode struct { p *Planner desc client.CollectionDescription source planNode + + // This is cached as a boolean to avoid rediscovering it in the main Next/Value iteration loop + hasTransform bool } -func (p *Planner) View(query *mapper.Select, desc client.CollectionDescription) (*viewNode, error) { - m, err := mapper.ToSelect(p.ctx, p.db, desc.BaseQuery) +func (p *Planner) View(query *mapper.Select, col client.Collection) (planNode, error) { + // For now, we assume a single source. This will need to change if/when we support multiple sources + querySource := (col.Description().Sources[0].(*client.QuerySource)) + hasTransform := querySource.Transform.HasValue() + + m, err := mapper.ToSelect(p.ctx, p.db, &querySource.Query) if err != nil { return nil, err } @@ -36,12 +43,19 @@ func (p *Planner) View(query *mapper.Select, desc client.CollectionDescription) return nil, err } - return &viewNode{ - p: p, - desc: desc, - source: source, - docMapper: docMapper{query.DocumentMapping}, - }, nil + if hasTransform { + source = p.Lens(source, query.DocumentMapping, col) + } + + viewNode := &viewNode{ + p: p, + desc: col.Description(), + source: source, + docMapper: docMapper{query.DocumentMapping}, + hasTransform: hasTransform, + } + + return viewNode, nil } func (n *viewNode) Init() error { @@ -61,14 +75,21 @@ func (n *viewNode) Next() (bool, error) { } func (n *viewNode) Value() core.Doc { - sourceValue := n.source.DocumentMap().ToMap(n.source.Value()) + sourceValue := n.source.Value() + if n.hasTransform { + // If this view has a transform, the source document will already have been + // converted to the new document mapping. + return sourceValue + } + + sourceMap := n.source.DocumentMap().ToMap(sourceValue) // We must convert the document from the source mapping (which was constructed using the // view's base query) to a document using the output mapping (which was constructed using // the current query and the output schemas). We do this by source output name, which // will take into account any aliases defined in the base query. doc := n.docMapper.documentMapping.NewDoc() - for fieldName, fieldValue := range sourceValue { + for fieldName, fieldValue := range sourceMap { // If the field does not exist, ignore it and continue. It likely means that // the field was declared in the query but not the SDL, and if it is not in the // SDL it cannot be requested/rendered by the user and would be dropped later anyway.
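For the untransformed path in viewNode.Value, the conversion between mappings is essentially a copy-by-field-name. A small sketch under assumed types, where a map of output field names to indexes stands in for the real core.DocumentMapping:

    package main

    import "fmt"

    // remap copies values across by field name; names unknown to the output
    // mapping are dropped, mirroring the "ignore it and continue" branch.
    func remap(source map[string]any, outIndexes map[string]int, size int) []any {
        fields := make([]any, size)
        for name, value := range source {
            idx, ok := outIndexes[name]
            if !ok {
                continue // declared in the base query but not in the SDL
            }
            fields[idx] = value
        }
        return fields
    }

    func main() {
        source := map[string]any{"fullName": "Alice", "queryOnly": 42}
        outIndexes := map[string]int{"fullName": 0}
        fmt.Println(remap(source, outIndexes, 1)) // [Alice]
    }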
diff --git a/playground/package-lock.json b/playground/package-lock.json index a38ae85877..e01bcfbc4d 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -8,24 +8,24 @@ "name": "playground", "version": "0.0.0", "dependencies": { - "graphiql": "^3.1.0", + "graphiql": "^3.1.1", "graphql": "^16.8.1", "react": "^18.2.0", "react-dom": "^18.2.0", - "swagger-ui-react": "^5.11.0" + "swagger-ui-react": "^5.11.9" }, "devDependencies": { - "@types/react": "^18.2.48", + "@types/react": "^18.2.61", "@types/react-dom": "^18.2.18", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^6.18.1", - "@typescript-eslint/parser": "^6.18.1", - "@vitejs/plugin-react-swc": "^3.5.0", - "eslint": "^8.56.0", + "@typescript-eslint/eslint-plugin": "^7.1.0", + "@typescript-eslint/parser": "^7.1.0", + "@vitejs/plugin-react-swc": "^3.6.0", + "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.5", "typescript": "^5.3.3", - "vite": "^5.0.11" + "vite": "^5.1.4" } }, "node_modules/@aashutoshrathi/word-wrap": { @@ -38,9 +38,9 @@ } }, "node_modules/@babel/runtime": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.15.tgz", - "integrity": "sha512-T0O+aa+4w0u06iNmapipJXMV4HoUir03hpx3/YqXXhu9xim3w+dVphjFWl1OH8NbZHw5Lbm9k45drDkgq2VNNA==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.0.tgz", + "integrity": "sha512-Chk32uHMg6TnQdvw2e9IlqPpFX/6NLuK0Ys2PqLb7/gL5uFn9mXvK715FGLlOLQrcO4qIkNHkvPGktzzXexsFw==", "dependencies": { "regenerator-runtime": "^0.14.0" }, @@ -49,9 +49,9 @@ } }, "node_modules/@babel/runtime-corejs3": { - "version": "7.23.8", - "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.23.8.tgz", - "integrity": "sha512-2ZzmcDugdm0/YQKFVYsXiwUN7USPX8PM7cytpb4PFl87fM+qYPSvTZX//8tyeJB1j0YDmafBJEbl5f8NfLyuKw==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.24.0.tgz", + "integrity": "sha512-HxiRMOncx3ly6f3fcZ1GVKf+/EROcI9qwPgmij8Czqy6Okm/0T37T4y2ZIlLUuEUFjtM7NRsfdCO8Y3tAiJZew==", "dependencies": { "core-js-pure": "^3.30.2", "regenerator-runtime": "^0.14.0" @@ -80,18 +80,18 @@ } }, "node_modules/@codemirror/state": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.3.1.tgz", - "integrity": "sha512-88e4HhMtKJyw6fKprGaN/yZfiaoGYOi2nM45YCUC6R/kex9sxFWBDGatS1vk4lMgnWmdIIB9tk8Gj1LmL8YfvA==", + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.4.1.tgz", + "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A==", "peer": true }, "node_modules/@codemirror/view": { - "version": "6.22.0", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.22.0.tgz", - "integrity": "sha512-6zLj4YIoIpfTGKrDMTbeZRpa8ih4EymMCKmddEDcJWrCdp/N1D46B38YEz4creTb4T177AVS9EyXkLeC/HL2jA==", + "version": "6.25.1", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.25.1.tgz", + "integrity": "sha512-2LXLxsQnHDdfGzDvjzAwZh2ZviNJm7im6tGpa0IONIDnFd8RZ80D2SNi8PDi6YjKcMoMRK20v6OmKIdsrwsyoQ==", "peer": true, "dependencies": { - "@codemirror/state": "^6.1.4", + "@codemirror/state": "^6.4.0", "style-mod": "^4.1.0", "w3c-keyname": "^2.2.4" } @@ -111,10 +111,26 @@ "integrity": "sha512-Ja/Vfqe3HpuzRsG1oBtWTHk2PGZ7GR+2Vz5iYGelAw8dx32K0y7PjVuxK6z1nMpZOqAFsRUPCkK1YjJ56qJlgw==", "optional": true }, + "node_modules/@esbuild/aix-ppc64": 
{ + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz", + "integrity": "sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, "node_modules/@esbuild/android-arm": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.19.8.tgz", - "integrity": "sha512-31E2lxlGM1KEfivQl8Yf5aYU/mflz9g06H6S15ITUFQueMFtFjESRMoDSkvMo8thYvLBax+VKTPlpnx+sPicOA==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.19.12.tgz", + "integrity": "sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w==", "cpu": [ "arm" ], @@ -128,9 +144,9 @@ } }, "node_modules/@esbuild/android-arm64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.19.8.tgz", - "integrity": "sha512-B8JbS61bEunhfx8kasogFENgQfr/dIp+ggYXwTqdbMAgGDhRa3AaPpQMuQU0rNxDLECj6FhDzk1cF9WHMVwrtA==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz", + "integrity": "sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA==", "cpu": [ "arm64" ], @@ -144,9 +160,9 @@ } }, "node_modules/@esbuild/android-x64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.19.8.tgz", - "integrity": "sha512-rdqqYfRIn4jWOp+lzQttYMa2Xar3OK9Yt2fhOhzFXqg0rVWEfSclJvZq5fZslnz6ypHvVf3CT7qyf0A5pM682A==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.19.12.tgz", + "integrity": "sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew==", "cpu": [ "x64" ], @@ -160,9 +176,9 @@ } }, "node_modules/@esbuild/darwin-arm64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.8.tgz", - "integrity": "sha512-RQw9DemMbIq35Bprbboyf8SmOr4UXsRVxJ97LgB55VKKeJOOdvsIPy0nFyF2l8U+h4PtBx/1kRf0BelOYCiQcw==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz", + "integrity": "sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==", "cpu": [ "arm64" ], @@ -176,9 +192,9 @@ } }, "node_modules/@esbuild/darwin-x64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.19.8.tgz", - "integrity": "sha512-3sur80OT9YdeZwIVgERAysAbwncom7b4bCI2XKLjMfPymTud7e/oY4y+ci1XVp5TfQp/bppn7xLw1n/oSQY3/Q==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz", + "integrity": "sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A==", "cpu": [ "x64" ], @@ -192,9 +208,9 @@ } }, "node_modules/@esbuild/freebsd-arm64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.8.tgz", - "integrity": "sha512-WAnPJSDattvS/XtPCTj1tPoTxERjcTpH6HsMr6ujTT+X6rylVe8ggxk8pVxzf5U1wh5sPODpawNicF5ta/9Tmw==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz", + "integrity": 
"sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA==", "cpu": [ "arm64" ], @@ -208,9 +224,9 @@ } }, "node_modules/@esbuild/freebsd-x64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.19.8.tgz", - "integrity": "sha512-ICvZyOplIjmmhjd6mxi+zxSdpPTKFfyPPQMQTK/w+8eNK6WV01AjIztJALDtwNNfFhfZLux0tZLC+U9nSyA5Zg==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz", + "integrity": "sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg==", "cpu": [ "x64" ], @@ -224,9 +240,9 @@ } }, "node_modules/@esbuild/linux-arm": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.19.8.tgz", - "integrity": "sha512-H4vmI5PYqSvosPaTJuEppU9oz1dq2A7Mr2vyg5TF9Ga+3+MGgBdGzcyBP7qK9MrwFQZlvNyJrvz6GuCaj3OukQ==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz", + "integrity": "sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w==", "cpu": [ "arm" ], @@ -240,9 +256,9 @@ } }, "node_modules/@esbuild/linux-arm64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.19.8.tgz", - "integrity": "sha512-z1zMZivxDLHWnyGOctT9JP70h0beY54xDDDJt4VpTX+iwA77IFsE1vCXWmprajJGa+ZYSqkSbRQ4eyLCpCmiCQ==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz", + "integrity": "sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA==", "cpu": [ "arm64" ], @@ -256,9 +272,9 @@ } }, "node_modules/@esbuild/linux-ia32": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.19.8.tgz", - "integrity": "sha512-1a8suQiFJmZz1khm/rDglOc8lavtzEMRo0v6WhPgxkrjcU0LkHj+TwBrALwoz/OtMExvsqbbMI0ChyelKabSvQ==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz", + "integrity": "sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA==", "cpu": [ "ia32" ], @@ -272,9 +288,9 @@ } }, "node_modules/@esbuild/linux-loong64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.19.8.tgz", - "integrity": "sha512-fHZWS2JJxnXt1uYJsDv9+b60WCc2RlvVAy1F76qOLtXRO+H4mjt3Tr6MJ5l7Q78X8KgCFudnTuiQRBhULUyBKQ==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz", + "integrity": "sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA==", "cpu": [ "loong64" ], @@ -288,9 +304,9 @@ } }, "node_modules/@esbuild/linux-mips64el": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.19.8.tgz", - "integrity": "sha512-Wy/z0EL5qZYLX66dVnEg9riiwls5IYnziwuju2oUiuxVc+/edvqXa04qNtbrs0Ukatg5HEzqT94Zs7J207dN5Q==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz", + "integrity": "sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w==", "cpu": [ "mips64el" ], @@ -304,9 +320,9 @@ } }, "node_modules/@esbuild/linux-ppc64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.19.8.tgz", - 
"integrity": "sha512-ETaW6245wK23YIEufhMQ3HSeHO7NgsLx8gygBVldRHKhOlD1oNeNy/P67mIh1zPn2Hr2HLieQrt6tWrVwuqrxg==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz", + "integrity": "sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg==", "cpu": [ "ppc64" ], @@ -320,9 +336,9 @@ } }, "node_modules/@esbuild/linux-riscv64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.19.8.tgz", - "integrity": "sha512-T2DRQk55SgoleTP+DtPlMrxi/5r9AeFgkhkZ/B0ap99zmxtxdOixOMI570VjdRCs9pE4Wdkz7JYrsPvsl7eESg==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz", + "integrity": "sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg==", "cpu": [ "riscv64" ], @@ -336,9 +352,9 @@ } }, "node_modules/@esbuild/linux-s390x": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.19.8.tgz", - "integrity": "sha512-NPxbdmmo3Bk7mbNeHmcCd7R7fptJaczPYBaELk6NcXxy7HLNyWwCyDJ/Xx+/YcNH7Im5dHdx9gZ5xIwyliQCbg==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz", + "integrity": "sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg==", "cpu": [ "s390x" ], @@ -352,9 +368,9 @@ } }, "node_modules/@esbuild/linux-x64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.19.8.tgz", - "integrity": "sha512-lytMAVOM3b1gPypL2TRmZ5rnXl7+6IIk8uB3eLsV1JwcizuolblXRrc5ShPrO9ls/b+RTp+E6gbsuLWHWi2zGg==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz", + "integrity": "sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg==", "cpu": [ "x64" ], @@ -368,9 +384,9 @@ } }, "node_modules/@esbuild/netbsd-x64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.19.8.tgz", - "integrity": "sha512-hvWVo2VsXz/8NVt1UhLzxwAfo5sioj92uo0bCfLibB0xlOmimU/DeAEsQILlBQvkhrGjamP0/el5HU76HAitGw==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz", + "integrity": "sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA==", "cpu": [ "x64" ], @@ -384,9 +400,9 @@ } }, "node_modules/@esbuild/openbsd-x64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.19.8.tgz", - "integrity": "sha512-/7Y7u77rdvmGTxR83PgaSvSBJCC2L3Kb1M/+dmSIvRvQPXXCuC97QAwMugBNG0yGcbEGfFBH7ojPzAOxfGNkwQ==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz", + "integrity": "sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw==", "cpu": [ "x64" ], @@ -400,9 +416,9 @@ } }, "node_modules/@esbuild/sunos-x64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.19.8.tgz", - "integrity": "sha512-9Lc4s7Oi98GqFA4HzA/W2JHIYfnXbUYgekUP/Sm4BG9sfLjyv6GKKHKKVs83SMicBF2JwAX6A1PuOLMqpD001w==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz", + "integrity": 
"sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA==", "cpu": [ "x64" ], @@ -416,9 +432,9 @@ } }, "node_modules/@esbuild/win32-arm64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.19.8.tgz", - "integrity": "sha512-rq6WzBGjSzihI9deW3fC2Gqiak68+b7qo5/3kmB6Gvbh/NYPA0sJhrnp7wgV4bNwjqM+R2AApXGxMO7ZoGhIJg==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz", + "integrity": "sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A==", "cpu": [ "arm64" ], @@ -432,9 +448,9 @@ } }, "node_modules/@esbuild/win32-ia32": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.19.8.tgz", - "integrity": "sha512-AIAbverbg5jMvJznYiGhrd3sumfwWs8572mIJL5NQjJa06P8KfCPWZQ0NwZbPQnbQi9OWSZhFVSUWjjIrn4hSw==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz", + "integrity": "sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ==", "cpu": [ "ia32" ], @@ -448,9 +464,9 @@ } }, "node_modules/@esbuild/win32-x64": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.19.8.tgz", - "integrity": "sha512-bfZ0cQ1uZs2PqpulNL5j/3w+GDhP36k1K5c38QdQg+Swy51jFZWWeIkteNsufkQxp986wnqRRsb/bHbY1WQ7TA==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz", + "integrity": "sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA==", "cpu": [ "x64" ], @@ -479,9 +495,9 @@ } }, "node_modules/@eslint-community/regexpp": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.6.2.tgz", - "integrity": "sha512-pPTNuaAG3QMH+buKyBIGJs3g/S5y0caxw0ygM3YyE6yJFySwiGGSzA+mM3KJ8QQvzeLh3blwgSonkFjgQdxzMw==", + "version": "4.10.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.0.tgz", + "integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==", "dev": true, "engines": { "node": "^12.0.0 || ^14.0.0 || >=16.0.0" @@ -510,46 +526,60 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/@eslint/js": { - "version": "8.56.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.56.0.tgz", - "integrity": "sha512-gMsVel9D7f2HLkBma9VbtzZRehRogVRfbr++f06nL2vnCGCNlzOD+/MUov/F4p8myyAHspEhVobgjpX64q5m6A==", + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "*" } }, - "node_modules/@fastify/busboy": { - "version": "2.1.0", - "resolved": 
"https://registry.npmjs.org/@fastify/busboy/-/busboy-2.1.0.tgz", - "integrity": "sha512-+KpH+QxZU7O4675t3mnkQKcZZg56u+K/Ct2K+N2AZYNVK8kyeo/bI18tI8aPm3tvNNRyTWfj6s5tnGNlcbQRsA==", + "node_modules/@eslint/js": { + "version": "8.57.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.0.tgz", + "integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==", + "dev": true, "engines": { - "node": ">=14" + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, "node_modules/@floating-ui/core": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.5.0.tgz", - "integrity": "sha512-kK1h4m36DQ0UHGj5Ah4db7R0rHemTqqO0QLvUqi1/mUUp3LuAWbWxdxSIf/XsnH9VS6rRVPLJCncjRzUvyCLXg==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.0.tgz", + "integrity": "sha512-PcF++MykgmTj3CIyOQbKA/hDzOAiqI3mhuoN44WRCopIs1sgoDoU4oty4Jtqaj/y3oDU6fnVSm4QG0a3t5i0+g==", "dependencies": { - "@floating-ui/utils": "^0.1.3" + "@floating-ui/utils": "^0.2.1" } }, "node_modules/@floating-ui/dom": { - "version": "1.5.3", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.5.3.tgz", - "integrity": "sha512-ClAbQnEqJAKCJOEbbLo5IUlZHkNszqhuxS4fHAVxRPXPya6Ysf2G8KypnYcOTpx6I8xcgF9bbHb6g/2KpbV8qA==", + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.3.tgz", + "integrity": "sha512-RnDthu3mzPlQ31Ss/BTwQ1zjzIhr3lk1gZB1OC56h/1vEtaXkESrOqL5fQVMfXpwGtRwX+YsZBdyHtJMQnkArw==", "dependencies": { - "@floating-ui/core": "^1.4.2", - "@floating-ui/utils": "^0.1.3" + "@floating-ui/core": "^1.0.0", + "@floating-ui/utils": "^0.2.0" } }, "node_modules/@floating-ui/react-dom": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.2.tgz", - "integrity": "sha512-5qhlDvjaLmAst/rKb3VdlCinwTF4EYMiVxuuc/HVUjs46W0zgtbMmAZ1UTsDrRTxRmUEzl92mOtWbeeXL26lSQ==", + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.8.tgz", + "integrity": "sha512-HOdqOt3R3OGeTKidaLvJKcgg75S6tibQ3Tif4eyd91QnIJWr0NLvoXFpJA/j8HqkFSL68GDca9AuyWEHlhyClw==", "dependencies": { - "@floating-ui/dom": "^1.5.1" + "@floating-ui/dom": "^1.6.1" }, "peerDependencies": { "react": ">=16.8.0", @@ -557,14 +587,14 @@ } }, "node_modules/@floating-ui/utils": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.1.6.tgz", - "integrity": "sha512-OfX7E2oUDYxtBvsuS4e/jSn4Q9Qb6DzgeYtsAdkPZ47znpoNsMgZw0+tVijiv3uGNR6dgNlty6r9rzIzHjtd/A==" + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.1.tgz", + "integrity": "sha512-9TANp6GPoMtYzQdt54kfAyMmz1+osLlXdg2ENroU7zzrtflTLrrC/lgrIfaSe+Wu0b89GKccT7vxXA0MoAIO+Q==" }, "node_modules/@graphiql/react": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.20.2.tgz", - "integrity": "sha512-/crAUlM+4iVHyNHVdiZjsTEqfMXBHfjEvrMwCwTVig6YXmCAVuaxqkD7NlDtrrPQArLGkABmf1Nw7ObRpby5lg==", + "version": "0.20.3", + "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.20.3.tgz", + "integrity": "sha512-LHEiWQPABflTyRJZBZB50WSlrWER4RtlWg9XV1+D4yZQ3+6GbLM7X1zYf4D/TQ6AJB/vLZQHEnbhS0LuKcNqfA==", "dependencies": { "@graphiql/toolkit": "^0.9.1", "@headlessui/react": "^1.7.15", @@ -607,10 +637,11 @@ } }, "node_modules/@headlessui/react": { - "version": "1.7.17", - "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.17.tgz", - "integrity": 
"sha512-4am+tzvkqDSSgiwrsEpGWqgGo9dz8qU5M3znCkC4PgkpY4HcCZzEDEvozltGGGHIKl9jbXbZPSH5TWn4sWJdow==", + "version": "1.7.18", + "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.18.tgz", + "integrity": "sha512-4i5DOrzwN4qSgNsL4Si61VMkUcWbcSKueUV7sFhpHzQcSShdlHENE5+QBntMSRvHt8NyoFO2AGG8si9lq+w4zQ==", "dependencies": { + "@tanstack/react-virtual": "^3.0.0-beta.60", "client-only": "^0.0.1" }, "engines": { @@ -622,19 +653,41 @@ } }, "node_modules/@humanwhocodes/config-array": { - "version": "0.11.13", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.13.tgz", - "integrity": "sha512-JSBDMiDKSzQVngfRjOdFXgFfklaXI4K9nLF49Auh21lmBWRLIK3+xTErTWD4KU54pb6coM6ESE7Awz/FNU3zgQ==", + "version": "0.11.14", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", "dev": true, "dependencies": { - "@humanwhocodes/object-schema": "^2.0.1", - "debug": "^4.1.1", + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", "minimatch": "^3.0.5" }, "engines": { "node": ">=10.10.0" } }, + "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/@humanwhocodes/module-importer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", @@ -649,43 +702,43 @@ } }, "node_modules/@humanwhocodes/object-schema": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.1.tgz", - "integrity": "sha512-dvuCeX5fC9dXgJn9t+X5atfmgQAzUOWqS1254Gh0m6i8wKd10ebXkfNKiRK+1GWi/yTvvLDHpoxLr0xxxeslWw==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.2.tgz", + "integrity": "sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw==", "dev": true }, "node_modules/@lezer/common": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.1.0.tgz", - "integrity": "sha512-XPIN3cYDXsoJI/oDWoR2tD++juVrhgIago9xyKhZ7IhGlzdDM9QgC8D8saKNCz5pindGcznFr2HBSsEQSWnSjw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.1.tgz", + "integrity": "sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ==", "peer": true }, "node_modules/@lezer/highlight": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.6.tgz", - "integrity": "sha512-cmSJYa2us+r3SePpRCjN5ymCqCPv+zyXmDl0ciWtVaNiORT/MxM7ZgOMQZADD0o51qOaOg24qc/zBViOIwAjJg==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.0.tgz", + 
"integrity": "sha512-WrS5Mw51sGrpqjlh3d4/fOwpEV2Hd3YOkp9DBt4k8XZQcoTHZFB7sx030A6OcahF4J1nDQAa3jXlTVVYH50IFA==", "peer": true, "dependencies": { "@lezer/common": "^1.0.0" } }, "node_modules/@lezer/lr": { - "version": "1.3.14", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.14.tgz", - "integrity": "sha512-z5mY4LStlA3yL7aHT/rqgG614cfcvklS+8oFRFBYrs4YaWLJyKKM4+nN6KopToX0o9Hj6zmH6M5kinOYuy06ug==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.0.tgz", + "integrity": "sha512-Wst46p51km8gH0ZUmeNrtpRYmdlRHUpN1DQd3GFAyKANi8WVz8c2jHYTf1CVScFaCjQw1iO3ZZdqGDxQPRErTg==", "peer": true, "dependencies": { "@lezer/common": "^1.0.0" } }, "node_modules/@motionone/animation": { - "version": "10.16.3", - "resolved": "https://registry.npmjs.org/@motionone/animation/-/animation-10.16.3.tgz", - "integrity": "sha512-QUGWpLbMFLhyqKlngjZhjtxM8IqiJQjLK0DF+XOF6od9nhSvlaeEpOY/UMCRVcZn/9Tr2rZO22EkuCIjYdI74g==", + "version": "10.17.0", + "resolved": "https://registry.npmjs.org/@motionone/animation/-/animation-10.17.0.tgz", + "integrity": "sha512-ANfIN9+iq1kGgsZxs+Nz96uiNcPLGTXwfNo2Xz/fcJXniPYpaz/Uyrfa+7I5BPLxCP82sh7quVDudf1GABqHbg==", "dependencies": { - "@motionone/easing": "^10.16.3", - "@motionone/types": "^10.16.3", - "@motionone/utils": "^10.16.3", + "@motionone/easing": "^10.17.0", + "@motionone/types": "^10.17.0", + "@motionone/utils": "^10.17.0", "tslib": "^2.3.1" } }, @@ -703,35 +756,35 @@ } }, "node_modules/@motionone/easing": { - "version": "10.16.3", - "resolved": "https://registry.npmjs.org/@motionone/easing/-/easing-10.16.3.tgz", - "integrity": "sha512-HWTMZbTmZojzwEuKT/xCdvoMPXjYSyQvuVM6jmM0yoGU6BWzsmYMeB4bn38UFf618fJCNtP9XeC/zxtKWfbr0w==", + "version": "10.17.0", + "resolved": "https://registry.npmjs.org/@motionone/easing/-/easing-10.17.0.tgz", + "integrity": "sha512-Bxe2wSuLu/qxqW4rBFS5m9tMLOw+QBh8v5A7Z5k4Ul4sTj5jAOfZG5R0bn5ywmk+Fs92Ij1feZ5pmC4TeXA8Tg==", "dependencies": { - "@motionone/utils": "^10.16.3", + "@motionone/utils": "^10.17.0", "tslib": "^2.3.1" } }, "node_modules/@motionone/generators": { - "version": "10.16.4", - "resolved": "https://registry.npmjs.org/@motionone/generators/-/generators-10.16.4.tgz", - "integrity": "sha512-geFZ3w0Rm0ZXXpctWsSf3REGywmLLujEjxPYpBR0j+ymYwof0xbV6S5kGqqsDKgyWKVWpUInqQYvQfL6fRbXeg==", + "version": "10.17.0", + "resolved": "https://registry.npmjs.org/@motionone/generators/-/generators-10.17.0.tgz", + "integrity": "sha512-T6Uo5bDHrZWhIfxG/2Aut7qyWQyJIWehk6OB4qNvr/jwA/SRmixwbd7SOrxZi1z5rH3LIeFFBKK1xHnSbGPZSQ==", "dependencies": { - "@motionone/types": "^10.16.3", - "@motionone/utils": "^10.16.3", + "@motionone/types": "^10.17.0", + "@motionone/utils": "^10.17.0", "tslib": "^2.3.1" } }, "node_modules/@motionone/types": { - "version": "10.16.3", - "resolved": "https://registry.npmjs.org/@motionone/types/-/types-10.16.3.tgz", - "integrity": "sha512-W4jkEGFifDq73DlaZs3HUfamV2t1wM35zN/zX7Q79LfZ2sc6C0R1baUHZmqc/K5F3vSw3PavgQ6HyHLd/MXcWg==" + "version": "10.17.0", + "resolved": "https://registry.npmjs.org/@motionone/types/-/types-10.17.0.tgz", + "integrity": "sha512-EgeeqOZVdRUTEHq95Z3t8Rsirc7chN5xFAPMYFobx8TPubkEfRSm5xihmMUkbaR2ErKJTUw3347QDPTHIW12IA==" }, "node_modules/@motionone/utils": { - "version": "10.16.3", - "resolved": "https://registry.npmjs.org/@motionone/utils/-/utils-10.16.3.tgz", - "integrity": "sha512-WNWDksJIxQkaI9p9Z9z0+K27xdqISGNFy1SsWVGaiedTHq0iaT6iZujby8fT/ZnZxj1EOaxJtSfUPCFNU5CRoA==", + "version": "10.17.0", + "resolved": "https://registry.npmjs.org/@motionone/utils/-/utils-10.17.0.tgz", + "integrity": 
"sha512-bGwrki4896apMWIj9yp5rAS2m0xyhxblg6gTB/leWDPt+pb410W8lYWsxyurX+DH+gO1zsQsfx2su/c1/LtTpg==", "dependencies": { - "@motionone/types": "^10.16.3", + "@motionone/types": "^10.17.0", "hey-listen": "^1.0.8", "tslib": "^2.3.1" } @@ -1402,9 +1455,9 @@ } }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.6.0.tgz", - "integrity": "sha512-keHkkWAe7OtdALGoutLY3utvthkGF+Y17ws9LYT8pxMBYXaCoH/8dXS2uzo6e8+sEhY7y/zi5RFo22Dy2lFpDw==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.12.1.tgz", + "integrity": "sha512-iU2Sya8hNn1LhsYyf0N+L4Gf9Qc+9eBTJJJsaOGUp+7x4n2M9dxTt8UvhJl3oeftSjblSlpCfvjA/IfP3g5VjQ==", "cpu": [ "arm" ], @@ -1415,9 +1468,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.6.0.tgz", - "integrity": "sha512-y3Kt+34smKQNWilicPbBz/MXEY7QwDzMFNgwEWeYiOhUt9MTWKjHqe3EVkXwT2fR7izOvHpDWZ0o2IyD9SWX7A==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.12.1.tgz", + "integrity": "sha512-wlzcWiH2Ir7rdMELxFE5vuM7D6TsOcJ2Yw0c3vaBR3VOsJFVTx9xvwnAvhgU5Ii8Gd6+I11qNHwndDscIm0HXg==", "cpu": [ "arm64" ], @@ -1428,9 +1481,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.6.0.tgz", - "integrity": "sha512-oLzzxcUIHltHxOCmaXl+pkIlU+uhSxef5HfntW7RsLh1eHm+vJzjD9Oo4oUKso4YuP4PpbFJNlZjJuOrxo8dPg==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.12.1.tgz", + "integrity": "sha512-YRXa1+aZIFN5BaImK+84B3uNK8C6+ynKLPgvn29X9s0LTVCByp54TB7tdSMHDR7GTV39bz1lOmlLDuedgTwwHg==", "cpu": [ "arm64" ], @@ -1441,9 +1494,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.6.0.tgz", - "integrity": "sha512-+ANnmjkcOBaV25n0+M0Bere3roeVAnwlKW65qagtuAfIxXF9YxUneRyAn/RDcIdRa7QrjRNJL3jR7T43ObGe8Q==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.12.1.tgz", + "integrity": "sha512-opjWJ4MevxeA8FhlngQWPBOvVWYNPFkq6/25rGgG+KOy0r8clYwL1CFd+PGwRqqMFVQ4/Qd3sQu5t7ucP7C/Uw==", "cpu": [ "x64" ], @@ -1454,9 +1507,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.6.0.tgz", - "integrity": "sha512-tBTSIkjSVUyrekddpkAqKOosnj1Fc0ZY0rJL2bIEWPKqlEQk0paORL9pUIlt7lcGJi3LzMIlUGXvtNi1Z6MOCQ==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.12.1.tgz", + "integrity": "sha512-uBkwaI+gBUlIe+EfbNnY5xNyXuhZbDSx2nzzW8tRMjUmpScd6lCQYKY2V9BATHtv5Ef2OBq6SChEP8h+/cxifQ==", "cpu": [ "arm" ], @@ -1467,9 +1520,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.6.0.tgz", - "integrity": "sha512-Ed8uJI3kM11de9S0j67wAV07JUNhbAqIrDYhQBrQW42jGopgheyk/cdcshgGO4fW5Wjq97COCY/BHogdGvKVNQ==", + "version": "4.12.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.12.1.tgz", + "integrity": "sha512-0bK9aG1kIg0Su7OcFTlexkVeNZ5IzEsnz1ept87a0TUgZ6HplSgkJAnFpEVRW7GRcikT4GlPV0pbtVedOaXHQQ==", "cpu": [ "arm64" ], @@ -1480,9 +1533,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.6.0.tgz", - "integrity": "sha512-mZoNQ/qK4D7SSY8v6kEsAAyDgznzLLuSFCA3aBHZTmf3HP/dW4tNLTtWh9+LfyO0Z1aUn+ecpT7IQ3WtIg3ViQ==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.12.1.tgz", + "integrity": "sha512-qB6AFRXuP8bdkBI4D7UPUbE7OQf7u5OL+R94JE42Z2Qjmyj74FtDdLGeriRyBDhm4rQSvqAGCGC01b8Fu2LthQ==", "cpu": [ "arm64" ], @@ -1492,10 +1545,23 @@ "linux" ] }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.12.1.tgz", + "integrity": "sha512-sHig3LaGlpNgDj5o8uPEoGs98RII8HpNIqFtAI8/pYABO8i0nb1QzT0JDoXF/pxzqO+FkxvwkHZo9k0NJYDedg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.6.0.tgz", - "integrity": "sha512-rouezFHpwCqdEXsqAfNsTgSWO0FoZ5hKv5p+TGO5KFhyN/dvYXNMqMolOb8BkyKcPqjYRBeT+Z6V3aM26rPaYg==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.12.1.tgz", + "integrity": "sha512-nD3YcUv6jBJbBNFvSbp0IV66+ba/1teuBcu+fBBPZ33sidxitc6ErhON3JNavaH8HlswhWMC3s5rgZpM4MtPqQ==", "cpu": [ "x64" ], @@ -1506,9 +1572,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.6.0.tgz", - "integrity": "sha512-Bbm+fyn3S6u51urfj3YnqBXg5vI2jQPncRRELaucmhBVyZkbWClQ1fEsRmdnCPpQOQfkpg9gZArvtMVkOMsh1w==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.12.1.tgz", + "integrity": "sha512-7/XVZqgBby2qp/cO0TQ8uJK+9xnSdJ9ct6gSDdEr4MfABrjTyrW6Bau7HQ73a2a5tPB7hno49A0y1jhWGDN9OQ==", "cpu": [ "x64" ], @@ -1519,9 +1585,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.6.0.tgz", - "integrity": "sha512-+MRMcyx9L2kTrTUzYmR61+XVsliMG4odFb5UmqtiT8xOfEicfYAGEuF/D1Pww1+uZkYhBqAHpvju7VN+GnC3ng==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.12.1.tgz", + "integrity": "sha512-CYc64bnICG42UPL7TrhIwsJW4QcKkIt9gGlj21gq3VV0LL6XNb1yAdHVp1pIi9gkts9gGcT3OfUYHjGP7ETAiw==", "cpu": [ "arm64" ], @@ -1532,9 +1598,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.6.0.tgz", - "integrity": "sha512-rxfeE6K6s/Xl2HGeK6cO8SiQq3k/3BYpw7cfhW5Bk2euXNEpuzi2cc7llxx1si1QgwfjNtdRNTGqdBzGlFZGFw==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.12.1.tgz", + "integrity": 
"sha512-LN+vnlZ9g0qlHGlS920GR4zFCqAwbv2lULrR29yGaWP9u7wF5L7GqWu9Ah6/kFZPXPUkpdZwd//TNR+9XC9hvA==", "cpu": [ "ia32" ], @@ -1545,9 +1611,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.6.0.tgz", - "integrity": "sha512-QqmCsydHS172Y0Kc13bkMXvipbJSvzeglBncJG3LsYJSiPlxYACz7MmJBs4A8l1oU+jfhYEIC/+AUSlvjmiX/g==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.12.1.tgz", + "integrity": "sha512-n+vkrSyphvmU0qkQ6QBNXCGr2mKjhP08mPRM/Xp5Ck2FV4NrHU+y6axzDeixUrCBHVUS51TZhjqrKBBsHLKb2Q==", "cpu": [ "x64" ], @@ -1558,27 +1624,26 @@ ] }, "node_modules/@swagger-api/apidom-ast": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ast/-/apidom-ast-0.92.0.tgz", - "integrity": "sha512-j9vuKaYZP3mAGXUcKeWIkSToxPPCBLJcLEfjSEh14P0n6NRJp7Yg19SA+IwHdIvOAfJonuebj/lhPOMjzd6P1g==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ast/-/apidom-ast-0.97.0.tgz", + "integrity": "sha512-KpPyC8x5ZrB4l9+jgl8FAhokedh+8b5VuBTTdTJKFf+x5uznMiBf/MZTWgvsIk8/9MtjkQYUN1qgVzEPiKWvHg==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-error": "^0.92.0", + "@swagger-api/apidom-error": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.1.1", - "stampit": "^4.3.2", "unraw": "^3.0.0" } }, "node_modules/@swagger-api/apidom-core": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-core/-/apidom-core-0.92.0.tgz", - "integrity": "sha512-PK1zlS0UCcE5dIPtSy8/+oWfXAVf7b/iM3LRaPgaFGF5b8qa6S/zmROTh10Yjug9v9Vnuq8opEhyHkGyl+WdSA==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-core/-/apidom-core-0.97.0.tgz", + "integrity": "sha512-3LYlN0Cox0FBFNZqmgi7VyJ4MXppCmZoFjlurT+Y90ND1y2lCidcwjAthr3QpV8b+UCc7MG3APBGRfwqaYZ2IA==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.92.0", - "@swagger-api/apidom-error": "^0.92.0", + "@swagger-api/apidom-ast": "^0.97.0", + "@swagger-api/apidom-error": "^0.97.0", "@types/ramda": "~0.29.6", "minim": "~0.23.8", "ramda": "~0.29.1", @@ -1588,65 +1653,65 @@ } }, "node_modules/@swagger-api/apidom-error": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-error/-/apidom-error-0.92.0.tgz", - "integrity": "sha512-wo7xCvTpWr5Lpt/ly1L4bhZ6W7grgtAg7SK/d8FNZR85zPJXM4FPMpcRtKktfWJ/RikQJT/g5DjI33iTqB6z/w==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-error/-/apidom-error-0.97.0.tgz", + "integrity": "sha512-Y2YRnsJSXp+MdgwwMSCtidzJfy/bL6CZEpc+5aWUw1mphTjfLZC66uA4btUgUevyiT6mNHXm8tUmGomHA7Izdw==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7" } }, "node_modules/@swagger-api/apidom-json-pointer": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-json-pointer/-/apidom-json-pointer-0.92.0.tgz", - "integrity": "sha512-VmZ1EXE7BWX+ndeeh9t1uFRql5jbPRmAcglUfdtu3jlg6fOqXzzgx9qFpRz9GhpMHWEGFm1ymd8tMAa1CvgcHw==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-json-pointer/-/apidom-json-pointer-0.97.0.tgz", + "integrity": "sha512-9vcgePgcYXUiYEqnvx8Ew04j8JtfenosysbSuGgRs93Ls8mQ/+ndIOklHaXJzNjBZZxqxS0p6QLFcj1jpUiojQ==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-error": 
"^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-error": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-ns-api-design-systems": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-api-design-systems/-/apidom-ns-api-design-systems-0.92.0.tgz", - "integrity": "sha512-wXEXhw0wDQIPTUqff953h44oQZr29DcoAzZfROWlGtOLItGDDMjhfIYiRg1406mXA4N7d5d0vNi9V/HXkxItQw==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-api-design-systems/-/apidom-ns-api-design-systems-0.97.0.tgz", + "integrity": "sha512-uSTIEX4q9XWoP9TQq9nEtW5xG3hVQN2VD5spYoxvYlzUOtg12yxkVgu776eq0kVZd74acZhKIF7mn3uiqaQcHA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-error": "^0.92.0", - "@swagger-api/apidom-ns-openapi-3-1": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-error": "^0.97.0", + "@swagger-api/apidom-ns-openapi-3-1": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.1.1", - "stampit": "^4.3.2" + "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-asyncapi-2": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-asyncapi-2/-/apidom-ns-asyncapi-2-0.92.0.tgz", - "integrity": "sha512-FmJLT3GqzT4HK7Mwh54cXZ4PZt58yKVtJAKWKJ0dg2/Gim0AKJWf6t6B3Z9ZFUiKyehbqP4K7gSM7qGL0tKe2Q==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-asyncapi-2/-/apidom-ns-asyncapi-2-0.97.0.tgz", + "integrity": "sha512-buEQSrXdtjoAkqIWSZ448HlvnareupthIoObYELp25LVuQwhxxVSY3NR0aCIR37GHgSchrmPBVcsvPMtXV96BA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-json-schema-draft-7": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-json-schema-draft-7": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.1.1", - "stampit": "^4.3.2" + "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-json-schema-draft-4": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-4/-/apidom-ns-json-schema-draft-4-0.92.0.tgz", - "integrity": "sha512-7s2EKjCQwRXbK4Y4AGpVkyn1AANCxOUFSHebo1h2katyVeAopV0LJmbXH5yQedTltV0k3BIjnd7hS+7dI846Pw==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-4/-/apidom-ns-json-schema-draft-4-0.97.0.tgz", + "integrity": "sha512-eBMIPxX4huNDGle6TOfSe1kKS1/HvL6w66GWWLFxZW2doCQHMADgjo7j/kVowrXiJtEoMgjBVp3W30WkcwBVug==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.92.0", - "@swagger-api/apidom-core": "^0.92.0", + "@swagger-api/apidom-ast": "^0.97.0", + "@swagger-api/apidom-core": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.1.1", @@ -1654,15 +1719,15 @@ } }, "node_modules/@swagger-api/apidom-ns-json-schema-draft-6": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-6/-/apidom-ns-json-schema-draft-6-0.92.0.tgz", - "integrity": "sha512-zur80x04jesXVzlU9sLZhW4giO9RfOouI7L/H8v2wUlcBvjaPBn1tIqrURw2VEHKAcJORhTRusQCR21vnFot2g==", + "version": "0.97.0", + "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-6/-/apidom-ns-json-schema-draft-6-0.97.0.tgz", + "integrity": "sha512-tRbg3/b4aJGfcODc0HDngZDjBdhPAv8OZM1OZdsqI4EEIw3PI/wpd+b6b8a5udOjAdbUYqnYsq6gCylCDNBnzw==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-error": "^0.92.0", - "@swagger-api/apidom-ns-json-schema-draft-4": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-error": "^0.97.0", + "@swagger-api/apidom-ns-json-schema-draft-4": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.1.1", @@ -1670,15 +1735,15 @@ } }, "node_modules/@swagger-api/apidom-ns-json-schema-draft-7": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-7/-/apidom-ns-json-schema-draft-7-0.92.0.tgz", - "integrity": "sha512-DSY7lY98XHnc0wg0V38ZmBPs5HWuRuSb6G+n5Z+qs5RRodh1x5BrTIY6M0Yk3oJVbbEoFGmF0VlTe6vHf44pbw==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-7/-/apidom-ns-json-schema-draft-7-0.97.0.tgz", + "integrity": "sha512-0GITsoa6kVVkoKBUxyeODmh6vjGXuvDQZd3Vxs1nz0c/O6ZR+VBfBB3JW5wzhVr+WCXebaOJGDyWkxJMHKycxw==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-error": "^0.92.0", - "@swagger-api/apidom-ns-json-schema-draft-6": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-error": "^0.97.0", + "@swagger-api/apidom-ns-json-schema-draft-6": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.1.1", @@ -1686,274 +1751,274 @@ } }, "node_modules/@swagger-api/apidom-ns-openapi-2": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-2/-/apidom-ns-openapi-2-0.92.0.tgz", - "integrity": "sha512-OJlSTvPzK+zqzd2xXeWkF50z08Wlpygc98eVzZjYI0Af8mz7x6R5T9BCP5p6ZlQoO9OTvk4gfv7ViWXCdamObg==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-2/-/apidom-ns-openapi-2-0.97.0.tgz", + "integrity": "sha512-5gOA9FiO1J9OxJhcVBeXdm77kuh2cwPXG6Sh/DOlbk733Pz9v9W0aQgpLi5Ltsgagxe1sHhBqxJ1asw10QFzzw==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-error": "^0.92.0", - "@swagger-api/apidom-ns-json-schema-draft-4": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-error": "^0.97.0", + "@swagger-api/apidom-ns-json-schema-draft-4": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.1.1", - "stampit": "^4.3.2" + "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-openapi-3-0": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-0/-/apidom-ns-openapi-3-0-0.92.0.tgz", - "integrity": "sha512-VGha4RRnoeoAZBWLGy37YsBzwICM3ZFNyCk2Dwpaqfg9zFN+E6BL2CtIbkxvFkMdwaMURmDItiQsw28pF0tOgQ==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-0/-/apidom-ns-openapi-3-0-0.97.0.tgz", + "integrity": "sha512-fbnN87SF0WN/4DcSpceuo+NUtkAGeicMIucEMF+LIIiCAF27Xi5d6Q823i9DgOEfJtifHKVj6Zhl/zSKAD2eyw==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-error": "^0.92.0", - "@swagger-api/apidom-ns-json-schema-draft-4": "^0.92.0", + "@swagger-api/apidom-core": 
"^0.97.0", + "@swagger-api/apidom-error": "^0.97.0", + "@swagger-api/apidom-ns-json-schema-draft-4": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.1.1", - "stampit": "^4.3.2" + "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-openapi-3-1": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-1/-/apidom-ns-openapi-3-1-0.92.0.tgz", - "integrity": "sha512-xZD+JxifYhDoTjn76K2ZT3xNoXBQChaKfSkJr4l5Xh9Guuk0IcsPTUDRpuytuZZXVez0O401XFoUso/mZRTjkA==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-1/-/apidom-ns-openapi-3-1-0.97.0.tgz", + "integrity": "sha512-DyvkTim+t7iVKyze6N3tITsfyElthmOwOcxwOjKj/3lySEy61DuY4X2FaPD5+owftVDxMs4Q6F9Chm7qv91a+Q==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.92.0", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-openapi-3-0": "^0.92.0", + "@swagger-api/apidom-ast": "^0.97.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-openapi-3-0": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.1.1", - "stampit": "^4.3.2" + "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-workflows-1": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-workflows-1/-/apidom-ns-workflows-1-0.92.0.tgz", - "integrity": "sha512-gl1dF+SrRHK4lLiwaK4PMjL9A5z28cW9xiMWCxRyppX/I2bVTVVOfgdAyqLWsFA0gopmITWesJxohRumG35fTw==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-workflows-1/-/apidom-ns-workflows-1-0.97.0.tgz", + "integrity": "sha512-eIuoTRSITlUtMjpM3J0H9b2rVeEVu13i/Fv6+ZMPob0yHmQBWo9bnLjxxnfEZkpvp050worKULfNMdJV8NKBkA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-openapi-3-1": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-openapi-3-1": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.1.1", - "stampit": "^4.3.2" + "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-parser-adapter-api-design-systems-json": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-json/-/apidom-parser-adapter-api-design-systems-json-0.92.0.tgz", - "integrity": "sha512-i07FeLdNobWzHT9LnfsdOix+XrlZN/KnQL1RODPzxWk7i7ya2e4uc3JemyHh4Tnv04G8JV32SQqtzOtMteJsdA==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-json/-/apidom-parser-adapter-api-design-systems-json-0.97.0.tgz", + "integrity": "sha512-ZDzaiTHMEpz0kM0/iyHEjySTf0xoLKDJwJiSxKNuew141k0rakTVeVisxXeq+6JQi2eC6KuyS98DHMe7hEIVUw==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-api-design-systems": "^0.92.0", - "@swagger-api/apidom-parser-adapter-json": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-api-design-systems": "^0.97.0", + "@swagger-api/apidom-parser-adapter-json": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-api-design-systems-yaml": { - "version": "0.92.0", - "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-yaml/-/apidom-parser-adapter-api-design-systems-yaml-0.92.0.tgz", - "integrity": "sha512-bbjFkU0D4zqaZnd8/m1Kyx2UuHpri8ZxLdT1TiXqHweSfRQcNt4VYt0bjWBnnGGBMkHElgYbX5ov6kHvPf3wJg==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-yaml/-/apidom-parser-adapter-api-design-systems-yaml-0.97.0.tgz", + "integrity": "sha512-5/BziPWqrHLr91VR+EC4pXt/fNToWMmvG+d7RVjksHinrjps2E6HA+oZOhqKqA2LRCLNjGhNUptXzRMDjjtenw==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-api-design-systems": "^0.92.0", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-api-design-systems": "^0.97.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-asyncapi-json-2": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-json-2/-/apidom-parser-adapter-asyncapi-json-2-0.92.0.tgz", - "integrity": "sha512-Q7gudmGA5TUGbbr0QYNQkndktP91C0WE7uDDS2IwCBtHroRDiMPFCjzE9dsjIST5WnP+LUXmxG1Bv0NLTWcSUg==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-json-2/-/apidom-parser-adapter-asyncapi-json-2-0.97.0.tgz", + "integrity": "sha512-XLD/YZifnhezRQY5ADQQAje5G5qtZ4GAbXk//1sRNe3R/qCk1pDxmRYr27yzt8w1XhfM+9VQmCTI21ZFpNFQOA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-asyncapi-2": "^0.92.0", - "@swagger-api/apidom-parser-adapter-json": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-asyncapi-2": "^0.97.0", + "@swagger-api/apidom-parser-adapter-json": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2/-/apidom-parser-adapter-asyncapi-yaml-2-0.92.0.tgz", - "integrity": "sha512-V5/VdDj0aeOKp+3AtvPSz2b0HosJfYkHPjNvPU5eafLSzqzMIR/evYq5BvKWoJL1IvLdjoEPqDVVaEZluHZTew==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2/-/apidom-parser-adapter-asyncapi-yaml-2-0.97.0.tgz", + "integrity": "sha512-whyThDiGN4FoNirgY0XtXF7IJeU6NfsrBwjaxCkYBuSPslZBoWy4ojEQbfg+2HqNLbnHKJyvabh9/tSIxgB92A==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-asyncapi-2": "^0.92.0", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-asyncapi-2": "^0.97.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-json": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-json/-/apidom-parser-adapter-json-0.92.0.tgz", - "integrity": "sha512-KA1Nn6FN0zTA5JhRazwYN9voTDlmExID7Jwz6GXmY826OXqeT4Yl0Egyo1aLYrfT0S73vhC4LVqpdORWLGdZtg==", + "version": "0.97.0", + "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-json/-/apidom-parser-adapter-json-0.97.0.tgz", + "integrity": "sha512-MPhAX77Z9Csti+Kljtbrl/ez2H610R4fQg0RnkNW40f4e6TXeOogT5tmceeWP+IKGAKX45HA1JpVPxdtSJn3ww==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.92.0", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-error": "^0.92.0", + "@swagger-api/apidom-ast": "^0.97.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-error": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.1.1", "tree-sitter": "=0.20.4", - "tree-sitter-json": "=0.20.1", + "tree-sitter-json": "=0.20.2", "web-tree-sitter": "=0.20.3" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-2": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-2/-/apidom-parser-adapter-openapi-json-2-0.92.0.tgz", - "integrity": "sha512-8OlvjcvI/GuOFJJxN+Mc4tJSo9UWuJdzQtQOtO4k3QwWwS28hGvRTjQ5PpsXAVZoLJMAbDuRdREYD9qeIKvM2g==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-2/-/apidom-parser-adapter-openapi-json-2-0.97.0.tgz", + "integrity": "sha512-HtaoRN7wnVB2ilxs/RpLBR7+MwIfUqUcdCzC/EVV788CnSbutwj61W3jR2w9BRXeANJ4K2APcvU4W7WiI9Sugg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-openapi-2": "^0.92.0", - "@swagger-api/apidom-parser-adapter-json": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-openapi-2": "^0.97.0", + "@swagger-api/apidom-parser-adapter-json": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-3-0": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-0/-/apidom-parser-adapter-openapi-json-3-0-0.92.0.tgz", - "integrity": "sha512-kzE4COaNobKIUjGsdqqXgO/LruaQHs2kTzOzHPUTR1TH1ZlB2t8MTV+6LJzGNG3IB3QSfZDd7KBEYWklsCTyTA==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-0/-/apidom-parser-adapter-openapi-json-3-0-0.97.0.tgz", + "integrity": "sha512-psfxh7k671HukibaY53cems0fcsLQP8U5lQPzVDevEGJQoguAWHyV2C5kOr52XOJInmsN5E+COEn6oPzsIaDCg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-openapi-3-0": "^0.92.0", - "@swagger-api/apidom-parser-adapter-json": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-openapi-3-0": "^0.97.0", + "@swagger-api/apidom-parser-adapter-json": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-3-1": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-1/-/apidom-parser-adapter-openapi-json-3-1-0.92.0.tgz", - "integrity": "sha512-4gkIXfKGwEKZQ6+kxp4EdFBlAc7Kjq8GAgaC7ilGTSSxIaz5hBHBOJoe3cXWpQ/WlXiOyNCy7WdbuKRpUDKIdg==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-1/-/apidom-parser-adapter-openapi-json-3-1-0.97.0.tgz", + "integrity": "sha512-PJpcLhS441ATFjbCHHhVUPd8K1JZaiFQJS7yfQEKQmA5MlBRh3w7mqCJAbZN49wuMkelTdB8qJJlVEGUDSxX5Q==", "optional": 
true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-openapi-3-1": "^0.92.0", - "@swagger-api/apidom-parser-adapter-json": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-openapi-3-1": "^0.97.0", + "@swagger-api/apidom-parser-adapter-json": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-2": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-2/-/apidom-parser-adapter-openapi-yaml-2-0.92.0.tgz", - "integrity": "sha512-TIY9cytYhA3yUf+5PcwsH9UjzKy5V4nGUtK6n5RvcL4btaGQA2LUB5CiV/1nSvYLNjYjGxhtB3haZDbHe3/gyw==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-2/-/apidom-parser-adapter-openapi-yaml-2-0.97.0.tgz", + "integrity": "sha512-X5saN/AElpS+LohbSjNPesUPWYOM8Wb19+OD7/WS1r6AVRIlj5gKLy3vO7BLBvaER5G73qYylfrPxCoUPlpZZg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-openapi-2": "^0.92.0", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-openapi-2": "^0.97.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0/-/apidom-parser-adapter-openapi-yaml-3-0-0.92.0.tgz", - "integrity": "sha512-AUwtAxeautYtiwifNCmv6Kjs7ksptRFxcQ3sgLv2bP3f9t5jzcI9NhmgJNdbRfohHYaHMwTuUESrfsTdBgKlAA==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0/-/apidom-parser-adapter-openapi-yaml-3-0-0.97.0.tgz", + "integrity": "sha512-kBW6atIN0rONf9kjNeE5eHkxb3amfby0vxKfk+9fiRdQbJVCg4UiWOFmU5rD9bc2smtLWSQNkjlMkKS3i2/4Wg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-openapi-3-0": "^0.92.0", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-openapi-3-0": "^0.97.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1/-/apidom-parser-adapter-openapi-yaml-3-1-0.92.0.tgz", - "integrity": "sha512-gMR4zUZ/RrjVJVr6DnqwsCsnlplGXJk6O9UKbkoBsiom81dkcHx68BmWA2oM2lYVGKx+G8WVmVDo2EJaZvZYGg==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1/-/apidom-parser-adapter-openapi-yaml-3-1-0.97.0.tgz", + "integrity": "sha512-cclRwQ9IQj6sFLUCDzqRbbbplQfKdt9xz8YONvtq4XBHZO6Ab8z5CF3A9eLiuW1TJZ3y0QU7xmI6h5jWwUrC9w==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-openapi-3-1": "^0.92.0", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + 
"@swagger-api/apidom-ns-openapi-3-1": "^0.97.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-workflows-json-1": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-json-1/-/apidom-parser-adapter-workflows-json-1-0.92.0.tgz", - "integrity": "sha512-tyLiSxEKeU6mhClFjNxrTQJA2aSgfEF7LJ/ZcJgvREsvyk6ns3op9wN2SXw4UmD+657IgN0aUPihh92aEXKovA==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-json-1/-/apidom-parser-adapter-workflows-json-1-0.97.0.tgz", + "integrity": "sha512-UvnISzq5JDG43sTIJ2oE8u8qALHmBKbYMGncYgUdlHx7z5RgPAWxIRDWH40YFzUSuKSRNp4TI7eG/9MUd3RnGA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-workflows-1": "^0.92.0", - "@swagger-api/apidom-parser-adapter-json": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-workflows-1": "^0.97.0", + "@swagger-api/apidom-parser-adapter-json": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-workflows-yaml-1": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-yaml-1/-/apidom-parser-adapter-workflows-yaml-1-0.92.0.tgz", - "integrity": "sha512-0Nr+5oAocuw3SZXcO8WEqnU7GGWP7O6GrsFafD6KLBL05v3I0erPfmnWQjWh6jBeXv8r5W69WEQItzES0DBJjA==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-yaml-1/-/apidom-parser-adapter-workflows-yaml-1-0.97.0.tgz", + "integrity": "sha512-TTZS0YkFvy0X8Huom+fr3muZsCy8mtDpuUks45EvPqv6gjGLCBw3/AZ507CS0YxYvoERbXkYfAYqxW8lptwKuQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-ns-workflows-1": "^0.92.0", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-ns-workflows-1": "^0.97.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-yaml-1-2": { - "version": "0.92.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-yaml-1-2/-/apidom-parser-adapter-yaml-1-2-0.92.0.tgz", - "integrity": "sha512-cFLqlhehMuY5WRdU1780Vno6iWpjMlr7CfOOloZW1rKf2lvojn0c4eDsyfWFaB2DgE+Xd4CWl55McuaPZMngsw==", + "version": "0.97.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-yaml-1-2/-/apidom-parser-adapter-yaml-1-2-0.97.0.tgz", + "integrity": "sha512-3f1ADjQyKyLnuRhPuoHMgWMW28o0ylohWCQwX4q69CMH0kqGxP7HnqIU/i0I2cxZdjGv72OCdiKwaR/OgHcmEw==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.92.0", - "@swagger-api/apidom-core": "^0.92.0", - "@swagger-api/apidom-error": "^0.92.0", + "@swagger-api/apidom-ast": "^0.97.0", + "@swagger-api/apidom-core": "^0.97.0", + "@swagger-api/apidom-error": "^0.97.0", "@types/ramda": "~0.29.6", "ramda": "~0.29.1", "ramda-adjunct": "^4.1.1", @@ -1963,12 +2028,12 @@ } }, "node_modules/@swagger-api/apidom-reference": { - "version": "0.92.0", - "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-reference/-/apidom-reference-0.92.0.tgz", - "integrity": "sha512-G/qJBTpXCdwPsc5dqPjX+vAfhvtnhIFqnKtEZ71wnEvF7TpIxdeZKKfqpg+Zxi7MSuZD/Gpkr4J/eP0lO0fAdA==", + "version": "0.97.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-reference/-/apidom-reference-0.97.1.tgz", + "integrity": "sha512-Bs1U2VutmVpqbCxbCt4DTiL8v0s6osAJx+4v49BGrTcfFFh97K/EOAm48WgA8ViP7qHUNBhUF83rjbpEwOshFw==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.92.0", + "@swagger-api/apidom-core": "^0.97.0", "@types/ramda": "~0.29.6", "axios": "^1.4.0", "minimatch": "^7.4.3", @@ -1978,35 +2043,27 @@ "stampit": "^4.3.2" }, "optionalDependencies": { - "@swagger-api/apidom-error": "^0.92.0", - "@swagger-api/apidom-json-pointer": "^0.92.0", - "@swagger-api/apidom-ns-asyncapi-2": "^0.92.0", - "@swagger-api/apidom-ns-openapi-2": "^0.92.0", - "@swagger-api/apidom-ns-openapi-3-0": "^0.92.0", - "@swagger-api/apidom-ns-openapi-3-1": "^0.92.0", - "@swagger-api/apidom-ns-workflows-1": "^0.92.0", - "@swagger-api/apidom-parser-adapter-api-design-systems-json": "^0.92.0", - "@swagger-api/apidom-parser-adapter-api-design-systems-yaml": "^0.92.0", - "@swagger-api/apidom-parser-adapter-asyncapi-json-2": "^0.92.0", - "@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": "^0.92.0", - "@swagger-api/apidom-parser-adapter-json": "^0.92.0", - "@swagger-api/apidom-parser-adapter-openapi-json-2": "^0.92.0", - "@swagger-api/apidom-parser-adapter-openapi-json-3-0": "^0.92.0", - "@swagger-api/apidom-parser-adapter-openapi-json-3-1": "^0.92.0", - "@swagger-api/apidom-parser-adapter-openapi-yaml-2": "^0.92.0", - "@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": "^0.92.0", - "@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": "^0.92.0", - "@swagger-api/apidom-parser-adapter-workflows-json-1": "^0.92.0", - "@swagger-api/apidom-parser-adapter-workflows-yaml-1": "^0.92.0", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.92.0" - } - }, - "node_modules/@swagger-api/apidom-reference/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dependencies": { - "balanced-match": "^1.0.0" + "@swagger-api/apidom-error": "^0.97.0", + "@swagger-api/apidom-json-pointer": "^0.97.0", + "@swagger-api/apidom-ns-asyncapi-2": "^0.97.0", + "@swagger-api/apidom-ns-openapi-2": "^0.97.0", + "@swagger-api/apidom-ns-openapi-3-0": "^0.97.0", + "@swagger-api/apidom-ns-openapi-3-1": "^0.97.0", + "@swagger-api/apidom-ns-workflows-1": "^0.97.0", + "@swagger-api/apidom-parser-adapter-api-design-systems-json": "^0.97.0", + "@swagger-api/apidom-parser-adapter-api-design-systems-yaml": "^0.97.0", + "@swagger-api/apidom-parser-adapter-asyncapi-json-2": "^0.97.0", + "@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": "^0.97.0", + "@swagger-api/apidom-parser-adapter-json": "^0.97.0", + "@swagger-api/apidom-parser-adapter-openapi-json-2": "^0.97.0", + "@swagger-api/apidom-parser-adapter-openapi-json-3-0": "^0.97.0", + "@swagger-api/apidom-parser-adapter-openapi-json-3-1": "^0.97.0", + "@swagger-api/apidom-parser-adapter-openapi-yaml-2": "^0.97.0", + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": "^0.97.0", + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": "^0.97.0", + "@swagger-api/apidom-parser-adapter-workflows-json-1": "^0.97.0", + 
"@swagger-api/apidom-parser-adapter-workflows-yaml-1": "^0.97.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.97.0" } }, "node_modules/@swagger-api/apidom-reference/node_modules/minimatch": { @@ -2024,13 +2081,13 @@ } }, "node_modules/@swc/core": { - "version": "1.3.96", - "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.3.96.tgz", - "integrity": "sha512-zwE3TLgoZwJfQygdv2SdCK9mRLYluwDOM53I+dT6Z5ZvrgVENmY3txvWDvduzkV+/8IuvrRbVezMpxcojadRdQ==", + "version": "1.4.6", + "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.4.6.tgz", + "integrity": "sha512-A7iK9+1qzTCIuc3IYcS8gPHCm9bZVKUJrfNnwveZYyo6OFp3jLno4WOM2yBy5uqedgYATEiWgBYHKq37KrU6IA==", "dev": true, "hasInstallScript": true, "dependencies": { - "@swc/counter": "^0.1.1", + "@swc/counter": "^0.1.2", "@swc/types": "^0.1.5" }, "engines": { @@ -2041,16 +2098,16 @@ "url": "https://opencollective.com/swc" }, "optionalDependencies": { - "@swc/core-darwin-arm64": "1.3.96", - "@swc/core-darwin-x64": "1.3.96", - "@swc/core-linux-arm-gnueabihf": "1.3.96", - "@swc/core-linux-arm64-gnu": "1.3.96", - "@swc/core-linux-arm64-musl": "1.3.96", - "@swc/core-linux-x64-gnu": "1.3.96", - "@swc/core-linux-x64-musl": "1.3.96", - "@swc/core-win32-arm64-msvc": "1.3.96", - "@swc/core-win32-ia32-msvc": "1.3.96", - "@swc/core-win32-x64-msvc": "1.3.96" + "@swc/core-darwin-arm64": "1.4.6", + "@swc/core-darwin-x64": "1.4.6", + "@swc/core-linux-arm-gnueabihf": "1.4.6", + "@swc/core-linux-arm64-gnu": "1.4.6", + "@swc/core-linux-arm64-musl": "1.4.6", + "@swc/core-linux-x64-gnu": "1.4.6", + "@swc/core-linux-x64-musl": "1.4.6", + "@swc/core-win32-arm64-msvc": "1.4.6", + "@swc/core-win32-ia32-msvc": "1.4.6", + "@swc/core-win32-x64-msvc": "1.4.6" }, "peerDependencies": { "@swc/helpers": "^0.5.0" @@ -2062,9 +2119,9 @@ } }, "node_modules/@swc/core-darwin-arm64": { - "version": "1.3.96", - "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.3.96.tgz", - "integrity": "sha512-8hzgXYVd85hfPh6mJ9yrG26rhgzCmcLO0h1TIl8U31hwmTbfZLzRitFQ/kqMJNbIBCwmNH1RU2QcJnL3d7f69A==", + "version": "1.4.6", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.4.6.tgz", + "integrity": "sha512-bpggpx/BfLFyy48aUKq1PsNUxb7J6CINlpAUk0V4yXfmGnpZH80Gp1pM3GkFDQyCfq7L7IpjPrIjWQwCrL4hYw==", "cpu": [ "arm64" ], @@ -2078,9 +2135,9 @@ } }, "node_modules/@swc/core-darwin-x64": { - "version": "1.3.96", - "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.3.96.tgz", - "integrity": "sha512-mFp9GFfuPg+43vlAdQZl0WZpZSE8sEzqL7sr/7Reul5McUHP0BaLsEzwjvD035ESfkY8GBZdLpMinblIbFNljQ==", + "version": "1.4.6", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.4.6.tgz", + "integrity": "sha512-vJn+/ZuBTg+vtNkcmgZdH6FQpa0hFVdnB9bAeqYwKkyqP15zaPe6jfC+qL2y/cIeC7ASvHXEKrnCZgBLxfVQ9w==", "cpu": [ "x64" ], @@ -2094,9 +2151,9 @@ } }, "node_modules/@swc/core-linux-arm-gnueabihf": { - "version": "1.3.96", - "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.3.96.tgz", - "integrity": "sha512-8UEKkYJP4c8YzYIY/LlbSo8z5Obj4hqcv/fUTHiEePiGsOddgGf7AWjh56u7IoN/0uEmEro59nc1ChFXqXSGyg==", + "version": "1.4.6", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.4.6.tgz", + "integrity": "sha512-hEmYcB/9XBAl02MtuVHszhNjQpjBzhk/NFulnU33tBMbNZpy2TN5yTsitezMq090QXdDz8sKIALApDyg07ZR8g==", "cpu": [ "arm" ], @@ -2110,9 +2167,9 @@ } }, "node_modules/@swc/core-linux-arm64-gnu": { - "version": "1.3.96", - 
"resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.3.96.tgz", - "integrity": "sha512-c/IiJ0s1y3Ymm2BTpyC/xr6gOvoqAVETrivVXHq68xgNms95luSpbYQ28rqaZC8bQC8M5zdXpSc0T8DJu8RJGw==", + "version": "1.4.6", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.4.6.tgz", + "integrity": "sha512-/UCYIVoGpm2YVvGHZM2QOA3dexa28BjcpLAIYnoCbgH5f7ulDhE8FAIO/9pasj+kixDBsdqewHfsNXFYlgGJjQ==", "cpu": [ "arm64" ], @@ -2126,9 +2183,9 @@ } }, "node_modules/@swc/core-linux-arm64-musl": { - "version": "1.3.96", - "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.3.96.tgz", - "integrity": "sha512-i5/UTUwmJLri7zhtF6SAo/4QDQJDH2fhYJaBIUhrICmIkRO/ltURmpejqxsM/ye9Jqv5zG7VszMC0v/GYn/7BQ==", + "version": "1.4.6", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.4.6.tgz", + "integrity": "sha512-LGQsKJ8MA9zZ8xHCkbGkcPSmpkZL2O7drvwsGKynyCttHhpwVjj9lguhD4DWU3+FWIsjvho5Vu0Ggei8OYi/Lw==", "cpu": [ "arm64" ], @@ -2142,9 +2199,9 @@ } }, "node_modules/@swc/core-linux-x64-gnu": { - "version": "1.3.96", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.3.96.tgz", - "integrity": "sha512-USdaZu8lTIkm4Yf9cogct/j5eqtdZqTgcTib4I+NloUW0E/hySou3eSyp3V2UAA1qyuC72ld1otXuyKBna0YKQ==", + "version": "1.4.6", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.4.6.tgz", + "integrity": "sha512-10JL2nLIreMQDKvq2TECnQe5fCuoqBHu1yW8aChqgHUyg9d7gfZX/kppUsuimqcgRBnS0AjTDAA+JF6UsG/2Yg==", "cpu": [ "x64" ], @@ -2158,9 +2215,9 @@ } }, "node_modules/@swc/core-linux-x64-musl": { - "version": "1.3.96", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.3.96.tgz", - "integrity": "sha512-QYErutd+G2SNaCinUVobfL7jWWjGTI0QEoQ6hqTp7PxCJS/dmKmj3C5ZkvxRYcq7XcZt7ovrYCTwPTHzt6lZBg==", + "version": "1.4.6", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.4.6.tgz", + "integrity": "sha512-EGyjFVzVY6Do89x8sfah7I3cuP4MwtwzmA6OlfD/KASqfCFf5eIaEBMbajgR41bVfMV7lK72lwAIea5xEyq1AQ==", "cpu": [ "x64" ], @@ -2174,9 +2231,9 @@ } }, "node_modules/@swc/core-win32-arm64-msvc": { - "version": "1.3.96", - "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.3.96.tgz", - "integrity": "sha512-hjGvvAduA3Un2cZ9iNP4xvTXOO4jL3G9iakhFsgVhpkU73SGmK7+LN8ZVBEu4oq2SUcHO6caWvnZ881cxGuSpg==", + "version": "1.4.6", + "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.4.6.tgz", + "integrity": "sha512-gfW9AuXvwSyK07Vb8Y8E9m2oJZk21WqcD+X4BZhkbKB0TCZK0zk1j/HpS2UFlr1JB2zPKPpSWLU3ll0GEHRG2A==", "cpu": [ "arm64" ], @@ -2190,9 +2247,9 @@ } }, "node_modules/@swc/core-win32-ia32-msvc": { - "version": "1.3.96", - "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.3.96.tgz", - "integrity": "sha512-Far2hVFiwr+7VPCM2GxSmbh3ikTpM3pDombE+d69hkedvYHYZxtTF+2LTKl/sXtpbUnsoq7yV/32c9R/xaaWfw==", + "version": "1.4.6", + "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.4.6.tgz", + "integrity": "sha512-ZuQm81FhhvNVYtVb9GfZ+Du6e7fZlkisWvuCeBeRiyseNt1tcrQ8J3V67jD2nxje8CVXrwG3oUIbPcybv2rxfQ==", "cpu": [ "ia32" ], @@ -2206,9 +2263,9 @@ } }, "node_modules/@swc/core-win32-x64-msvc": { - "version": "1.3.96", - "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.3.96.tgz", - "integrity": 
"sha512-4VbSAniIu0ikLf5mBX81FsljnfqjoVGleEkCQv4+zRlyZtO3FHoDPkeLVoy6WRlj7tyrRcfUJ4mDdPkbfTO14g==", + "version": "1.4.6", + "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.4.6.tgz", + "integrity": "sha512-UagPb7w5V0uzWSjrXwOavGa7s9iv3wrVdEgWy+/inm0OwY4lj3zpK9qDnMWAwYLuFwkI3UG4Q3dH8wD+CUUcjw==", "cpu": [ "x64" ], @@ -2222,9 +2279,9 @@ } }, "node_modules/@swc/counter": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.2.tgz", - "integrity": "sha512-9F4ys4C74eSTEUNndnER3VJ15oru2NumfQxS8geE+f3eB5xvfxpWyqE5XlVnxb/R14uoXi6SLbBwwiDSkv+XEw==", + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", "dev": true }, "node_modules/@swc/types": { @@ -2233,23 +2290,48 @@ "integrity": "sha512-myfUej5naTBWnqOCc/MdVOLVjXUXtIA+NpDrDBKJtLLg2shUjBu3cZmB/85RyitKc55+lUUyl7oRfLOvkr2hsw==", "dev": true }, + "node_modules/@tanstack/react-virtual": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.1.3.tgz", + "integrity": "sha512-YCzcbF/Ws/uZ0q3Z6fagH+JVhx4JLvbSflgldMgLsuvB8aXjZLLb3HvrEVxY480F9wFlBiXlvQxOyXb5ENPrNA==", + "dependencies": { + "@tanstack/virtual-core": "3.1.3" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/@tanstack/virtual-core": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.1.3.tgz", + "integrity": "sha512-Y5B4EYyv1j9V8LzeAoOVeTg0LI7Fo5InYKgAjkY1Pu9GjtUwX/EKxNcU7ng3sKr99WEf+bPTcktAeybyMOYo+g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, "node_modules/@types/codemirror": { - "version": "5.60.12", - "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-5.60.12.tgz", - "integrity": "sha512-SFSj5Tb/mtQoVgaltsipdRGG1PkcFu/L0OXPNBGCXYUQtwsNoAGRNNHOTl1jYcQUcEI77EiUfk94bgETTbSo/A==", + "version": "5.60.15", + "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-5.60.15.tgz", + "integrity": "sha512-dTOvwEQ+ouKJ/rE9LT1Ue2hmP6H1mZv5+CCnNWu2qtiOe2LQa9lCprEY20HxiDmV/Bxh+dXjywmy5aKvoGjULA==", "dependencies": { "@types/tern": "*" } }, "node_modules/@types/estree": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.4.tgz", - "integrity": "sha512-2JwWnHK9H+wUZNorf2Zr6ves96WHoWDJIftkcxPKsS7Djta6Zu519LarhRNljPXkpsZR2ZMwNCPeW7omW07BJw==" + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" }, "node_modules/@types/hast": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.6.tgz", - "integrity": "sha512-47rJE80oqPmFdVDCD7IheXBrVdwuBgsYwoczFvKmwfo2Mzsnt+V9OONsYauFmICb6lQPpCuXYJWejBNs4pDJRg==", + "version": "2.3.10", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz", + "integrity": "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==", "dependencies": { "@types/unist": "^2" } @@ -2261,23 +2343,23 @@ "dev": true }, "node_modules/@types/prop-types": { - "version": "15.7.5", - "resolved": 
"https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", - "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==", + "version": "15.7.11", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.11.tgz", + "integrity": "sha512-ga8y9v9uyeiLdpKddhxYQkxNDrfvuPrlFb0N1qnZZByvcElJaXthF1UhvCh9TLWJBEHeNtdnbysW7Y6Uq8CVng==", "devOptional": true }, "node_modules/@types/ramda": { - "version": "0.29.9", - "resolved": "https://registry.npmjs.org/@types/ramda/-/ramda-0.29.9.tgz", - "integrity": "sha512-X3yEG6tQCWBcUAql+RPC/O1Hm9BSU+MXu2wJnCETuAgUlrEDwTA1kIOdEEE4YXDtf0zfQLHa9CCE7WYp9kqPIQ==", + "version": "0.29.11", + "resolved": "https://registry.npmjs.org/@types/ramda/-/ramda-0.29.11.tgz", + "integrity": "sha512-jm1+PmNOpE7aPS+mMcuB4a72VkCXUJqPSaQRu2YqR8MbsFfaowYXgKxc7bluYdDpRHNXT5Z+xu+Lgr3/ml6wSA==", "dependencies": { - "types-ramda": "^0.29.6" + "types-ramda": "^0.29.9" } }, "node_modules/@types/react": { - "version": "18.2.48", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.48.tgz", - "integrity": "sha512-qboRCl6Ie70DQQG9hhNREz81jqC1cs9EVNcjQ1AU+jH6NFfSAhVVbrrY/+nSF+Bsk4AOwm9Qa61InvMCyV+H3w==", + "version": "18.2.64", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.64.tgz", + "integrity": "sha512-MlmPvHgjj2p3vZaxbQgFUQFvD8QiZwACfGqEdDSWou5yISWxDQ4/74nCAwsUiX7UFLKZz3BbVSPj+YxeoGGCfg==", "devOptional": true, "dependencies": { "@types/prop-types": "*", @@ -2286,24 +2368,24 @@ } }, "node_modules/@types/react-dom": { - "version": "18.2.18", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.18.tgz", - "integrity": "sha512-TJxDm6OfAX2KJWJdMEVTwWke5Sc/E/RlnPGvGfS0W7+6ocy2xhDVQVh/KvC2Uf7kACs+gDytdusDSdWfWkaNzw==", + "version": "18.2.21", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.21.tgz", + "integrity": "sha512-gnvBA/21SA4xxqNXEwNiVcP0xSGHh/gi1VhWv9Bl46a0ItbTT5nFY+G9VSQpaG/8N/qdJpJ+vftQ4zflTtnjLw==", "devOptional": true, "dependencies": { "@types/react": "*" } }, "node_modules/@types/scheduler": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", - "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==", + "version": "0.16.8", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.8.tgz", + "integrity": "sha512-WZLiwShhwLRmeV6zH+GkbOFT6Z6VklCItrDioxUnv+u4Ll+8vKeFySoFyK/0ctcRpOmwAicELfmys1sDc/Rw+A==", "devOptional": true }, "node_modules/@types/semver": { - "version": "7.5.6", - "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.6.tgz", - "integrity": "sha512-dn1l8LaMea/IjDoHNd9J52uBbInB796CDffS6VdIxvqYCPSG0V0DzHp76GpaWnlhg88uYyPbXCDIowa86ybd5A==", + "version": "7.5.8", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.8.tgz", + "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==", "dev": true }, "node_modules/@types/swagger-ui-react": { @@ -2316,17 +2398,17 @@ } }, "node_modules/@types/tern": { - "version": "0.23.6", - "resolved": "https://registry.npmjs.org/@types/tern/-/tern-0.23.6.tgz", - "integrity": "sha512-ntalN+F2msUwz7/OCCADN4FwxtIGqF4Hqwxd15yAn0VOUozj1VaIrH4Prh95N8y69K3bQpHFVGwTJDZC4oRtvA==", + "version": "0.23.9", + "resolved": "https://registry.npmjs.org/@types/tern/-/tern-0.23.9.tgz", + "integrity": 
"sha512-ypzHFE/wBzh+BlH6rrBgS5I/Z7RD21pGhZ2rltb/+ZrVM1awdZwjx7hE5XfuYgHWk9uvV5HLZN3SloevCAp3Bw==", "dependencies": { "@types/estree": "*" } }, "node_modules/@types/unist": { - "version": "2.0.8", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.8.tgz", - "integrity": "sha512-d0XxK3YTObnWVp6rZuev3c49+j4Lo8g4L1ZRm9z5L0xpoZycUPshHgczK5gsUMaZOstjVYYi09p5gYvUtfChYw==" + "version": "2.0.10", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.10.tgz", + "integrity": "sha512-IfYcSBWE3hLpBg8+X2SEa8LVkJdJEkT2Ese2aaLs3ptGdVtABxndrMaxuFlQ1qdFf9Q5rDvDpxI3WwgvKFAsQA==" }, "node_modules/@types/use-sync-external-store": { "version": "0.0.3", @@ -2334,16 +2416,16 @@ "integrity": "sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "6.18.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.18.1.tgz", - "integrity": "sha512-nISDRYnnIpk7VCFrGcu1rnZfM1Dh9LRHnfgdkjcbi/l7g16VYRri3TjXi9Ir4lOZSw5N/gnV/3H7jIPQ8Q4daA==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.1.1.tgz", + "integrity": "sha512-zioDz623d0RHNhvx0eesUmGfIjzrk18nSBC8xewepKXbBvN/7c1qImV7Hg8TI1URTxKax7/zxfxj3Uph8Chcuw==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.5.1", - "@typescript-eslint/scope-manager": "6.18.1", - "@typescript-eslint/type-utils": "6.18.1", - "@typescript-eslint/utils": "6.18.1", - "@typescript-eslint/visitor-keys": "6.18.1", + "@typescript-eslint/scope-manager": "7.1.1", + "@typescript-eslint/type-utils": "7.1.1", + "@typescript-eslint/utils": "7.1.1", + "@typescript-eslint/visitor-keys": "7.1.1", "debug": "^4.3.4", "graphemer": "^1.4.0", "ignore": "^5.2.4", @@ -2359,8 +2441,8 @@ "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", - "eslint": "^7.0.0 || ^8.0.0" + "@typescript-eslint/parser": "^7.0.0", + "eslint": "^8.56.0" }, "peerDependenciesMeta": { "typescript": { @@ -2369,15 +2451,15 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "6.18.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.18.1.tgz", - "integrity": "sha512-zct/MdJnVaRRNy9e84XnVtRv9Vf91/qqe+hZJtKanjojud4wAVy/7lXxJmMyX6X6J+xc6c//YEWvpeif8cAhWA==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.1.1.tgz", + "integrity": "sha512-ZWUFyL0z04R1nAEgr9e79YtV5LbafdOtN7yapNbn1ansMyaegl2D4bL7vHoJ4HPSc4CaLwuCVas8CVuneKzplQ==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "6.18.1", - "@typescript-eslint/types": "6.18.1", - "@typescript-eslint/typescript-estree": "6.18.1", - "@typescript-eslint/visitor-keys": "6.18.1", + "@typescript-eslint/scope-manager": "7.1.1", + "@typescript-eslint/types": "7.1.1", + "@typescript-eslint/typescript-estree": "7.1.1", + "@typescript-eslint/visitor-keys": "7.1.1", "debug": "^4.3.4" }, "engines": { @@ -2388,7 +2470,7 @@ "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" + "eslint": "^8.56.0" }, "peerDependenciesMeta": { "typescript": { @@ -2397,13 +2479,13 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "6.18.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.18.1.tgz", - "integrity": 
"sha512-BgdBwXPFmZzaZUuw6wKiHKIovms97a7eTImjkXCZE04TGHysG+0hDQPmygyvgtkoB/aOQwSM/nWv3LzrOIQOBw==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.1.1.tgz", + "integrity": "sha512-cirZpA8bJMRb4WZ+rO6+mnOJrGFDd38WoXCEI57+CYBqta8Yc8aJym2i7vyqLL1vVYljgw0X27axkUXz32T8TA==", "dev": true, "dependencies": { - "@typescript-eslint/types": "6.18.1", - "@typescript-eslint/visitor-keys": "6.18.1" + "@typescript-eslint/types": "7.1.1", + "@typescript-eslint/visitor-keys": "7.1.1" }, "engines": { "node": "^16.0.0 || >=18.0.0" @@ -2414,13 +2496,13 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "6.18.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.18.1.tgz", - "integrity": "sha512-wyOSKhuzHeU/5pcRDP2G2Ndci+4g653V43gXTpt4nbyoIOAASkGDA9JIAgbQCdCkcr1MvpSYWzxTz0olCn8+/Q==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.1.1.tgz", + "integrity": "sha512-5r4RKze6XHEEhlZnJtR3GYeCh1IueUHdbrukV2KSlLXaTjuSfeVF8mZUVPLovidCuZfbVjfhi4c0DNSa/Rdg5g==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "6.18.1", - "@typescript-eslint/utils": "6.18.1", + "@typescript-eslint/typescript-estree": "7.1.1", + "@typescript-eslint/utils": "7.1.1", "debug": "^4.3.4", "ts-api-utils": "^1.0.1" }, @@ -2432,7 +2514,7 @@ "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" + "eslint": "^8.56.0" }, "peerDependenciesMeta": { "typescript": { @@ -2441,9 +2523,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "6.18.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.18.1.tgz", - "integrity": "sha512-4TuMAe+tc5oA7wwfqMtB0Y5OrREPF1GeJBAjqwgZh1lEMH5PJQgWgHGfYufVB51LtjD+peZylmeyxUXPfENLCw==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.1.1.tgz", + "integrity": "sha512-KhewzrlRMrgeKm1U9bh2z5aoL4s7K3tK5DwHDn8MHv0yQfWFz/0ZR6trrIHHa5CsF83j/GgHqzdbzCXJ3crx0Q==", "dev": true, "engines": { "node": "^16.0.0 || >=18.0.0" @@ -2454,13 +2536,13 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "6.18.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.18.1.tgz", - "integrity": "sha512-fv9B94UAhywPRhUeeV/v+3SBDvcPiLxRZJw/xZeeGgRLQZ6rLMG+8krrJUyIf6s1ecWTzlsbp0rlw7n9sjufHA==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.1.1.tgz", + "integrity": "sha512-9ZOncVSfr+sMXVxxca2OJOPagRwT0u/UHikM2Rd6L/aB+kL/QAuTnsv6MeXtjzCJYb8PzrXarypSGIPx3Jemxw==", "dev": true, "dependencies": { - "@typescript-eslint/types": "6.18.1", - "@typescript-eslint/visitor-keys": "6.18.1", + "@typescript-eslint/types": "7.1.1", + "@typescript-eslint/visitor-keys": "7.1.1", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", @@ -2481,42 +2563,18 @@ } } }, - "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { - "version": "9.0.3", - "resolved": 
"https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", - "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", - "dev": true, - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/@typescript-eslint/utils": { - "version": "6.18.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.18.1.tgz", - "integrity": "sha512-zZmTuVZvD1wpoceHvoQpOiewmWu3uP9FuTWo8vqpy2ffsmfCE8mklRPi+vmnIYAIk9t/4kOThri2QCDgor+OpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.1.1.tgz", + "integrity": "sha512-thOXM89xA03xAE0lW7alstvnyoBUbBX38YtY+zAUcpRPcq9EIhXPuJ0YTv948MbzmKh6e1AUszn5cBFK49Umqg==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", "@types/json-schema": "^7.0.12", "@types/semver": "^7.5.0", - "@typescript-eslint/scope-manager": "6.18.1", - "@typescript-eslint/types": "6.18.1", - "@typescript-eslint/typescript-estree": "6.18.1", + "@typescript-eslint/scope-manager": "7.1.1", + "@typescript-eslint/types": "7.1.1", + "@typescript-eslint/typescript-estree": "7.1.1", "semver": "^7.5.4" }, "engines": { @@ -2527,16 +2585,16 @@ "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" + "eslint": "^8.56.0" } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "6.18.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.18.1.tgz", - "integrity": "sha512-/kvt0C5lRqGoCfsbmm7/CwMqoSkY3zzHLIjdhHZQW3VFrnz7ATecOHR7nb7V+xn4286MBxfnQfQhAmCI0u+bJA==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.1.1.tgz", + "integrity": "sha512-yTdHDQxY7cSoCcAtiBzVzxleJhkGB9NncSIyMYe2+OGON1ZsP9zOPws/Pqgopa65jvknOjlk/w7ulPlZ78PiLQ==", "dev": true, "dependencies": { - "@typescript-eslint/types": "6.18.1", + "@typescript-eslint/types": "7.1.1", "eslint-visitor-keys": "^3.4.1" }, "engines": { @@ -2554,12 +2612,12 @@ "dev": true }, "node_modules/@vitejs/plugin-react-swc": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.5.0.tgz", - "integrity": "sha512-1PrOvAaDpqlCV+Up8RkAh9qaiUjoDUcjtttyhXDKw53XA6Ve16SOp6cCOpRs8Dj8DqUQs6eTW5YkLcLJjrXAig==", + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.6.0.tgz", + "integrity": "sha512-XFRbsGgpGxGzEV5i5+vRiro1bwcIaZDIdBRP16qwm+jP68ue/S8FJTBEgOeojtVDYrbSua3XFp71kC8VJE6v+g==", "dev": true, "dependencies": { - "@swc/core": "^1.3.96" + "@swc/core": "^1.3.107" }, "peerDependencies": { "vite": "^4 || ^5" @@ -2571,9 +2629,9 @@ "integrity": "sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==" }, "node_modules/acorn": { - "version": "8.11.2", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz", - "integrity": "sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==", + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -2677,9 +2735,9 @@ } }, "node_modules/axios": { - "version": "1.6.5", - "resolved": 
"https://registry.npmjs.org/axios/-/axios-1.6.5.tgz", - "integrity": "sha512-Ii012v05KEVuUoFWmMW/UQv9aRIc3ZwkWDcM+h5Il8izZCtRVpDUfwpoFf7eOtajT3QiGR4yDUx7lPqHJULgbg==", + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.7.tgz", + "integrity": "sha512-/hDJGff6/c7u0hDkvkGxR/oy6CbCs8ziCsC7SqmhjfozqiJGc8Z11wrv9z9lYfY4K8l+H9TpjcMDX0xOZmx+RA==", "dependencies": { "follow-redirects": "^1.15.4", "form-data": "^4.0.0", @@ -2722,12 +2780,11 @@ } }, "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "balanced-match": "^1.0.0" } }, "node_modules/braces": { @@ -2766,13 +2823,18 @@ } }, "node_modules/call-bind": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.5.tgz", - "integrity": "sha512-C3nQxfFZxFRVoJoGKKI8y3MOEo129NQ+FgQ08iye+Mk4zNZZGdjfs06bVTr+DBSlA66Q2VEcMki/cUCP4SercQ==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", "function-bind": "^1.1.2", - "get-intrinsic": "^1.2.1", - "set-function-length": "^1.1.1" + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -2868,9 +2930,9 @@ } }, "node_modules/codemirror": { - "version": "5.65.15", - "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-5.65.15.tgz", - "integrity": "sha512-YC4EHbbwQeubZzxLl5G4nlbLc1T21QTrKGaOal/Pkm9dVDMZXMH7+ieSPEOZCtO9I68i8/oteJKOxzHC2zR+0g==" + "version": "5.65.16", + "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-5.65.16.tgz", + "integrity": "sha512-br21LjYmSlVL0vFCPWPfhzUCT34FM/pAdK7rRIZwa0rrtrIdotvP4Oh4GUHsu2E3IrQMCfRkL/fN3ytMNxVQvg==" }, "node_modules/codemirror-graphql": { "version": "2.0.10", @@ -2952,9 +3014,9 @@ } }, "node_modules/core-js-pure": { - "version": "3.35.0", - "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.35.0.tgz", - "integrity": "sha512-f+eRYmkou59uh7BPcyJ8MC76DiGhspj1KMxVIcF24tzP8NA9HVa1uC7BTW2tgx7E1QVCzDzsgp7kArrzhlz8Ew==", + "version": "3.36.0", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.36.0.tgz", + "integrity": "sha512-cN28qmhRNgbMZZMc/RFu5w8pK9VJzpb2rJVR/lHuZJKwmXnoWOpXmMkxqBB514igkp1Hu8WGROsiOAzUcKdHOQ==", "hasInstallScript": true, "funding": { "type": "opencollective", @@ -2980,9 +3042,9 @@ "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==" }, "node_modules/csstype": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", - "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==", + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": 
"sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", "devOptional": true }, "node_modules/debug": { @@ -3040,16 +3102,19 @@ } }, "node_modules/define-data-property": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.1.tgz", - "integrity": "sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", "dependencies": { - "get-intrinsic": "^1.2.1", - "gopd": "^1.0.1", - "has-property-descriptors": "^1.0.0" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" }, "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/delayed-stream": { @@ -3099,9 +3164,9 @@ } }, "node_modules/dompurify": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.6.tgz", - "integrity": "sha512-ilkD8YEnnGh1zJ240uJsW7AzE+2qpbOUYjacomn3AvJ6J4JhKGSZ2nh4wUIXPZrEPppaCLx5jFe8T89Rk8tQ7w==" + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.9.tgz", + "integrity": "sha512-uyb4NDIvQ3hRn6NiC+SIFaP4mJ/MdXlvtunaqK9Bn6dD3RuB/1S/gasEjDHD8eiaqdSael2vBv+hOs7Y+jhYOQ==" }, "node_modules/drange": { "version": "1.1.1", @@ -3128,10 +3193,29 @@ "url": "https://github.com/fb55/entities?sponsor=1" } }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/esbuild": { - "version": "0.19.8", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.8.tgz", - "integrity": "sha512-l7iffQpT2OrZfH2rXIp7/FkmaeZM0vxbxN9KfiCwGYuZqzMg/JdvX26R31Zxn/Pxvsrg3Y9N6XTcnknqDyyv4w==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.12.tgz", + "integrity": "sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==", "dev": true, "hasInstallScript": true, "bin": { @@ -3141,28 +3225,29 @@ "node": ">=12" }, "optionalDependencies": { - "@esbuild/android-arm": "0.19.8", - "@esbuild/android-arm64": "0.19.8", - "@esbuild/android-x64": "0.19.8", - "@esbuild/darwin-arm64": "0.19.8", - "@esbuild/darwin-x64": "0.19.8", - "@esbuild/freebsd-arm64": "0.19.8", - "@esbuild/freebsd-x64": "0.19.8", - "@esbuild/linux-arm": "0.19.8", - "@esbuild/linux-arm64": "0.19.8", - "@esbuild/linux-ia32": "0.19.8", - "@esbuild/linux-loong64": "0.19.8", - "@esbuild/linux-mips64el": "0.19.8", - "@esbuild/linux-ppc64": "0.19.8", - "@esbuild/linux-riscv64": "0.19.8", - "@esbuild/linux-s390x": "0.19.8", - "@esbuild/linux-x64": "0.19.8", - "@esbuild/netbsd-x64": "0.19.8", - "@esbuild/openbsd-x64": "0.19.8", - "@esbuild/sunos-x64": "0.19.8", - "@esbuild/win32-arm64": "0.19.8", - 
"@esbuild/win32-ia32": "0.19.8", - "@esbuild/win32-x64": "0.19.8" + "@esbuild/aix-ppc64": "0.19.12", + "@esbuild/android-arm": "0.19.12", + "@esbuild/android-arm64": "0.19.12", + "@esbuild/android-x64": "0.19.12", + "@esbuild/darwin-arm64": "0.19.12", + "@esbuild/darwin-x64": "0.19.12", + "@esbuild/freebsd-arm64": "0.19.12", + "@esbuild/freebsd-x64": "0.19.12", + "@esbuild/linux-arm": "0.19.12", + "@esbuild/linux-arm64": "0.19.12", + "@esbuild/linux-ia32": "0.19.12", + "@esbuild/linux-loong64": "0.19.12", + "@esbuild/linux-mips64el": "0.19.12", + "@esbuild/linux-ppc64": "0.19.12", + "@esbuild/linux-riscv64": "0.19.12", + "@esbuild/linux-s390x": "0.19.12", + "@esbuild/linux-x64": "0.19.12", + "@esbuild/netbsd-x64": "0.19.12", + "@esbuild/openbsd-x64": "0.19.12", + "@esbuild/sunos-x64": "0.19.12", + "@esbuild/win32-arm64": "0.19.12", + "@esbuild/win32-ia32": "0.19.12", + "@esbuild/win32-x64": "0.19.12" } }, "node_modules/escape-string-regexp": { @@ -3178,16 +3263,16 @@ } }, "node_modules/eslint": { - "version": "8.56.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.56.0.tgz", - "integrity": "sha512-Go19xM6T9puCOWntie1/P997aXxFsOi37JIHRWI514Hc6ZnaHGKY9xFhrU65RT6CcBEzZoGG1e6Nq+DT04ZtZQ==", + "version": "8.57.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.0.tgz", + "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", "@eslint/eslintrc": "^2.1.4", - "@eslint/js": "8.56.0", - "@humanwhocodes/config-array": "^0.11.13", + "@eslint/js": "8.57.0", + "@humanwhocodes/config-array": "^0.11.14", "@humanwhocodes/module-importer": "^1.0.1", "@nodelib/fs.walk": "^1.2.8", "@ungap/structured-clone": "^1.2.0", @@ -3253,11 +3338,15 @@ "eslint": ">=7" } }, - "node_modules/eslint-visitor-keys": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", - "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" }, @@ -3265,15 +3354,11 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/eslint-scope": { - "version": "7.2.2", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", - "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", "dev": true, - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^5.2.0" - }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" }, @@ -3281,13 +3366,26 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": 
"sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, "engines": { - "node": ">=4.0" + "node": "*" } }, "node_modules/espree": { @@ -3319,15 +3417,6 @@ "node": ">=0.10" } }, - "node_modules/esquery/node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "dev": true, - "engines": { - "node": ">=4.0" - } - }, "node_modules/esrecurse": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", @@ -3340,7 +3429,7 @@ "node": ">=4.0" } }, - "node_modules/esrecurse/node_modules/estraverse": { + "node_modules/estraverse": { "version": "5.3.0", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", @@ -3374,9 +3463,9 @@ "dev": true }, "node_modules/fast-glob": { - "version": "3.2.12", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", "dev": true, "dependencies": { "@nodelib/fs.stat": "^2.0.2", @@ -3419,9 +3508,9 @@ "dev": true }, "node_modules/fastq": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", - "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", "dev": true, "dependencies": { "reusify": "^1.0.4" @@ -3487,12 +3576,13 @@ } }, "node_modules/flat-cache": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", - "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", "dev": true, "dependencies": { - "flatted": "^3.1.0", + "flatted": "^3.2.9", + "keyv": "^4.5.3", "rimraf": "^3.0.2" }, "engines": { @@ -3500,9 +3590,9 @@ } }, "node_modules/flatted": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz", - 
"integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==", + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", + "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", "dev": true }, "node_modules/follow-redirects": { @@ -3621,15 +3711,19 @@ } }, "node_modules/get-intrinsic": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.2.tgz", - "integrity": "sha512-0gSo4ml/0j98Y3lngkFEot/zhiCeWsbYIlZ+uZOVgzLyLaUw7wxUL+nCTP0XJvJg1AXulJRI3UJi8GsbDuxdGA==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "dependencies": { + "es-errors": "^1.3.0", "function-bind": "^1.1.2", "has-proto": "^1.0.1", "has-symbols": "^1.0.3", "hasown": "^2.0.0" }, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -3679,10 +3773,30 @@ "node": ">=10.13.0" } }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/globals": { - "version": "13.23.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.23.0.tgz", - "integrity": "sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==", + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", "dev": true, "dependencies": { "type-fest": "^0.20.2" @@ -3737,11 +3851,11 @@ "dev": true }, "node_modules/graphiql": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.1.0.tgz", - "integrity": "sha512-1l2PecYNvFYYNSYq+4vIJOACXkP60Kod0E0SnKu+2f0Ux/npFNr3TfwJLZs7eKqqSh0KODmorvHi/XBP46Ua7A==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.1.1.tgz", + "integrity": "sha512-FMNa981Wj8JBJJRTdryNyrVteigS8B7q+Q1fh1rW4IsFPaXNIs1VMs8kwqIZ8zERj4Fc64Ea750g3n6r2w9Zcg==", "dependencies": { - "@graphiql/react": "^0.20.2", + "@graphiql/react": "^0.20.3", "@graphiql/toolkit": "^0.9.1", "graphql-language-service": "^5.2.0", "markdown-it": "^12.2.0" @@ -3784,20 +3898,20 @@ } }, "node_modules/has-property-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.1.tgz", - "integrity": "sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": 
"sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "dependencies": { - "get-intrinsic": "^1.2.2" + "es-define-property": "^1.0.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", "engines": { "node": ">= 0.4" }, @@ -3817,9 +3931,9 @@ } }, "node_modules/hasown": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz", - "integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.1.tgz", + "integrity": "sha512-1/th4MHjnwncwXsIW6QMzlvYL9kG5e/CpVvLRZe4XPa8TOUNbCELqmvhDmnkNsAjwaG4+I8gJJL0JBvTTLO9qA==", "dependencies": { "function-bind": "^1.1.2" }, @@ -3885,9 +3999,9 @@ ] }, "node_modules/ignore": { - "version": "5.2.4", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", - "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", "dev": true, "engines": { "node": ">= 4" @@ -4076,6 +4190,11 @@ "node": ">=8" } }, + "node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==" + }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", @@ -4110,6 +4229,12 @@ "js-yaml": "bin/js-yaml.js" } }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, "node_modules/json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", @@ -4117,11 +4242,17 @@ "dev": true }, "node_modules/json-stable-stringify": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.0.2.tgz", - "integrity": "sha512-eunSSaEnxV12z+Z73y/j5N37/In40GK4GmsSy+tEHJMxknvqnA7/djeYtAgW0GsWHUfg+847WJjKaEylk2y09g==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.1.1.tgz", + "integrity": "sha512-SU/971Kt5qVQfJpyDveVhQ/vya+5hvrjClFOcr8c0Fq5aODJjMwutrOfCU+eCnVD5gpx1Q3fEqkyom77zH1iIg==", "dependencies": { - "jsonify": "^0.0.1" + "call-bind": "^1.0.5", + "isarray": "^2.0.5", + "jsonify": "^0.0.1", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -4152,6 +4283,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": 
"https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, "node_modules/klaw-sync": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", @@ -4347,14 +4487,18 @@ } }, "node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "dev": true, "dependencies": { - "brace-expansion": "^1.1.7" + "brace-expansion": "^2.0.1" }, "engines": { - "node": "*" + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/minimist": { @@ -4378,9 +4522,9 @@ "dev": true }, "node_modules/nan": { - "version": "2.18.0", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.18.0.tgz", - "integrity": "sha512-W7tfG7vMOGtD30sHoZSSc/JVYiyDPEyQVso/Zz+/uQd0B0L46gtC+pHha5FFMRpil6fm/AoEcRWyOVi4+E/f8w==", + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.19.0.tgz", + "integrity": "sha512-nO1xXxfh/RWNxfd/XPfbIfFk5vgLsAxUR9y5O0cHMJu/AW9U95JLXqthYHjEp+8gQ5p96K9jUp8nbVOxCdRbtw==", "optional": true }, "node_modules/nanoid": { @@ -4414,9 +4558,9 @@ "dev": true }, "node_modules/node-abi": { - "version": "3.54.0", - "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.54.0.tgz", - "integrity": "sha512-p7eGEiQil0YUV3ItH4/tBb781L5impVmmx2E9FRKF7d18XXzp4PGT2tdYMFY6wQqgxD0IwNZOiSJ0/K0fSi/OA==", + "version": "3.56.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.56.0.tgz", + "integrity": "sha512-fZjdhDOeRcaS+rcpve7XuwHBmktS1nS1gzgghwKUQQ8nTy2FdSDr6ZT8k6YhvlJeHmmQMYiT/IH9hfco5zeW2Q==", "optional": true, "dependencies": { "semver": "^7.3.5" @@ -4485,6 +4629,14 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -4703,9 +4855,9 @@ } }, "node_modules/postcss": { - "version": "8.4.32", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.32.tgz", - "integrity": "sha512-D/kj5JNu6oo2EIy+XL/26JEDTlIbB8hw85G8StOE6L74RQAVVP5rej6wxCNqyMbR4RkPfqvezVbPw81Ngd6Kcw==", + "version": "8.4.35", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.35.tgz", + "integrity": "sha512-u5U8qYpBCpN13BsiEB0CbR1Hhh4Gc0zLFuedrHJKMctHCHAGrMdG0PRM/KErzAL3CU6/eckEtmHNB3x6e3c0vA==", "dev": true, "funding": [ { @@ -4731,9 +4883,9 @@ } }, "node_modules/prebuild-install": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.1.tgz", - "integrity": "sha512-jAXscXWMcCK8GgCoHOfIr0ODh5ai8mj63L2nWrjuAgXE6tDyYGnx4/8o/rCgU+B4JSyZBKbeZqzhtwtC3ovxjw==", + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.2.tgz", + "integrity": 
"sha512-UnNke3IQb6sgarcZIDU3gbMeTp/9SSU1DAIkil7PrqG1vZlBtY5msYccSKSHDqa3hNg436IXK+SNImReuA1wEQ==", "optional": true, "dependencies": { "detect-libc": "^2.0.0", @@ -4828,11 +4980,11 @@ } }, "node_modules/qs": { - "version": "6.11.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.2.tgz", - "integrity": "sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==", + "version": "6.12.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.12.0.tgz", + "integrity": "sha512-trVZiI6RMOkO476zLGaBIzszOdFPnCCXHPG9kn0yuS1uz6xdVxPfZdB3vUig9pxPFDM9BRAgz/YUIVQ1/vuiUg==", "dependencies": { - "side-channel": "^1.0.4" + "side-channel": "^1.0.6" }, "engines": { "node": ">=0.6" @@ -5066,9 +5218,9 @@ } }, "node_modules/react-remove-scroll-bar": { - "version": "2.3.4", - "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.4.tgz", - "integrity": "sha512-63C4YQBUt0m6ALadE9XV56hV8BgJWDmmTPY758iIJjfQKt2nYwoUrPk0LXRXcB/yIj82T1/Ixfdpdk68LwIB0A==", + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.5.tgz", + "integrity": "sha512-3cqjOqg6s0XbOjWvmasmqHch+RLxIEk2r/70rzGXuz3iIGQsQheEQyqYCBb5EECoD01Vo2SIbDqW4paLeLTASw==", "dependencies": { "react-style-singleton": "^2.2.1", "tslib": "^2.0.0" @@ -5173,9 +5325,9 @@ } }, "node_modules/regenerator-runtime": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz", - "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==" + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" }, "node_modules/remarkable": { "version": "2.0.1", @@ -5261,10 +5413,13 @@ } }, "node_modules/rollup": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.6.0.tgz", - "integrity": "sha512-R8i5Her4oO1LiMQ3jKf7MUglYV/mhQ5g5OKeld5CnkmPdIGo79FDDQYqPhq/PCVuTQVuxsWgIbDy9F+zdHn80w==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.12.1.tgz", + "integrity": "sha512-ggqQKvx/PsB0FaWXhIvVkSWh7a/PCLQAsMjBc+nA2M8Rv2/HG0X6zvixAB7KyZBRtifBUhy5k8voQX/mRnABPg==", "dev": true, + "dependencies": { + "@types/estree": "1.0.5" + }, "bin": { "rollup": "dist/bin/rollup" }, @@ -5273,18 +5428,19 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.6.0", - "@rollup/rollup-android-arm64": "4.6.0", - "@rollup/rollup-darwin-arm64": "4.6.0", - "@rollup/rollup-darwin-x64": "4.6.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.6.0", - "@rollup/rollup-linux-arm64-gnu": "4.6.0", - "@rollup/rollup-linux-arm64-musl": "4.6.0", - "@rollup/rollup-linux-x64-gnu": "4.6.0", - "@rollup/rollup-linux-x64-musl": "4.6.0", - "@rollup/rollup-win32-arm64-msvc": "4.6.0", - "@rollup/rollup-win32-ia32-msvc": "4.6.0", - "@rollup/rollup-win32-x64-msvc": "4.6.0", + "@rollup/rollup-android-arm-eabi": "4.12.1", + "@rollup/rollup-android-arm64": "4.12.1", + "@rollup/rollup-darwin-arm64": "4.12.1", + "@rollup/rollup-darwin-x64": "4.12.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.12.1", + "@rollup/rollup-linux-arm64-gnu": "4.12.1", + "@rollup/rollup-linux-arm64-musl": "4.12.1", + "@rollup/rollup-linux-riscv64-gnu": "4.12.1", + "@rollup/rollup-linux-x64-gnu": "4.12.1", + "@rollup/rollup-linux-x64-musl": "4.12.1", + 
"@rollup/rollup-win32-arm64-msvc": "4.12.1", + "@rollup/rollup-win32-ia32-msvc": "4.12.1", + "@rollup/rollup-win32-x64-msvc": "4.12.1", "fsevents": "~2.3.2" } }, @@ -5339,9 +5495,9 @@ } }, "node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dependencies": { "lru-cache": "^6.0.0" }, @@ -5367,13 +5523,14 @@ } }, "node_modules/set-function-length": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.0.tgz", - "integrity": "sha512-4DBHDoyHlM1IRPGYcoxexgh67y4ueR53FKV1yyxwFMY7aCqcN/38M1+SwZ/qJQ8iLv7+ck385ot4CcisOAPT9w==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.1.tgz", + "integrity": "sha512-j4t6ccc+VsKwYHso+kElc5neZpjtq9EnRICFZtWyBsLojhmeF/ZBd/elqm22WJh/BziDe/SBiOeAt0m2mfLD0g==", "dependencies": { - "define-data-property": "^1.1.1", + "define-data-property": "^1.1.2", + "es-errors": "^1.3.0", "function-bind": "^1.1.2", - "get-intrinsic": "^1.2.2", + "get-intrinsic": "^1.2.3", "gopd": "^1.0.1", "has-property-descriptors": "^1.0.1" }, @@ -5439,13 +5596,17 @@ } }, "node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -5567,9 +5728,9 @@ } }, "node_modules/style-mod": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.0.tgz", - "integrity": "sha512-Ca5ib8HrFn+f+0n4N4ScTIA9iTOQ7MaGS1ylHcoVqW9J7w2w8PzN6g9gKmTYgGEBH8e120+RCmhpje6jC5uGWA==", + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.2.tgz", + "integrity": "sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw==", "peer": true }, "node_modules/style-value-types": { @@ -5593,26 +5754,25 @@ } }, "node_modules/swagger-client": { - "version": "3.25.0", - "resolved": "https://registry.npmjs.org/swagger-client/-/swagger-client-3.25.0.tgz", - "integrity": "sha512-p143zWkIhgyh2E5+3HPFMlCw3WkV9RbX9HyftfBdiccCbOlmHdcJC0XEJZxcm+ZA+80DORs0F30/mzk7sx4iwA==", + "version": "3.26.0", + "resolved": "https://registry.npmjs.org/swagger-client/-/swagger-client-3.26.0.tgz", + "integrity": "sha512-1yFR/S2V3v5DwgmNePoHEjq2dZJxDx1leDQ53r5M4hZs+dozm9VnznlSl9a1V5iTYw4UsS4PQuBRQsmBH21ViA==", "dependencies": { "@babel/runtime-corejs3": "^7.22.15", - "@swagger-api/apidom-core": ">=0.90.0 <1.0.0", - "@swagger-api/apidom-error": ">=0.90.0 <1.0.0", - "@swagger-api/apidom-json-pointer": ">=0.90.0 <1.0.0", - 
"@swagger-api/apidom-ns-openapi-3-1": ">=0.90.0 <1.0.0", - "@swagger-api/apidom-reference": ">=0.90.0 <1.0.0", + "@swagger-api/apidom-core": ">=0.97.0 <1.0.0", + "@swagger-api/apidom-error": ">=0.97.0 <1.0.0", + "@swagger-api/apidom-json-pointer": ">=0.97.0 <1.0.0", + "@swagger-api/apidom-ns-openapi-3-1": ">=0.97.0 <1.0.0", + "@swagger-api/apidom-reference": ">=0.97.0 <1.0.0", "cookie": "~0.6.0", "deepmerge": "~4.3.0", "fast-json-patch": "^3.0.0-1", "is-plain-object": "^5.0.0", "js-yaml": "^4.1.0", "node-abort-controller": "^3.1.1", - "node-fetch-commonjs": "^3.3.1", + "node-fetch-commonjs": "^3.3.2", "qs": "^6.10.2", - "traverse": "~0.6.6", - "undici": "^5.24.0" + "traverse": "~0.6.6" } }, "node_modules/swagger-client/node_modules/is-plain-object": { @@ -5624,17 +5784,17 @@ } }, "node_modules/swagger-ui-react": { - "version": "5.11.0", - "resolved": "https://registry.npmjs.org/swagger-ui-react/-/swagger-ui-react-5.11.0.tgz", - "integrity": "sha512-iqc5/Z8nvqOdjU2LuWYbREnDmKj5gndZSESTH9dXfymlzLc2NoPQmXZAw02U8kFgHyciX0yDMp3oaCw1zBdPSA==", + "version": "5.11.10", + "resolved": "https://registry.npmjs.org/swagger-ui-react/-/swagger-ui-react-5.11.10.tgz", + "integrity": "sha512-X5HwC5h/HN5txkjOmSfL2nuhQH3fkePSdH8rrvqKFKwzZpvUYw0CmwBpBkJyQm24FuI7U9q/k3/ru6dVG32cQw==", "dependencies": { - "@babel/runtime-corejs3": "^7.23.7", + "@babel/runtime-corejs3": "^7.24.0", "@braintree/sanitize-url": "=7.0.0", "base64-js": "^1.5.1", "classnames": "^2.5.1", "css.escape": "1.5.1", "deep-extend": "0.6.0", - "dompurify": "=3.0.6", + "dompurify": "=3.0.9", "ieee754": "^1.2.1", "immutable": "^3.x.x", "js-file-download": "^0.4.12", @@ -5649,15 +5809,15 @@ "react-immutable-proptypes": "2.2.0", "react-immutable-pure-component": "^2.2.0", "react-inspector": "^6.0.1", - "react-redux": "^9.0.4", + "react-redux": "^9.1.0", "react-syntax-highlighter": "^15.5.0", - "redux": "^5.0.0", + "redux": "^5.0.1", "redux-immutable": "^4.0.0", "remarkable": "^2.0.1", - "reselect": "^5.0.1", + "reselect": "^5.1.0", "serialize-error": "^8.1.0", "sha.js": "^2.4.11", - "swagger-client": "^3.25.0", + "swagger-client": "^3.25.4", "url-parse": "^1.5.10", "xml": "=1.0.1", "xml-but-prettier": "^1.0.1", @@ -5752,9 +5912,9 @@ } }, "node_modules/tree-sitter-json": { - "version": "0.20.1", - "resolved": "https://registry.npmjs.org/tree-sitter-json/-/tree-sitter-json-0.20.1.tgz", - "integrity": "sha512-482hf7J+aBwhksSw8yWaqI8nyP1DrSwnS4IMBShsnkFWD3SE8oalHnsEik59fEVi3orcTCUtMzSjZx+0Tpa6Vw==", + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/tree-sitter-json/-/tree-sitter-json-0.20.2.tgz", + "integrity": "sha512-eUxrowp4F1QEGk/i7Sa+Xl8Crlfp7J0AXxX1QdJEQKQYMWhgMbCIgyQvpO3Q0P9oyTrNQxRLlRipDS44a8EtRw==", "hasInstallScript": true, "optional": true, "dependencies": { @@ -5772,17 +5932,22 @@ } }, "node_modules/ts-api-utils": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.0.1.tgz", - "integrity": "sha512-lC/RGlPmwdrIBFTX59wwNzqh7aR2otPNPR/5brHZm/XKFYKsfqxihXUe9pU3JI+3vGkl+vyCoNNnPhJn3aLK1A==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.2.1.tgz", + "integrity": "sha512-RIYA36cJn2WiH9Hy77hdF9r7oEwxAtB/TS9/S4Qd90Ap4z5FSiin5zEiTL44OII1Y3IIlEvxwxFUVgrHSZ/UpA==", "dev": true, "engines": { - "node": ">=16.13.0" + "node": ">=16" }, "peerDependencies": { "typescript": ">=4.2.0" } }, + "node_modules/ts-mixer": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/ts-mixer/-/ts-mixer-6.0.4.tgz", + "integrity": 
"sha512-ufKpbmrugz5Aou4wcr5Wc1UUFWOLhq+Fm6qa6P0w0K5Qw2yhaUoiWszhCVuNQyNwrlGiscHOmqYoAox1PtvgjA==" + }, "node_modules/ts-toolbelt": { "version": "9.6.0", "resolved": "https://registry.npmjs.org/ts-toolbelt/-/ts-toolbelt-9.6.0.tgz", @@ -5829,17 +5994,17 @@ } }, "node_modules/types-ramda": { - "version": "0.29.6", - "resolved": "https://registry.npmjs.org/types-ramda/-/types-ramda-0.29.6.tgz", - "integrity": "sha512-VJoOk1uYNh9ZguGd3eZvqkdhD4hTGtnjRBUx5Zc0U9ftmnCgiWcSj/lsahzKunbiwRje1MxxNkEy1UdcXRCpYw==", + "version": "0.29.9", + "resolved": "https://registry.npmjs.org/types-ramda/-/types-ramda-0.29.9.tgz", + "integrity": "sha512-B+VbLtW68J4ncG/rccKaYDhlirKlVH/Izh2JZUfaPJv+3Tl2jbbgYsB1pvole1vXKSgaPlAe/wgEdOnMdAu52A==", "dependencies": { "ts-toolbelt": "^9.6.0" } }, "node_modules/typescript": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.3.3.tgz", - "integrity": "sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==", + "version": "5.4.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.2.tgz", + "integrity": "sha512-+2/g0Fds1ERlP6JsakQQDXjZdZMM+rqpamFZJEKh4kwTIn3iDkgKtby0CeNd5ATNZ4Ry1ax15TMx0W2V+miizQ==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -5854,21 +6019,10 @@ "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" }, - "node_modules/undici": { - "version": "5.28.2", - "resolved": "https://registry.npmjs.org/undici/-/undici-5.28.2.tgz", - "integrity": "sha512-wh1pHJHnUeQV5Xa8/kyQhO7WFa8M34l026L5P/+2TYiakvGy5Rdc8jWZVyG7ieht/0WgJLEd3kcU5gKx+6GC8w==", - "dependencies": { - "@fastify/busboy": "^2.0.0" - }, - "engines": { - "node": ">=14.0" - } - }, "node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", "engines": { "node": ">= 10.0.0" } @@ -5897,9 +6051,9 @@ } }, "node_modules/use-callback-ref": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.0.tgz", - "integrity": "sha512-3FT9PRuRdbB9HfXhEq35u4oZkvpJ5kuYbpqhCfmiZyReuRgpnhDlbr2ZEnnuS0RrJAPn6l23xjFg9kpDM+Ms7w==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.1.tgz", + "integrity": "sha512-Lg4Vx1XZQauB42Hw3kK7JM6yjVjgFmFC5/Ab797s79aARomD2nEErc4mCgM8EZrARLmmbWpi5DGCadmK50DcAQ==", "dependencies": { "tslib": "^2.0.0" }, @@ -5952,13 +6106,13 @@ "optional": true }, "node_modules/vite": { - "version": "5.0.11", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.0.11.tgz", - "integrity": "sha512-XBMnDjZcNAw/G1gEiskiM1v6yzM4GE5aMGvhWTlHAYYhxb7S3/V1s3m2LDHa8Vh6yIWYYB0iJwsEaS523c4oYA==", + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.1.5.tgz", + "integrity": "sha512-BdN1xh0Of/oQafhU+FvopafUp6WaYenLU/NFoL5WyJL++GxkNfieKzBhM24H3HVsPQrlAqB7iJYTHabzaRed5Q==", "dev": true, "dependencies": { "esbuild": "^0.19.3", - "postcss": "^8.4.32", + "postcss": "^8.4.35", "rollup": "^4.2.0" }, "bin": { @@ -6018,9 +6172,9 @@ "peer": true }, "node_modules/web-streams-polyfill": { - 
"version": "3.3.2", - "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.2.tgz", - "integrity": "sha512-3pRGuxRF5gpuZc0W+EpwQRmCD7gRqcDOMt688KmdlDAgAyaB1XlN0zq2njfDNm44XVdIouE7pZ6GzbdyH47uIQ==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", "engines": { "node": ">= 8" } @@ -6077,9 +6231,12 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "node_modules/yaml": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.3.tgz", - "integrity": "sha512-zw0VAJxgeZ6+++/su5AFoqBbZbrEakwu+X0M5HmcwUiBL7AzcuPKjj5we4xfQLp78LkEMpD0cOnUhmgOVy3KdQ==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.4.1.tgz", + "integrity": "sha512-pIXzoImaqmfOrL7teGUBt/T7ZDnyeGBWyXQBvOVhLkWLN37GXv8NMLK406UY6dS51JfcQHsmcW5cJ441bHg6Lg==", + "bin": { + "yaml": "bin.mjs" + }, "engines": { "node": ">= 14" } diff --git a/playground/package.json b/playground/package.json index 2532afe5af..0930fe41ea 100644 --- a/playground/package.json +++ b/playground/package.json @@ -10,23 +10,23 @@ "preview": "vite preview" }, "dependencies": { - "graphiql": "^3.1.0", + "graphiql": "^3.1.1", "graphql": "^16.8.1", "react": "^18.2.0", "react-dom": "^18.2.0", - "swagger-ui-react": "^5.11.0" + "swagger-ui-react": "^5.11.9" }, "devDependencies": { - "@types/react": "^18.2.48", + "@types/react": "^18.2.61", "@types/react-dom": "^18.2.18", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/parser": "^6.18.1", - "@typescript-eslint/eslint-plugin": "^6.18.1", - "@vitejs/plugin-react-swc": "^3.5.0", - "eslint": "^8.56.0", + "@typescript-eslint/eslint-plugin": "^7.1.0", + "@typescript-eslint/parser": "^7.1.0", + "@vitejs/plugin-react-swc": "^3.6.0", + "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.5", "typescript": "^5.3.3", - "vite": "^5.0.11" + "vite": "^5.1.4" } } diff --git a/request/graphql/parser/filter.go b/request/graphql/parser/filter.go index 16994e5a84..d7e7f44d40 100644 --- a/request/graphql/parser/filter.go +++ b/request/graphql/parser/filter.go @@ -194,35 +194,35 @@ func parseVal(val ast.Value, recurseFn parseFn) (any, error) { // from the filter conditions“ func ParseFilterFieldsForDescription( conditions map[string]any, - schema client.SchemaDescription, -) ([]client.FieldDescription, error) { - return parseFilterFieldsForDescriptionMap(conditions, schema) + col client.CollectionDefinition, +) ([]client.FieldDefinition, error) { + return parseFilterFieldsForDescriptionMap(conditions, col) } func parseFilterFieldsForDescriptionMap( conditions map[string]any, - schema client.SchemaDescription, -) ([]client.FieldDescription, error) { - fields := make([]client.FieldDescription, 0) + col client.CollectionDefinition, +) ([]client.FieldDefinition, error) { + fields := make([]client.FieldDefinition, 0) for k, v := range conditions { switch k { case "_or", "_and": conds := v.([]any) - parsedFileds, err := parseFilterFieldsForDescriptionSlice(conds, schema) + parsedFileds, err := parseFilterFieldsForDescriptionSlice(conds, col) if err != nil { return nil, err } fields = append(fields, parsedFileds...) 
case "_not": conds := v.(map[string]any) - parsedFileds, err := parseFilterFieldsForDescriptionMap(conds, schema) + parsedFileds, err := parseFilterFieldsForDescriptionMap(conds, col) if err != nil { return nil, err } fields = append(fields, parsedFileds...) default: - f, found := schema.GetField(k) - if !found || f.IsObject() { + f, found := col.GetFieldByName(k) + if !found || f.Kind.IsObject() { continue } fields = append(fields, f) @@ -233,9 +233,9 @@ func parseFilterFieldsForDescriptionMap( func parseFilterFieldsForDescriptionSlice( conditions []any, - schema client.SchemaDescription, -) ([]client.FieldDescription, error) { - fields := make([]client.FieldDescription, 0) + schema client.CollectionDefinition, +) ([]client.FieldDefinition, error) { + fields := make([]client.FieldDefinition, 0) for _, v := range conditions { switch cond := v.(type) { case map[string]any: @@ -250,21 +250,3 @@ func parseFilterFieldsForDescriptionSlice( } return fields, nil } - -/* -userCollection := db.getCollection("users") -doc := userCollection.NewFromJSON("{ - "title": "Painted House", - "description": "...", - "genres": ["bae-123", "bae-def", "bae-456"] - "author_id": "bae-999", -}") -doc.Save() - -doc := document.New(schema).FromJSON - ------------------------------------- - - - -*/ diff --git a/request/graphql/parser/request.go b/request/graphql/parser/request.go index fca63bcac5..69d275de03 100644 --- a/request/graphql/parser/request.go +++ b/request/graphql/parser/request.go @@ -48,7 +48,7 @@ func ParseRequest(schema gql.Schema, doc *ast.Document) (*request.Request, []err } parsedDirectives, err := parseDirectives(astOpDef.Directives) - if errs != nil { + if err != nil { return nil, []error{err} } parsedQueryOpDef.Directives = parsedDirectives diff --git a/request/graphql/schema/collection.go b/request/graphql/schema/collection.go index 36f4d61c71..d9ebefa680 100644 --- a/request/graphql/schema/collection.go +++ b/request/graphql/schema/collection.go @@ -15,13 +15,14 @@ import ( "fmt" "sort" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/request/graphql/schema/types" - "github.com/sourcenetwork/graphql-go/language/ast" gqlp "github.com/sourcenetwork/graphql-go/language/parser" "github.com/sourcenetwork/graphql-go/language/source" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/request/graphql/schema/types" ) // FromString parses a GQL SDL string into a set of collection descriptions. @@ -42,11 +43,11 @@ func FromString(ctx context.Context, schemaString string) ( return nil, err } - return fromAst(ctx, doc) + return fromAst(doc) } // fromAst parses a GQL AST into a set of collection descriptions. 
-func fromAst(ctx context.Context, doc *ast.Document) (
+func fromAst(doc *ast.Document) (
 	[]client.CollectionDefinition,
 	error,
 ) {
@@ -56,7 +57,7 @@ func fromAst(ctx context.Context, doc *ast.Document) (
 	for _, def := range doc.Definitions {
 		switch defType := def.(type) {
 		case *ast.ObjectDefinition:
-			description, err := collectionFromAstDefinition(ctx, relationManager, defType)
+			description, err := collectionFromAstDefinition(relationManager, defType)
 			if err != nil {
 				return nil, err
 			}
@@ -64,7 +65,7 @@ func fromAst(ctx context.Context, doc *ast.Document) (
 			definitions = append(definitions, description)
 
 		case *ast.InterfaceDefinition:
-			description, err := schemaFromAstDefinition(ctx, relationManager, defType)
+			description, err := schemaFromAstDefinition(relationManager, defType)
 			if err != nil {
 				return nil, err
 			}
@@ -96,11 +97,10 @@ func fromAst(ctx context.Context, doc *ast.Document) (
 
 // collectionFromAstDefinition parses an AST object definition into a set of collection descriptions.
 func collectionFromAstDefinition(
-	ctx context.Context,
 	relationManager *RelationManager,
 	def *ast.ObjectDefinition,
 ) (client.CollectionDefinition, error) {
-	fieldDescriptions := []client.FieldDescription{
+	fieldDescriptions := []client.SchemaFieldDescription{
 		{
 			Name: request.DocIDFieldName,
 			Kind: client.FieldKind_DocID,
@@ -151,7 +151,7 @@
 
 	return client.CollectionDefinition{
 		Description: client.CollectionDescription{
-			Name:    def.Name.Value,
+			Name:    immutable.Some(def.Name.Value),
 			Indexes: indexDescriptions,
 		},
 		Schema: client.SchemaDescription{
@@ -162,11 +162,10 @@
 }
 
 func schemaFromAstDefinition(
-	ctx context.Context,
 	relationManager *RelationManager,
 	def *ast.InterfaceDefinition,
 ) (client.SchemaDescription, error) {
-	fieldDescriptions := []client.FieldDescription{}
+	fieldDescriptions := []client.SchemaFieldDescription{}
 
 	for _, field := range def.Fields {
 		tmpFieldsDescriptions, err := fieldsFromAST(field, relationManager, def.Name.Value)
@@ -211,8 +210,7 @@ func fieldIndexFromAST(field *ast.FieldDefinition, directive *ast.Directive) (cl
 	desc := client.IndexDescription{
 		Fields: []client.IndexedFieldDescription{
 			{
-				Name:      field.Name.Value,
-				Direction: client.Ascending,
+				Name: field.Name.Value,
 			},
 		},
 	}
@@ -233,6 +231,14 @@
 			return client.IndexDescription{}, ErrIndexWithInvalidArg
 		}
 		desc.Unique = boolVal.Value
+	case types.IndexDirectivePropDirection:
+		dirVal, ok := arg.Value.(*ast.EnumValue)
+		if !ok {
+			return client.IndexDescription{}, ErrIndexWithInvalidArg
+		}
+		if dirVal.Value == types.FieldOrderDESC {
+			desc.Fields[0].Descending = true
+		}
 	default:
 		return client.IndexDescription{}, ErrIndexWithUnknownArg
 	}
@@ -296,16 +302,12 @@ func indexFromAST(directive *ast.Directive) (client.IndexDescription, error) {
 			if !ok {
 				return client.IndexDescription{}, ErrIndexWithInvalidArg
 			}
-			if dirVal.Value == string(client.Ascending) {
-				desc.Fields[i].Direction = client.Ascending
-			} else if dirVal.Value == string(client.Descending) {
-				desc.Fields[i].Direction = client.Descending
+			if dirVal.Value == types.FieldOrderASC {
+				desc.Fields[i].Descending = false
+			} else if dirVal.Value == types.FieldOrderDESC {
+				desc.Fields[i].Descending = true
 			}
 		}
-	} else {
-		for i := range desc.Fields {
-			desc.Fields[i].Direction = client.Ascending
-		}
 	}
 	return desc, nil
 }
@@ -313,7 +315,7 @@ func 
fieldsFromAST(field *ast.FieldDefinition,
 	relationManager *RelationManager,
 	hostObjectName string,
-) ([]client.FieldDescription, error) {
+) ([]client.SchemaFieldDescription, error) {
 	kind, err := astTypeToKind(field.Type)
 	if err != nil {
 		return nil, err
@@ -321,28 +323,20 @@
 	schema := ""
 	relationName := ""
-	relationType := client.RelationType(0)
+	relationType := relationType(0)
 
-	fieldDescriptions := []client.FieldDescription{}
+	fieldDescriptions := []client.SchemaFieldDescription{}
 
 	if kind == client.FieldKind_FOREIGN_OBJECT || kind == client.FieldKind_FOREIGN_OBJECT_ARRAY {
 		if kind == client.FieldKind_FOREIGN_OBJECT {
 			schema = field.Type.(*ast.Named).Name.Value
-			relationType = client.Relation_Type_ONE
+			relationType = relation_Type_ONE
 			if _, exists := findDirective(field, "primary"); exists {
-				relationType |= client.Relation_Type_Primary
+				relationType |= relation_Type_Primary
 			}
-
-			// An _id field is added for every 1-N relationship from this object.
-			fieldDescriptions = append(fieldDescriptions, client.FieldDescription{
-				Name:         fmt.Sprintf("%s_id", field.Name.Value),
-				Kind:         client.FieldKind_DocID,
-				Typ:          defaultCRDTForFieldKind[client.FieldKind_DocID],
-				RelationType: client.Relation_Type_INTERNAL_ID,
-			})
 		} else if kind == client.FieldKind_FOREIGN_OBJECT_ARRAY {
 			schema = field.Type.(*ast.List).Type.(*ast.Named).Name.Value
-			relationType = client.Relation_Type_MANY
+			relationType = relation_Type_MANY
 		}
 
 		relationName, err = getRelationshipName(field, hostObjectName, schema)
@@ -350,6 +344,16 @@ func fieldsFromAST(field *ast.FieldDefinition,
 			return nil, err
 		}
 
+		if kind == client.FieldKind_FOREIGN_OBJECT {
+			// An _id field is added for every 1-N relationship from this object.
+			fieldDescriptions = append(fieldDescriptions, client.SchemaFieldDescription{
+				Name:         fmt.Sprintf("%s_id", field.Name.Value),
+				Kind:         client.FieldKind_DocID,
+				Typ:          defaultCRDTForFieldKind[client.FieldKind_DocID],
+				RelationName: relationName,
+			})
+		}
+
 		// Register the relationship so that the relationship manager can evaluate
 		// relationship properties dependent on both collections in the relationship.
_, err := relationManager.RegisterSingle( @@ -368,13 +372,12 @@ func fieldsFromAST(field *ast.FieldDefinition, return nil, err } - fieldDescription := client.FieldDescription{ + fieldDescription := client.SchemaFieldDescription{ Name: field.Name.Value, Kind: kind, Typ: cType, Schema: schema, RelationName: relationName, - RelationType: relationType, } fieldDescriptions = append(fieldDescriptions, fieldDescription) @@ -386,18 +389,15 @@ func setCRDTType(field *ast.FieldDefinition, kind client.FieldKind) (client.CTyp for _, arg := range directive.Arguments { switch arg.Name.Value { case "type": - cType := arg.Value.GetValue().(string) - switch cType { - case client.PN_COUNTER.String(): - if !client.PN_COUNTER.IsCompatibleWith(kind) { - return 0, client.NewErrCRDTKindMismatch(cType, kind.String()) - } - return client.PN_COUNTER, nil - case client.LWW_REGISTER.String(): - return client.LWW_REGISTER, nil - default: - return 0, client.NewErrInvalidCRDTType(field.Name.Value, cType) + cTypeString := arg.Value.GetValue().(string) + cType, validCRDTEnum := types.CRDTEnum.ParseValue(cTypeString).(client.CType) + if !validCRDTEnum { + return 0, client.NewErrInvalidCRDTType(field.Name.Value, cTypeString) + } + if !cType.IsCompatibleWith(kind) { + return 0, client.NewErrCRDTKindMismatch(cType.String(), kind.String()) } + return cType, nil } } } @@ -413,6 +413,7 @@ func astTypeToKind(t ast.Type) (client.FieldKind, error) { typeDateTime string = "DateTime" typeString string = "String" typeBlob string = "Blob" + typeJSON string = "JSON" ) switch astTypeVal := t.(type) { @@ -452,17 +453,19 @@ func astTypeToKind(t ast.Type) (client.FieldKind, error) { case typeID: return client.FieldKind_DocID, nil case typeBoolean: - return client.FieldKind_BOOL, nil + return client.FieldKind_NILLABLE_BOOL, nil case typeInt: - return client.FieldKind_INT, nil + return client.FieldKind_NILLABLE_INT, nil case typeFloat: - return client.FieldKind_FLOAT, nil + return client.FieldKind_NILLABLE_FLOAT, nil case typeDateTime: - return client.FieldKind_DATETIME, nil + return client.FieldKind_NILLABLE_DATETIME, nil case typeString: - return client.FieldKind_STRING, nil + return client.FieldKind_NILLABLE_STRING, nil case typeBlob: - return client.FieldKind_BLOB, nil + return client.FieldKind_NILLABLE_BLOB, nil + case typeJSON: + return client.FieldKind_NILLABLE_JSON, nil default: return client.FieldKind_FOREIGN_OBJECT, nil } @@ -513,14 +516,14 @@ func getRelationshipName( func finalizeRelations(relationManager *RelationManager, definitions []client.CollectionDefinition) error { embeddedObjNames := map[string]struct{}{} for _, def := range definitions { - if def.Description.Name == "" { + if !def.Description.Name.HasValue() { embeddedObjNames[def.Schema.Name] = struct{}{} } } for _, definition := range definitions { for i, field := range definition.Schema.Fields { - if field.RelationType == 0 || field.RelationType&client.Relation_Type_INTERNAL_ID != 0 { + if field.RelationName == "" || field.Kind == client.FieldKind_DocID { continue } @@ -529,7 +532,7 @@ func finalizeRelations(relationManager *RelationManager, definitions []client.Co return err } - _, fieldRelationType, ok := rel.GetField(field.Schema, field.Name) + _, fieldRelationType, ok := rel.getField(field.Schema, field.Name) if !ok { return NewErrRelationMissingField(field.Schema, field.Name) } @@ -545,7 +548,7 @@ func finalizeRelations(relationManager *RelationManager, definitions []client.Co return client.NewErrRelationOneSided(field.Name, field.Schema) } - field.RelationType = 
rel.Kind() | fieldRelationType + field.IsPrimaryRelation = fieldRelationType.isSet(relation_Type_Primary) definition.Schema.Fields[i] = field } } diff --git a/request/graphql/schema/descriptions.go b/request/graphql/schema/descriptions.go index 147c494c74..cb19140d26 100644 --- a/request/graphql/schema/descriptions.go +++ b/request/graphql/schema/descriptions.go @@ -25,15 +25,16 @@ var ( //nolint:unused gqlTypeToFieldKindReference = map[gql.Type]client.FieldKind{ gql.ID: client.FieldKind_DocID, - gql.Boolean: client.FieldKind_BOOL, - gql.Int: client.FieldKind_INT, - gql.Float: client.FieldKind_FLOAT, - gql.DateTime: client.FieldKind_DATETIME, - gql.String: client.FieldKind_STRING, + gql.Boolean: client.FieldKind_NILLABLE_BOOL, + gql.Int: client.FieldKind_NILLABLE_INT, + gql.Float: client.FieldKind_NILLABLE_FLOAT, + gql.DateTime: client.FieldKind_NILLABLE_DATETIME, + gql.String: client.FieldKind_NILLABLE_STRING, &gql.Object{}: client.FieldKind_FOREIGN_OBJECT, &gql.List{}: client.FieldKind_FOREIGN_OBJECT_ARRAY, // Custom scalars - schemaTypes.BlobScalarType: client.FieldKind_BLOB, + schemaTypes.BlobScalarType: client.FieldKind_NILLABLE_BLOB, + schemaTypes.JSONScalarType: client.FieldKind_NILLABLE_JSON, // More custom ones to come // - JSON // - Counters @@ -41,39 +42,41 @@ var ( fieldKindToGQLType = map[client.FieldKind]gql.Type{ client.FieldKind_DocID: gql.ID, - client.FieldKind_BOOL: gql.Boolean, + client.FieldKind_NILLABLE_BOOL: gql.Boolean, client.FieldKind_BOOL_ARRAY: gql.NewList(gql.NewNonNull(gql.Boolean)), client.FieldKind_NILLABLE_BOOL_ARRAY: gql.NewList(gql.Boolean), - client.FieldKind_INT: gql.Int, + client.FieldKind_NILLABLE_INT: gql.Int, client.FieldKind_INT_ARRAY: gql.NewList(gql.NewNonNull(gql.Int)), client.FieldKind_NILLABLE_INT_ARRAY: gql.NewList(gql.Int), - client.FieldKind_FLOAT: gql.Float, + client.FieldKind_NILLABLE_FLOAT: gql.Float, client.FieldKind_FLOAT_ARRAY: gql.NewList(gql.NewNonNull(gql.Float)), client.FieldKind_NILLABLE_FLOAT_ARRAY: gql.NewList(gql.Float), - client.FieldKind_DATETIME: gql.DateTime, - client.FieldKind_STRING: gql.String, + client.FieldKind_NILLABLE_DATETIME: gql.DateTime, + client.FieldKind_NILLABLE_STRING: gql.String, client.FieldKind_STRING_ARRAY: gql.NewList(gql.NewNonNull(gql.String)), client.FieldKind_NILLABLE_STRING_ARRAY: gql.NewList(gql.String), - client.FieldKind_BLOB: schemaTypes.BlobScalarType, + client.FieldKind_NILLABLE_BLOB: schemaTypes.BlobScalarType, + client.FieldKind_NILLABLE_JSON: schemaTypes.JSONScalarType, } // This map is fine to use defaultCRDTForFieldKind = map[client.FieldKind]client.CType{ client.FieldKind_DocID: client.LWW_REGISTER, - client.FieldKind_BOOL: client.LWW_REGISTER, + client.FieldKind_NILLABLE_BOOL: client.LWW_REGISTER, client.FieldKind_BOOL_ARRAY: client.LWW_REGISTER, client.FieldKind_NILLABLE_BOOL_ARRAY: client.LWW_REGISTER, - client.FieldKind_INT: client.LWW_REGISTER, + client.FieldKind_NILLABLE_INT: client.LWW_REGISTER, client.FieldKind_INT_ARRAY: client.LWW_REGISTER, client.FieldKind_NILLABLE_INT_ARRAY: client.LWW_REGISTER, - client.FieldKind_FLOAT: client.LWW_REGISTER, + client.FieldKind_NILLABLE_FLOAT: client.LWW_REGISTER, client.FieldKind_FLOAT_ARRAY: client.LWW_REGISTER, client.FieldKind_NILLABLE_FLOAT_ARRAY: client.LWW_REGISTER, - client.FieldKind_DATETIME: client.LWW_REGISTER, - client.FieldKind_STRING: client.LWW_REGISTER, + client.FieldKind_NILLABLE_DATETIME: client.LWW_REGISTER, + client.FieldKind_NILLABLE_STRING: client.LWW_REGISTER, client.FieldKind_STRING_ARRAY: client.LWW_REGISTER, 
client.FieldKind_NILLABLE_STRING_ARRAY: client.LWW_REGISTER, - client.FieldKind_BLOB: client.LWW_REGISTER, + client.FieldKind_NILLABLE_BLOB: client.LWW_REGISTER, + client.FieldKind_NILLABLE_JSON: client.LWW_REGISTER, client.FieldKind_FOREIGN_OBJECT: client.LWW_REGISTER, client.FieldKind_FOREIGN_OBJECT_ARRAY: client.NONE_CRDT, } diff --git a/request/graphql/schema/descriptions_test.go b/request/graphql/schema/descriptions_test.go index 397436bca2..93f6b36d48 100644 --- a/request/graphql/schema/descriptions_test.go +++ b/request/graphql/schema/descriptions_test.go @@ -14,6 +14,7 @@ import ( "context" "testing" + "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" "github.com/sourcenetwork/defradb/client" @@ -33,12 +34,12 @@ func TestSingleSimpleType(t *testing.T) { targetDescs: []client.CollectionDefinition{ { Description: client.CollectionDescription{ - Name: "User", + Name: immutable.Some("User"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "User", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -46,17 +47,17 @@ func TestSingleSimpleType(t *testing.T) { }, { Name: "age", - Kind: client.FieldKind_INT, + Kind: client.FieldKind_NILLABLE_INT, Typ: client.LWW_REGISTER, }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { Name: "verified", - Kind: client.FieldKind_BOOL, + Kind: client.FieldKind_NILLABLE_BOOL, Typ: client.LWW_REGISTER, }, }, @@ -82,12 +83,12 @@ func TestSingleSimpleType(t *testing.T) { targetDescs: []client.CollectionDefinition{ { Description: client.CollectionDescription{ - Name: "User", + Name: immutable.Some("User"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "User", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -95,17 +96,17 @@ func TestSingleSimpleType(t *testing.T) { }, { Name: "age", - Kind: client.FieldKind_INT, + Kind: client.FieldKind_NILLABLE_INT, Typ: client.LWW_REGISTER, }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { Name: "verified", - Kind: client.FieldKind_BOOL, + Kind: client.FieldKind_NILLABLE_BOOL, Typ: client.LWW_REGISTER, }, }, @@ -113,12 +114,12 @@ func TestSingleSimpleType(t *testing.T) { }, { Description: client.CollectionDescription{ - Name: "Author", + Name: immutable.Some("Author"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "Author", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -126,17 +127,17 @@ func TestSingleSimpleType(t *testing.T) { }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { Name: "publisher", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { Name: "rating", - Kind: client.FieldKind_FLOAT, + Kind: client.FieldKind_NILLABLE_FLOAT, Typ: client.LWW_REGISTER, }, }, @@ -162,12 +163,12 @@ func TestSingleSimpleType(t *testing.T) { targetDescs: []client.CollectionDefinition{ { Description: client.CollectionDescription{ - Name: "Book", + Name: immutable.Some("Book"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "Book", - Fields: []client.FieldDescription{ + 
Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -179,22 +180,20 @@ func TestSingleSimpleType(t *testing.T) { Kind: client.FieldKind_FOREIGN_OBJECT, Typ: client.NONE_CRDT, Schema: "Author", - RelationType: client.Relation_Type_ONE | client.Relation_Type_ONEONE, }, { - Name: "author_id", - Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, - RelationType: client.Relation_Type_INTERNAL_ID, + Name: "author_id", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { Name: "rating", - Kind: client.FieldKind_FLOAT, + Kind: client.FieldKind_NILLABLE_FLOAT, Typ: client.LWW_REGISTER, }, }, @@ -202,12 +201,12 @@ func TestSingleSimpleType(t *testing.T) { }, { Description: client.CollectionDescription{ - Name: "Author", + Name: immutable.Some("Author"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "Author", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -215,27 +214,26 @@ func TestSingleSimpleType(t *testing.T) { }, { Name: "age", - Kind: client.FieldKind_INT, + Kind: client.FieldKind_NILLABLE_INT, Typ: client.LWW_REGISTER, }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { - Name: "published", - RelationName: "author_book", - Kind: client.FieldKind_FOREIGN_OBJECT, - Typ: client.NONE_CRDT, - Schema: "Book", - RelationType: client.Relation_Type_ONE | client.Relation_Type_ONEONE | client.Relation_Type_Primary, + Name: "published", + RelationName: "author_book", + Kind: client.FieldKind_FOREIGN_OBJECT, + Typ: client.NONE_CRDT, + Schema: "Book", + IsPrimaryRelation: true, }, { - Name: "published_id", - Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, - RelationType: client.Relation_Type_INTERNAL_ID, + Name: "published_id", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, }, }, }, @@ -260,12 +258,12 @@ func TestSingleSimpleType(t *testing.T) { targetDescs: []client.CollectionDefinition{ { Description: client.CollectionDescription{ - Name: "User", + Name: immutable.Some("User"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "User", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -273,17 +271,17 @@ func TestSingleSimpleType(t *testing.T) { }, { Name: "age", - Kind: client.FieldKind_INT, + Kind: client.FieldKind_NILLABLE_INT, Typ: client.LWW_REGISTER, }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { Name: "verified", - Kind: client.FieldKind_BOOL, + Kind: client.FieldKind_NILLABLE_BOOL, Typ: client.LWW_REGISTER, }, }, @@ -291,12 +289,12 @@ func TestSingleSimpleType(t *testing.T) { }, { Description: client.CollectionDescription{ - Name: "Author", + Name: immutable.Some("Author"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "Author", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -304,17 +302,17 @@ func TestSingleSimpleType(t *testing.T) { }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { Name: "publisher", - Kind: client.FieldKind_STRING, + 
Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { Name: "rating", - Kind: client.FieldKind_FLOAT, + Kind: client.FieldKind_NILLABLE_FLOAT, Typ: client.LWW_REGISTER, }, }, @@ -340,12 +338,12 @@ func TestSingleSimpleType(t *testing.T) { targetDescs: []client.CollectionDefinition{ { Description: client.CollectionDescription{ - Name: "Book", + Name: immutable.Some("Book"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "Book", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -357,22 +355,20 @@ func TestSingleSimpleType(t *testing.T) { Kind: client.FieldKind_FOREIGN_OBJECT, Typ: client.NONE_CRDT, Schema: "Author", - RelationType: client.Relation_Type_ONE | client.Relation_Type_ONEONE, }, { - Name: "author_id", - Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, - RelationType: client.Relation_Type_INTERNAL_ID, + Name: "author_id", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { Name: "rating", - Kind: client.FieldKind_FLOAT, + Kind: client.FieldKind_NILLABLE_FLOAT, Typ: client.LWW_REGISTER, }, }, @@ -380,12 +376,12 @@ func TestSingleSimpleType(t *testing.T) { }, { Description: client.CollectionDescription{ - Name: "Author", + Name: immutable.Some("Author"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "Author", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -393,27 +389,26 @@ func TestSingleSimpleType(t *testing.T) { }, { Name: "age", - Kind: client.FieldKind_INT, + Kind: client.FieldKind_NILLABLE_INT, Typ: client.LWW_REGISTER, }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { - Name: "published", - RelationName: "book_authors", - Kind: client.FieldKind_FOREIGN_OBJECT, - Typ: client.NONE_CRDT, - Schema: "Book", - RelationType: client.Relation_Type_ONE | client.Relation_Type_ONEONE | client.Relation_Type_Primary, + Name: "published", + RelationName: "book_authors", + Kind: client.FieldKind_FOREIGN_OBJECT, + Typ: client.NONE_CRDT, + Schema: "Book", + IsPrimaryRelation: true, }, { - Name: "published_id", - Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, - RelationType: client.Relation_Type_INTERNAL_ID, + Name: "published_id", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, }, }, }, @@ -438,39 +433,38 @@ func TestSingleSimpleType(t *testing.T) { targetDescs: []client.CollectionDefinition{ { Description: client.CollectionDescription{ - Name: "Book", + Name: immutable.Some("Book"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "Book", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, Typ: client.NONE_CRDT, }, { - Name: "author", - RelationName: "author_book", - Kind: client.FieldKind_FOREIGN_OBJECT, - Typ: client.NONE_CRDT, - Schema: "Author", - RelationType: client.Relation_Type_ONE | client.Relation_Type_ONEONE | client.Relation_Type_Primary, + Name: "author", + RelationName: "author_book", + Kind: client.FieldKind_FOREIGN_OBJECT, + Typ: client.NONE_CRDT, + Schema: "Author", + IsPrimaryRelation: true, }, { - Name: "author_id", - Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, - 
RelationType: client.Relation_Type_INTERNAL_ID, + Name: "author_id", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { Name: "rating", - Kind: client.FieldKind_FLOAT, + Kind: client.FieldKind_NILLABLE_FLOAT, Typ: client.LWW_REGISTER, }, }, @@ -478,12 +472,12 @@ func TestSingleSimpleType(t *testing.T) { }, { Description: client.CollectionDescription{ - Name: "Author", + Name: immutable.Some("Author"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "Author", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -491,12 +485,12 @@ func TestSingleSimpleType(t *testing.T) { }, { Name: "age", - Kind: client.FieldKind_INT, + Kind: client.FieldKind_NILLABLE_INT, Typ: client.LWW_REGISTER, }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { @@ -505,13 +499,11 @@ func TestSingleSimpleType(t *testing.T) { Kind: client.FieldKind_FOREIGN_OBJECT, Typ: client.NONE_CRDT, Schema: "Book", - RelationType: client.Relation_Type_ONE | client.Relation_Type_ONEONE, }, { - Name: "published_id", - Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, - RelationType: client.Relation_Type_INTERNAL_ID, + Name: "published_id", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, }, }, }, @@ -536,39 +528,38 @@ func TestSingleSimpleType(t *testing.T) { targetDescs: []client.CollectionDefinition{ { Description: client.CollectionDescription{ - Name: "Book", + Name: immutable.Some("Book"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "Book", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, Typ: client.NONE_CRDT, }, { - Name: "author", - RelationName: "author_book", - Kind: client.FieldKind_FOREIGN_OBJECT, - Typ: client.NONE_CRDT, - Schema: "Author", - RelationType: client.Relation_Type_ONE | client.Relation_Type_ONEMANY | client.Relation_Type_Primary, + Name: "author", + RelationName: "author_book", + Kind: client.FieldKind_FOREIGN_OBJECT, + Typ: client.NONE_CRDT, + Schema: "Author", + IsPrimaryRelation: true, }, { - Name: "author_id", - Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, - RelationType: client.Relation_Type_INTERNAL_ID, + Name: "author_id", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { Name: "rating", - Kind: client.FieldKind_FLOAT, + Kind: client.FieldKind_NILLABLE_FLOAT, Typ: client.LWW_REGISTER, }, }, @@ -576,12 +567,12 @@ func TestSingleSimpleType(t *testing.T) { }, { Description: client.CollectionDescription{ - Name: "Author", + Name: immutable.Some("Author"), Indexes: []client.IndexDescription{}, }, Schema: client.SchemaDescription{ Name: "Author", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -589,12 +580,12 @@ func TestSingleSimpleType(t *testing.T) { }, { Name: "age", - Kind: client.FieldKind_INT, + Kind: client.FieldKind_NILLABLE_INT, Typ: client.LWW_REGISTER, }, { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { @@ -603,7 +594,6 @@ func TestSingleSimpleType(t 
*testing.T) { Kind: client.FieldKind_FOREIGN_OBJECT_ARRAY, Typ: client.NONE_CRDT, Schema: "Book", - RelationType: client.Relation_Type_MANY | client.Relation_Type_ONEMANY, }, }, }, diff --git a/request/graphql/schema/generate.go b/request/graphql/schema/generate.go index 1083772d58..e4397e2e40 100644 --- a/request/graphql/schema/generate.go +++ b/request/graphql/schema/generate.go @@ -82,7 +82,7 @@ func (g *Generator) Generate(ctx context.Context, collections []client.Collectio // the given CollectionDescriptions. func (g *Generator) generate(ctx context.Context, collections []client.CollectionDefinition) ([]*gql.Object, error) { // build base types - defs, err := g.buildTypes(ctx, collections) + defs, err := g.buildTypes(collections) if err != nil { return nil, err } @@ -108,7 +108,7 @@ func (g *Generator) generate(ctx context.Context, collections []client.Collectio var isEmbedded bool for _, definition := range collections { - if t.Name() == definition.Schema.Name && definition.Description.Name == "" { + if t.Name() == definition.Schema.Name && !definition.Description.Name.HasValue() { isEmbedded = true break } @@ -129,7 +129,7 @@ func (g *Generator) generate(ctx context.Context, collections []client.Collectio return nil, err } - if err := g.genAggregateFields(ctx); err != nil { + if err := g.genAggregateFields(); err != nil { return nil, err } // resolve types @@ -194,8 +194,8 @@ func (g *Generator) generate(ctx context.Context, collections []client.Collectio var isReadOnly bool var collectionFound bool for _, definition := range collections { - if t.Name() == definition.Description.Name { - isReadOnly = definition.Description.BaseQuery != nil + if t.Name() == definition.Description.Name.Value() { + isReadOnly = len(definition.Description.QuerySources()) > 0 collectionFound = true break } @@ -403,7 +403,6 @@ func (g *Generator) createExpandedFieldList( // Given a set of developer defined collection types // extract and return the correct gql.Object type(s) func (g *Generator) buildTypes( - ctx context.Context, collections []client.CollectionDefinition, ) ([]*gql.Object, error) { // @todo: Check for duplicate named defined types in the TypeMap @@ -416,15 +415,16 @@ func (g *Generator) buildTypes( // TODO remove when Go 1.22 collection := c fieldDescriptions := collection.Schema.Fields - isEmbeddedObject := collection.Description.Name == "" - isViewObject := isEmbeddedObject || collection.Description.BaseQuery != nil + isEmbeddedObject := !collection.Description.Name.HasValue() + isQuerySource := len(collection.Description.QuerySources()) > 0 + isViewObject := isEmbeddedObject || isQuerySource var objectName string if isEmbeddedObject { // If this is an embedded object, take the type name from the Schema objectName = collection.Schema.Name } else { - objectName = collection.Description.Name + objectName = collection.Description.Name.Value() } // check if type exists @@ -529,7 +529,7 @@ func (g *Generator) buildTypes( // for collection create and update mutation operations. func (g *Generator) buildMutationInputTypes(collections []client.CollectionDefinition) error { for _, c := range collections { - if c.Description.Name == "" { + if !c.Description.Name.HasValue() { // If the definition's collection is empty, this must be a collectionless // schema, in which case users cannot mutate documents through it and we // have no need to build mutation input types for it. 
@@ -541,7 +541,7 @@ func (g *Generator) buildMutationInputTypes(collections []client.CollectionDefin // TODO remove when Go 1.22 collection := c fieldDescriptions := collection.Schema.Fields - mutationInputName := collection.Description.Name + "MutationInputArg" + mutationInputName := collection.Description.Name.Value() + "MutationInputArg" // check if mutation input type exists if _, ok := g.manager.schema.TypeMap()[mutationInputName]; ok { @@ -593,7 +593,7 @@ func (g *Generator) buildMutationInputTypes(collections []client.CollectionDefin return nil } -func (g *Generator) genAggregateFields(ctx context.Context) error { +func (g *Generator) genAggregateFields() error { topLevelCountInputs := map[string]*gql.InputObject{} topLevelNumericAggInputs := map[string]*gql.InputObject{} @@ -1013,7 +1013,7 @@ func (g *Generator) GenerateQueryInputForGQLType( types.groupBy = g.genTypeFieldsEnum(obj) types.order = g.genTypeOrderArgInput(obj) - queryField := g.genTypeQueryableFieldList(ctx, obj, types) + queryField := g.genTypeQueryableFieldList(obj, types) return queryField, nil } @@ -1248,7 +1248,6 @@ type queryInputTypeConfig struct { } func (g *Generator) genTypeQueryableFieldList( - ctx context.Context, obj *gql.Object, config queryInputTypeConfig, ) *gql.Field { diff --git a/request/graphql/schema/index_parse_test.go b/request/graphql/schema/index_parse_test.go index ca1ce32696..8204c2d0ec 100644 --- a/request/graphql/schema/index_parse_test.go +++ b/request/graphql/schema/index_parse_test.go @@ -28,7 +28,7 @@ func TestParseIndexOnStruct(t *testing.T) { { Name: "", Fields: []client.IndexedFieldDescription{ - {Name: "name", Direction: client.Ascending}, + {Name: "name"}, }, Unique: false, }, @@ -41,7 +41,7 @@ func TestParseIndexOnStruct(t *testing.T) { { Name: "userIndex", Fields: []client.IndexedFieldDescription{ - {Name: "name", Direction: client.Ascending}, + {Name: "name"}, }, }, }, @@ -52,7 +52,7 @@ func TestParseIndexOnStruct(t *testing.T) { targetDescriptions: []client.IndexDescription{ { Fields: []client.IndexedFieldDescription{ - {Name: "name", Direction: client.Ascending}, + {Name: "name"}, }, Unique: true, }, @@ -64,7 +64,7 @@ func TestParseIndexOnStruct(t *testing.T) { targetDescriptions: []client.IndexDescription{ { Fields: []client.IndexedFieldDescription{ - {Name: "name", Direction: client.Ascending}, + {Name: "name"}, }, Unique: false, }, @@ -76,7 +76,7 @@ func TestParseIndexOnStruct(t *testing.T) { targetDescriptions: []client.IndexDescription{ { Fields: []client.IndexedFieldDescription{ - {Name: "name", Direction: client.Ascending}}, + {Name: "name"}}, }, }, }, @@ -86,7 +86,7 @@ func TestParseIndexOnStruct(t *testing.T) { targetDescriptions: []client.IndexDescription{ { Fields: []client.IndexedFieldDescription{ - {Name: "name", Direction: client.Descending}}, + {Name: "name", Descending: true}}, }, }, }, @@ -96,8 +96,8 @@ func TestParseIndexOnStruct(t *testing.T) { targetDescriptions: []client.IndexDescription{ { Fields: []client.IndexedFieldDescription{ - {Name: "name", Direction: client.Ascending}, - {Name: "age", Direction: client.Ascending}, + {Name: "name"}, + {Name: "age"}, }, }, }, @@ -108,8 +108,8 @@ func TestParseIndexOnStruct(t *testing.T) { targetDescriptions: []client.IndexDescription{ { Fields: []client.IndexedFieldDescription{ - {Name: "name", Direction: client.Ascending}, - {Name: "age", Direction: client.Descending}, + {Name: "name"}, + {Name: "age", Descending: true}, }, }, }, @@ -216,7 +216,7 @@ func TestParseIndexOnField(t *testing.T) { { Name: "", Fields: 
[]client.IndexedFieldDescription{ - {Name: "name", Direction: client.Ascending}, + {Name: "name"}, }, Unique: false, }, @@ -231,7 +231,7 @@ func TestParseIndexOnField(t *testing.T) { { Name: "nameIndex", Fields: []client.IndexedFieldDescription{ - {Name: "name", Direction: client.Ascending}, + {Name: "name"}, }, Unique: false, }, @@ -245,7 +245,7 @@ func TestParseIndexOnField(t *testing.T) { targetDescriptions: []client.IndexDescription{ { Fields: []client.IndexedFieldDescription{ - {Name: "name", Direction: client.Ascending}, + {Name: "name"}, }, Unique: true, }, @@ -259,7 +259,35 @@ func TestParseIndexOnField(t *testing.T) { targetDescriptions: []client.IndexDescription{ { Fields: []client.IndexedFieldDescription{ - {Name: "name", Direction: client.Ascending}, + {Name: "name"}, + }, + Unique: false, + }, + }, + }, + { + description: "field index in ASC order", + sdl: `type user { + name: String @index(direction: ASC) + }`, + targetDescriptions: []client.IndexDescription{ + { + Fields: []client.IndexedFieldDescription{ + {Name: "name"}, + }, + Unique: false, + }, + }, + }, + { + description: "field index in DESC order", + sdl: `type user { + name: String @index(direction: DESC) + }`, + targetDescriptions: []client.IndexDescription{ + { + Fields: []client.IndexedFieldDescription{ + {Name: "name", Descending: true}, }, Unique: false, }, @@ -281,13 +309,6 @@ func TestParseInvalidIndexOnField(t *testing.T) { }`, expectedErr: errIndexUnknownArgument, }, - { - description: "forbidden 'direction' argument", - sdl: `type user { - name: String @index(direction: ASC) - }`, - expectedErr: errIndexUnknownArgument, - }, { description: "invalid field index name type", sdl: `type user { diff --git a/request/graphql/schema/manager.go b/request/graphql/schema/manager.go index f44b770fcb..89860d2c53 100644 --- a/request/graphql/schema/manager.go +++ b/request/graphql/schema/manager.go @@ -111,9 +111,12 @@ func defaultMutationType() *gql.Object { // default directives type. func defaultDirectivesType() []*gql.Directive { return []*gql.Directive{ + schemaTypes.CRDTFieldDirective, schemaTypes.ExplainDirective, schemaTypes.IndexDirective, schemaTypes.IndexFieldDirective, + schemaTypes.PrimaryDirective, + schemaTypes.RelationDirective, } } @@ -143,6 +146,7 @@ func defaultTypes() []gql.Type { // Custom Scalar types schemaTypes.BlobScalarType, + schemaTypes.JSONScalarType, // Base Query types @@ -165,6 +169,7 @@ func defaultTypes() []gql.Type { schemaTypes.CommitLinkObject, schemaTypes.CommitObject, + schemaTypes.CRDTEnum, schemaTypes.ExplainEnum, } } diff --git a/request/graphql/schema/relations.go b/request/graphql/schema/relations.go index 87d6c7216d..e6d2af8b09 100644 --- a/request/graphql/schema/relations.go +++ b/request/graphql/schema/relations.go @@ -17,6 +17,20 @@ import ( "github.com/sourcenetwork/defradb/client" ) +// relationType describes the type of relation between two types. +type relationType uint8 + +const ( + relation_Type_ONE relationType = 1 // 0b0000 0001 + relation_Type_MANY relationType = 2 // 0b0000 0010 + relation_Type_Primary relationType = 128 // 0b1000 0000 Primary reference entity on relation +) + +// isSet returns true if the target relation type is set.
+func (m relationType) isSet(target relationType) bool { + return m&target > 0 +} + // RelationManager keeps track of all the relations that exist // between schema types type RelationManager struct { @@ -45,29 +59,18 @@ func (rm *RelationManager) RegisterSingle( name string, schemaType string, schemaField string, - relType client.RelationType, + relType relationType, ) (bool, error) { if name == "" { return false, client.NewErrUninitializeProperty("RegisterSingle", "name") } - // make sure the relation type is ONLY One or Many, not both - if relType.IsSet(client.Relation_Type_ONE) == relType.IsSet(client.Relation_Type_MANY) { - return false, ErrRelationMutlipleTypes - } - - // make a copy of rel type, one goes to the relation.relType, and the other goes into the []types. - // We need to clear the Primary bit on the relation.relType so we make a copy - rt := relType - rt &^= client.Relation_Type_Primary // clear the primary bit - rel, ok := rm.relations[name] if !ok { // If a relation doesn't exist then make one. rm.relations[name] = &Relation{ name: name, - relType: rt, - types: []client.RelationType{relType}, + types: []relationType{relType}, schemaTypes: []string{schemaType}, fields: []string{schemaField}, } @@ -76,22 +79,6 @@ func (rm *RelationManager) RegisterSingle( if !rel.finalized { // If a relation exists and is not finalized, then finalize it. - - // handle relationType, needs to be either One-to-One, One-to-Many, Many-to-Many. - if rel.relType.IsSet(client.Relation_Type_ONE) { - if relType.IsSet(client.Relation_Type_ONE) { // One-to-One - rel.relType = client.Relation_Type_ONEONE - } else if relType.IsSet(client.Relation_Type_MANY) { - rel.relType = client.Relation_Type_ONEMANY - } - } else { // many - if relType.IsSet(client.Relation_Type_ONE) { - rel.relType = client.Relation_Type_ONEMANY - } else if relType.IsSet(client.Relation_Type_MANY) { - rel.relType = client.Relation_Type_MANYMANY - } - } - rel.types = append(rel.types, relType) rel.schemaTypes = append(rel.schemaTypes, schemaType) rel.fields = append(rel.fields, schemaField) @@ -107,8 +94,7 @@ func (rm *RelationManager) RegisterSingle( type Relation struct { name string - relType client.RelationType - types []client.RelationType + types []relationType schemaTypes []string fields []string @@ -123,57 +109,42 @@ func (r *Relation) finalize() error { return ErrRelationMissingTypes } - // make sure its one of One-to-One, One-to-Many, Many-to-Many - if !r.relType.IsSet(client.Relation_Type_ONEONE) && - !r.relType.IsSet(client.Relation_Type_ONEMANY) && - !r.relType.IsSet(client.Relation_Type_MANYMANY) { - return ErrRelationInvalidType - } - - // make sure we have a primary set if its a one-to-one or many-to-many - if IsOneToOne(r.relType) || IsManyToMany(r.relType) { + if isOne(r.types[0]) && isMany(r.types[1]) { + r.types[0] |= relation_Type_Primary // set primary on one + r.types[1] &^= relation_Type_Primary // clear primary on many + } else if isOne(r.types[1]) && isMany(r.types[0]) { + r.types[1] |= relation_Type_Primary // set primary on one + r.types[0] &^= relation_Type_Primary // clear primary on many + } else if isOne(r.types[1]) && isOne(r.types[0]) { t1, t2 := r.types[0], r.types[1] aBit := t1 & t2 xBit := t1 ^ t2 // both types have primary set - if aBit.IsSet(client.Relation_Type_Primary) { + if aBit.isSet(relation_Type_Primary) { return ErrMultipleRelationPrimaries - } else if !xBit.IsSet(client.Relation_Type_Primary) { + } else if !xBit.isSet(relation_Type_Primary) { // neither type has primary set, auto add
to // lexicographically first one by schema type name if strings.Compare(r.schemaTypes[0], r.schemaTypes[1]) < 1 { - r.types[1] = r.types[1] | client.Relation_Type_Primary + r.types[1] = r.types[1] | relation_Type_Primary } else { - r.types[0] = r.types[0] | client.Relation_Type_Primary + r.types[0] = r.types[0] | relation_Type_Primary } } - } else if IsOneToMany(r.relType) { // if its a one-to-many, set the one side as primary - if IsOne(r.types[0]) { - r.types[0] |= client.Relation_Type_Primary // set primary on one - r.types[1] &^= client.Relation_Type_Primary // clear primary on many - } else { - r.types[1] |= client.Relation_Type_Primary // set primary on one - r.types[0] &^= client.Relation_Type_Primary // clear primary on many - } } r.finalized = true return nil } -// Kind returns what type of relation it is -func (r Relation) Kind() client.RelationType { - return r.relType -} - -func (r Relation) GetField(schemaType string, field string) (string, client.RelationType, bool) { +func (r Relation) getField(schemaType string, field string) (string, relationType, bool) { for i, f := range r.fields { if f == field && r.schemaTypes[i] == schemaType { return f, r.types[i], true } } - return "", client.RelationType(0), false + return "", relationType(0), false } func genRelationName(t1, t2 string) (string, error) { @@ -189,55 +160,12 @@ func genRelationName(t1, t2 string) (string, error) { return fmt.Sprintf("%s_%s", t2, t1), nil } -// IsOne returns true if the Relation_ONE bit is set -func IsOne(fieldmeta client.RelationType) bool { - return fieldmeta.IsSet(client.Relation_Type_ONE) -} - -// IsOneToOne returns true if the Relation_ONEONE bit is set -func IsOneToOne(fieldmeta client.RelationType) bool { - return fieldmeta.IsSet(client.Relation_Type_ONEONE) -} - -// IsOneToMany returns true if the Relation_ONEMANY is set -func IsOneToMany(fieldmeta client.RelationType) bool { - return fieldmeta.IsSet(client.Relation_Type_ONEMANY) -} - -// IsManyToMany returns true if the Relation_MANYMANY bit is set -func IsManyToMany(fieldmeta client.RelationType) bool { - return fieldmeta.IsSet(client.Relation_Type_MANYMANY) +// isOne returns true if the relation_Type_ONE bit is set +func isOne(fieldmeta relationType) bool { + return fieldmeta.isSet(relation_Type_ONE) } -/* Example usage - -rm := NewRelationManager() - -type book { - name: String - rating: Float - author: author -} - -type author { - name: String - age: Int - verified: Boolean - published: [book] +// isMany returns true if the relation_Type_MANY bit is set +func isMany(fieldmeta relationType) bool { + return fieldmeta.isSet(relation_Type_MANY) } - -Relation names are autogenerated. They are the combination of each type -in the relation, sorted alphabetically. - -Relations: -name: author_book | related types: author, book | fields: author, published - (index same as types) type: one-to-many. - -rm.GetRelations(type) returns all the relations containing that type -rel := rm.GetRelation(name) returns the exact relation (if it exists) between those types -rel.IsPrimary(type) => bool, error -rel.IsOne(type) => bool, error -rel.IsMany(type) => bool, error -rel.Type() OneToOne | OneToMany | ManyToOne?
| ManyToMany - -*/ diff --git a/request/graphql/schema/relations_test.go b/request/graphql/schema/relations_test.go deleted file mode 100644 index 50d50baa44..0000000000 --- a/request/graphql/schema/relations_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package schema - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/client" -) - -func TestSimpleOneToOneFromSingle(t *testing.T) { - rm := NewRelationManager() - - /* - type Book { - title: String - author: Author - } - - type Author { - name: String - published: Book - } - - // without explicit @primary directive - // Author is auto set to primary - */ - relName1, err := genRelationName("Book", "Author") - assert.NoError(t, err) - rm.RegisterSingle(relName1, "Author", "author", client.Relation_Type_ONE) - - relName2, err := genRelationName("Author", "Book") - assert.NoError(t, err) - assert.Equal(t, relName1, relName2) - rm.RegisterSingle(relName2, "Book", "published", client.Relation_Type_ONE) - - rel, err := rm.GetRelation(relName1) - assert.NoError(t, err) - assert.Equal(t, rel.relType, client.Relation_Type_ONEONE) -} - -func TestSimpleOneToOnePrimaryFromSingle(t *testing.T) { - rm := NewRelationManager() - - /* - type Book { - title: String - author: Author - } - - type Author { - name: String - published: Book - } - - // without explicit @primary directive - // Author is auto set to primary - */ - relName1, err := genRelationName("Book", "Author") - assert.NoError(t, err) - rm.RegisterSingle(relName1, "Author", "author", client.Relation_Type_ONE) - - relName2, err := genRelationName("Author", "Book") - assert.NoError(t, err) - assert.Equal(t, relName1, relName2) - rm.RegisterSingle( - relName2, - "Book", - "published", - client.Relation_Type_ONE|client.Relation_Type_Primary, - ) - - rel, err := rm.GetRelation(relName1) - assert.NoError(t, err) - assert.Equal(t, rel.relType, client.Relation_Type_ONEONE) -} diff --git a/request/graphql/schema/types/base.go b/request/graphql/schema/types/base.go index b348a564f8..83aa11c55d 100644 --- a/request/graphql/schema/types/base.go +++ b/request/graphql/schema/types/base.go @@ -291,6 +291,14 @@ var StringOperatorBlock = gql.NewInputObject(gql.InputObjectConfig{ Description: nlikeStringOperatorDescription, Type: gql.String, }, + "_ilike": &gql.InputObjectFieldConfig{ + Description: ilikeStringOperatorDescription, + Type: gql.String, + }, + "_nilike": &gql.InputObjectFieldConfig{ + Description: nilikeStringOperatorDescription, + Type: gql.String, + }, }, }) @@ -323,6 +331,14 @@ var NotNullstringOperatorBlock = gql.NewInputObject(gql.InputObjectConfig{ Description: nlikeStringOperatorDescription, Type: gql.String, }, + "_ilike": &gql.InputObjectFieldConfig{ + Description: ilikeStringOperatorDescription, + Type: gql.String, + }, + "_nilike": &gql.InputObjectFieldConfig{ + Description: nilikeStringOperatorDescription, + Type: gql.String, + }, }, }) diff --git a/request/graphql/schema/types/commits.go b/request/graphql/schema/types/commits.go index 1e8d6b5bb4..99373a367c 100644 --- a/request/graphql/schema/types/commits.go +++ b/request/graphql/schema/types/commits.go @@ 
-47,11 +47,11 @@ var ( Name: request.CommitTypeName, Description: commitDescription, Fields: gql.Fields{ - "height": &gql.Field{ + request.HeightFieldName: &gql.Field{ Description: commitHeightFieldDescription, Type: gql.Int, }, - "cid": &gql.Field{ + request.CidFieldName: &gql.Field{ Description: commitCIDFieldDescription, Type: gql.String, }, @@ -59,35 +59,35 @@ var ( Description: commitDocIDFieldDescription, Type: gql.String, }, - "collectionID": &gql.Field{ + request.CollectionIDFieldName: &gql.Field{ Description: commitCollectionIDFieldDescription, Type: gql.Int, }, - "schemaVersionId": &gql.Field{ + request.SchemaVersionIDFieldName: &gql.Field{ Description: commitSchemaVersionIDFieldDescription, Type: gql.String, }, - "fieldName": &gql.Field{ + request.FieldNameFieldName: &gql.Field{ Description: commitFieldNameFieldDescription, Type: gql.String, }, - "fieldId": &gql.Field{ + request.FieldIDFieldName: &gql.Field{ Type: gql.String, Description: commitFieldIDFieldDescription, }, - "delta": &gql.Field{ + request.DeltaFieldName: &gql.Field{ Description: commitDeltaFieldDescription, Type: gql.String, }, - "links": &gql.Field{ + request.LinksFieldName: &gql.Field{ Description: commitLinksDescription, Type: gql.NewList(CommitLinkObject), }, - "_count": &gql.Field{ + request.CountFieldName: &gql.Field{ Description: CountFieldDescription, Type: gql.Int, Args: gql.FieldConfigArgument{ - "field": &gql.ArgumentConfig{ + request.FieldName: &gql.ArgumentConfig{ Type: commitCountFieldArg, }, }, diff --git a/request/graphql/schema/types/descriptions.go b/request/graphql/schema/types/descriptions.go index 42c1ba956e..27cd3a6f74 100644 --- a/request/graphql/schema/types/descriptions.go +++ b/request/graphql/schema/types/descriptions.go @@ -202,6 +202,16 @@ The like operator - if the target value contains the given sub-string the check will pass. '%' characters may be used as wildcards, for example '_nlike: "%Ritchie"' would match on the string 'Quentin Tarantino'. +` + ilikeStringOperatorDescription string = ` +The case-insensitive like operator - if the target value contains the given case-insensitive sub-string the check + will pass. '%' characters may be used as wildcards, for example '_ilike: "%ritchie"' would match on strings + ending in 'Ritchie'. +` + nilikeStringOperatorDescription string = ` +The case-insensitive not-like operator - if the target value does not contain the given case-insensitive sub-string + the check will pass. '%' characters may be used as wildcards, for example '_nilike: "%ritchie"' would match on + the string 'Quentin Tarantino'. +` AndOperatorDescription string = ` The and operator - all checks within this clause must pass in order for this check to pass. @@ -217,6 +227,9 @@ Sort the results in ascending order, e.g. null,1,2,3,a,b,c. ` descOrderDescription string = ` Sort the results in descending order, e.g. c,b,a,3,2,1,null. +` + crdtDirectiveDescription string = ` +Allows the explicit definition of a field's CRDT type. By default it is defined as LWWRegister. ` primaryDirectiveDescription string = ` Indicate the primary side of a one-to-one relationship.
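As a usage illustration for the two case-insensitive operators defined above, a minimal GraphQL sketch (the User collection and its String name field are assumed here for the example and are not part of this change set):

query {
  User(filter: {name: {_ilike: "%ritchie"}}) {
    name
  }
}

Here _ilike passes documents whose name ends in "ritchie" regardless of letter case, while _nilike inverts the check and passes only documents whose name does not contain the given pattern.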
diff --git a/request/graphql/schema/types/scalars.go b/request/graphql/schema/types/scalars.go index a0e9dca369..1d944a0f73 100644 --- a/request/graphql/schema/types/scalars.go +++ b/request/graphql/schema/types/scalars.go @@ -16,6 +16,7 @@ import ( "github.com/sourcenetwork/graphql-go" "github.com/sourcenetwork/graphql-go/language/ast" + "github.com/valyala/fastjson" ) // BlobPattern is a regex for validating blob hex strings @@ -63,3 +64,55 @@ var BlobScalarType = graphql.NewScalar(graphql.ScalarConfig{ } }, }) + +// coerceJSON converts the given value into a valid json string. +// If the value cannot be converted, nil is returned. +func coerceJSON(value any) any { + switch value := value.(type) { + case []byte: + err := fastjson.ValidateBytes(value) + if err != nil { + // ignore this error because the value + // cannot be converted to a json string + return nil + } + return string(value) + + case *[]byte: + return coerceJSON(*value) + + case string: + err := fastjson.Validate(value) + if err != nil { + // ignore this error because the value + // cannot be converted to a json string + return nil + } + return value + + case *string: + return coerceJSON(*value) + + default: + return nil + } +} + +var JSONScalarType = graphql.NewScalar(graphql.ScalarConfig{ + Name: "JSON", + Description: "The `JSON` scalar type represents a JSON string.", + // Serialize converts the value to a json string + Serialize: coerceJSON, + // ParseValue converts the value to a json string + ParseValue: coerceJSON, + // ParseLiteral converts the ast value to a json string + ParseLiteral: func(valueAST ast.Value) any { + switch valueAST := valueAST.(type) { + case *ast.StringValue: + return coerceJSON(valueAST.Value) + default: + // return nil if the value cannot be parsed + return nil + } + }, +}) diff --git a/request/graphql/schema/types/scalars_test.go b/request/graphql/schema/types/scalars_test.go index 5126f2e6a2..6be3fa23fa 100644 --- a/request/graphql/schema/types/scalars_test.go +++ b/request/graphql/schema/types/scalars_test.go @@ -86,3 +86,86 @@ func TestBlobScalarTypeParseLiteral(t *testing.T) { assert.Equal(t, c.expect, result) } } + +func TestJSONScalarTypeParseAndSerialize(t *testing.T) { + validString := `"hello"` + validBytes := []byte(`"hello"`) + + boolString := "true" + boolBytes := []byte("true") + + intString := "0" + intBytes := []byte("0") + + floatString := "3.14" + floatBytes := []byte("3.14") + + objectString := `{"name": "Bob"}` + objectBytes := []byte(`{"name": "Bob"}`) + + invalidString := "invalid" + invalidBytes := []byte("invalid") + + cases := []struct { + input any + expect any + }{ + {validString, `"hello"`}, + {&validString, `"hello"`}, + {validBytes, `"hello"`}, + {&validBytes, `"hello"`}, + {boolString, "true"}, + {&boolString, "true"}, + {boolBytes, "true"}, + {&boolBytes, "true"}, + {[]byte("true"), "true"}, + {[]byte("false"), "false"}, + {intString, "0"}, + {&intString, "0"}, + {intBytes, "0"}, + {&intBytes, "0"}, + {floatString, "3.14"}, + {&floatString, "3.14"}, + {floatBytes, "3.14"}, + {&floatBytes, "3.14"}, + {invalidString, nil}, + {&invalidString, nil}, + {invalidBytes, nil}, + {&invalidBytes, nil}, + {objectString, `{"name": "Bob"}`}, + {&objectString, `{"name": "Bob"}`}, + {objectBytes, `{"name": "Bob"}`}, + {&objectBytes, `{"name": "Bob"}`}, + {nil, nil}, + {0, nil}, + {false, nil}, + } + for _, c := range cases { + parsed := JSONScalarType.ParseValue(c.input) + assert.Equal(t, c.expect, parsed) + + serialized := JSONScalarType.Serialize(c.input) + assert.Equal(t,
c.expect, serialized) + } +} + +func TestJSONScalarTypeParseLiteral(t *testing.T) { + cases := []struct { + input ast.Value + expect any + }{ + {&ast.StringValue{Value: "0"}, "0"}, + {&ast.StringValue{Value: "invalid"}, nil}, + {&ast.IntValue{}, nil}, + {&ast.BooleanValue{}, nil}, + {&ast.NullValue{}, nil}, + {&ast.EnumValue{}, nil}, + {&ast.FloatValue{}, nil}, + {&ast.ListValue{}, nil}, + {&ast.ObjectValue{}, nil}, + } + for _, c := range cases { + result := JSONScalarType.ParseLiteral(c.input) + assert.Equal(t, c.expect, result) + } +} diff --git a/request/graphql/schema/types/types.go b/request/graphql/schema/types/types.go index 065dadaa6d..37cb840d05 100644 --- a/request/graphql/schema/types/types.go +++ b/request/graphql/schema/types/types.go @@ -12,6 +12,8 @@ package types import ( gql "github.com/sourcenetwork/graphql-go" + + "github.com/sourcenetwork/defradb/client" ) const ( @@ -24,11 +26,18 @@ const ( ExplainArgExecute string = "execute" ExplainArgDebug string = "debug" + CRDTDirectiveLabel = "crdt" + CRDTDirectivePropType = "type" + IndexDirectiveLabel = "index" IndexDirectivePropName = "name" IndexDirectivePropUnique = "unique" IndexDirectivePropFields = "fields" + IndexDirectivePropDirection = "direction" IndexDirectivePropDirections = "directions" + + FieldOrderASC = "ASC" + FieldOrderDESC = "DESC" ) var ( @@ -111,6 +120,41 @@ var ( IndexDirectivePropName: &gql.ArgumentConfig{ Type: gql.String, }, + IndexDirectivePropUnique: &gql.ArgumentConfig{ + Type: gql.Boolean, + }, + IndexDirectivePropDirection: &gql.ArgumentConfig{ + Type: OrderingEnum, + }, + }, + Locations: []string{ + gql.DirectiveLocationField, + }, + }) + + CRDTEnum = gql.NewEnum(gql.EnumConfig{ + Name: "CRDTType", + Description: "One of the possible CRDT Types.", + Values: gql.EnumValueConfigMap{ + client.LWW_REGISTER.String(): &gql.EnumValueConfig{ + Value: client.LWW_REGISTER, + Description: "Last Write Wins register", + }, + client.PN_COUNTER.String(): &gql.EnumValueConfig{ + Value: client.PN_COUNTER, + Description: "Positive-Negative Counter", + }, + }, + }) + + // CRDTFieldDirective @crdt is used to define the CRDT type of a field + CRDTFieldDirective *gql.Directive = gql.NewDirective(gql.DirectiveConfig{ + Name: CRDTDirectiveLabel, + Description: crdtDirectiveDescription, + Args: gql.FieldConfigArgument{ + CRDTDirectivePropType: &gql.ArgumentConfig{ + Type: CRDTEnum, + }, }, Locations: []string{ gql.DirectiveLocationField, diff --git a/tests/change_detector/change_detector_test.go b/tests/change_detector/change_detector_test.go index 519bc7d965..badfc45ecb 100644 --- a/tests/change_detector/change_detector_test.go +++ b/tests/change_detector/change_detector_test.go @@ -126,7 +126,7 @@ func execClone(t *testing.T, dir, url, branch string) { // execMakeDeps runs make:deps in the given directory. func execMakeDeps(t *testing.T, dir string) { - cmd := exec.Command("make", "deps:lens") + cmd := exec.Command("make", "deps:test") cmd.Dir = dir out, err := cmd.Output() diff --git a/tests/clients/cli/errors.go b/tests/clients/cli/errors.go new file mode 100644 index 0000000000..08915de170 --- /dev/null +++ b/tests/clients/cli/errors.go @@ -0,0 +1,24 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/sourcenetwork/defradb/errors" +) + +const ( + errMethodIsNotImplemented string = "the method is not implemented" +) + +// Errors returnable from this package. +var ( + ErrMethodIsNotImplemented = errors.New(errMethodIsNotImplemented) +) diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go index 49a0605598..89ba2cf3db 100644 --- a/tests/clients/cli/wrapper.go +++ b/tests/clients/cli/wrapper.go @@ -17,10 +17,13 @@ import ( "fmt" "io" "net/http/httptest" + "strconv" "strings" blockstore "github.com/ipfs/boxo/blockstore" + "github.com/lens-vm/lens/host-go/config/model" "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/cli" "github.com/sourcenetwork/defradb/client" @@ -40,7 +43,7 @@ type Wrapper struct { } func NewWrapper(node *net.Node) (*Wrapper, error) { - handler, err := http.NewHandler(node, http.ServerOptions{}) + handler, err := http.NewHandler(node) if err != nil { return nil, err } @@ -183,30 +186,56 @@ func (w *Wrapper) AddSchema(ctx context.Context, schema string) ([]client.Collec return cols, nil } -func (w *Wrapper) PatchSchema(ctx context.Context, patch string, setDefault bool) error { +func (w *Wrapper) PatchSchema( + ctx context.Context, + patch string, + migration immutable.Option[model.Lens], + setDefault bool, +) error { args := []string{"client", "schema", "patch"} if setDefault { - args = append(args, "--set-default") + args = append(args, "--set-active") } args = append(args, patch) + if migration.HasValue() { + lenses, err := json.Marshal(migration.Value()) + if err != nil { + return err + } + args = append(args, string(lenses)) + } + _, err := w.cmd.execute(ctx, args) return err } -func (w *Wrapper) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { - args := []string{"client", "schema", "set-default"} +func (w *Wrapper) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error { + args := []string{"client", "schema", "set-active"} args = append(args, schemaVersionID) _, err := w.cmd.execute(ctx, args) return err } -func (w *Wrapper) AddView(ctx context.Context, query string, sdl string) ([]client.CollectionDefinition, error) { +func (w *Wrapper) AddView( + ctx context.Context, + query string, + sdl string, + transform immutable.Option[model.Lens], +) ([]client.CollectionDefinition, error) { args := []string{"client", "view", "add"} args = append(args, query) args = append(args, sdl) + if transform.HasValue() { + lenses, err := json.Marshal(transform.Value()) + if err != nil { + return nil, err + } + args = append(args, string(lenses)) + } + data, err := w.cmd.execute(ctx, args) if err != nil { return nil, err @@ -219,7 +248,18 @@ func (w *Wrapper) AddView(ctx context.Context, query string, sdl string) ([]clie } func (w *Wrapper) SetMigration(ctx context.Context, config client.LensConfig) error { - return w.LensRegistry().SetMigration(ctx, config) + args := []string{"client", "schema", "migration", "set"} + + lenses, err := json.Marshal(config.Lens) + if err != nil { + return err + } + args = append(args, config.SourceSchemaVersionID) + args = append(args, config.DestinationSchemaVersionID) + args = append(args, string(lenses)) + + _, err = w.cmd.execute(ctx, args) + return err } func 
(w *Wrapper) LensRegistry() client.LensRegistry { @@ -227,60 +267,32 @@ func (w *Wrapper) LensRegistry() client.LensRegistry { } func (w *Wrapper) GetCollectionByName(ctx context.Context, name client.CollectionName) (client.Collection, error) { - args := []string{"client", "collection", "describe"} - args = append(args, "--name", name) - - data, err := w.cmd.execute(ctx, args) + cols, err := w.GetCollections(ctx, client.CollectionFetchOptions{Name: immutable.Some(name)}) if err != nil { return nil, err } - var definition client.CollectionDefinition - if err := json.Unmarshal(data, &definition); err != nil { - return nil, err - } - return &Collection{w.cmd, definition}, nil -} - -func (w *Wrapper) GetCollectionsBySchemaRoot(ctx context.Context, schemaRoot string) ([]client.Collection, error) { - args := []string{"client", "collection", "describe"} - args = append(args, "--schema", schemaRoot) - data, err := w.cmd.execute(ctx, args) - if err != nil { - return nil, err - } - var colDesc []client.CollectionDefinition - if err := json.Unmarshal(data, &colDesc); err != nil { - return nil, err - } - cols := make([]client.Collection, len(colDesc)) - for i, v := range colDesc { - cols[i] = &Collection{w.cmd, v} - } - return cols, err + // cols will always have length == 1 here + return cols[0], nil } -func (w *Wrapper) GetCollectionsByVersionID(ctx context.Context, versionId string) ([]client.Collection, error) { +func (w *Wrapper) GetCollections( + ctx context.Context, + options client.CollectionFetchOptions, +) ([]client.Collection, error) { args := []string{"client", "collection", "describe"} - args = append(args, "--version", versionId) - - data, err := w.cmd.execute(ctx, args) - if err != nil { - return nil, err + if options.Name.HasValue() { + args = append(args, "--name", options.Name.Value()) } - var colDesc []client.CollectionDefinition - if err := json.Unmarshal(data, &colDesc); err != nil { - return nil, err + if options.SchemaVersionID.HasValue() { + args = append(args, "--version", options.SchemaVersionID.Value()) } - cols := make([]client.Collection, len(colDesc)) - for i, v := range colDesc { - cols[i] = &Collection{w.cmd, v} + if options.SchemaRoot.HasValue() { + args = append(args, "--schema", options.SchemaRoot.Value()) + } + if options.IncludeInactive.HasValue() { + args = append(args, "--get-inactive", strconv.FormatBool(options.IncludeInactive.Value())) } - return cols, err -} - -func (w *Wrapper) GetAllCollections(ctx context.Context) ([]client.Collection, error) { - args := []string{"client", "collection", "describe"} data, err := w.cmd.execute(ctx, args) if err != nil { @@ -297,53 +309,30 @@ func (w *Wrapper) GetAllCollections(ctx context.Context) ([]client.Collection, e return cols, err } -func (w *Wrapper) GetSchemasByName(ctx context.Context, name string) ([]client.SchemaDescription, error) { - args := []string{"client", "schema", "describe"} - args = append(args, "--name", name) - - data, err := w.cmd.execute(ctx, args) - if err != nil { - return nil, err - } - var schema []client.SchemaDescription - if err := json.Unmarshal(data, &schema); err != nil { - return nil, err - } - return schema, err -} - func (w *Wrapper) GetSchemaByVersionID(ctx context.Context, versionID string) (client.SchemaDescription, error) { - args := []string{"client", "schema", "describe"} - args = append(args, "--version", versionID) - - data, err := w.cmd.execute(ctx, args) + schemas, err := w.GetSchemas(ctx, client.SchemaFetchOptions{ID: immutable.Some(versionID)}) if err != nil { return 
client.SchemaDescription{}, err } - var schema client.SchemaDescription - if err := json.Unmarshal(data, &schema); err != nil { - return client.SchemaDescription{}, err - } - return schema, err + + // schemas will always have length == 1 here + return schemas[0], nil } -func (w *Wrapper) GetSchemasByRoot(ctx context.Context, root string) ([]client.SchemaDescription, error) { +func (w *Wrapper) GetSchemas( + ctx context.Context, + options client.SchemaFetchOptions, +) ([]client.SchemaDescription, error) { args := []string{"client", "schema", "describe"} - args = append(args, "--root", root) - - data, err := w.cmd.execute(ctx, args) - if err != nil { - return nil, err + if options.ID.HasValue() { + args = append(args, "--version", options.ID.Value()) } - var schema []client.SchemaDescription - if err := json.Unmarshal(data, &schema); err != nil { - return nil, err + if options.Root.HasValue() { + args = append(args, "--root", options.Root.Value()) + } + if options.Name.HasValue() { + args = append(args, "--name", options.Name.Value()) } - return schema, err -} - -func (w *Wrapper) GetAllSchemas(ctx context.Context) ([]client.SchemaDescription, error) { - args := []string{"client", "schema", "describe"} data, err := w.cmd.execute(ctx, args) if err != nil { @@ -376,7 +365,7 @@ func (w *Wrapper) ExecRequest(ctx context.Context, query string) *client.Request result := &client.RequestResult{} - stdOut, stdErr, err := w.cmd.executeStream(ctx, args) + stdOut, stdErr, err := w.cmd.executeStream(args) if err != nil { result.GQL.Errors = []error{err} return result @@ -388,7 +377,7 @@ func (w *Wrapper) ExecRequest(ctx context.Context, query string) *client.Request return result } if header == cli.SUB_RESULTS_HEADER { - result.Pub = w.execRequestSubscription(ctx, buffer) + result.Pub = w.execRequestSubscription(buffer) return result } data, err := io.ReadAll(buffer) @@ -416,7 +405,7 @@ func (w *Wrapper) ExecRequest(ctx context.Context, query string) *client.Request return result } -func (w *Wrapper) execRequestSubscription(ctx context.Context, r io.Reader) *events.Publisher[events.Update] { +func (w *Wrapper) execRequestSubscription(r io.Reader) *events.Publisher[events.Update] { pubCh := events.New[events.Update](0, 0) pub, err := events.NewPublisher[events.Update](pubCh, 0) if err != nil { diff --git a/tests/clients/cli/wrapper_cli.go b/tests/clients/cli/wrapper_cli.go index 1f73b20e25..2a985dcb18 100644 --- a/tests/clients/cli/wrapper_cli.go +++ b/tests/clients/cli/wrapper_cli.go @@ -17,7 +17,6 @@ import ( "strings" "github.com/sourcenetwork/defradb/cli" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/datastore" ) @@ -39,8 +38,8 @@ func (w *cliWrapper) withTxn(tx datastore.Txn) *cliWrapper { } } -func (w *cliWrapper) execute(ctx context.Context, args []string) ([]byte, error) { - stdOut, stdErr, err := w.executeStream(ctx, args) +func (w *cliWrapper) execute(_ context.Context, args []string) ([]byte, error) { + stdOut, stdErr, err := w.executeStream(args) if err != nil { return nil, err } @@ -58,7 +57,7 @@ func (w *cliWrapper) execute(ctx context.Context, args []string) ([]byte, error) return stdOutData, nil } -func (w *cliWrapper) executeStream(ctx context.Context, args []string) (io.ReadCloser, io.ReadCloser, error) { +func (w *cliWrapper) executeStream(args []string) (io.ReadCloser, io.ReadCloser, error) { stdOutRead, stdOutWrite := io.Pipe() stdErrRead, stdErrWrite := io.Pipe() @@ -67,7 +66,7 @@ func (w *cliWrapper) executeStream(ctx context.Context, args []string) 
(io.ReadC } args = append(args, "--url", w.address) - cmd := cli.NewDefraCommand(config.DefaultConfig()) + cmd := cli.NewDefraCommand() cmd.SetOut(stdOutWrite) cmd.SetErr(stdErrWrite) cmd.SetArgs(args) diff --git a/tests/clients/cli/wrapper_collection.go b/tests/clients/cli/wrapper_collection.go index 8295bad8d7..be7c3302ac 100644 --- a/tests/clients/cli/wrapper_collection.go +++ b/tests/clients/cli/wrapper_collection.go @@ -16,6 +16,8 @@ import ( "fmt" "strings" + "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/datastore" @@ -34,7 +36,7 @@ func (c *Collection) Description() client.CollectionDescription { return c.def.Description } -func (c *Collection) Name() string { +func (c *Collection) Name() immutable.Option[string] { return c.Description().Name } @@ -55,8 +57,12 @@ func (c *Collection) Definition() client.CollectionDefinition { } func (c *Collection) Create(ctx context.Context, doc *client.Document) error { + if !c.Description().Name.HasValue() { + return client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "collection", "create"} - args = append(args, "--name", c.Description().Name) + args = append(args, "--name", c.Description().Name.Value()) document, err := doc.String() if err != nil { @@ -73,8 +79,12 @@ func (c *Collection) Create(ctx context.Context, doc *client.Document) error { } func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) error { + if !c.Description().Name.HasValue() { + return client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "collection", "create"} - args = append(args, "--name", c.Description().Name) + args = append(args, "--name", c.Description().Name.Value()) docMapList := make([]map[string]any, len(docs)) for i, doc := range docs { @@ -101,8 +111,12 @@ func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) er } func (c *Collection) Update(ctx context.Context, doc *client.Document) error { + if !c.Description().Name.HasValue() { + return client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "collection", "update"} - args = append(args, "--name", c.Description().Name) + args = append(args, "--name", c.Description().Name.Value()) args = append(args, "--docID", doc.ID().String()) document, err := doc.ToJSONPatch() @@ -179,8 +193,12 @@ func (c *Collection) UpdateWithFilter( filter any, updater string, ) (*client.UpdateResult, error) { + if !c.Description().Name.HasValue() { + return nil, client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "collection", "update"} - args = append(args, "--name", c.Description().Name) + args = append(args, "--name", c.Description().Name.Value()) args = append(args, "--updater", updater) filterJSON, err := json.Marshal(filter) @@ -197,8 +215,12 @@ func (c *Collection) UpdateWithDocID( docID client.DocID, updater string, ) (*client.UpdateResult, error) { + if !c.Description().Name.HasValue() { + return nil, client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "collection", "update"} - args = append(args, "--name", c.Description().Name) + args = append(args, "--name", c.Description().Name.Value()) args = append(args, "--docID", docID.String()) args = append(args, "--updater", updater) @@ -210,8 +232,12 @@ func (c *Collection) UpdateWithDocIDs( docIDs []client.DocID, updater string, ) (*client.UpdateResult, error) { + if 
!c.Description().Name.HasValue() { + return nil, client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "collection", "update"} - args = append(args, "--name", c.Description().Name) + args = append(args, "--name", c.Description().Name.Value()) args = append(args, "--updater", updater) strDocIDs := make([]string, len(docIDs)) @@ -252,8 +278,12 @@ func (c *Collection) deleteWith( } func (c *Collection) DeleteWithFilter(ctx context.Context, filter any) (*client.DeleteResult, error) { + if !c.Description().Name.HasValue() { + return nil, client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "collection", "delete"} - args = append(args, "--name", c.Description().Name) + args = append(args, "--name", c.Description().Name.Value()) filterJSON, err := json.Marshal(filter) if err != nil { @@ -265,16 +295,24 @@ func (c *Collection) DeleteWithFilter(ctx context.Context, filter any) (*client. } func (c *Collection) DeleteWithDocID(ctx context.Context, docID client.DocID) (*client.DeleteResult, error) { + if !c.Description().Name.HasValue() { + return nil, client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "collection", "delete"} - args = append(args, "--name", c.Description().Name) + args = append(args, "--name", c.Description().Name.Value()) args = append(args, "--docID", docID.String()) return c.deleteWith(ctx, args) } func (c *Collection) DeleteWithDocIDs(ctx context.Context, docIDs []client.DocID) (*client.DeleteResult, error) { + if !c.Description().Name.HasValue() { + return nil, client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "collection", "delete"} - args = append(args, "--name", c.Description().Name) + args = append(args, "--name", c.Description().Name.Value()) strDocIDs := make([]string, len(docIDs)) for i, v := range docIDs { @@ -286,8 +324,12 @@ func (c *Collection) DeleteWithDocIDs(ctx context.Context, docIDs []client.DocID } func (c *Collection) Get(ctx context.Context, docID client.DocID, showDeleted bool) (*client.Document, error) { + if !c.Description().Name.HasValue() { + return nil, client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "collection", "get"} - args = append(args, "--name", c.Description().Name) + args = append(args, "--name", c.Description().Name.Value()) args = append(args, docID.String()) if showDeleted { @@ -315,10 +357,14 @@ func (c *Collection) WithTxn(tx datastore.Txn) client.Collection { } func (c *Collection) GetAllDocIDs(ctx context.Context) (<-chan client.DocIDResult, error) { + if !c.Description().Name.HasValue() { + return nil, client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "collection", "docIDs"} - args = append(args, "--name", c.Description().Name) + args = append(args, "--name", c.Description().Name.Value()) - stdOut, _, err := c.cmd.executeStream(ctx, args) + stdOut, _, err := c.cmd.executeStream(args) if err != nil { return nil, err } @@ -354,8 +400,12 @@ func (c *Collection) CreateIndex( ctx context.Context, indexDesc client.IndexDescription, ) (index client.IndexDescription, err error) { + if !c.Description().Name.HasValue() { + return client.IndexDescription{}, client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "index", "create"} - args = append(args, "--collection", c.Description().Name) + args = append(args, "--collection", c.Description().Name.Value()) if indexDesc.Name != "" { args = append(args, "--name", indexDesc.Name) } @@ -380,8 +430,12 @@ func (c 
*Collection) CreateIndex( } func (c *Collection) DropIndex(ctx context.Context, indexName string) error { + if !c.Description().Name.HasValue() { + return client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "index", "drop"} - args = append(args, "--collection", c.Description().Name) + args = append(args, "--collection", c.Description().Name.Value()) args = append(args, "--name", indexName) _, err := c.cmd.execute(ctx, args) @@ -389,8 +443,12 @@ func (c *Collection) DropIndex(ctx context.Context, indexName string) error { } func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) { + if !c.Description().Name.HasValue() { + return nil, client.ErrOperationNotPermittedOnNamelessCols + } + args := []string{"client", "index", "list"} - args = append(args, "--collection", c.Description().Name) + args = append(args, "--collection", c.Description().Name.Value()) data, err := c.cmd.execute(ctx, args) if err != nil { @@ -402,3 +460,15 @@ func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, } return indexes, nil } + +func (c *Collection) CreateDocIndex(context.Context, *client.Document) error { + return ErrMethodIsNotImplemented +} + +func (c *Collection) UpdateDocIndex(ctx context.Context, oldDoc, newDoc *client.Document) error { + return ErrMethodIsNotImplemented +} + +func (c *Collection) DeleteDocIndex(context.Context, *client.Document) error { + return ErrMethodIsNotImplemented +} diff --git a/tests/clients/cli/wrapper_lens.go b/tests/clients/cli/wrapper_lens.go index 679a792662..da6011b9eb 100644 --- a/tests/clients/cli/wrapper_lens.go +++ b/tests/clients/cli/wrapper_lens.go @@ -13,7 +13,10 @@ package cli import ( "context" "encoding/json" + "fmt" + "strconv" + "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable/enumerable" "github.com/sourcenetwork/defradb/client" @@ -30,16 +33,15 @@ func (w *LensRegistry) WithTxn(tx datastore.Txn) client.LensRegistry { return &LensRegistry{w.cmd.withTxn(tx)} } -func (w *LensRegistry) SetMigration(ctx context.Context, config client.LensConfig) error { - args := []string{"client", "schema", "migration", "set"} - args = append(args, config.SourceSchemaVersionID) - args = append(args, config.DestinationSchemaVersionID) +func (w *LensRegistry) SetMigration(ctx context.Context, collectionID uint32, config model.Lens) error { + args := []string{"client", "schema", "migration", "set-registry"} - lensCfg, err := json.Marshal(config.Lens) + lenses, err := json.Marshal(config) if err != nil { return err } - args = append(args, string(lensCfg)) + args = append(args, strconv.FormatUint(uint64(collectionID), 10)) + args = append(args, string(lenses)) _, err = w.cmd.execute(ctx, args) return err @@ -55,10 +57,10 @@ func (w *LensRegistry) ReloadLenses(ctx context.Context) error { func (w *LensRegistry) MigrateUp( ctx context.Context, src enumerable.Enumerable[map[string]any], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[map[string]any], error) { args := []string{"client", "schema", "migration", "up"} - args = append(args, "--version", schemaVersionID) + args = append(args, "--collection", fmt.Sprint(collectionID)) var srcData []map[string]any err := enumerable.ForEach(src, func(item map[string]any) { @@ -87,10 +89,10 @@ func (w *LensRegistry) MigrateUp( func (w *LensRegistry) MigrateDown( ctx context.Context, src enumerable.Enumerable[map[string]any], - schemaVersionID string, + collectionID uint32, ) 
(enumerable.Enumerable[map[string]any], error) { args := []string{"client", "schema", "migration", "down"} - args = append(args, "--version", schemaVersionID) + args = append(args, "--collection", fmt.Sprint(collectionID)) var srcData []map[string]any err := enumerable.ForEach(src, func(item map[string]any) { @@ -115,31 +117,3 @@ func (w *LensRegistry) MigrateDown( } return out, nil } - -func (w *LensRegistry) Config(ctx context.Context) ([]client.LensConfig, error) { - args := []string{"client", "schema", "migration", "get"} - - data, err := w.cmd.execute(ctx, args) - if err != nil { - return nil, err - } - var cfgs []client.LensConfig - if err := json.Unmarshal(data, &cfgs); err != nil { - return nil, err - } - return cfgs, nil -} - -func (w *LensRegistry) HasMigration(ctx context.Context, schemaVersionID string) (bool, error) { - cfgs, err := w.Config(ctx) - if err != nil { - return false, err - } - found := false - for _, cfg := range cfgs { - if cfg.SourceSchemaVersionID == schemaVersionID { - found = true - } - } - return found, nil -} diff --git a/tests/clients/http/wrapper.go b/tests/clients/http/wrapper.go index 040ab9c1b4..b45105a7f7 100644 --- a/tests/clients/http/wrapper.go +++ b/tests/clients/http/wrapper.go @@ -15,7 +15,9 @@ import ( "net/http/httptest" blockstore "github.com/ipfs/boxo/blockstore" + "github.com/lens-vm/lens/host-go/config/model" "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" @@ -36,7 +38,7 @@ type Wrapper struct { } func NewWrapper(node *net.Node) (*Wrapper, error) { - handler, err := http.NewHandler(node, http.ServerOptions{}) + handler, err := http.NewHandler(node) if err != nil { return nil, err } @@ -95,16 +97,26 @@ func (w *Wrapper) AddSchema(ctx context.Context, schema string) ([]client.Collec return w.client.AddSchema(ctx, schema) } -func (w *Wrapper) PatchSchema(ctx context.Context, patch string, setAsDefaultVersion bool) error { - return w.client.PatchSchema(ctx, patch, setAsDefaultVersion) +func (w *Wrapper) PatchSchema( + ctx context.Context, + patch string, + migration immutable.Option[model.Lens], + setAsDefaultVersion bool, +) error { + return w.client.PatchSchema(ctx, patch, migration, setAsDefaultVersion) } -func (w *Wrapper) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { - return w.client.SetDefaultSchemaVersion(ctx, schemaVersionID) +func (w *Wrapper) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error { + return w.client.SetActiveSchemaVersion(ctx, schemaVersionID) } -func (w *Wrapper) AddView(ctx context.Context, query string, sdl string) ([]client.CollectionDefinition, error) { - return w.client.AddView(ctx, query, sdl) +func (w *Wrapper) AddView( + ctx context.Context, + query string, + sdl string, + transform immutable.Option[model.Lens], +) ([]client.CollectionDefinition, error) { + return w.client.AddView(ctx, query, sdl, transform) } func (w *Wrapper) SetMigration(ctx context.Context, config client.LensConfig) error { @@ -119,32 +131,22 @@ func (w *Wrapper) GetCollectionByName(ctx context.Context, name client.Collectio return w.client.GetCollectionByName(ctx, name) } -func (w *Wrapper) GetCollectionsBySchemaRoot(ctx context.Context, schemaRoot string) ([]client.Collection, error) { - return w.client.GetCollectionsBySchemaRoot(ctx, schemaRoot) -} - -func (w *Wrapper) GetCollectionsByVersionID(ctx context.Context, versionId string) ([]client.Collection, error) { - 
return w.client.GetCollectionsByVersionID(ctx, versionId) -} - -func (w *Wrapper) GetAllCollections(ctx context.Context) ([]client.Collection, error) { - return w.client.GetAllCollections(ctx) -} - -func (w *Wrapper) GetSchemasByName(ctx context.Context, name string) ([]client.SchemaDescription, error) { - return w.client.GetSchemasByName(ctx, name) +func (w *Wrapper) GetCollections( + ctx context.Context, + options client.CollectionFetchOptions, +) ([]client.Collection, error) { + return w.client.GetCollections(ctx, options) } func (w *Wrapper) GetSchemaByVersionID(ctx context.Context, versionID string) (client.SchemaDescription, error) { return w.client.GetSchemaByVersionID(ctx, versionID) } -func (w *Wrapper) GetSchemasByRoot(ctx context.Context, root string) ([]client.SchemaDescription, error) { - return w.client.GetSchemasByRoot(ctx, root) -} - -func (w *Wrapper) GetAllSchemas(ctx context.Context) ([]client.SchemaDescription, error) { - return w.client.GetAllSchemas(ctx) +func (w *Wrapper) GetSchemas( + ctx context.Context, + options client.SchemaFetchOptions, +) ([]client.SchemaDescription, error) { + return w.client.GetSchemas(ctx, options) } func (w *Wrapper) GetAllIndexes(ctx context.Context) (map[client.CollectionName][]client.IndexDescription, error) { diff --git a/tests/gen/cli/gendocs.go b/tests/gen/cli/gendocs.go index 6d388eaf67..9123bf0c2b 100644 --- a/tests/gen/cli/gendocs.go +++ b/tests/gen/cli/gendocs.go @@ -20,16 +20,15 @@ import ( "github.com/spf13/cobra" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/http" "github.com/sourcenetwork/defradb/tests/gen" ) const defaultBatchSize = 1000 -func MakeGenDocCommand(cfg *config.Config) *cobra.Command { +func MakeGenDocCommand() *cobra.Command { var demandJSON string - + var url string var cmd = &cobra.Command{ Use: "gendocs --demand ", Short: "Automatically generates documents for existing collections.", @@ -39,11 +38,7 @@ Example: The following command generates 100 User documents and 500 Device docum gendocs --demand '{"User": 100, "Device": 500 }'`, ValidArgs: []string{"demand"}, RunE: func(cmd *cobra.Command, args []string) error { - // cobra does not chain pre run calls so we have to run them again here - if err := loadConfig(cfg); err != nil { - return err - } - store, err := http.NewClient(cfg.API.Address) + store, err := http.NewClient(url) if err != nil { return err } @@ -54,7 +49,7 @@ Example: The following command generates 100 User documents and 500 Device docum return NewErrInvalidDemandValue(err) } - collections, err := store.GetAllCollections(cmd.Context()) + collections, err := store.GetCollections(cmd.Context(), client.CollectionFetchOptions{}) if err != nil { return err } @@ -100,6 +95,8 @@ Example: The following command generates 100 User documents and 500 Device docum return nil }, } + + cmd.Flags().StringVar(&url, "url", "localhost:9181", "URL of HTTP endpoint to listen on or connect to") cmd.Flags().StringVarP(&demandJSON, "demand", "d", "", "Documents' demand in JSON format") return cmd @@ -123,7 +120,7 @@ func saveBatchToCollections( ) error { for colName, colDocs := range colDocsMap { for _, col := range collections { - if col.Description().Name == colName { + if col.Description().Name.Value() == colName { err := col.CreateMany(context.Background(), colDocs) if err != nil { return err @@ -138,7 +135,7 @@ func saveBatchToCollections( func groupDocsByCollection(docs []gen.GeneratedDoc) map[string][]*client.Document { result := 
make(map[string][]*client.Document) for _, doc := range docs { - result[doc.Col.Description.Name] = append(result[doc.Col.Description.Name], doc.Doc) + result[doc.Col.Description.Name.Value()] = append(result[doc.Col.Description.Name.Value()], doc.Doc) } return result } @@ -150,10 +147,3 @@ func colsToDefs(cols []client.Collection) []client.CollectionDefinition { } return colDefs } - -func loadConfig(cfg *config.Config) error { - if err := cfg.LoadRootDirFromFlagOrDefault(); err != nil { - return err - } - return cfg.LoadWithRootdir(cfg.ConfigFileExists()) -} diff --git a/tests/gen/cli/gendocs_test.go b/tests/gen/cli/gendocs_test.go index 18b9b157c1..35202ec9e0 100644 --- a/tests/gen/cli/gendocs_test.go +++ b/tests/gen/cli/gendocs_test.go @@ -12,43 +12,39 @@ package cli import ( "bytes" + "context" "io" + "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/sourcenetwork/defradb/cli" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/tests/gen" ) -func execAddSchemaCmd(t *testing.T, cfg *config.Config, schema string) { - rootCmd := cli.NewDefraCommand(cfg) - rootCmd.SetArgs([]string{"client", "schema", "add", schema}) - err := rootCmd.Execute() - require.NoError(t, err) -} - func TestGendocsCmd_IfNoErrors_ReturnGenerationOutput(t *testing.T) { - cfg, _, close := startTestNode(t) + defra, close := startTestNode(t) defer close() - execAddSchemaCmd(t, cfg, ` - type User { - name: String - devices: [Device] - } - type Device { - model: String - owner: User - }`) - - genDocsCmd := MakeGenDocCommand(cfg) + defra.db.AddSchema(context.Background(), ` + type User { + name: String + devices: [Device] + } + type Device { + model: String + owner: User + }`) + + genDocsCmd := MakeGenDocCommand() outputBuf := bytes.NewBufferString("") genDocsCmd.SetOut(outputBuf) - genDocsCmd.SetArgs([]string{"--demand", `{"User": 3, "Device": 12}`}) + genDocsCmd.SetArgs([]string{ + "--demand", `{"User": 3, "Device": 12}`, + "--url", strings.TrimPrefix(defra.server.URL, "http://"), + }) err := genDocsCmd.Execute() require.NoError(t, err) @@ -67,33 +63,38 @@ func TestGendocsCmd_IfNoErrors_ReturnGenerationOutput(t *testing.T) { } func TestGendocsCmd_IfInvalidDemandValue_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) + defra, close := startTestNode(t) defer close() - execAddSchemaCmd(t, cfg, ` + defra.db.AddSchema(context.Background(), ` type User { name: String }`) - genDocsCmd := MakeGenDocCommand(cfg) - genDocsCmd.SetArgs([]string{"--demand", `{"User": invalid}`}) + genDocsCmd := MakeGenDocCommand() + genDocsCmd.SetArgs([]string{ + "--demand", `{"User": invalid}`, + "--url", strings.TrimPrefix(defra.server.URL, "http://"), + }) err := genDocsCmd.Execute() require.ErrorContains(t, err, errInvalidDemandValue) } func TestGendocsCmd_IfInvalidConfig_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) + defra, close := startTestNode(t) defer close() - execAddSchemaCmd(t, cfg, ` + defra.db.AddSchema(context.Background(), ` type User { name: String }`) - genDocsCmd := MakeGenDocCommand(cfg) - - genDocsCmd.SetArgs([]string{"--demand", `{"Unknown": 3}`}) + genDocsCmd := MakeGenDocCommand() + genDocsCmd.SetArgs([]string{ + "--demand", `{"Unknown": 3}`, + "--url", strings.TrimPrefix(defra.server.URL, "http://"), + }) err := genDocsCmd.Execute() require.Error(t, err, gen.NewErrInvalidConfiguration("")) diff --git a/tests/gen/cli/util_test.go b/tests/gen/cli/util_test.go index 32d3c716f6..10bd98ca99 100644 --- 
a/tests/gen/cli/util_test.go +++ b/tests/gen/cli/util_test.go @@ -12,16 +12,13 @@ package cli import ( "context" - "fmt" - "net/http" - "os" + "net/http/httptest" "testing" badger "github.com/sourcenetwork/badger/v4" "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" @@ -33,21 +30,15 @@ var log = logging.MustNewLogger("cli") type defraInstance struct { db client.DB - server *httpapi.Server + server *httptest.Server } func (di *defraInstance) close(ctx context.Context) { di.db.Close() - if err := di.server.Close(); err != nil { - log.FeedbackInfo( - ctx, - "The server could not be closed successfully", - logging.NewKV("Error", err.Error()), - ) - } + di.server.Close() } -func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { +func start(ctx context.Context) (*defraInstance, error) { log.FeedbackInfo(ctx, "Starting DefraDB service...") log.FeedbackInfo(ctx, "Building new memory store") @@ -63,26 +54,11 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { return nil, errors.Wrap("failed to create database", err) } - server, err := httpapi.NewServer(db, httpapi.WithAddress(cfg.API.Address)) + handler, err := httpapi.NewHandler(db) if err != nil { - return nil, errors.Wrap("failed to create http server", err) - } - if err := server.Listen(ctx); err != nil { - return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", server.Addr), err) + return nil, errors.Wrap("failed to create http handler", err) } - // save the address on the config in case the port number was set to random - cfg.API.Address = server.AssignedAddr() - cfg.Persist() - - // run the server in a separate goroutine - go func(apiAddress string) { - log.FeedbackInfo(ctx, fmt.Sprintf("Providing HTTP API at %s.", apiAddress)) - if err := server.Run(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) { - log.FeedbackErrorE(ctx, "Failed to run the HTTP server", err) - db.Close() - os.Exit(1) - } - }(cfg.API.AddressToURL()) + server := httptest.NewServer(handler) return &defraInstance{ db: db, @@ -90,22 +66,9 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { }, nil } -func getTestConfig(t *testing.T) *config.Config { - cfg := config.DefaultConfig() - cfg.Datastore.Store = "memory" - cfg.Net.P2PDisabled = true - cfg.Rootdir = t.TempDir() - cfg.Net.P2PAddress = "/ip4/127.0.0.1/tcp/0" - cfg.API.Address = "127.0.0.1:0" - cfg.Persist() - return cfg -} - -func startTestNode(t *testing.T) (*config.Config, *defraInstance, func()) { - cfg := getTestConfig(t) - +func startTestNode(t *testing.T) (*defraInstance, func()) { ctx := context.Background() - di, err := start(ctx, cfg) + di, err := start(ctx) require.NoError(t, err) - return cfg, di, func() { di.close(ctx) } + return di, func() { di.close(ctx) } } diff --git a/tests/gen/gen_auto.go b/tests/gen/gen_auto.go index 7ad3bb2d41..c837b822a9 100644 --- a/tests/gen/gen_auto.go +++ b/tests/gen/gen_auto.go @@ -54,7 +54,7 @@ func AutoGenerate(definitions []client.CollectionDefinition, options ...Option) } typeDefs := make(map[string]client.CollectionDefinition) for _, def := range definitions { - typeDefs[def.Description.Name] = def + typeDefs[def.Description.Name.Value()] = def } generator := newRandomDocGenerator(typeDefs, nil) return generator.generateDocs(options...) 
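The .Value() calls threaded through this file follow from CollectionDescription.Name changing from a plain string to immutable.Option[string] elsewhere in this change set. A minimal, self-contained sketch of the option API these call sites assume, using only the accessors that actually appear in this diff (immutable.Some, immutable.None, HasValue, Value); the collection name "User" is purely illustrative:

	package main

	import (
		"fmt"

		"github.com/sourcenetwork/immutable"
	)

	func main() {
		// A named collection wraps its name in Some; call sites unwrap it with Value.
		name := immutable.Some("User")
		if name.HasValue() {
			fmt.Println(name.Value()) // prints "User"
		}

		// A nameless collection carries None, which is why the collection wrappers
		// earlier in this patch check HasValue and return
		// ErrOperationNotPermittedOnNamelessCols rather than calling Value blindly.
		none := immutable.None[string]()
		fmt.Println(none.HasValue()) // prints "false"
	}

The same guard-then-unwrap pattern explains the Name.Value() comparisons in the hunk below.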
@@ -119,7 +119,7 @@ func (g *randomDocGenerator) getMaxTotalDemand() int { } // getNextPrimaryDocID returns the docID of the next primary document to be used as a relation. -func (g *randomDocGenerator) getNextPrimaryDocID(secondaryType string, field *client.FieldDescription) string { +func (g *randomDocGenerator) getNextPrimaryDocID(secondaryType string, field *client.SchemaFieldDescription) string { ind := g.configurator.usageCounter.getNextTypeIndForField(secondaryType, field) return g.generatedDocs[field.Schema][ind].docID } @@ -139,7 +139,7 @@ func (g *randomDocGenerator) generateRandomDocs(order []string) error { continue } if field.IsRelation() { - if field.IsPrimaryRelation() { + if field.IsPrimaryRelation { if strings.HasSuffix(field.Name, request.RelatedObjectID) { newDoc[field.Name] = g.getNextPrimaryDocID(typeName, &field) } else { @@ -185,22 +185,22 @@ func (g *randomDocGenerator) generateRandomValue( func (g *randomDocGenerator) getValueGenerator(fieldKind client.FieldKind, fieldConfig genConfig) func() any { switch fieldKind { - case client.FieldKind_STRING: + case client.FieldKind_NILLABLE_STRING: strLen := DefaultStrLen if prop, ok := fieldConfig.props["len"]; ok { strLen = prop.(int) } return func() any { return getRandomString(&g.random, strLen) } - case client.FieldKind_INT: + case client.FieldKind_NILLABLE_INT: min, max := getMinMaxOrDefault(fieldConfig, DefaultIntMin, DefaultIntMax) return func() any { return min + g.random.Intn(max-min+1) } - case client.FieldKind_BOOL: + case client.FieldKind_NILLABLE_BOOL: ratio := 0.5 if prop, ok := fieldConfig.props["ratio"]; ok { ratio = prop.(float64) } return func() any { return g.random.Float64() < ratio } - case client.FieldKind_FLOAT: + case client.FieldKind_NILLABLE_FLOAT: min, max := getMinMaxOrDefault(fieldConfig, 0.0, 1.0) return func() any { return min + g.random.Float64()*(max-min) } } @@ -212,27 +212,27 @@ func validateDefinitions(definitions []client.CollectionDefinition) error { colNames := make(map[string]struct{}) fieldRefs := []string{} for _, def := range definitions { - if def.Description.Name == "" { + if def.Description.Name.Value() == "" { return NewErrIncompleteColDefinition("description name is empty") } if def.Schema.Name == "" { return NewErrIncompleteColDefinition("schema name is empty") } - if def.Description.Name != def.Schema.Name { + if def.Description.Name.Value() != def.Schema.Name { return NewErrIncompleteColDefinition("description name and schema name do not match") } for _, field := range def.Schema.Fields { if field.Name == "" { return NewErrIncompleteColDefinition("field name is empty") } - if field.IsObject() { + if field.Kind.IsObject() { if field.Schema == "" { return NewErrIncompleteColDefinition("field schema is empty") } fieldRefs = append(fieldRefs, field.Schema) } } - colNames[def.Description.Name] = struct{}{} + colNames[def.Description.Name.Value()] = struct{}{} colIDs[def.Description.ID] = struct{}{} } for _, ref := range fieldRefs { diff --git a/tests/gen/gen_auto_config.go b/tests/gen/gen_auto_config.go index ccebce92d1..eab85dd318 100644 --- a/tests/gen/gen_auto_config.go +++ b/tests/gen/gen_auto_config.go @@ -54,11 +54,11 @@ func (m configsMap) AddForField(typeStr, fieldName string, conf genConfig) { func validateConfig(types map[string]client.CollectionDefinition, configsMap configsMap) error { for typeName, typeConfigs := range configsMap { typeDef := types[typeName] - if typeDef.Description.Name == "" { + if typeDef.Description.Name.Value() == "" { return 
newNotDefinedTypeErr(typeName) } for fieldName, fieldConfig := range typeConfigs { - fieldDef, hasField := typeDef.Description.GetFieldByName(fieldName, &typeDef.Schema) + fieldDef, hasField := typeDef.Schema.GetFieldByName(fieldName) if !hasField { return NewErrInvalidConfiguration("field " + fieldName + " is not defined in the schema for type " + typeName) @@ -82,12 +82,12 @@ func validateConfig(types map[string]client.CollectionDefinition, configsMap con return nil } -func checkAndValidateMinMax(field *client.FieldDescription, conf *genConfig) error { +func checkAndValidateMinMax(field *client.SchemaFieldDescription, conf *genConfig) error { _, hasMin := conf.props["min"] if hasMin { var err error - if field.IsArray() || field.Kind == client.FieldKind_INT { - err = validateMinConfig[int](conf, field.IsArray()) + if field.Kind.IsArray() || field.Kind == client.FieldKind_NILLABLE_INT { + err = validateMinConfig[int](conf, field.Kind.IsArray()) } else { err = validateMinConfig[float64](conf, false) } @@ -100,10 +100,10 @@ func checkAndValidateMinMax(field *client.FieldDescription, conf *genConfig) err return nil } -func checkAndValidateLen(field *client.FieldDescription, conf *genConfig) error { +func checkAndValidateLen(field *client.SchemaFieldDescription, conf *genConfig) error { lenConf, hasLen := conf.props["len"] if hasLen { - if field.Kind != client.FieldKind_STRING { + if field.Kind != client.FieldKind_NILLABLE_STRING { return NewErrInvalidConfiguration("len is used on not String") } len, ok := lenConf.(int) @@ -117,10 +117,10 @@ func checkAndValidateLen(field *client.FieldDescription, conf *genConfig) error return nil } -func checkAndValidateRatio(field *client.FieldDescription, conf *genConfig) error { +func checkAndValidateRatio(field *client.SchemaFieldDescription, conf *genConfig) error { ratioConf, hasRatio := conf.props["ratio"] if hasRatio { - if field.Kind != client.FieldKind_BOOL { + if field.Kind != client.FieldKind_NILLABLE_BOOL { return NewErrInvalidConfiguration("ratio is used on not Boolean") } len, ok := ratioConf.(float64) diff --git a/tests/gen/gen_auto_configurator.go b/tests/gen/gen_auto_configurator.go index 4049e7ba4d..b4746ae437 100644 --- a/tests/gen/gen_auto_configurator.go +++ b/tests/gen/gen_auto_configurator.go @@ -65,7 +65,7 @@ func newTypeUsageCounter(random *rand.Rand) typeUsageCounters { // addRelationUsage adds a relation usage tracker for a foreign field. func (c *typeUsageCounters) addRelationUsage( secondaryType string, - field client.FieldDescription, + field client.SchemaFieldDescription, minPerDoc, maxPerDoc, numDocs int, ) { primaryType := field.Schema @@ -81,7 +81,7 @@ func (c *typeUsageCounters) addRelationUsage( } // getNextTypeIndForField returns the next index to be used for a foreign field. 
-func (c *typeUsageCounters) getNextTypeIndForField(secondaryType string, field *client.FieldDescription) int { +func (c *typeUsageCounters) getNextTypeIndForField(secondaryType string, field *client.SchemaFieldDescription) int { current := c.m[field.Schema][secondaryType][field.Name] return current.useNextDocIDIndex() } @@ -273,11 +273,11 @@ func (g *docsGenConfigurator) getDemandForPrimaryType( ) (typeDemand, error) { primaryTypeDef := g.types[primaryType] for _, field := range primaryTypeDef.Schema.Fields { - if field.IsObject() && field.Schema == secondaryType { + if field.Kind.IsObject() && field.Schema == secondaryType { primaryDemand := typeDemand{min: secondaryDemand.min, max: secondaryDemand.max} minPerDoc, maxPerDoc := 1, 1 - if field.IsArray() { + if field.Kind.IsArray() { fieldConf := g.config.ForField(primaryType, field.Name) minPerDoc, maxPerDoc = getMinMaxOrDefault(fieldConf, 0, secondaryDemand.max) // if we request min 100 of secondary docs and there can be max 5 per primary doc, @@ -339,14 +339,14 @@ func (g *docsGenConfigurator) calculateDemandForSecondaryTypes( ) error { typeDef := g.types[typeName] for _, field := range typeDef.Schema.Fields { - if field.IsObject() && !field.IsPrimaryRelation() { + if field.Kind.IsObject() && !field.IsPrimaryRelation { primaryDocDemand := g.docsDemand[typeName] newSecDemand := typeDemand{min: primaryDocDemand.min, max: primaryDocDemand.max} minPerDoc, maxPerDoc := 1, 1 curSecDemand, hasSecDemand := g.docsDemand[field.Schema] - if field.IsArray() { + if field.Kind.IsArray() { fieldConf := g.config.ForField(typeName, field.Name) if prop, ok := fieldConf.props["min"]; ok { minPerDoc = prop.(int) @@ -418,8 +418,8 @@ func getRelationGraph(types map[string]client.CollectionDefinition) map[string][ for typeName, typeDef := range types { for _, field := range typeDef.Schema.Fields { - if field.IsObject() { - if field.IsPrimaryRelation() { + if field.Kind.IsObject() { + if field.IsPrimaryRelation { primaryGraph[typeName] = appendUnique(primaryGraph[typeName], field.Schema) } else { primaryGraph[field.Schema] = appendUnique(primaryGraph[field.Schema], typeName) diff --git a/tests/gen/gen_auto_test.go b/tests/gen/gen_auto_test.go index f22859df0c..0ddca543f2 100644 --- a/tests/gen/gen_auto_test.go +++ b/tests/gen/gen_auto_test.go @@ -15,6 +15,7 @@ import ( "testing" "time" + "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" "github.com/sourcenetwork/defradb/client" @@ -71,7 +72,7 @@ func getDocIDsFromDocs(docs []*client.Document) []string { func filterByCollection(docs []GeneratedDoc, name string) []*client.Document { var result []*client.Document for _, doc := range docs { - if doc.Col.Description.Name == name { + if doc.Col.Description.Name.Value() == name { result = append(result, doc.Doc) } } @@ -1200,44 +1201,41 @@ func TestAutoGenerate_IfCollectionDefinitionIsIncomplete_ReturnError(t *testing. 
return []client.CollectionDefinition{ { Description: client.CollectionDescription{ - Name: "User", + Name: immutable.Some("User"), ID: 0, }, Schema: client.SchemaDescription{ Name: "User", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "name", - Kind: client.FieldKind_INT, + Kind: client.FieldKind_NILLABLE_INT, }, { - Name: "device", - Kind: client.FieldKind_FOREIGN_OBJECT, - Schema: "Device", - RelationType: client.Relation_Type_ONE | client.Relation_Type_ONEONE, + Name: "device", + Kind: client.FieldKind_FOREIGN_OBJECT, + Schema: "Device", }, }, }, }, { Description: client.CollectionDescription{ - Name: "Device", + Name: immutable.Some("Device"), ID: 1, }, Schema: client.SchemaDescription{ Name: "Device", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "model", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, }, { - Name: "owner", - Kind: client.FieldKind_FOREIGN_OBJECT, - Schema: "User", - RelationType: client.Relation_Type_ONE | - client.Relation_Type_ONEONE | - client.Relation_Type_Primary, + Name: "owner", + Kind: client.FieldKind_FOREIGN_OBJECT, + Schema: "User", + IsPrimaryRelation: true, }, }, }, @@ -1252,7 +1250,13 @@ func TestAutoGenerate_IfCollectionDefinitionIsIncomplete_ReturnError(t *testing. { name: "description name is empty", changeDefs: func(defs []client.CollectionDefinition) { - defs[0].Description.Name = "" + defs[0].Description.Name = immutable.Some("") + }, + }, + { + name: "description name is none", + changeDefs: func(defs []client.CollectionDefinition) { + defs[0].Description.Name = immutable.None[string]() }, }, { @@ -1312,49 +1316,50 @@ func TestAutoGenerate_IfColDefinitionsAreValid_ShouldGenerate(t *testing.T) { defs := []client.CollectionDefinition{ { Description: client.CollectionDescription{ - Name: "User", + Name: immutable.Some("User"), ID: 0, }, Schema: client.SchemaDescription{ Name: "User", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "name", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, }, { Name: "age", - Kind: client.FieldKind_INT, + Kind: client.FieldKind_NILLABLE_INT, }, { Name: "rating", - Kind: client.FieldKind_FLOAT, + Kind: client.FieldKind_NILLABLE_FLOAT, }, { Name: "devices", Kind: client.FieldKind_FOREIGN_OBJECT_ARRAY, Schema: "Device", - RelationType: client.Relation_Type_MANY | client.Relation_Type_ONEMANY, + RelationName: "Device_owner", }, }, }, }, { Description: client.CollectionDescription{ - Name: "Device", + Name: immutable.Some("Device"), ID: 1, }, Schema: client.SchemaDescription{ Name: "Device", - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "model", - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, }, { Name: "owner_id", Kind: client.FieldKind_DocID, - RelationType: client.Relation_Type_INTERNAL_ID, + RelationName: "Device_owner", + Schema: "User", }, }, }, diff --git a/tests/gen/schema_parser.go b/tests/gen/schema_parser.go index 216376c26d..bcce388f22 100644 --- a/tests/gen/schema_parser.go +++ b/tests/gen/schema_parser.go @@ -31,7 +31,7 @@ func parseSDL(gqlSDL string) (map[string]client.CollectionDefinition, error) { } result := make(map[string]client.CollectionDefinition) for _, col := range cols { - result[col.Description.Name] = col + result[col.Description.Name.Value()] = col } return result, nil } diff --git a/tests/integration/events/simple/with_update_test.go 
b/tests/integration/events/simple/with_update_test.go index c929bf4384..2f0960b977 100644 --- a/tests/integration/events/simple/with_update_test.go +++ b/tests/integration/events/simple/with_update_test.go @@ -66,14 +66,14 @@ func TestEventsSimpleWithUpdate(t *testing.T) { ExpectedUpdates: []testUtils.ExpectedUpdate{ { DocID: immutable.Some(docID1), - Cid: immutable.Some("bafybeif5l2a5f2lcsmuml2cji6unq4qk2ta4f3uow4wccdjebsu7jcjrj4"), + Cid: immutable.Some("bafybeidzstxabh7qktq7pkmmxvpjbnwklxz3h5l6d425ldvjy65xvvuxu4"), }, { DocID: immutable.Some(docID2), }, { DocID: immutable.Some(docID1), - Cid: immutable.Some("bafybeihchzitl7e7pyhci5bs563dn3seykcleqk56r7vjtslvi3rv3wsne"), + Cid: immutable.Some("bafybeiah75qvtqxflw3urgejxetaugpcddx5h2ocj7pid34zjyy7tpp6wi"), }, }, } diff --git a/tests/integration/explain.go b/tests/integration/explain.go index a8de0e6441..0b9c4c2dab 100644 --- a/tests/integration/explain.go +++ b/tests/integration/explain.go @@ -56,6 +56,7 @@ var ( "updateNode": {}, "valuesNode": {}, "viewNode": {}, + "lensNode": {}, } ) diff --git a/tests/integration/explain/debug/with_view_transform_test.go b/tests/integration/explain/debug/with_view_transform_test.go new file mode 100644 index 0000000000..386324067a --- /dev/null +++ b/tests/integration/explain/debug/with_view_transform_test.go @@ -0,0 +1,88 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +var transformViewPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "viewNode": dataMap{ + "lensNode": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithViewWithTransform(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with view with transform", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + } + `, + }, + testUtils.CreateView{ + Query: ` + User { + name + } + `, + SDL: ` + type UserView { + fullName: String + } + `, + Transform: immutable.Some(model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.CopyModulePath, + Arguments: map[string]any{ + "src": "name", + "dst": "fullName", + }, + }, + }, + }), + }, + testUtils.ExplainRequest{ + Request: `query @explain(type: debug) { + UserView { + fullName + } + }`, + ExpectedPatterns: []dataMap{transformViewPattern}, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain_result_asserter.go b/tests/integration/explain_result_asserter.go index 30126d4fe4..45f998e481 100644 --- a/tests/integration/explain_result_asserter.go +++ b/tests/integration/explain_result_asserter.go @@ -59,15 +59,15 @@ func (a *ExplainResultAsserter) Assert(t *testing.T, result []dataMap) { require.Len(t, result, 1, "Expected len(result) = 1, got %d", len(result)) explainNode, ok := result[0]["explain"].(dataMap) require.True(t, ok, "Expected 
explain node") - assert.Equal(t, explainNode["executionSuccess"], true, "Expected executionSuccess property") + assert.Equal(t, true, explainNode["executionSuccess"], "Expected executionSuccess property") if a.sizeOfResults.HasValue() { actual := explainNode["sizeOfResult"] - assert.Equal(t, actual, a.sizeOfResults.Value(), + assert.Equal(t, a.sizeOfResults.Value(), actual, "Expected %d sizeOfResult, got %d", a.sizeOfResults.Value(), actual) } if a.planExecutions.HasValue() { actual := explainNode["planExecutions"] - assert.Equal(t, actual, a.planExecutions.Value(), + assert.Equal(t, a.planExecutions.Value(), actual, "Expected %d planExecutions, got %d", a.planExecutions.Value(), actual) } selectTopNode, ok := explainNode["selectTopNode"].(dataMap) @@ -78,7 +78,7 @@ func (a *ExplainResultAsserter) Assert(t *testing.T, result []dataMap) { if a.filterMatches.HasValue() { filterMatches, hasFilterMatches := selectNode["filterMatches"] require.True(t, hasFilterMatches, "Expected filterMatches property") - assert.Equal(t, filterMatches, uint64(a.filterMatches.Value()), + assert.Equal(t, uint64(a.filterMatches.Value()), filterMatches, "Expected %d filterMatches, got %d", a.filterMatches, filterMatches) } @@ -102,22 +102,22 @@ if a.iterations.HasValue() { actual := getScanNodesProp(iterationsProp) - assert.Equal(t, actual, uint64(a.iterations.Value()), + assert.Equal(t, uint64(a.iterations.Value()), actual, "Expected %d iterations, got %d", a.iterations.Value(), actual) } if a.docFetches.HasValue() { actual := getScanNodesProp(docFetchesProp) - assert.Equal(t, actual, uint64(a.docFetches.Value()), + assert.Equal(t, uint64(a.docFetches.Value()), actual, "Expected %d docFetches, got %d", a.docFetches.Value(), actual) } if a.fieldFetches.HasValue() { actual := getScanNodesProp(fieldFetchesProp) - assert.Equal(t, actual, uint64(a.fieldFetches.Value()), + assert.Equal(t, uint64(a.fieldFetches.Value()), actual, "Expected %d fieldFetches, got %d", a.fieldFetches.Value(), actual) } if a.indexFetches.HasValue() { actual := getScanNodesProp(indexFetchesProp) - assert.Equal(t, actual, uint64(a.indexFetches.Value()), + assert.Equal(t, uint64(a.indexFetches.Value()), actual, "Expected %d indexFetches, got %d", a.indexFetches.Value(), actual) } } diff --git a/tests/integration/index/create_composite_test.go b/tests/integration/index/create_composite_test.go new file mode 100644 index 0000000000..e9a83f1d15 --- /dev/null +++ b/tests/integration/index/create_composite_test.go @@ -0,0 +1,74 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package index + +import ( + "testing" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestCompositeIndexCreate_WhenCreated_CanRetrieve(t *testing.T) { + test := testUtils.TestCase{ + Description: "create composite index and retrieve it", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "John", + "age": 21 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Shahzad", + "age": 22 + }`, + }, + testUtils.CreateIndex{ + CollectionID: 0, + IndexName: "name_age_index", + Fields: []testUtils.IndexedField{{Name: "name"}, {Name: "age"}}, + }, + testUtils.GetIndexes{ + CollectionID: 0, + ExpectedIndexes: []client.IndexDescription{ + { + Name: "name_age_index", + ID: 1, + Fields: []client.IndexedFieldDescription{ + { + Name: "name", + }, + { + Name: "age", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/create_get_test.go b/tests/integration/index/create_get_test.go index 6ec0962c17..3ba27cfa9e 100644 --- a/tests/integration/index/create_get_test.go +++ b/tests/integration/index/create_get_test.go @@ -37,8 +37,7 @@ func TestIndexGet_ShouldReturnListOfExistingIndexes(t *testing.T) { ID: 1, Fields: []client.IndexedFieldDescription{ { - Name: "name", - Direction: client.Ascending, + Name: "name", }, }, }, @@ -47,8 +46,7 @@ func TestIndexGet_ShouldReturnListOfExistingIndexes(t *testing.T) { ID: 2, Fields: []client.IndexedFieldDescription{ { - Name: "age", - Direction: client.Ascending, + Name: "age", }, }, }, diff --git a/tests/integration/index/create_unique_composite_test.go b/tests/integration/index/create_unique_composite_test.go new file mode 100644 index 0000000000..2f0ed96488 --- /dev/null +++ b/tests/integration/index/create_unique_composite_test.go @@ -0,0 +1,180 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package index + +import ( + "testing" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/errors" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestCreateUniqueCompositeIndex_IfFieldValuesAreNotUnique_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Description: "If combination of fields is not unique, creating of unique index fails", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + email: String + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "John", + "age": 21, + "email": "email@gmail.com" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "John", + "age": 21, + "email": "another@gmail.com" + }`, + }, + testUtils.CreateIndex{ + CollectionID: 0, + Fields: []testUtils.IndexedField{{Name: "name"}, {Name: "age"}}, + Unique: true, + ExpectedError: db.NewErrCanNotIndexNonUniqueFields( + "bae-cae3deac-d371-5a1f-93b4-ede69042f79b", + errors.NewKV("name", "John"), errors.NewKV("age", 21), + ).Error(), + }, + testUtils.GetIndexes{ + CollectionID: 0, + ExpectedIndexes: []client.IndexDescription{}, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestUniqueCompositeIndexCreate_UponAddingDocWithExistingFieldValue_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Description: "adding a new doc with existing field combination for composite index should fail", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(unique: true, fields: ["name", "age"]) { + name: String + age: Int + email: String + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "John", + "age": 21, + "email": "email@gmail.com" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "John", + "age": 21, + "email": "another@gmail.com" + }`, + ExpectedError: db.NewErrCanNotIndexNonUniqueFields( + "bae-13254430-7e9e-52e2-9861-9a7ec7a75c8d", + errors.NewKV("name", "John"), errors.NewKV("age", 21)).Error(), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestUniqueCompositeIndexCreate_IfFieldValuesAreUnique_Succeed(t *testing.T) { + test := testUtils.TestCase{ + Description: "create unique composite index if all docs have unique fields combinations", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + email: String + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "John", + "age": 21, + "email": "some@gmail.com" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "John", + "age": 35, + "email": "another@gmail.com" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Andy", + "age": 35, + "email": "different@gmail.com" + }`, + }, + testUtils.CreateIndex{ + CollectionID: 0, + Fields: []testUtils.IndexedField{{Name: "name"}, {Name: "age"}}, + IndexName: "name_age_unique_index", + Unique: true, + }, + testUtils.GetIndexes{ + CollectionID: 0, + ExpectedIndexes: []client.IndexDescription{ + { + Name: "name_age_unique_index", + ID: 1, + Unique: true, + Fields: []client.IndexedFieldDescription{ + { + Name: "name", + }, + { + Name: "age", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/create_unique_test.go b/tests/integration/index/create_unique_test.go index fac2330a28..e9b2d41753 100644 --- a/tests/integration/index/create_unique_test.go 
+++ b/tests/integration/index/create_unique_test.go @@ -15,6 +15,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/errors" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) @@ -57,10 +58,11 @@ func TestCreateUniqueIndex_IfFieldValuesAreNotUnique_ReturnError(t *testing.T) { }`, }, testUtils.CreateIndex{ - CollectionID: 0, - FieldName: "age", - Unique: true, - ExpectedError: db.NewErrCanNotIndexNonUniqueField(johnDocID, "age", 21).Error(), + CollectionID: 0, + FieldName: "age", + Unique: true, + ExpectedError: db.NewErrCanNotIndexNonUniqueFields( + johnDocID, errors.NewKV("age", 21)).Error(), }, testUtils.GetIndexes{ CollectionID: 0, @@ -99,7 +101,8 @@ func TestUniqueIndexCreate_UponAddingDocWithExistingFieldValue_ReturnError(t *te "name": "John", "age": 21 }`, - ExpectedError: db.NewErrCanNotIndexNonUniqueField(johnDocID, "age", 21).Error(), + ExpectedError: db.NewErrCanNotIndexNonUniqueFields( + johnDocID, errors.NewKV("age", 21)).Error(), }, testUtils.Request{ Request: `query { @@ -118,8 +121,7 @@ func TestUniqueIndexCreate_UponAddingDocWithExistingFieldValue_ReturnError(t *te Unique: true, Fields: []client.IndexedFieldDescription{ { - Name: "age", - Direction: client.Ascending, + Name: "age", }, }, }, @@ -174,8 +176,7 @@ func TestUniqueIndexCreate_IfFieldValuesAreUnique_Succeed(t *testing.T) { Unique: true, Fields: []client.IndexedFieldDescription{ { - Name: "age", - Direction: client.Ascending, + Name: "age", }, }, }, @@ -187,7 +188,7 @@ func TestUniqueIndexCreate_IfFieldValuesAreUnique_Succeed(t *testing.T) { testUtils.ExecuteTestCase(t, test) } -func TestUniqueIndexCreate_IfNilFieldsArePresent_ReturnError(t *testing.T) { +func TestUniqueIndexCreate_WithMultipleNilFields_ShouldSucceed(t *testing.T) { test := testUtils.TestCase{ Description: "If filter does not match any document, return empty result", Actions: []any{ @@ -222,10 +223,25 @@ func TestUniqueIndexCreate_IfNilFieldsArePresent_ReturnError(t *testing.T) { }`, }, testUtils.CreateIndex{ - CollectionID: 0, - FieldName: "age", - Unique: true, - ExpectedError: db.NewErrCanNotIndexNonUniqueField("bae-caba9876-89aa-5bcf-bc1c-387a52499b27", "age", nil).Error(), + CollectionID: 0, + IndexName: "age_unique_index", + FieldName: "age", + Unique: true, + }, + testUtils.GetIndexes{ + CollectionID: 0, + ExpectedIndexes: []client.IndexDescription{ + { + Name: "age_unique_index", + ID: 1, + Unique: true, + Fields: []client.IndexedFieldDescription{ + { + Name: "age", + }, + }, + }, + }, }, }, } @@ -258,7 +274,7 @@ func TestUniqueIndexCreate_AddingDocWithNilValue_ShouldSucceed(t *testing.T) { testUtils.ExecuteTestCase(t, test) } -func TestUniqueIndexCreate_UponAddingDocWithExistingNilValue_ReturnError(t *testing.T) { +func TestUniqueIndexCreate_UponAddingDocWithExistingNilValue_ShouldSucceed(t *testing.T) { test := testUtils.TestCase{ Description: "If filter does not match any document, return empty result", Actions: []any{ @@ -291,7 +307,6 @@ func TestUniqueIndexCreate_UponAddingDocWithExistingNilValue_ReturnError(t *test { "name": "Andy" }`, - ExpectedError: db.NewErrCanNotIndexNonUniqueField("bae-2159860f-3cd1-59de-9440-71331e77cbb8", "age", nil).Error(), }, }, } diff --git a/tests/integration/index/index_p2p_test.go b/tests/integration/index/index_p2p_test.go new file mode 100644 index 0000000000..820d6d2c5b --- /dev/null +++ b/tests/integration/index/index_p2p_test.go @@ -0,0 +1,190 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of 
this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "testing" + + "github.com/sourcenetwork/immutable" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestIndexP2P_IfPeerCreatedDoc_ListeningPeerShouldIndexIt(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.RandomNetworkingConfig(), + testUtils.RandomNetworkingConfig(), + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateIndex{ + CollectionID: 0, + FieldName: "name", + }, + testUtils.ConnectPeers{ + SourceNodeID: 1, + TargetNodeID: 0, + }, + testUtils.SubscribeToCollection{ + NodeID: 1, + CollectionIDs: []int{0}, + }, + testUtils.CreateDoc{ + NodeID: immutable.Some(0), + Doc: `{ + "name": "Fred" + }`, + }, + testUtils.WaitForSync{}, + testUtils.Request{ + NodeID: immutable.Some(1), + Request: `query { + Users (filter: {name: {_eq: "Fred"}}){ + name + } + }`, + Results: []map[string]any{ + { + "name": "Fred", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestIndexP2P_IfPeerUpdateDoc_ListeningPeerShouldUpdateIndex(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.RandomNetworkingConfig(), + testUtils.RandomNetworkingConfig(), + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateIndex{ + CollectionID: 0, + FieldName: "name", + }, + testUtils.ConnectPeers{ + SourceNodeID: 1, + TargetNodeID: 0, + }, + testUtils.SubscribeToCollection{ + NodeID: 1, + CollectionIDs: []int{0}, + }, + testUtils.CreateDoc{ + NodeID: immutable.Some(0), + Doc: `{ + "name": "Fred" + }`, + }, + testUtils.WaitForSync{}, + testUtils.UpdateDoc{ + NodeID: immutable.Some(0), + Doc: `{ + "name": "Islam" + }`, + }, + testUtils.WaitForSync{}, + testUtils.Request{ + NodeID: immutable.Some(1), + Request: `query { + Users (filter: {name: {_eq: "Islam"}}){ + name + } + }`, + Results: []map[string]any{ + { + "name": "Islam", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestIndexP2P_IfPeerDeleteDoc_ListeningPeerShouldDeleteIndex(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.RandomNetworkingConfig(), + testUtils.RandomNetworkingConfig(), + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + age: Int + } + `, + }, + testUtils.CreateIndex{ + CollectionID: 0, + FieldName: "name", + }, + testUtils.ConnectPeers{ + SourceNodeID: 1, + TargetNodeID: 0, + }, + testUtils.SubscribeToCollection{ + NodeID: 1, + CollectionIDs: []int{0}, + }, + testUtils.CreateDoc{ + NodeID: immutable.Some(0), + Doc: `{ + "name": "Fred", + "age": 25 + }`, + }, + testUtils.CreateDoc{ + NodeID: immutable.Some(0), + Doc: `{ + "name": "Fred", + "age": 30 + }`, + }, + testUtils.WaitForSync{}, + testUtils.DeleteDoc{ + NodeID: immutable.Some(0), + DocID: 0, + }, + testUtils.WaitForSync{}, + testUtils.Request{ + NodeID: immutable.Some(1), + Request: `query { + Users (filter: {name: {_eq: "Fred"}}){ + age + } + }`, + Results: []map[string]any{ + { + "age": int64(30), + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/query_with_composite_index_field_order_test.go 
b/tests/integration/index/query_with_composite_index_field_order_test.go new file mode 100644 index 0000000000..611bfed998 --- /dev/null +++ b/tests/integration/index/query_with_composite_index_field_order_test.go @@ -0,0 +1,677 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryWithCompositeIndex_WithDefaultOrder_ShouldFetchInDefaultOrder(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test composite index in default order", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 22 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alan", + "age": 29 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 38 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 24 + }`, + }, + testUtils.Request{ + Request: ` + query { + User(filter: {name: {_like: "Al%"}}) { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Alan", + "age": 29, + }, + { + "name": "Alice", + "age": 22, + }, + { + "name": "Alice", + "age": 24, + }, + { + "name": "Alice", + "age": 38, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithDefaultOrderCaseInsensitive_ShouldFetchInDefaultOrder(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test composite index in default order and case insensitive operator", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 22 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alan", + "age": 29 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 38 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 24 + }`, + }, + testUtils.Request{ + Request: ` + query { + User(filter: {name: {_ilike: "al%"}}) { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Alan", + "age": 29, + }, + { + "name": "Alice", + "age": 22, + }, + { + "name": "Alice", + "age": 24, + }, + { + "name": "Alice", + "age": 38, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithRevertedOrderOnFirstField_ShouldFetchInRevertedOrder(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test composite index with reverted order on first field", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"], directions: [DESC, ASC]) { + name: String + age: Int + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 22 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alan", + "age": 29 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": 
"Alice", + "age": 38 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Andy", + "age": 24 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 24 + }`, + }, + testUtils.Request{ + Request: ` + query { + User(filter: {name: {_like: "A%"}}) { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Andy", + "age": 24, + }, + { + "name": "Alice", + "age": 22, + }, + { + "name": "Alice", + "age": 24, + }, + { + "name": "Alice", + "age": 38, + }, + { + "name": "Alan", + "age": 29, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithRevertedOrderOnFirstFieldCaseInsensitive_ShouldFetchInRevertedOrder(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test composite index with reverted order on first field and case insensitive operator", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"], directions: [DESC, ASC]) { + name: String + age: Int + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 22 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alan", + "age": 29 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 38 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Andy", + "age": 24 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 24 + }`, + }, + testUtils.Request{ + Request: ` + query { + User(filter: {name: {_ilike: "a%"}}) { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Andy", + "age": 24, + }, + { + "name": "Alice", + "age": 22, + }, + { + "name": "Alice", + "age": 24, + }, + { + "name": "Alice", + "age": 38, + }, + { + "name": "Alan", + "age": 29, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithRevertedOrderOnSecondField_ShouldFetchInRevertedOrder(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test composite index with reverted order on second field", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"], directions: [ASC, DESC]) { + name: String + age: Int + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 22 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alan", + "age": 29 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 38 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 24 + }`, + }, + testUtils.Request{ + Request: ` + query { + User(filter: {name: {_like: "Al%"}}) { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Alan", + "age": 29, + }, + { + "name": "Alice", + "age": 38, + }, + { + "name": "Alice", + "age": 24, + }, + { + "name": "Alice", + "age": 22, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithRevertedOrderOnSecondFieldCaseInsensitive_ShouldFetchInRevertedOrder( + t *testing.T, +) { + test := testUtils.TestCase{ + Description: "Test composite index with reverted order on second field and case insensitive operator", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"], directions: [ASC, DESC]) { + name: String + age: Int + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": 
"Alice", + "age": 22 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alan", + "age": 29 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 38 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 24 + }`, + }, + testUtils.Request{ + Request: ` + query { + User(filter: {name: {_ilike: "al%"}}) { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Alan", + "age": 29, + }, + { + "name": "Alice", + "age": 38, + }, + { + "name": "Alice", + "age": 24, + }, + { + "name": "Alice", + "age": 22, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_IfExactMatchWithRevertedOrderOnFirstField_ShouldFetch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test composite index with reverted order on first field and filter with exact match", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"], directions: [DESC, ASC]) { + name: String + age: Int + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 38 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 22 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alan", + "age": 29 + }`, + }, + testUtils.Request{ + Request: ` + query { + User(filter: {name: {_eq: "Alice"}, age: {_eq: 22}}) { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Alice", + "age": 22, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_IfExactMatchWithRevertedOrderOnSecondField_ShouldFetch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test composite index with reverted order on second field and filter with exact match", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"], directions: [ASC, DESC]) { + name: String + age: Int + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 38 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 22 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alan", + "age": 29 + }`, + }, + testUtils.Request{ + Request: ` + query { + User(filter: {name: {_eq: "Alice"}, age: {_eq: 22}}) { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Alice", + "age": 22, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithInFilterOnFirstFieldWithRevertedOrder_ShouldFetch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test composite index with reverted order on first field and filtering with _in filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"], directions: [DESC, ASC]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: `query { + User(filter: {name: {_in: ["Addo", "Andy", "Fred"]}}) { + name + } + }`, + Results: []map[string]any{ + {"name": "Addo"}, + {"name": "Andy"}, + {"name": "Fred"}, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithInFilterOnSecondFieldWithRevertedOrder_ShouldFetch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test composite index with reverted order on second field and filtering 
with _in filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"], directions: [ASC, DESC]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: `query { + User(filter: {age: {_in: [20, 28, 33]}}) { + name + } + }`, + Results: []map[string]any{ + {"name": "Shahzad"}, + {"name": "Andy"}, + {"name": "Fred"}, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/query_with_composite_index_only_filter_test.go b/tests/integration/index/query_with_composite_index_only_filter_test.go new file mode 100644 index 0000000000..bf7e8b17c3 --- /dev/null +++ b/tests/integration/index/query_with_composite_index_only_filter_test.go @@ -0,0 +1,948 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryWithCompositeIndex_WithEqualFilter_ShouldFetch(t *testing.T) { + req1 := `query { + User(filter: {name: {_eq: "Islam"}}) { + name + age + } + }` + req2 := `query { + User(filter: {name: {_eq: "Islam"}, age: {_eq: 32}}) { + name + age + } + }` + req3 := `query { + User(filter: {name: {_eq: "Islam"}, age: {_eq: 66}}) { + name + age + } + }` + test := testUtils.TestCase{ + Description: "Test filtering on composite index with _eq filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + {"name": "Islam", "age": 32}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(1), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + {"name": "Islam", "age": 32}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(1), + }, + testUtils.Request{ + Request: req3, + Results: []map[string]any{}, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithGreaterThanFilterOnFirstField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {name: {_ne: "Keenan"}, age: {_gt: 44}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _gt filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["age", "name"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Chris"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithGreaterThanFilterOnSecondField_ShouldFetch(t *testing.T) { + req := `query { + 
User(filter: {name: {_ne: "Keenan"}, age: {_gt: 44}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _gt filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Chris"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithGreaterOrEqualFilterOnFirstField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {name: {_ne: "Keenan"}, age: {_ge: 44},}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _ge filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["age", "name"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Roy"}, + {"name": "Chris"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithGreaterOrEqualFilterOnSecondField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_ge: 44}, name: {_ne: "Keenan"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _ge filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Chris"}, + {"name": "Roy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithLessThanFilterOnFirstField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_lt: 24}, name: {_ne: "Shahzad"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _lt filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["age", "name"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Bruno"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithLessThanFilterOnSecondField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_lt: 24}, name: {_ne: "Shahzad"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _lt filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + 
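+			// NOTE: with the composite index ordered as ["name", "age"], the _ne
+			// condition on the leading field cannot narrow the index scan, so the
+			// explain assertion below expects all 10 index entries to be visited.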
}, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Bruno"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithLessOrEqualFilterOnFirstField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_le: 28}, name: {_ne: "Bruno"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _le filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["age", "name"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Shahzad"}, + {"name": "Fred"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithLessOrEqualFilterOnSecondField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_le: 28}, name: {_ne: "Bruno"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _le filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Fred"}, + {"name": "Shahzad"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithNotEqualFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {name: {_ne: "Islam"}, age: {_ne: 28}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _ne filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Addo"}, + {"name": "Andy"}, + {"name": "Bruno"}, + {"name": "Chris"}, + {"name": "John"}, + {"name": "Keenan"}, + {"name": "Roy"}, + {"name": "Shahzad"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithInFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_in: [20, 28, 33]}, name: {_in: ["Addo", "Andy", "Fred"]}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _in filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Andy"}, + {"name": "Fred"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: 
testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(3), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithNotInFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_nin: [20, 23, 28, 42]}, name: {_nin: ["John", "Andy", "Chris"]}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _nin filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Islam"}, + {"name": "Keenan"}, + {"name": "Roy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithLikeFilter_ShouldFetch(t *testing.T) { + req1 := `query { + User(filter: {email: {_like: "a%"}, name: {_like: "%o"}}) { + name + } + }` + req2 := `query { + User(filter: {email: {_like: "%d@gmail.com"}, name: {_like: "F%"}}) { + name + } + }` + req3 := `query { + User(filter: {email: {_like: "%e%"}, name: {_like: "%n%"}}) { + name + } + }` + req4 := `query { + User(filter: {email: {_like: "fred@gmail.com"}, name: {_like: "Fred"}}) { + name + } + }` + req5 := `query { + User(filter: {email: {_like: "a%@gmail.com"}, name: {_like: "%dd%"}}) { + name + } + }` + req6 := `query { + User(filter: {email: {_like: "a%com%m"}}) { + name + } + }` + req7 := `query { + User(filter: {email: {_like: "s%"}, name: {_like: "s%h%d"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _like filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "email"]) { + name: String + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + {"name": "Addo"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + {"name": "Fred"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req3, + Results: []map[string]any{ + {"name": "Keenan"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req3), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req4, + Results: []map[string]any{ + {"name": "Fred"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req4), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req5, + Results: []map[string]any{ + {"name": "Addo"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req5), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req6, + Results: []map[string]any{}, + }, + testUtils.Request{ + Request: req7, + Results: []map[string]any{}, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithNotLikeFilter_ShouldFetch(t *testing.T) { + req := 
`query { + User(filter: {name: {_nlike: "%h%"}, email: {_nlike: "%d%"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _nlike filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "email"]) { + name: String + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Bruno"}, + {"name": "Islam"}, + {"name": "Keenan"}, + {"name": "Roy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_IfFirstFieldIsNotInFilter_ShouldNotUseIndex(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test if index is not used when first field is not in filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: `query @explain(type: execute) { + User(filter: {age: {_eq: 32}}) { + name + } + }`, + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(11).WithIndexFetches(0), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithEqualFilterOnNilValueOnFirst_ShouldFetch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test index filtering with _eq filter on nil value on first field", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 22 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "age": 32 + }`, + }, + testUtils.Request{ + Request: ` + query { + User(filter: {name: {_eq: null}}) { + name + age + } + }`, + Results: []map[string]any{ + {"name": nil, "age": 32}, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_WithEqualFilterOnNilValueOnSecond_ShouldFetch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test index filtering with _eq filter on nil value on second field", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "age": 22 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Bob" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice" + }`, + }, + testUtils.Request{ + Request: ` + query { + User(filter: {name: {_eq: "Alice"}, age: {_eq: null}}) { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Alice", + "age": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithCompositeIndex_IfMiddleFieldIsNotInFilter_ShouldIgnoreValue(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test composite index with filter without middle field", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(fields: ["name", "email", "age"]) { + name: String + email: String + age: Int + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Alice", + "email": "alice@gmail.com", + "age": 22 + 
					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Alan",
+						"email": "alan@gmail.com",
+						"age": 38
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"email": "bob@gmail.com",
+						"age": 51
+					}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {name: {_like: "%l%"}, age: {_gt: 30}}) {
+							name
+						}
+					}`,
+				Results: []map[string]any{
+					{
+						"name": "Alan",
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithCompositeIndex_IfConsecutiveEqOps_ShouldUseAllToOptimizeQuery(t *testing.T) {
+	reqWithName := `query {
+		User(filter: {name: {_eq: "Bob"}}) {
+			about
+		}
+	}`
+	reqWithNameAge := `query {
+		User(filter: {name: {_eq: "Bob"}, age: {_eq: 22}}) {
+			about
+		}
+	}`
+	reqWithNameAgeNumChildren := `query {
+		User(filter: {name: {_eq: "Bob"}, age: {_eq: 22}, numChildren: {_eq: 2}}) {
+			about
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test composite index with consecutive _eq filters on all indexed fields",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(fields: ["name", "age", "numChildren"]) {
+						name: String
+						age: Int
+						numChildren: Int
+						about: String
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"age": 22,
+						"numChildren": 2,
+						"about": "bob1"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"age": 22,
+						"numChildren": 2,
+						"about": "bob2"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"age": 22,
+						"numChildren": 0,
+						"about": "bob3"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"age": 44,
+						"numChildren": 2,
+						"about": "bob4"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Alice",
+						"age": 22,
+						"numChildren": 2,
+						"about": "alice"
+					}`,
+			},
+			testUtils.Request{
+				Request: reqWithName,
+				Results: []map[string]any{
+					{"about": "bob3"},
+					{"about": "bob1"},
+					{"about": "bob2"},
+					{"about": "bob4"},
+				},
+			},
+			testUtils.Request{
+				Request:  makeExplainQuery(reqWithName),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(4).WithIndexFetches(4),
+			},
+			testUtils.Request{
+				Request: reqWithNameAge,
+				Results: []map[string]any{
+					{"about": "bob3"},
+					{"about": "bob1"},
+					{"about": "bob2"},
+				},
+			},
+			testUtils.Request{
+				Request:  makeExplainQuery(reqWithNameAge),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(3).WithIndexFetches(3),
+			},
+			testUtils.Request{
+				Request: reqWithNameAgeNumChildren,
+				Results: []map[string]any{
+					{"about": "bob1"},
+					{"about": "bob2"},
+				},
+			},
+			testUtils.Request{
+				Request:  makeExplainQuery(reqWithNameAgeNumChildren),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(2),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
diff --git a/tests/integration/index/query_with_index_combined_filter_test.go b/tests/integration/index/query_with_index_combined_filter_test.go
index 8faf5fa37a..595bf5fe44 100644
--- a/tests/integration/index/query_with_index_combined_filter_test.go
+++ b/tests/integration/index/query_with_index_combined_filter_test.go
@@ -46,7 +46,7 @@ func TestQueryWithIndex_IfIndexFilterWithRegular_ShouldFilter(t *testing.T) {
 		},
 		testUtils.Request{
 			Request:  makeExplainQuery(req),
-			Asserter: testUtils.NewExplainAsserter().WithDocFetches(3).WithFieldFetches(6).WithIndexFetches(3),
+			Asserter: testUtils.NewExplainAsserter().WithFieldFetches(3).WithIndexFetches(3),
 		},
 	},
 }
@@ -86,7 +86,84 @@
func TestQueryWithIndex_IfMultipleIndexFiltersWithRegular_ShouldFilter(t *testin }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(6).WithFieldFetches(18), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(12), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_IfMultipleIndexFiltersWithRegularCaseInsensitive_ShouldFilter(t *testing.T) { + req := `query { + User(filter: { + name: {_ilike: "a%"}, + age: {_gt: 30}, + }) { + name + } + }` + test := testUtils.TestCase{ + Description: "Combination of a filter on regular and of 2 indexed fields and case insensitive operator", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String @index + age: Int @index + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Andy"}, + {"name": "Addo"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(6), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_FilterOnNonIndexedField_ShouldIgnoreIndex(t *testing.T) { + req := `query { + User(filter: { + age: {_eq: 44} + }) { + name + } + }` + test := testUtils.TestCase{ + Description: "If filter does not contain indexed field, index should be ignored", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String @index + age: Int + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Roy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithIndexFetches(0), }, }, } diff --git a/tests/integration/index/query_with_index_only_field_order_test.go b/tests/integration/index/query_with_index_only_field_order_test.go new file mode 100644 index 0000000000..ae46213533 --- /dev/null +++ b/tests/integration/index/query_with_index_only_field_order_test.go @@ -0,0 +1,180 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
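+//
+// The tests in this file rely on the @index(direction: DESC) schema directive:
+// with a descending index, an index-backed scan yields documents in descending
+// order of the indexed field, which is what the result ordering below asserts.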
+
+package index
+
+import (
+	"testing"
+
+	testUtils "github.com/sourcenetwork/defradb/tests/integration"
+)
+
+func TestQueryWithIndex_IfIntFieldInDescOrder_ShouldFetchInRevertedOrder(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "If indexed int field is in DESC order, it should be fetched in reverted order",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User {
+						name: String
+						age: Int @index(direction: DESC)
+					}`,
+			},
+			testUtils.CreateDoc{
+				Doc: `
+					{
+						"name": "Alice",
+						"age": 22
+					}`,
+			},
+			testUtils.CreateDoc{
+				Doc: `
+					{
+						"name": "Bob",
+						"age": 24
+					}`,
+			},
+			testUtils.CreateDoc{
+				Doc: `
+					{
+						"name": "Kate",
+						"age": 23
+					}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {age: {_gt: 1}}) {
+							name
+							age
+						}
+					}`,
+				Results: []map[string]any{{
+					"name": "Bob",
+					"age":  24,
+				}, {
+					"name": "Kate",
+					"age":  23,
+				}, {
+					"name": "Alice",
+					"age":  22,
+				}},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithIndex_IfFloatFieldInDescOrder_ShouldFetchInRevertedOrder(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "If indexed float field is in DESC order, it should be fetched in reverted order",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User {
+						name: String
+						iq: Float @index(direction: DESC)
+					}`,
+			},
+			testUtils.CreateDoc{
+				Doc: `
+					{
+						"name": "Alice",
+						"iq": 0.2
+					}`,
+			},
+			testUtils.CreateDoc{
+				Doc: `
+					{
+						"name": "Bob",
+						"iq": 0.4
+					}`,
+			},
+			testUtils.CreateDoc{
+				Doc: `
+					{
+						"name": "Kate",
+						"iq": 0.3
+					}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {iq: {_gt: 0}}) {
+							name
+							iq
+						}
+					}`,
+				Results: []map[string]any{{
+					"name": "Bob",
+					"iq":   0.4,
+				}, {
+					"name": "Kate",
+					"iq":   0.3,
+				}, {
+					"name": "Alice",
+					"iq":   0.2,
+				}},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithIndex_IfStringFieldInDescOrder_ShouldFetchInRevertedOrder(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "If indexed string field is in DESC order, it should be fetched in reverted order",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User {
+						name: String @index(direction: DESC)
+					}`,
+			},
+			testUtils.CreateDoc{
+				Doc: `
+					{
+						"name": "Alice"
+					}`,
+			},
+			testUtils.CreateDoc{
+				Doc: `
+					{
+						"name": "Aaron"
+					}`,
+			},
+			testUtils.CreateDoc{
+				Doc: `
+					{
+						"name": "Andy"
+					}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {name: {_like: "A%"}}) {
+							name
+						}
+					}`,
+				Results: []map[string]any{{
+					"name": "Andy",
+				}, {
+					"name": "Alice",
+				}, {
+					"name": "Aaron",
+				}},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
diff --git a/tests/integration/index/query_with_index_only_filter_test.go b/tests/integration/index/query_with_index_only_filter_test.go
index 82779c5832..0c2c337398 100644
--- a/tests/integration/index/query_with_index_only_filter_test.go
+++ b/tests/integration/index/query_with_index_only_filter_test.go
@@ -45,7 +45,7 @@ func TestQueryWithIndex_WithNonIndexedFields_ShouldFetchAllOfThem(t *testing.T)
 		},
 		testUtils.Request{
 			Request:  makeExplainQuery(req),
-			Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(1),
+			Asserter: testUtils.NewExplainAsserter().WithFieldFetches(1).WithIndexFetches(1),
 		},
 	},
 }
@@ -79,7 +79,7 @@ func TestQueryWithIndex_WithEqualFilter_ShouldFetch(t *testing.T) {
 		},
 		testUtils.Request{
 			Request:  makeExplainQuery(req),
-			Asserter:
testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(1).WithIndexFetches(1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(1), }, }, } @@ -122,7 +122,7 @@ func TestQueryWithIndex_IfSeveralDocsWithEqFilter_ShouldFetchAll(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(2), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(2), }, }, } @@ -157,7 +157,7 @@ func TestQueryWithIndex_WithGreaterThanFilter_ShouldFetch(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(10), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(1).WithIndexFetches(10), }, }, } @@ -193,7 +193,7 @@ func TestQueryWithIndex_WithGreaterOrEqualFilter_ShouldFetch(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(10), }, }, } @@ -228,7 +228,7 @@ func TestQueryWithIndex_WithLessThanFilter_ShouldFetch(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(10), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(1).WithIndexFetches(10), }, }, } @@ -264,7 +264,7 @@ func TestQueryWithIndex_WithLessOrEqualFilter_ShouldFetch(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(10), }, }, } @@ -294,20 +294,20 @@ func TestQueryWithIndex_WithNotEqualFilter_ShouldFetch(t *testing.T) { testUtils.Request{ Request: req, Results: []map[string]any{ - {"name": "Roy"}, {"name": "Addo"}, {"name": "Andy"}, - {"name": "Fred"}, - {"name": "John"}, {"name": "Bruno"}, {"name": "Chris"}, + {"name": "Fred"}, + {"name": "John"}, {"name": "Keenan"}, + {"name": "Roy"}, {"name": "Shahzad"}, }, }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(9).WithFieldFetches(9).WithIndexFetches(10), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), }, }, } @@ -343,7 +343,7 @@ func TestQueryWithIndex_WithInFilter_ShouldFetch(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(2), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(2), }, }, } @@ -386,7 +386,7 @@ func TestQueryWithIndex_IfSeveralDocsWithInFilter_ShouldFetchAll(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(2), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(2), }, }, } @@ -424,7 +424,7 @@ func TestQueryWithIndex_WithNotInFilter_ShouldFetch(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(4).WithFieldFetches(8).WithIndexFetches(10), + Asserter: 
testUtils.NewExplainAsserter().WithFieldFetches(4).WithIndexFetches(10), }, }, } @@ -485,7 +485,7 @@ func TestQueryWithIndex_WithLikeFilter_ShouldFetch(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req1), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(10), }, testUtils.Request{ Request: req2, @@ -496,7 +496,7 @@ func TestQueryWithIndex_WithLikeFilter_ShouldFetch(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req2), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(10), }, testUtils.Request{ Request: req3, @@ -507,7 +507,7 @@ func TestQueryWithIndex_WithLikeFilter_ShouldFetch(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req3), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(10), }, testUtils.Request{ Request: req4, @@ -517,7 +517,7 @@ func TestQueryWithIndex_WithLikeFilter_ShouldFetch(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req4), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(10), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(1).WithIndexFetches(10), }, testUtils.Request{ Request: req5, @@ -528,7 +528,7 @@ func TestQueryWithIndex_WithLikeFilter_ShouldFetch(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req5), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(10), }, testUtils.Request{ Request: req6, @@ -536,7 +536,7 @@ func TestQueryWithIndex_WithLikeFilter_ShouldFetch(t *testing.T) { }, testUtils.Request{ Request: makeExplainQuery(req6), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(0).WithFieldFetches(0).WithIndexFetches(10), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), }, }, } @@ -566,18 +566,18 @@ func TestQueryWithIndex_WithNotLikeFilter_ShouldFetch(t *testing.T) { testUtils.Request{ Request: req, Results: []map[string]any{ - {"name": "Roy"}, {"name": "Addo"}, {"name": "Andy"}, - {"name": "Fred"}, {"name": "Bruno"}, + {"name": "Fred"}, {"name": "Islam"}, {"name": "Keenan"}, + {"name": "Roy"}, }, }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(7).WithFieldFetches(7).WithIndexFetches(10), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), }, }, } diff --git a/tests/integration/index/query_with_relation_filter_test.go b/tests/integration/index/query_with_relation_filter_test.go index 57a43bf69e..e3ae71429e 100644 --- a/tests/integration/index/query_with_relation_filter_test.go +++ b/tests/integration/index/query_with_relation_filter_test.go @@ -60,7 +60,7 @@ func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilte }, testUtils.Request{ Request: makeExplainQuery(req1), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(6).WithFieldFetches(9).WithIndexFetches(3), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(6).WithIndexFetches(3), }, testUtils.Request{ Request: req2, @@ -70,7 +70,7 @@ func 
TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilte }, testUtils.Request{ Request: makeExplainQuery(req2), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(1), }, }, } @@ -122,7 +122,7 @@ func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilte }, testUtils.Request{ Request: makeExplainQuery(req1), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(6).WithFieldFetches(9).WithIndexFetches(3), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(6).WithIndexFetches(3), }, testUtils.Request{ Request: req2, @@ -132,7 +132,7 @@ func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilte }, testUtils.Request{ Request: makeExplainQuery(req2), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(1), }, }, } @@ -182,7 +182,7 @@ func TestQueryWithIndexOnOneToOnesSecondaryRelation_IfFilterOnIndexedRelation_Sh }, testUtils.Request{ Request: makeExplainQuery(req1), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(1), }, testUtils.Request{ Request: req2, @@ -194,7 +194,7 @@ func TestQueryWithIndexOnOneToOnesSecondaryRelation_IfFilterOnIndexedRelation_Sh }, testUtils.Request{ Request: makeExplainQuery(req2), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(6).WithFieldFetches(9).WithIndexFetches(3), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(6).WithIndexFetches(3), }, }, } @@ -245,7 +245,7 @@ func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelatio }, testUtils.Request{ Request: makeExplainQuery(req1), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(11).WithFieldFetches(12).WithIndexFetches(1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(11).WithIndexFetches(1), }, testUtils.Request{ Request: req2, @@ -257,7 +257,7 @@ func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelatio }, testUtils.Request{ Request: makeExplainQuery(req2), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(15).WithFieldFetches(18).WithIndexFetches(3), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(15).WithIndexFetches(3), }, }, } @@ -301,7 +301,7 @@ func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedRelationWhileI }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(11).WithFieldFetches(12).WithIndexFetches(1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(11).WithIndexFetches(1), }, }, } @@ -368,7 +368,7 @@ func TestQueryWithIndexOnOneToTwoRelation_IfFilterOnIndexedRelation_ShouldFilter }, testUtils.Request{ Request: makeExplainQuery(req1), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(1), }, testUtils.Request{ Request: req2, @@ -383,7 +383,7 @@ func TestQueryWithIndexOnOneToTwoRelation_IfFilterOnIndexedRelation_ShouldFilter }, testUtils.Request{ Request: makeExplainQuery(req2), - Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + Asserter: 
testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(1), }, }, } diff --git a/tests/integration/index/query_with_unique_composite_index_filter_test.go b/tests/integration/index/query_with_unique_composite_index_filter_test.go new file mode 100644 index 0000000000..52712bc181 --- /dev/null +++ b/tests/integration/index/query_with_unique_composite_index_filter_test.go @@ -0,0 +1,1340 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryWithUniqueCompositeIndex_WithEqualFilter_ShouldFetch(t *testing.T) { + req1 := `query { + User(filter: {name: {_eq: "Islam"}}) { + name + age + } + }` + req2 := `query { + User(filter: {name: {_eq: "Islam"}, age: {_eq: 32}}) { + name + age + } + }` + req3 := `query { + User(filter: {name: {_eq: "Islam"}, age: {_eq: 66}}) { + name + age + } + }` + test := testUtils.TestCase{ + Description: "Test filtering on composite index with _eq filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(unique: true, fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Islam", + "age": 40 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: ` + { + "name": "Islam", + "age": 50 + }`, + }, + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + {"name": "Islam", "age": 32}, + {"name": "Islam", "age": 40}, + {"name": "Islam", "age": 50}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(3), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + {"name": "Islam", "age": 32}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(1), + }, + testUtils.Request{ + Request: req3, + Results: []map[string]any{}, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithUniqueCompositeIndex_WithGreaterThanFilterOnFirstField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {name: {_ne: "Keenan"}, age: {_gt: 44}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _gt filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(unique: true, fields: ["age", "name"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Chris"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithUniqueCompositeIndex_WithGreaterThanFilterOnSecondField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {name: {_ne: "Keenan"}, age: {_gt: 44}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index 
filtering with _gt filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(unique: true, fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Chris"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithUniqueCompositeIndex_WithGreaterOrEqualFilterOnFirstField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {name: {_ne: "Keenan"}, age: {_ge: 44},}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _ge filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(unique: true, fields: ["age", "name"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Roy"}, + {"name": "Chris"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithUniqueCompositeIndex_WithGreaterOrEqualFilterOnSecondField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_ge: 44}, name: {_ne: "Keenan"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _ge filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(unique: true, fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Chris"}, + {"name": "Roy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithUniqueCompositeIndex_WithLessThanFilterOnFirstField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_lt: 24}, name: {_ne: "Shahzad"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _lt filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(unique: true, fields: ["age", "name"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Bruno"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithUniqueCompositeIndex_WithLessThanFilterOnSecondField_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_lt: 24}, name: {_ne: "Shahzad"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _lt filter", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User @index(unique: true, fields: ["name", "age"]) { + name: String + age: Int + email: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: 
+				Request: req,
+				Results: []map[string]any{
+					{"name": "Bruno"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithLessOrEqualFilterOnFirstField_ShouldFetch(t *testing.T) {
+	req := `query {
+		User(filter: {age: {_le: 28}, name: {_ne: "Bruno"}}) {
+			name
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _le filter",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["age", "name"]) {
+						name: String
+						age: Int
+						email: String
+					}`,
+			},
+			testUtils.CreatePredefinedDocs{
+				Docs: getUserDocs(),
+			},
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
+					{"name": "Shahzad"},
+					{"name": "Fred"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithLessOrEqualFilterOnSecondField_ShouldFetch(t *testing.T) {
+	req := `query {
+		User(filter: {age: {_le: 28}, name: {_ne: "Bruno"}}) {
+			name
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _le filter",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "age"]) {
+						name: String
+						age: Int
+						email: String
+					}`,
+			},
+			testUtils.CreatePredefinedDocs{
+				Docs: getUserDocs(),
+			},
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
+					{"name": "Fred"},
+					{"name": "Shahzad"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithNotEqualFilter_ShouldFetch(t *testing.T) {
+	req := `query {
+		User(filter: {name: {_ne: "Islam"}, age: {_ne: 28}}) {
+			name
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _ne filter",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "age"]) {
+						name: String
+						age: Int
+						email: String
+					}`,
+			},
+			testUtils.CreatePredefinedDocs{
+				Docs: getUserDocs(),
+			},
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
+					{"name": "Addo"},
+					{"name": "Andy"},
+					{"name": "Bruno"},
+					{"name": "Chris"},
+					{"name": "John"},
+					{"name": "Keenan"},
+					{"name": "Roy"},
+					{"name": "Shahzad"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithInForFirstAndEqForRest_ShouldFetchEfficiently(t *testing.T) {
+	req := `query {
+		User(filter: {age: {_eq: 33}, name: {_in: ["Addo", "Andy", "Fred"]}}) {
+			name
+			age
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _in filter",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "age"]) {
+						name: String
+						age: Int
+						email: String
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Addo",
+						"age": 33
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Addo",
+						"age": 88
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Andy",
+						"age": 33
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Andy",
+						"age": 70
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Andy",
+						"age": 51
+					}`,
+			},
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
+					{"name": "Addo", "age": 33},
+					{"name": "Andy", "age": 33},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(2),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithInFilter_ShouldFetch(t *testing.T) {
+	req := `query {
+		User(filter: {age: {_in: [20, 28, 33]}, name: {_in: ["Addo", "Andy", "Fred"]}}) {
+			name
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _in filter",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "age"]) {
+						name: String
+						age: Int
+						email: String
+					}`,
+			},
+			testUtils.CreatePredefinedDocs{
+				Docs: getUserDocs(),
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Addo",
+						"age": 10
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Addo",
+						"age": 88
+					}`,
+			},
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
+					{"name": "Andy"},
+					{"name": "Fred"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(5),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithNotInFilter_ShouldFetch(t *testing.T) {
+	req := `query {
+		User(filter: {age: {_nin: [20, 23, 28, 42]}, name: {_nin: ["John", "Andy", "Chris"]}}) {
+			name
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _nin filter",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "age"]) {
+						name: String
+						age: Int
+						email: String
+					}`,
+			},
+			testUtils.CreatePredefinedDocs{
+				Docs: getUserDocs(),
+			},
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
+					{"name": "Islam"},
+					{"name": "Keenan"},
+					{"name": "Roy"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithLikeFilter_ShouldFetch(t *testing.T) {
+	req1 := `query {
+		User(filter: {email: {_like: "a%"}, name: {_like: "%o"}}) {
+			name
+		}
+	}`
+	req2 := `query {
+		User(filter: {email: {_like: "%d@gmail.com"}, name: {_like: "F%"}}) {
+			name
+		}
+	}`
+	req3 := `query {
+		User(filter: {email: {_like: "%e%"}, name: {_like: "%n%"}}) {
+			name
+		}
+	}`
+	req4 := `query {
+		User(filter: {email: {_like: "fred@gmail.com"}, name: {_like: "Fred"}}) {
+			name
+		}
+	}`
+	req5 := `query {
+		User(filter: {email: {_like: "a%@gmail.com"}, name: {_like: "%dd%"}}) {
+			name
+		}
+	}`
+	req6 := `query {
+		User(filter: {email: {_like: "a%com%m"}}) {
+			name
+		}
+	}`
+	req7 := `query {
+		User(filter: {email: {_like: "s%"}, name: {_like: "s%h%d"}}) {
+			name
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _like filter",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "email"]) {
+						name: String
+						email: String
+					}`,
+			},
+			testUtils.CreatePredefinedDocs{
+				Docs: getUserDocs(),
+			},
+			testUtils.Request{
+				Request: req1,
+				Results: []map[string]any{
+					{"name": "Addo"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req1),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+			testUtils.Request{
+				Request: req2,
+				Results: []map[string]any{
+					{"name": "Fred"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req2),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+			testUtils.Request{
+				Request: req3,
+				Results: []map[string]any{
+					{"name": "Keenan"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req3),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+			testUtils.Request{
+				Request: req4,
+				Results: []map[string]any{
+					{"name": "Fred"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req4),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+			testUtils.Request{
+				Request: req5,
+				Results: []map[string]any{
+					{"name": "Addo"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req5),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+			testUtils.Request{
+				Request: req6,
+				Results: []map[string]any{},
+			},
+			testUtils.Request{
+				Request: req7,
+				Results: []map[string]any{},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithNotLikeFilter_ShouldFetch(t *testing.T) {
+	req := `query {
+		User(filter: {name: {_nlike: "%h%"}, email: {_nlike: "%d%"}}) {
+			name
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _nlike filter",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "email"]) {
+						name: String
+						email: String
+					}`,
+			},
+			testUtils.CreatePredefinedDocs{
+				Docs: getUserDocs(),
+			},
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
+					{"name": "Bruno"},
+					{"name": "Islam"},
+					{"name": "Keenan"},
+					{"name": "Roy"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithNotCaseInsensitiveLikeFilter_ShouldFetch(t *testing.T) {
+	req := `query {
+		User(filter: {name: {_nilike: "j%"}, email: {_nlike: "%d%"}}) {
+			name
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _nilike and _nlike filter",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "email"]) {
+						name: String
+						email: String
+					}`,
+			},
+			testUtils.CreatePredefinedDocs{
+				Docs: getUserDocs(),
+			},
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
+					{"name": "Bruno"},
+					{"name": "Chris"},
+					{"name": "Islam"},
+					{"name": "Keenan"},
+					{"name": "Roy"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_IfFirstFieldIsNotInFilter_ShouldNotUseIndex(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Test if index is not used when first field is not in filter",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "age"]) {
+						name: String
+						age: Int
+						email: String
+					}`,
+			},
+			testUtils.CreatePredefinedDocs{
+				Docs: getUserDocs(),
+			},
+			testUtils.Request{
+				Request: `query @explain(type: execute) {
+						User(filter: {age: {_eq: 32}}) {
+							name
+						}
+					}`,
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(11).WithIndexFetches(0),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithEqualFilterOnNilValueOnFirst_ShouldFetch(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _eq filter on nil value on first field",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "age"]) {
+						name: String
+						age: Int
+						email: String
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Alice",
+						"age": 22
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"age": 32
+					}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {name: {_eq: null}}) {
+							name
+							age
+						}
+					}`,
+				Results: []map[string]any{
+					{"name": nil, "age": 32},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithMultipleNilOnFirstFieldAndNilFilter_ShouldFetchAll(t *testing.T) {
+	req := `query {
+		User(filter: {name: {_eq: null}}) {
+			name
+			age
+			email
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _eq filter on nil value on first field with multiple matches",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "age"]) {
+						name: String
+						age: Int
+						email: String
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Alice",
+						"age": 22,
+						"email": "alice@gmail.com"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"age": 32,
+						"email": "bob@gmail.com"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"age": 32,
+						"email": "cate@gmail.com"
+					}`,
+			},
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
+					{"name": nil, "age": 32, "email": "bob@gmail.com"},
+					{"name": nil, "age": 32, "email": "cate@gmail.com"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(2),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithEqualFilterOnNilValueOnSecond_ShouldFetch(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _eq filter on nil value on second field",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "age"]) {
+						name: String
+						age: Int
+						about: String
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Alice",
+						"age": 22,
+						"about": "alice_22"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"about": "bob_nil"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Alice",
+						"about": "alice_nil"
+					}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {name: {_eq: "Alice"}, age: {_eq: null}}) {
+							age
+							about
+						}
+					}`,
+				Results: []map[string]any{
+					{
+						"age": nil,
+						"about": "alice_nil",
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithMultipleNilOnSecondFieldsAndNilFilter_ShouldFetchAll(t *testing.T) {
+	req := `query {
+		User(filter: {name: {_eq: "Bob"}, age: {_eq: null}}) {
+			name
+			age
+			email
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _eq filter on nil value on second field",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "age"]) {
+						name: String
+						age: Int
+						email: String
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"age": 22,
+						"email": "bob_age_22@gmail.com"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"age": 44,
+						"email": "bob_age_44@gmail.com"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"email": "bob1@gmail.com"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"email": "bob2@gmail.com"
+					}`,
+			},
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
+					{"name": "Bob", "age": nil, "email": "bob2@gmail.com"},
+					{"name": "Bob", "age": nil, "email": "bob1@gmail.com"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(2),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_WithMultipleNilOnBothFieldsAndNilFilter_ShouldFetchAll(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _eq filter on nil value on both fields",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "age"]) {
+						name: String
+						age: Int
+						about: String
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"age": 22,
+						"about": "bob_22"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"about": "bob_nil"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"age": 22,
+						"about": "nil_22"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"about": "nil_nil_1"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"about": "nil_nil_2"
+					}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {name: {_eq: null}, age: {_eq: null}}) {
+							about
+						}
+					}`,
+				Results: []map[string]any{
+					{"about": "nil_nil_2"},
+					{"about": "nil_nil_1"},
+				},
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {name: {_eq: null}}) {
+							about
+						}
+					}`,
+				Results: []map[string]any{
+					{"about": "nil_nil_2"},
+					{"about": "nil_nil_1"},
+					{"about": "nil_22"},
+				},
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {age: {_eq: null}}) {
+							about
+						}
+					}`,
+				Results: []map[string]any{
+					{"about": "nil_nil_2"},
+					{"about": "bob_nil"},
+					{"about": "nil_nil_1"},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_AfterUpdateOnNilFields_ShouldFetch(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Test index querying on nil values works after values update",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "age"]) {
+						name: String
+						age: Int
+						about: String
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"age": 22,
+						"about": "bob_22 -> bob_nil"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"about": "bob_nil -> nil_nil"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"age": 22,
+						"about": "nil_22 -> bob_nil"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"about": "nil_nil -> bob_nil"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"about": "nil_nil -> nil_22"
+					}`,
+			},
+			testUtils.UpdateDoc{
+				CollectionID: 0,
+				DocID: 0,
+				Doc: `
+					{
+						"age": null
+					}`,
+			},
+			testUtils.UpdateDoc{
+				CollectionID: 0,
+				DocID: 1,
+				Doc: `
+					{
+						"name": null
+					}`,
+			},
+			testUtils.UpdateDoc{
+				CollectionID: 0,
+				DocID: 2,
+				Doc: `
+					{
+						"name": "Bob",
+						"age": null
+					}`,
+			},
+			testUtils.UpdateDoc{
+				CollectionID: 0,
+				DocID: 3,
+				Doc: `
+					{
+						"name": "Bob"
+					}`,
+			},
+			testUtils.UpdateDoc{
+				CollectionID: 0,
+				DocID: 4,
+				Doc: `
+					{
+						"age": 22
+					}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {name: {_eq: null}, age: {_eq: null}}) {
+							about
+						}
+					}`,
+				Results: []map[string]any{
+					{"about": "bob_nil -> nil_nil"},
+				},
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {name: {_eq: null}}) {
+							about
+						}
+					}`,
+				Results: []map[string]any{
+					{"about": "bob_nil -> nil_nil"},
+					{"about": "nil_nil -> nil_22"},
+				},
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {age: {_eq: null}}) {
+							about
+						}
+					}`,
+				Results: []map[string]any{
+					{"about": "bob_nil -> nil_nil"},
+					{"about": "nil_nil -> bob_nil"},
+					{"about": "bob_22 -> bob_nil"},
+					{"about": "nil_22 -> bob_nil"},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueCompositeIndex_IfMiddleFieldIsNotInFilter_ShouldIgnoreValue(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Test composite index with filter without middle field",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User @index(unique: true, fields: ["name", "email", "age"]) {
+						name: String
+						email: String
+						age: Int
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Alice",
+						"email": "alice@gmail.com",
+						"age": 22
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Alan",
+						"email": "alan@gmail.com",
+						"age": 38
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob",
+						"email": "bob@gmail.com",
+						"age": 51
+					}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {name: {_like: "%l%"}, age: {_gt: 30}}) {
+							name
+						}
+					}`,
+				Results: []map[string]any{
+					{
+						"name": "Alan",
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
diff --git a/tests/integration/index/query_with_unique_index_only_filter_test.go b/tests/integration/index/query_with_unique_index_only_filter_test.go
index ad453409d4..08f1b1b927 100644
--- a/tests/integration/index/query_with_unique_index_only_filter_test.go
+++ b/tests/integration/index/query_with_unique_index_only_filter_test.go
@@ -42,7 +42,7 @@ func TestQueryWithUniqueIndex_WithEqualFilter_ShouldFetch(t *testing.T) {
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(1).WithIndexFetches(1),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(1),
 			},
 		},
 	}
@@ -77,7 +77,7 @@ func TestQueryWithUniqueIndex_WithGreaterThanFilter_ShouldFetch(t *testing.T) {
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(1).WithIndexFetches(10),
 			},
 		},
 	}
@@ -113,7 +113,7 @@ func TestQueryWithUniqueIndex_WithGreaterOrEqualFilter_ShouldFetch(t *testing.T)
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(10),
 			},
 		},
 	}
@@ -148,7 +148,7 @@ func TestQueryWithUniqueIndex_WithLessThanFilter_ShouldFetch(t *testing.T) {
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(1).WithIndexFetches(10),
 			},
 		},
 	}
@@ -184,7 +184,7 @@ func TestQueryWithUniqueIndex_WithLessOrEqualFilter_ShouldFetch(t *testing.T) {
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(10),
 			},
 		},
 	}
@@ -214,20 +214,20 @@ func TestQueryWithUniqueIndex_WithNotEqualFilter_ShouldFetch(t *testing.T) {
 			testUtils.Request{
 				Request: req,
 				Results: []map[string]any{
-					{"name": "Roy"},
 					{"name": "Addo"},
 					{"name": "Andy"},
-					{"name": "Fred"},
-					{"name": "John"},
 					{"name": "Bruno"},
 					{"name": "Chris"},
+					{"name": "Fred"},
+					{"name": "John"},
 					{"name": "Keenan"},
+					{"name": "Roy"},
 					{"name": "Shahzad"},
 				},
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(9).WithFieldFetches(9).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
 			},
 		},
 	}
@@ -263,7 +263,7 @@ func TestQueryWithUniqueIndex_WithInFilter_ShouldFetch(t *testing.T) {
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(2),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(2),
 			},
 		},
 	}
@@ -301,7 +301,7 @@ func TestQueryWithUniqueIndex_WithNotInFilter_ShouldFetch(t *testing.T) {
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(4).WithFieldFetches(8).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(4).WithIndexFetches(10),
 			},
 		},
 	}
@@ -362,7 +362,7 @@ func TestQueryWithUniqueIndex_WithLikeFilter_ShouldFetch(t *testing.T) {
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req1),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(10),
 			},
 			testUtils.Request{
 				Request: req2,
@@ -373,7 +373,7 @@ func TestQueryWithUniqueIndex_WithLikeFilter_ShouldFetch(t *testing.T) {
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req2),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(10),
 			},
 			testUtils.Request{
 				Request: req3,
@@ -384,7 +384,7 @@ func TestQueryWithUniqueIndex_WithLikeFilter_ShouldFetch(t *testing.T) {
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req3),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(10),
 			},
 			testUtils.Request{
 				Request: req4,
@@ -394,7 +394,7 @@ func TestQueryWithUniqueIndex_WithLikeFilter_ShouldFetch(t *testing.T) {
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req4),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(1).WithIndexFetches(10),
 			},
 			testUtils.Request{
 				Request: req5,
@@ -405,7 +405,7 @@ func TestQueryWithUniqueIndex_WithLikeFilter_ShouldFetch(t *testing.T) {
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req5),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(10),
 			},
 			testUtils.Request{
 				Request: req6,
@@ -413,7 +413,7 @@ func TestQueryWithUniqueIndex_WithLikeFilter_ShouldFetch(t *testing.T) {
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req6),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(0).WithFieldFetches(0).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
 			},
 		},
 	}
@@ -443,18 +443,60 @@ func TestQueryWithUniqueIndex_WithNotLikeFilter_ShouldFetch(t *testing.T) {
 			testUtils.Request{
 				Request: req,
 				Results: []map[string]any{
-					{"name": "Roy"},
 					{"name": "Addo"},
 					{"name": "Andy"},
+					{"name": "Bruno"},
 					{"name": "Fred"},
+					{"name": "Islam"},
+					{"name": "Keenan"},
+					{"name": "Roy"},
+				},
+			},
+			testUtils.Request{
+				Request: makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithUniqueIndex_WithNotCaseInsensitiveLikeFilter_ShouldFetch(t *testing.T) {
+	req := `query {
+		User(filter: {name: {_nilike: "a%"}}) {
+			name
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _nilike filter",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User {
+						name: String @index(unique: true)
+						age: Int
+					}`,
+			},
+			testUtils.CreatePredefinedDocs{
+				Docs: getUserDocs(),
+			},
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
 					{"name": "Bruno"},
+					{"name": "Chris"},
+					{"name": "Fred"},
 					{"name": "Islam"},
+					{"name": "John"},
 					{"name": "Keenan"},
+					{"name": "Roy"},
+					{"name": "Shahzad"},
 				},
 			},
 			testUtils.Request{
 				Request: makeExplainQuery(req),
-				Asserter: testUtils.NewExplainAsserter().WithDocFetches(7).WithFieldFetches(7).WithIndexFetches(10),
+				Asserter: testUtils.NewExplainAsserter().WithFieldFetches(0).WithIndexFetches(10),
 			},
 		},
 	}
@@ -532,3 +574,54 @@ func TestQueryWithUniqueIndex_WithEqualFilterOnNilValue_ShouldFetch(t *testing.T
 
 	testUtils.ExecuteTestCase(t, test)
 }
+
+func TestQueryWithUniqueIndex_WithMultipleNilValuesAndEqualFilter_ShouldFetch(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Test index filtering with _eq filter on nil value",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User {
+						name: String
+						age: Int @index(unique: true)
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "John",
+						"age": 44
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Alice"
+					}`,
+			},
+			testUtils.CreateDoc{
+				CollectionID: 0,
+				Doc: `
+					{
+						"name": "Bob"
+					}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User(filter: {age: {_eq: null}}) {
+							name
+						}
+					}`,
+				Results: []map[string]any{
+					{"name": "Bob"},
+					{"name": "Alice"},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
diff --git a/tests/integration/lens.go b/tests/integration/lens.go
index e69437d87b..69c49a1cbc 100644
--- a/tests/integration/lens.go
+++ b/tests/integration/lens.go
@@ -12,8 +12,6 @@ package tests
 
 import (
"github.com/sourcenetwork/immutable" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" ) @@ -39,19 +37,6 @@ type ConfigureMigration struct { ExpectedError string } -// GetMigrations is a test action which will fetch and assert on the results of calling -// `LensRegistry().Config()`. -type GetMigrations struct { - // NodeID is the node ID (index) of the node in which to configure the migration. - NodeID immutable.Option[int] - - // Used to identify the transaction for this to run against. Optional. - TransactionID immutable.Option[int] - - // The expected configuration. - ExpectedResults []client.LensConfig -} - func configureMigration( s *state, action ConfigureMigration, @@ -65,45 +50,3 @@ func configureMigration( assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) } } - -func getMigrations( - s *state, - action GetMigrations, -) { - for _, node := range getNodes(action.NodeID, s.nodes) { - db := getStore(s, node, action.TransactionID, "") - - configs, err := db.LensRegistry().Config(s.ctx) - require.NoError(s.t, err) - require.Equal(s.t, len(configs), len(action.ExpectedResults)) - - // The order of the results is not deterministic, so do not assert on the element - for _, expected := range action.ExpectedResults { - var actual client.LensConfig - var actualFound bool - - for _, config := range configs { - if config.SourceSchemaVersionID != expected.SourceSchemaVersionID { - continue - } - if config.DestinationSchemaVersionID != expected.DestinationSchemaVersionID { - continue - } - actual = config - actualFound = true - } - - require.True(s.t, actualFound, "matching lens config not found") - require.Equal(s.t, len(expected.Lenses), len(actual.Lenses)) - - for j, actualLens := range actual.Lenses { - expectedLens := expected.Lenses[j] - - assert.Equal(s.t, expectedLens.Inverse, actualLens.Inverse) - assert.Equal(s.t, expectedLens.Path, actualLens.Path) - - assertResultsEqual(s.t, s.clientType, expectedLens.Arguments, actualLens.Arguments) - } - } - } -} diff --git a/tests/integration/mutation/create/field_kinds/field_kind_json_test.go b/tests/integration/mutation/create/field_kinds/field_kind_json_test.go new file mode 100644 index 0000000000..69b1075464 --- /dev/null +++ b/tests/integration/mutation/create/field_kinds/field_kind_json_test.go @@ -0,0 +1,109 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+
+package field_kinds
+
+import (
+	"testing"
+
+	testUtils "github.com/sourcenetwork/defradb/tests/integration"
+)
+
+func TestMutationCreate_WithJSONFieldGivenValidJSON_NoError(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Create mutation with JSON field given a valid JSON string.",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type Users {
+						name: String
+						custom: JSON
+					}
+				`,
+			},
+			testUtils.Request{
+				Request: `mutation {
+						create_Users(input: {name: "John", custom: "{\"tree\": \"maple\", \"age\": 250}"}) {
+							_docID
+							name
+							custom
+						}
+					}`,
+				Results: []map[string]any{
+					{
+						"_docID": "bae-b2dff82c-ab26-5d06-a29a-02aa4807dde2",
+						"custom": "{\"tree\":\"maple\",\"age\":250}",
+						"name": "John",
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestMutationCreate_WithJSONFieldGivenInvalidJSON_Error(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Create mutation with JSON field given an invalid JSON string.",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type Users {
+						name: String
+						custom: JSON
+					}
+				`,
+			},
+			testUtils.Request{
+				Request: `mutation {
+						create_Users(input: {name: "John", custom: "{\"tree\": \"maple, \"age\": 250}"}) {
+							_docID
+							name
+							custom
+						}
+					}`,
+				ExpectedError: `Argument "input" has invalid value {name: "John", custom: "{\"tree\": \"maple, \"age\": 250}"}.
+In field "custom": Expected type "JSON", found "{\"tree\": \"maple, \"age\": 250}".`,
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestMutationCreate_WithJSONFieldGivenSimpleString_Error(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Create mutation with JSON field given a simple string.",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type Users {
+						name: String
+						custom: JSON
+					}
+				`,
+			},
+			testUtils.Request{
+				Request: `mutation {
+						create_Users(input: {name: "John", custom: "blah"}) {
+							_docID
+							name
+							custom
+						}
+					}`,
+				ExpectedError: `Argument "input" has invalid value {name: "John", custom: "blah"}.
+In field "custom": Expected type "JSON", found "blah".`,
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
diff --git a/tests/integration/mutation/create/with_version_test.go b/tests/integration/mutation/create/with_version_test.go
index b4578786c9..958dc113f1 100644
--- a/tests/integration/mutation/create/with_version_test.go
+++ b/tests/integration/mutation/create/with_version_test.go
@@ -39,7 +39,7 @@ func TestMutationCreate_ReturnsVersionCID(t *testing.T) {
 				{
 					"_version": []map[string]any{
 						{
-							"cid": "bafybeif5l2a5f2lcsmuml2cji6unq4qk2ta4f3uow4wccdjebsu7jcjrj4",
+							"cid": "bafybeidzstxabh7qktq7pkmmxvpjbnwklxz3h5l6d425ldvjy65xvvuxu4",
 						},
 					},
 				},
diff --git a/tests/integration/mutation/delete/with_deleted_field_test.go b/tests/integration/mutation/delete/with_deleted_field_test.go
index 55e1a9f2dd..0067b8b12c 100644
--- a/tests/integration/mutation/delete/with_deleted_field_test.go
+++ b/tests/integration/mutation/delete/with_deleted_field_test.go
@@ -16,8 +16,6 @@ import (
 	testUtils "github.com/sourcenetwork/defradb/tests/integration"
 )
 
-// This test documents a bug, see:
-// https://github.com/sourcenetwork/defradb/issues/1846
 func TestMutationDeletion_WithDeletedField(t *testing.T) {
 	test := testUtils.TestCase{
 		Actions: []any{
@@ -43,8 +41,7 @@ func TestMutationDeletion_WithDeletedField(t *testing.T) {
 				}`,
 				Results: []map[string]any{
 					{
-						// This should be true, as it has been deleted.
- "_deleted": false, + "_deleted": true, "_docID": "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad", }, }, diff --git a/tests/integration/mutation/update/field_kinds/blob_test.go b/tests/integration/mutation/update/field_kinds/blob_test.go index 4445c45bba..4434d49ef0 100644 --- a/tests/integration/mutation/update/field_kinds/blob_test.go +++ b/tests/integration/mutation/update/field_kinds/blob_test.go @@ -58,3 +58,44 @@ func TestMutationUpdate_WithBlobField(t *testing.T) { testUtils.ExecuteTestCase(t, test) } + +func TestMutationUpdate_IfBlobFieldSetToNull_ShouldBeNil(t *testing.T) { + test := testUtils.TestCase{ + Description: "If blob field is set to null, should set to nil", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + data: Blob + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "data": "00FE" + }`, + }, + testUtils.UpdateDoc{ + Doc: `{ + "data": null + }`, + }, + testUtils.Request{ + Request: ` + query { + Users { + data + } + } + `, + Results: []map[string]any{ + { + "data": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/mutation/update/field_kinds/bool_test.go b/tests/integration/mutation/update/field_kinds/bool_test.go new file mode 100644 index 0000000000..36301961ed --- /dev/null +++ b/tests/integration/mutation/update/field_kinds/bool_test.go @@ -0,0 +1,58 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package field_kinds + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestMutationUpdate_IfBoolFieldSetToNull_ShouldBeNil(t *testing.T) { + test := testUtils.TestCase{ + Description: "If bool field is set to null, should set to nil", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + valid: Boolean + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "valid": true + }`, + }, + testUtils.UpdateDoc{ + Doc: `{ + "valid": null + }`, + }, + testUtils.Request{ + Request: ` + query { + Users { + valid + } + } + `, + Results: []map[string]any{ + { + "valid": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/mutation/update/field_kinds/date_time_test.go b/tests/integration/mutation/update/field_kinds/date_time_test.go index b7d1546864..c0ac23c166 100644 --- a/tests/integration/mutation/update/field_kinds/date_time_test.go +++ b/tests/integration/mutation/update/field_kinds/date_time_test.go @@ -106,3 +106,44 @@ func TestMutationUpdate_WithDateTimeField_MultipleDocs(t *testing.T) { testUtils.ExecuteTestCase(t, test) } + +func TestMutationUpdate_IfDateTimeFieldSetToNull_ShouldBeNil(t *testing.T) { + test := testUtils.TestCase{ + Description: "Simple update of date time field", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + created_at: DateTime + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "created_at": "2011-07-23T01:11:11-05:00" + }`, + }, + testUtils.UpdateDoc{ + Doc: `{ + "created_at": null + }`, + }, + testUtils.Request{ + Request: ` + query { + Users { + created_at + } + } + `, + Results: []map[string]any{ + { + "created_at": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git 
diff --git a/tests/integration/mutation/update/field_kinds/float_test.go b/tests/integration/mutation/update/field_kinds/float_test.go
new file mode 100644
index 0000000000..bb1a9babce
--- /dev/null
+++ b/tests/integration/mutation/update/field_kinds/float_test.go
@@ -0,0 +1,58 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package field_kinds
+
+import (
+	"testing"
+
+	testUtils "github.com/sourcenetwork/defradb/tests/integration"
+)
+
+func TestMutationUpdate_IfFloatFieldSetToNull_ShouldBeNil(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "If float field is set to null, should set to nil",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type Users {
+						rate: Float
+					}
+				`,
+			},
+			testUtils.CreateDoc{
+				Doc: `{
+					"rate": 0.55
+				}`,
+			},
+			testUtils.UpdateDoc{
+				Doc: `{
+					"rate": null
+				}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						Users {
+							rate
+						}
+					}
+				`,
+				Results: []map[string]any{
+					{
+						"rate": nil,
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
diff --git a/tests/integration/mutation/update/field_kinds/int_test.go b/tests/integration/mutation/update/field_kinds/int_test.go
new file mode 100644
index 0000000000..84a122e080
--- /dev/null
+++ b/tests/integration/mutation/update/field_kinds/int_test.go
@@ -0,0 +1,58 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package field_kinds
+
+import (
+	"testing"
+
+	testUtils "github.com/sourcenetwork/defradb/tests/integration"
+)
+
+func TestMutationUpdate_IfIntFieldSetToNull_ShouldBeNil(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "If int field is set to null, should set to nil",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type Users {
+						age: Int
+					}
+				`,
+			},
+			testUtils.CreateDoc{
+				Doc: `{
+					"age": 33
+				}`,
+			},
+			testUtils.UpdateDoc{
+				Doc: `{
+					"age": null
+				}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						Users {
+							age
+						}
+					}
+				`,
+				Results: []map[string]any{
+					{
+						"age": nil,
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
diff --git a/tests/integration/mutation/update/field_kinds/json_test.go b/tests/integration/mutation/update/field_kinds/json_test.go
new file mode 100644
index 0000000000..c782f856a3
--- /dev/null
+++ b/tests/integration/mutation/update/field_kinds/json_test.go
@@ -0,0 +1,58 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package field_kinds
+
+import (
+	"testing"
+
+	testUtils "github.com/sourcenetwork/defradb/tests/integration"
+)
+
+func TestMutationUpdate_IfJSONFieldSetToNull_ShouldBeNil(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "If json field is set to null, should set to nil",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type Users {
+						custom: JSON
+					}
+				`,
+			},
+			testUtils.CreateDoc{
+				Doc: `{
+					"custom": "{\"foo\": \"bar\"}"
+				}`,
+			},
+			testUtils.UpdateDoc{
+				Doc: `{
+					"custom": null
+				}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						Users {
+							custom
+						}
+					}
+				`,
+				Results: []map[string]any{
+					{
+						"custom": nil,
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
diff --git a/tests/integration/mutation/update/field_kinds/string_test.go b/tests/integration/mutation/update/field_kinds/string_test.go
new file mode 100644
index 0000000000..7783f9b0fd
--- /dev/null
+++ b/tests/integration/mutation/update/field_kinds/string_test.go
@@ -0,0 +1,58 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package field_kinds
+
+import (
+	"testing"
+
+	testUtils "github.com/sourcenetwork/defradb/tests/integration"
+)
+
+func TestMutationUpdate_IfStringFieldSetToNull_ShouldBeNil(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "If string field is set to null, should set to nil",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type Users {
+						name: String
+					}
+				`,
+			},
+			testUtils.CreateDoc{
+				Doc: `{
+					"name": "John"
+				}`,
+			},
+			testUtils.UpdateDoc{
+				Doc: `{
+					"name": null
+				}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						Users {
+							name
+						}
+					}
+				`,
+				Results: []map[string]any{
+					{
+						"name": nil,
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
diff --git a/tests/integration/net/order/tcp_test.go b/tests/integration/net/order/tcp_test.go
index 8a419360d5..f80701c64c 100644
--- a/tests/integration/net/order/tcp_test.go
+++ b/tests/integration/net/order/tcp_test.go
@@ -16,16 +16,16 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/sourcenetwork/defradb/client"
-	"github.com/sourcenetwork/defradb/config"
+	"github.com/sourcenetwork/defradb/net"
 	testutils "github.com/sourcenetwork/defradb/tests/integration"
 )
 
 // TestP2PWithSingleDocumentUpdatePerNode tests document syncing between two nodes with a single update per node
 func TestP2PWithSingleDocumentUpdatePerNode(t *testing.T) {
 	test := P2PTestCase{
-		NodeConfig: []*config.Config{
-			randomNetworkingConfig(),
-			randomNetworkingConfig(),
+		NodeConfig: [][]net.NodeOpt{
+			testutils.RandomNetworkingConfig()(),
+			testutils.RandomNetworkingConfig()(),
 		},
 		NodePeers: map[int][]int{
 			1: {
@@ -74,9 +74,9 @@ func TestP2PWithSingleDocumentUpdatePerNode(t *testing.T) {
 // TestP2PWithMultipleDocumentUpdatesPerNode tests document syncing between two nodes with multiple updates per node.
 func TestP2PWithMultipleDocumentUpdatesPerNode(t *testing.T) {
 	test := P2PTestCase{
-		NodeConfig: []*config.Config{
-			randomNetworkingConfig(),
-			randomNetworkingConfig(),
+		NodeConfig: [][]net.NodeOpt{
+			testutils.RandomNetworkingConfig()(),
+			testutils.RandomNetworkingConfig()(),
 		},
 		NodePeers: map[int][]int{
 			1: {
@@ -145,9 +145,9 @@ func TestP2FullPReplicator(t *testing.T) {
 	require.NoError(t, err)
 
 	test := P2PTestCase{
-		NodeConfig: []*config.Config{
-			randomNetworkingConfig(),
-			randomNetworkingConfig(),
+		NodeConfig: [][]net.NodeOpt{
+			testutils.RandomNetworkingConfig()(),
+			testutils.RandomNetworkingConfig()(),
 		},
 		NodeReplicators: map[int][]int{
 			0: {
diff --git a/tests/integration/net/order/utils.go b/tests/integration/net/order/utils.go
index e1149ae9c2..3ba5fc7f26 100644
--- a/tests/integration/net/order/utils.go
+++ b/tests/integration/net/order/utils.go
@@ -13,14 +13,12 @@ package order
 import (
 	"context"
 	"fmt"
-	"strings"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
 	"github.com/sourcenetwork/defradb/client"
-	"github.com/sourcenetwork/defradb/config"
 	coreDB "github.com/sourcenetwork/defradb/db"
 	"github.com/sourcenetwork/defradb/errors"
 	"github.com/sourcenetwork/defradb/logging"
@@ -50,7 +48,7 @@ const (
 type P2PTestCase struct {
 	Query string
 	// Configuration parameters for each peer
-	NodeConfig []*config.Config
+	NodeConfig [][]net.NodeOpt
 
 	// List of peers for each net.
 	// Only peers with lower index than the node can be used in the list of peers.
@@ -69,7 +67,12 @@ type P2PTestCase struct {
 	ReplicatorResult map[int]map[string]map[string]any
 }
 
-func setupDefraNode(t *testing.T, cfg *config.Config, seeds []string) (*net.Node, []client.DocID, error) {
+func setupDefraNode(
+	t *testing.T,
+	opts []net.NodeOpt,
+	peers []string,
+	seeds []string,
+) (*net.Node, []client.DocID, error) {
 	ctx := context.Background()
 
 	log.Info(ctx, "Building new memory store")
@@ -92,34 +95,28 @@ func setupDefraNode(t *testing.T, cfg *config.Config, seeds []string) (*net.Node
 	// init the P2P node
 	var n *net.Node
-	log.Info(ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress))
-	n, err = net.NewNode(
-		ctx,
-		db,
-		net.WithConfig(cfg),
-	)
+	n, err = net.NewNode(ctx, db, opts...)
 	if err != nil {
 		return nil, nil, errors.Wrap("failed to start P2P node", err)
 	}
 
 	// parse peers and bootstrap
-	if len(cfg.Net.Peers) != 0 {
-		log.Info(ctx, "Parsing bootstrap peers", logging.NewKV("Peers", cfg.Net.Peers))
-		addrs, err := netutils.ParsePeers(strings.Split(cfg.Net.Peers, ","))
+	if len(peers) != 0 {
+		log.Info(ctx, "Parsing bootstrap peers", logging.NewKV("Peers", peers))
+		addrs, err := netutils.ParsePeers(peers)
 		if err != nil {
-			return nil, nil, errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %v", cfg.Net.Peers), err)
+			return nil, nil, errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %v", peers), err)
 		}
 		log.Info(ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs))
 		n.Bootstrap(addrs)
 	}
 
+	log.Info(ctx, "Starting P2P node", logging.NewKV("P2P addresses", n.PeerInfo().Addrs))
 	if err := n.Start(); err != nil {
 		n.Close()
 		return nil, nil, errors.Wrap("unable to start P2P listeners", err)
 	}
 
-	cfg.Net.P2PAddress = n.ListenAddrs()[0].String()
-
 	return n, docIDs, nil
 }
@@ -195,9 +192,8 @@ func executeTestCase(t *testing.T, test P2PTestCase) {
 
 	for i, cfg := range test.NodeConfig {
 		log.Info(ctx, fmt.Sprintf("Setting up node %d", i))
-		cfg.Datastore.Badger.Path = t.TempDir()
+		var peerAddresses []string
 		if peers, ok := test.NodePeers[i]; ok {
-			peerAddresses := []string{}
 			for _, p := range peers {
 				if p >= len(nodes) {
 					log.Info(ctx, "cannot set a peer that hasn't been started. Skipping to next peer")
@@ -209,9 +205,8 @@ func executeTestCase(t *testing.T, test P2PTestCase) {
 					fmt.Sprintf("%s/p2p/%s", peerInfo.Addrs[0], peerInfo.ID),
 				)
 			}
-			cfg.Net.Peers = strings.Join(peerAddresses, ",")
 		}
-		n, d, err := setupDefraNode(t, cfg, test.SeedDocuments)
+		n, d, err := setupDefraNode(t, cfg, peerAddresses, test.SeedDocuments)
 		require.NoError(t, err)
 
 		if i == 0 {
@@ -342,10 +337,3 @@ func executeTestCase(t *testing.T, test P2PTestCase) {
 		n.DB.Close()
 	}
 }
-
-func randomNetworkingConfig() *config.Config {
-	cfg := config.DefaultConfig()
-	cfg.Net.P2PAddress = "/ip4/127.0.0.1/tcp/0"
-	cfg.Net.RelayEnabled = false
-	return cfg
-}
diff --git a/tests/integration/net/state/simple/peer/subscribe/with_add_get_test.go b/tests/integration/net/state/simple/peer/subscribe/with_add_get_test.go
index 8fd73fe06a..b5990a050f 100644
--- a/tests/integration/net/state/simple/peer/subscribe/with_add_get_test.go
+++ b/tests/integration/net/state/simple/peer/subscribe/with_add_get_test.go
@@ -76,7 +76,7 @@ func TestP2PSubscribeAddGetMultiple(t *testing.T) {
 			},
 			testUtils.GetAllP2PCollections{
 				NodeID: 1,
-				ExpectedCollectionIDs: []int{2, 0},
+				ExpectedCollectionIDs: []int{0, 2},
 			},
 		},
 	}
diff --git a/tests/integration/net/state/simple/peer/with_create_add_field_test.go b/tests/integration/net/state/simple/peer/with_create_add_field_test.go
index 31861d6498..46ad3c5a9c 100644
--- a/tests/integration/net/state/simple/peer/with_create_add_field_test.go
+++ b/tests/integration/net/state/simple/peer/with_create_add_field_test.go
@@ -203,3 +203,92 @@ func TestP2PPeerCreateWithNewFieldSyncsDocsToUpdatedSchemaVersion(t *testing.T)
 
 	testUtils.ExecuteTestCase(t, test)
 }
+
+// This test documents unwanted behaviour and should be changed when
+// https://github.com/sourcenetwork/defradb/issues/2255 is fixed.
+func TestP2PPeerCreateWithNewFieldDocSyncedBeforeReceivingNodeSchemaUpdatedDoesNotReturnNewField(t *testing.T) {
+	test := testUtils.TestCase{
+		Actions: []any{
+			testUtils.RandomNetworkingConfig(),
+			testUtils.RandomNetworkingConfig(),
+			testUtils.SchemaUpdate{
+				Schema: `
+					type Users {
+						Name: String
+					}
+				`,
+			},
+			testUtils.ConnectPeers{
+				SourceNodeID: 1,
+				TargetNodeID: 0,
+			},
+			testUtils.SubscribeToCollection{
+				NodeID: 1,
+				CollectionIDs: []int{0},
+			},
+			testUtils.SchemaPatch{
+				// Patch the schema on the first node only
+				NodeID: immutable.Some(0),
+				Patch: `
+					[
+						{ "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} }
+					]
+				`,
+			},
+			testUtils.CreateDoc{
+				// Create the doc with a value in the new field on the first node only, and allow the values to sync
+				NodeID: immutable.Some(0),
+				Doc: `{
+					"Name": "John",
+					"Email": "imnotyourbuddyguy@source.ca"
+				}`,
+			},
+			testUtils.WaitForSync{},
+			testUtils.SchemaPatch{
+				// Update the schema on the second node
+				NodeID: immutable.Some(1),
+				Patch: `
+					[
+						{ "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} }
+					]
+				`,
+			},
+			testUtils.Request{
+				NodeID: immutable.Some(0),
+				Request: `
+					query {
+						Users {
+							Name
+							Email
+						}
+					}
+				`,
+				Results: []map[string]any{
+					{
+						"Name": "John",
+						"Email": "imnotyourbuddyguy@source.ca",
+					},
+				},
+			},
+			testUtils.Request{
+				NodeID: immutable.Some(1),
+				Request: `
+					query {
+						Users {
+							Name
+							Email
+						}
+					}
+				`,
+				Results: []map[string]any{
+					{
+						"Name": "John",
+						// The email should be returned but it is not
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
diff --git a/tests/integration/net/state/simple/replicator/with_create_test.go b/tests/integration/net/state/simple/replicator/with_create_test.go
index 08433629c4..3cec12b351 100644
--- a/tests/integration/net/state/simple/replicator/with_create_test.go
+++ b/tests/integration/net/state/simple/replicator/with_create_test.go
@@ -492,7 +492,7 @@ func TestP2POneToOneReplicatorOrderIndependent(t *testing.T) {
 					"name": "John",
 					"_version": []map[string]any{
 						{
-							"schemaVersionId": "bafkreicnoqat3exmvikr36xu3hhrkvay3d3cif24tezgsyvrydpobk2nqm",
+							"schemaVersionId": "bafkreiewca6o66mgkpbai2vtrupolvtf66wllbvouvtwo6fkc6alrybzfa",
 						},
 					},
 				},
@@ -552,7 +552,7 @@ func TestP2POneToOneReplicatorOrderIndependentDirectCreate(t *testing.T) {
 					"_docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7",
 					"_version": []map[string]any{
 						{
-							"schemaVersionId": "bafkreicnoqat3exmvikr36xu3hhrkvay3d3cif24tezgsyvrydpobk2nqm",
+							"schemaVersionId": "bafkreiewca6o66mgkpbai2vtrupolvtf66wllbvouvtwo6fkc6alrybzfa",
 						},
 					},
 				},
diff --git a/tests/integration/p2p.go b/tests/integration/p2p.go
index 2e4fb86b5d..4d48cb033b 100644
--- a/tests/integration/p2p.go
+++ b/tests/integration/p2p.go
@@ -14,8 +14,8 @@ import (
 	"time"
 
 	"github.com/sourcenetwork/defradb/client"
-	"github.com/sourcenetwork/defradb/config"
 	"github.com/sourcenetwork/defradb/logging"
+	"github.com/sourcenetwork/defradb/net"
 	"github.com/sourcenetwork/defradb/tests/clients"
 
 	"github.com/libp2p/go-libp2p/core/peer"
@@ -523,10 +523,10 @@ func waitForSync(
 }
 
 func RandomNetworkingConfig() ConfigureNode {
-	return func() config.Config {
-		cfg := config.DefaultConfig()
-		cfg.Net.P2PAddress = "/ip4/127.0.0.1/tcp/0"
-		cfg.Net.RelayEnabled = false
-		return *cfg
+	return func() []net.NodeOpt {
+		return []net.NodeOpt{
+			net.WithListenAddresses("/ip4/127.0.0.1/tcp/0"),
+			net.WithEnableRelay(false),
+		}
 	}
 }
diff --git a/tests/integration/query/commits/simple_test.go b/tests/integration/query/commits/simple_test.go
index 9592c46d1f..4239e7cfd6 100644
--- a/tests/integration/query/commits/simple_test.go
+++ b/tests/integration/query/commits/simple_test.go
@@ -36,13 +36,13 @@ func TestQueryCommits(t *testing.T) {
 				}`,
 				Results: []map[string]any{
 					{
-						"cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm",
+						"cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa",
 					},
 					{
-						"cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24",
+						"cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci",
 					},
 					{
-						"cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u",
+						"cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi",
 					},
 				},
 			},
@@ -79,22 +79,22 @@ func TestQueryCommitsMultipleDocs(t *testing.T) {
 				}`,
 				Results: []map[string]any{
 					{
-						"cid": "bafybeigdcaas33fnrv7jbigm5a5phxtxl76weuf74kqcrb5udjgttqssju",
+						"cid": "bafybeibhain2764v7eltfiam6dgwivfj56mvbme34nbdsdbndrsjkc2cje",
 					},
 					{
-						"cid": "bafybeiahfq2ji7uneqfqddeqsvz5t3rdkgo7wpnpswo2jon23kxpgvqdsa",
+						"cid": "bafybeickrd5xayjhedyypf3yus55bkhpwd5dqlkdhivrcceexkpsgnic24",
 					},
 					{
-						"cid": "bafybeihhadjgfxsyrlg5gftmi4ikppuhecyeqznjru47l3tup4c6sbzhga",
+						"cid": "bafybeieqyyprwrkbgyn7x4jkzmlnupnzpdymvbulef37brkzn7blqbe6l4",
 					},
 					{
-						"cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm",
+						"cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa",
 					},
 					{
-						"cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24",
+						"cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci",
 					},
 					{
-						"cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u",
+						"cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi",
 					},
 				},
 			},
@@ -125,16 +125,16 @@ func TestQueryCommitsWithSchemaVersionIdField(t *testing.T) {
 				}`,
 				Results: []map[string]any{
 					{
-						"cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm",
-						"schemaVersionId": "bafkreiayhdsgzhmrz6t5d3x2cgqqbdjt7aqgldtlkmxn5eibg542j3n6ea",
+						"cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa",
+						"schemaVersionId": "bafkreidqkjb23ngp34eebeaxiogrlogkpfz62vjb3clnnyvhlbgdaywkg4",
 					},
 					{
-						"cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24",
-						"schemaVersionId": "bafkreiayhdsgzhmrz6t5d3x2cgqqbdjt7aqgldtlkmxn5eibg542j3n6ea",
+						"cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci",
+						"schemaVersionId": "bafkreidqkjb23ngp34eebeaxiogrlogkpfz62vjb3clnnyvhlbgdaywkg4",
 					},
 					{
-						"cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u",
-						"schemaVersionId": "bafkreiayhdsgzhmrz6t5d3x2cgqqbdjt7aqgldtlkmxn5eibg542j3n6ea",
+						"cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi",
+						"schemaVersionId": "bafkreidqkjb23ngp34eebeaxiogrlogkpfz62vjb3clnnyvhlbgdaywkg4",
 					},
 				},
 			},
@@ -285,8 +285,6 @@ func TestQueryCommitsWithFieldIDFieldWithUpdate(t *testing.T) {
 			testUtils.Request{
 				Request: `
 					query {
-
-
 						commits {
 							fieldId
 						}
@@ -315,3 +313,118 @@ func TestQueryCommitsWithFieldIDFieldWithUpdate(t *testing.T) {
 
 	testUtils.ExecuteTestCase(t, test)
 }
+
+func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) {
+	test := testUtils.TestCase{
+		Actions: []any{
+			updateUserCollectionSchema(),
+			testUtils.CreateDoc{
+				Doc: `{
+					"name": "John",
+					"age": 21
+				}`,
+			},
+			testUtils.UpdateDoc{
+				Doc: `{
+					"age": 22
+				}`,
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						commits {
+							cid
+							collectionID
+							delta
+							docID
+							fieldId
+							fieldName
+							height
+							links {
+								cid
+								name
+							}
+						}
+					}
+				`,
+				Results: []map[string]any{
+					{
+ "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", + "collectionID": int64(1), + "delta": testUtils.CBORValue(22), + "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "fieldId": "1", + "fieldName": "age", + "height": int64(2), + "links": []map[string]any{ + { + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", + "name": "_head", + }, + }, + }, + { + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", + "collectionID": int64(1), + "delta": testUtils.CBORValue(21), + "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "fieldId": "1", + "fieldName": "age", + "height": int64(1), + "links": []map[string]any{}, + }, + { + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", + "collectionID": int64(1), + "delta": testUtils.CBORValue("John"), + "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "fieldId": "2", + "fieldName": "name", + "height": int64(1), + "links": []map[string]any{}, + }, + { + "cid": "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", + "collectionID": int64(1), + "delta": nil, + "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "fieldId": "C", + "fieldName": nil, + "height": int64(2), + "links": []map[string]any{ + { + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", + "name": "_head", + }, + { + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", + "name": "age", + }, + }, + }, + { + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", + "collectionID": int64(1), + "delta": nil, + "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "fieldId": "C", + "fieldName": nil, + "height": int64(1), + "links": []map[string]any{ + { + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", + "name": "age", + }, + { + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", + "name": "name", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/query/commits/with_cid_test.go b/tests/integration/query/commits/with_cid_test.go index 5c2703d41c..4878ea8f9a 100644 --- a/tests/integration/query/commits/with_cid_test.go +++ b/tests/integration/query/commits/with_cid_test.go @@ -38,14 +38,14 @@ func TestQueryCommitsWithCid(t *testing.T) { testUtils.Request{ Request: `query { commits( - cid: "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u" + cid: "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", }, }, }, @@ -71,14 +71,14 @@ func TestQueryCommitsWithCidForFieldCommit(t *testing.T) { testUtils.Request{ Request: `query { commits( - cid: "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u" + cid: "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", }, }, }, diff --git a/tests/integration/query/commits/with_depth_test.go b/tests/integration/query/commits/with_depth_test.go index a0d2c817f4..cdda45101c 100644 --- a/tests/integration/query/commits/with_depth_test.go +++ b/tests/integration/query/commits/with_depth_test.go @@ -36,13 +36,13 @@ func TestQueryCommitsWithDepth1(t *testing.T) { }`, Results: []map[string]any{ { - "cid": 
"bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", }, }, }, @@ -81,16 +81,16 @@ func TestQueryCommitsWithDepth1WithUpdate(t *testing.T) { Results: []map[string]any{ { // "Age" field head - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", "height": int64(2), }, { // "Name" field head (unchanged from create) - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "height": int64(1), }, { - "cid": "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu", + "cid": "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", "height": int64(2), }, }, @@ -137,27 +137,27 @@ func TestQueryCommitsWithDepth2WithUpdate(t *testing.T) { Results: []map[string]any{ { // Composite head - "cid": "bafybeihvhr7ke7bjgjixce262544tlo7mdlyuswtgl66zsrxcfc5targjy", + "cid": "bafybeic45wkhxtpn3vgd2dmmohq76vw56qz3cpu3oorha3hf2w6qu7bpoa", "height": int64(3), }, { // Composite head -1 - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", "height": int64(2), }, { // "Name" field head (unchanged from create) - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "height": int64(1), }, { // "Age" field head - "cid": "bafybeicacrvck5qf37pk3pdsiavvxy2jk67dbdpww5pvoun2k52lw2ftqi", + "cid": "bafybeid4y4vqmvec2mvm3su77rrmj6tzsx5zdlt6ias4hzqxbevmosydc4", "height": int64(3), }, { // "Age" field head -1 - "cid": "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu", + "cid": "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", "height": int64(2), }, }, @@ -195,22 +195,22 @@ func TestQueryCommitsWithDepth1AndMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeigvcksw7ck2o7rqfyxncn2h5u6bbwj5ejjfvsihsjibxvrqrxbtui", + "cid": "bafybeifysgo74dhzl2t74j5qh32t5uufar7otua6ggvsapjbxpzimcbnoi", }, { - "cid": "bafybeibcdmghhshx4v3xamoktw3n6blv7courh6x2d5cttwuzlodml74ny", + "cid": "bafybeibqvujxi4tjtrwg5igvg6zdvjaxvkmb5h2msjbtta3lmytgs7hft4", }, { - "cid": "bafybeig6rwkq6hlf5rcjq64jodl3gtfv5svnmsjlkwrnmbcjui7t3vy3qi", + "cid": "bafybeib7zmofgbtvxcb3gy3bfbwp3btqrmoacmxl4duqhwlvwu6pihzbeu", }, { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_cid_test.go b/tests/integration/query/commits/with_doc_id_cid_test.go index 48878ae06c..434e8b27aa 100644 --- a/tests/integration/query/commits/with_doc_id_cid_test.go +++ b/tests/integration/query/commits/with_doc_id_cid_test.go @@ -104,14 +104,14 @@ func 
TestQueryCommitsWithDocIDAndCidWithUpdate(t *testing.T) { Request: ` { commits( docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", - cid: "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu" + cid: "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu", + "cid": "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_count_test.go b/tests/integration/query/commits/with_doc_id_count_test.go index abab180695..3cd01352ad 100644 --- a/tests/integration/query/commits/with_doc_id_count_test.go +++ b/tests/integration/query/commits/with_doc_id_count_test.go @@ -37,15 +37,15 @@ func TestQueryCommitsWithDocIDAndLinkCount(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "_count": 0, }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "_count": 0, }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", "_count": 2, }, }, diff --git a/tests/integration/query/commits/with_doc_id_field_test.go b/tests/integration/query/commits/with_doc_id_field_test.go index 3de42d2e42..de672e8d70 100644 --- a/tests/integration/query/commits/with_doc_id_field_test.go +++ b/tests/integration/query/commits/with_doc_id_field_test.go @@ -118,7 +118,7 @@ func TestQueryCommitsWithDocIDAndFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", }, }, }, @@ -150,7 +150,7 @@ func TestQueryCommitsWithDocIDAndCompositeFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_limit_offset_test.go b/tests/integration/query/commits/with_doc_id_limit_offset_test.go index e56dfb6d85..d7981cc6ce 100644 --- a/tests/integration/query/commits/with_doc_id_limit_offset_test.go +++ b/tests/integration/query/commits/with_doc_id_limit_offset_test.go @@ -57,10 +57,10 @@ func TestQueryCommitsWithDocIDAndLimitAndOffset(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihvhr7ke7bjgjixce262544tlo7mdlyuswtgl66zsrxcfc5targjy", + "cid": "bafybeic45wkhxtpn3vgd2dmmohq76vw56qz3cpu3oorha3hf2w6qu7bpoa", }, { - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_limit_test.go b/tests/integration/query/commits/with_doc_id_limit_test.go index 7c4a4b2fc8..b31a3b848e 100644 --- a/tests/integration/query/commits/with_doc_id_limit_test.go +++ b/tests/integration/query/commits/with_doc_id_limit_test.go @@ -50,10 +50,10 @@ func TestQueryCommitsWithDocIDAndLimit(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihvhr7ke7bjgjixce262544tlo7mdlyuswtgl66zsrxcfc5targjy", + "cid": "bafybeic45wkhxtpn3vgd2dmmohq76vw56qz3cpu3oorha3hf2w6qu7bpoa", }, { - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", 
+ "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go b/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go index 9312c66928..135418b8f2 100644 --- a/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go +++ b/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go @@ -58,11 +58,11 @@ func TestQueryCommitsWithDocIDAndOrderAndLimitAndOffset(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu", + "cid": "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", "height": int64(2), }, { - "cid": "bafybeihvhr7ke7bjgjixce262544tlo7mdlyuswtgl66zsrxcfc5targjy", + "cid": "bafybeic45wkhxtpn3vgd2dmmohq76vw56qz3cpu3oorha3hf2w6qu7bpoa", "height": int64(3), }, }, diff --git a/tests/integration/query/commits/with_doc_id_order_test.go b/tests/integration/query/commits/with_doc_id_order_test.go index f89121e199..10009bab11 100644 --- a/tests/integration/query/commits/with_doc_id_order_test.go +++ b/tests/integration/query/commits/with_doc_id_order_test.go @@ -44,23 +44,23 @@ func TestQueryCommitsWithDocIDAndOrderHeightDesc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", "height": int64(2), }, { - "cid": "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu", + "cid": "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", "height": int64(2), }, { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "height": int64(1), }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "height": int64(1), }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", "height": int64(1), }, }, @@ -99,23 +99,23 @@ func TestQueryCommitsWithDocIDAndOrderHeightAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "height": int64(1), }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "height": int64(1), }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", "height": int64(1), }, { - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", "height": int64(2), }, { - "cid": "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu", + "cid": "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", "height": int64(2), }, }, @@ -154,24 +154,24 @@ func TestQueryCommitsWithDocIDAndOrderCidDesc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", - "height": int64(2), + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", + "height": int64(1), }, { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", - "height": int64(1), + "cid": 
"bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", + "height": int64(2), }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", - "height": int64(1), + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", + "height": int64(2), }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "height": int64(1), }, { - "cid": "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu", - "height": int64(2), + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", + "height": int64(1), }, }, }, @@ -209,25 +209,25 @@ func TestQueryCommitsWithDocIDAndOrderCidAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu", - "height": int64(2), - }, - { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "height": int64(1), }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "height": int64(1), }, { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", - "height": int64(1), + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", + "height": int64(2), }, { - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", + "cid": "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", "height": int64(2), }, + { + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", + "height": int64(1), + }, }, }, }, @@ -278,39 +278,39 @@ func TestQueryCommitsWithDocIDAndOrderAndMultiUpdatesCidAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "height": int64(1), }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "height": int64(1), }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", "height": int64(1), }, { - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", "height": int64(2), }, { - "cid": "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu", + "cid": "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", "height": int64(2), }, { - "cid": "bafybeihvhr7ke7bjgjixce262544tlo7mdlyuswtgl66zsrxcfc5targjy", + "cid": "bafybeic45wkhxtpn3vgd2dmmohq76vw56qz3cpu3oorha3hf2w6qu7bpoa", "height": int64(3), }, { - "cid": "bafybeicacrvck5qf37pk3pdsiavvxy2jk67dbdpww5pvoun2k52lw2ftqi", + "cid": "bafybeid4y4vqmvec2mvm3su77rrmj6tzsx5zdlt6ias4hzqxbevmosydc4", "height": int64(3), }, { - "cid": "bafybeicv72yzbkdmp5r32eesxcna7rqyuhwoovg66kkivclzji3onbwm3a", + "cid": "bafybeiatfviresatclvedt6zhk4ys7p6cdts5udqsl33nu5d2hxtw4l6la", "height": int64(4), }, { - "cid": "bafybeicf36fznyghq3spknjabxrp72kf66khrzscco3rnyat3ezaufhon4", + "cid": "bafybeiaydxxf7bmeh5ou47z6exa73heg6vjjzznbvrxqbemmu55sdhvuom", "height": int64(4), }, }, diff --git a/tests/integration/query/commits/with_doc_id_test.go b/tests/integration/query/commits/with_doc_id_test.go index c754a18fde..a08f82f3a0 100644 --- a/tests/integration/query/commits/with_doc_id_test.go +++ 
b/tests/integration/query/commits/with_doc_id_test.go @@ -62,13 +62,13 @@ func TestQueryCommitsWithDocID(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", }, }, }, @@ -102,22 +102,22 @@ func TestQueryCommitsWithDocIDAndLinks(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "links": []map[string]any{}, }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "links": []map[string]any{}, }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", "links": []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "name": "age", }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "name": "name", }, }, @@ -158,23 +158,23 @@ func TestQueryCommitsWithDocIDAndUpdate(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", "height": int64(2), }, { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "height": int64(1), }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "height": int64(1), }, { - "cid": "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu", + "cid": "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", "height": int64(2), }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", "height": int64(1), }, }, @@ -219,44 +219,44 @@ func TestQueryCommitsWithDocIDAndUpdateAndLinks(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", "links": []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "name": "_head", }, }, }, { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "links": []map[string]any{}, }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "links": []map[string]any{}, }, { - "cid": "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu", + "cid": "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", "links": []map[string]any{ { - "cid": 
"bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", "name": "_head", }, { - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", "name": "age", }, }, }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", "links": []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "name": "age", }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "name": "name", }, }, diff --git a/tests/integration/query/commits/with_doc_id_typename_test.go b/tests/integration/query/commits/with_doc_id_typename_test.go index 8248724cd8..09dcc4060f 100644 --- a/tests/integration/query/commits/with_doc_id_typename_test.go +++ b/tests/integration/query/commits/with_doc_id_typename_test.go @@ -37,15 +37,15 @@ func TestQueryCommitsWithDocIDWithTypeName(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "__typename": "Commit", }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "__typename": "Commit", }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", "__typename": "Commit", }, }, diff --git a/tests/integration/query/commits/with_field_test.go b/tests/integration/query/commits/with_field_test.go index 45b0acf550..01a2204326 100644 --- a/tests/integration/query/commits/with_field_test.go +++ b/tests/integration/query/commits/with_field_test.go @@ -66,7 +66,7 @@ func TestQueryCommitsWithFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", }, }, }, @@ -98,7 +98,7 @@ func TestQueryCommitsWithCompositeFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", }, }, }, @@ -131,8 +131,8 @@ func TestQueryCommitsWithCompositeFieldIdWithReturnedSchemaVersionId(t *testing. 
}`, Results: []map[string]any{ { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", - "schemaVersionId": "bafkreiayhdsgzhmrz6t5d3x2cgqqbdjt7aqgldtlkmxn5eibg542j3n6ea", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", + "schemaVersionId": "bafkreidqkjb23ngp34eebeaxiogrlogkpfz62vjb3clnnyvhlbgdaywkg4", }, }, }, diff --git a/tests/integration/query/commits/with_group_test.go b/tests/integration/query/commits/with_group_test.go index d7c539a999..362829ee0b 100644 --- a/tests/integration/query/commits/with_group_test.go +++ b/tests/integration/query/commits/with_group_test.go @@ -89,10 +89,10 @@ func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { "height": int64(2), "_group": []map[string]any{ { - "cid": "bafybeihqgrwnhc4w7e5cbhycxvqrpzgi2ei4xrcsre2plceclptgn4tc3i", + "cid": "bafybeicwg56ddi7smy3j2kkv5y4yghvdrj3twqqafzdwtinbkw5mlpxwz4", }, { - "cid": "bafybeibfwqf5szatmlyl3alru4nq3gnxaiyyb3ggqung2jwb4qnm6mejyu", + "cid": "bafybeidxnkwhuzmkdw5wuippru3tp74vcmz5jvcziambpjadxeathdh26a", }, }, }, @@ -100,13 +100,13 @@ func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { "height": int64(1), "_group": []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", }, }, }, @@ -142,7 +142,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "_group": []map[string]any{ { "height": int64(1), @@ -150,7 +150,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "_group": []map[string]any{ { "height": int64(1), @@ -158,7 +158,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, }, { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", "_group": []map[string]any{ { "height": int64(1), diff --git a/tests/integration/query/latest_commits/with_doc_id_field_test.go b/tests/integration/query/latest_commits/with_doc_id_field_test.go index 9f3441e52e..c1fce06eb6 100644 --- a/tests/integration/query/latest_commits/with_doc_id_field_test.go +++ b/tests/integration/query/latest_commits/with_doc_id_field_test.go @@ -68,7 +68,7 @@ func TestQueryLatestCommitsWithDocIDAndFieldId(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "links": []map[string]any{}, }, }, @@ -101,14 +101,14 @@ func TestQueryLatestCommitsWithDocIDAndCompositeFieldId(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", "links": []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": 
"bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "name": "age", }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "name": "name", }, }, diff --git a/tests/integration/query/latest_commits/with_doc_id_test.go b/tests/integration/query/latest_commits/with_doc_id_test.go index 4d02a8d96e..0c34c4dab2 100644 --- a/tests/integration/query/latest_commits/with_doc_id_test.go +++ b/tests/integration/query/latest_commits/with_doc_id_test.go @@ -38,14 +38,14 @@ func TestQueryLatestCommitsWithDocID(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", "links": []map[string]any{ { - "cid": "bafybeihfw5lufgs7ygv45to5rqvt3xkecjgikoccjyx6y2i7lnaclmrcjm", + "cid": "bafybeicvpgfinf2m2jufbbcy5mhv6jca6in5k4fzx5op7xvvcmbp7sceaa", "name": "age", }, { - "cid": "bafybeigmez6gtszsqx6aevzlanvpazhhezw5va4wizhqtqz5k4s2dqjb24", + "cid": "bafybeib2espk2hq366wjnmazg45uvoswqbvf4plx7fgzayagxdn737onci", "name": "name", }, }, @@ -75,8 +75,8 @@ func TestQueryLatestCommitsWithDocIDWithSchemaVersionIdField(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeiedu23doqe2nagdbmkvfyuouajnfxo7ezy57vbv34dqewhwbfg45u", - "schemaVersionId": "bafkreiayhdsgzhmrz6t5d3x2cgqqbdjt7aqgldtlkmxn5eibg542j3n6ea", + "cid": "bafybeigvpf62j7j2wbpid5iavzxielbhbsbbirmgzqkw3wpptdvysuztwi", + "schemaVersionId": "bafkreidqkjb23ngp34eebeaxiogrlogkpfz62vjb3clnnyvhlbgdaywkg4", }, }, } diff --git a/tests/integration/query/one_to_many/with_cid_doc_id_test.go b/tests/integration/query/one_to_many/with_cid_doc_id_test.go index 92ec678468..c9dd0ff4ba 100644 --- a/tests/integration/query/one_to_many/with_cid_doc_id_test.go +++ b/tests/integration/query/one_to_many/with_cid_doc_id_test.go @@ -64,48 +64,68 @@ import ( // } func TestQueryOneToManyWithCidAndDocID(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from one side with cid and docID", - Request: `query { - Book ( - cid: "bafybeielrctlwgqx3o5cu3m2636fnfqcizayinyyuemaqhgdgy7ykfhyvi" - docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" - ) { - name - author { - name - } + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ + + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + Doc: `{ "name": "Painted House", "rating": 4.9, "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" }`, }, - //authors - 1: { // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, }, - }, - Results: []map[string]any{ - { - "name": "Painted House", - "author": map[string]any{ - "name": "John Grisham", + testUtils.Request{ + Request: `query { + Book ( + cid: "bafybeidshqlc7z2psrtfhmrarsxwxwwis6baxjrzs2x6mdmzsop6b7hnii" + docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" + ) { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + }, + }, }, }, }, } - 
executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } // This test is for documentation reasons only. This is not // desired behaviour (there is no way to get the state of the child at the // time of parent creation without explicit child cid, which is also not tied // to parent state). func TestQueryOneToManyWithChildUpdateAndFirstCidAndDocID(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from one side with child update and parent cid and docID", - Request: `query { - Book ( - cid: "bafybeielrctlwgqx3o5cu3m2636fnfqcizayinyyuemaqhgdgy7ykfhyvi", - docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" - ) { - name - author { - name - age - } + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d - `{ + + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + Doc: `{ "name": "Painted House", "rating": 4.9, "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" }`, }, - //authors - 1: { // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, }, - }, - Updates: map[int]map[int][]string{ - 1: { - 0: { - `{ - "age": 22 - }`, - }, + testUtils.UpdateDoc{ + CollectionID: 1, + Doc: `{ + "age": 22 + }`, }, - }, - Results: []map[string]any{ - { - "name": "Painted House", - "author": map[string]any{ - "name": "John Grisham", - "age": int64(22), + testUtils.Request{ + Request: `query { + Book ( + cid: "bafybeidshqlc7z2psrtfhmrarsxwxwwis6baxjrzs2x6mdmzsop6b7hnii", + docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" + ) { + name + author { + name + age + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + "age": int64(22), + }, + }, }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToManyWithParentUpdateAndFirstCidAndDocID(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from one side with parent update and parent cid and docID", - Request: `query { - Book ( - cid: "bafybeiao32zf3tqrtutibbivxhk4fjjhsryb5q4mqyp3gecqp3s5tgegfy", - docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" - ) { - name + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ + + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + Doc: `{ "name": "Painted House", "rating": 4.9, "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" }`, }, - //authors - 1: { // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, }, - }, - Updates: map[int]map[int][]string{ - 0: { - 0: { - `{ - "rating": 4.5 - }`, - }, + testUtils.UpdateDoc{ + CollectionID: 0, + Doc: `{ + "rating": 4.5 + }`, }, - }, - Results: []map[string]any{ - { - "name": "Painted House", - "rating": float64(4.9), - "author": map[string]any{ - "name": "John Grisham",
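+ // Reading the book at its first commit cid is expected to time-travel: + // the parent update to rating 4.5 above must not be visible here, so + // the expected rating is still the original 4.9.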
+ testUtils.Request{ + Request: `query { + Book ( + cid: "bafybeidshqlc7z2psrtfhmrarsxwxwwis6baxjrzs2x6mdmzsop6b7hnii", + docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" + ) { + name + rating + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "rating": float64(4.9), + "author": map[string]any{ + "name": "John Grisham", + }, + }, }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToManyWithParentUpdateAndLastCidAndDocID(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from one side with parent update and parent cid and docID", - Request: `query { - Book ( - cid: "bafybeiao32zf3tqrtutibbivxhk4fjjhsryb5q4mqyp3gecqp3s5tgegfy", - docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" - ) { - name - rating - author { - name - } + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ + + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + Doc: `{ "name": "Painted House", "rating": 4.9, "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" }`, }, - //authors - 1: { // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, }, - }, - Updates: map[int]map[int][]string{ - 0: { - 0: { - `{ - "rating": 4.5 - }`, - }, + testUtils.UpdateDoc{ + CollectionID: 0, + Doc: `{ + "rating": 4.5 + }`, }, - }, - Results: []map[string]any{ - { - "name": "Painted House", - "rating": float64(4.5), - "author": map[string]any{ - "name": "John Grisham", + testUtils.Request{ + Request: `query { + Book ( + cid: "bafybeiefqhex3axofwy2gwdynhs6rijwrpkdpwy5fnqnzbk3e7iwcgvrqa", + docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" + ) { + name + rating + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "rating": float64(4.5), + "author": map[string]any{ + "name": "John Grisham", + }, + }, }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many/with_filter_test.go b/tests/integration/query/one_to_many/with_filter_test.go index 405e345801..ce019f2afa 100644 --- a/tests/integration/query/one_to_many/with_filter_test.go +++ b/tests/integration/query/one_to_many/with_filter_test.go @@ -456,3 +456,98 @@ func TestQueryOneToManyWithCompoundOperatorInFilterAndRelation(t *testing.T) { } testUtils.ExecuteTestCase(t, test) } + +func TestQueryOneToMany_WithCompoundOperatorInFilterAndRelationAndCaseInsensitiveLike_NoError(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-many relation query filter with compound operator and relation", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: bookAuthorGQLSchema, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": 
"bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "The Lord of the Rings", + "rating": 5.0, + "author_id": "bae-61d279c1-eab9-56ec-8654-dce0324ebfda" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + Doc: `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + Doc: `{ + "name": "Cornelia Funke", + "age": 62, + "verified": false + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // bae-61d279c1-eab9-56ec-8654-dce0324ebfda + Doc: `{ + "name": "John Tolkien", + "age": 70, + "verified": true + }`, + }, + testUtils.Request{ + Request: `query { + Author(filter: {_or: [ + {_and: [ + {published: {rating: {_lt: 5.0}}}, + {published: {rating: {_gt: 4.8}}} + ]}, + {_and: [ + {age: {_le: 65}}, + {published: {name: {_ilike: "%lord%"}}} + ]}, + ]}) { + name + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + }, + { + "name": "Cornelia Funke", + }, + }, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/query/one_to_one/with_count_filter_test.go b/tests/integration/query/one_to_one/with_count_filter_test.go index a69be17f78..d5f2b60d22 100644 --- a/tests/integration/query/one_to_one/with_count_filter_test.go +++ b/tests/integration/query/one_to_one/with_count_filter_test.go @@ -16,8 +16,6 @@ import ( testUtils "github.com/sourcenetwork/defradb/tests/integration" ) -// This test documents a bug and should be altered with: -// https://github.com/sourcenetwork/defradb/issues/1869 func TestQueryOneToOneWithCountWithCompoundOrFilterThatIncludesRelation(t *testing.T) { test := testUtils.TestCase{ Description: "One-to-one relation with count with _or filter that includes relation", @@ -103,17 +101,12 @@ func TestQueryOneToOneWithCountWithCompoundOrFilterThatIncludesRelation(t *testi }`, Results: []map[string]any{ { - "_count": "2", + "_count": int(2), }, }, }, }, } - testUtils.AssertPanic( - t, - func() { - testUtils.ExecuteTestCase(t, test) - }, - ) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_one/with_version_test.go b/tests/integration/query/one_to_one/with_version_test.go new file mode 100644 index 0000000000..4c3656a312 --- /dev/null +++ b/tests/integration/query/one_to_one/with_version_test.go @@ -0,0 +1,83 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package one_to_one + +// This test documents unwanted behaviour, see the linked ticket for more info: +// https://github.com/sourcenetwork/defradb/issues/1709 +// +// It is currently commented out because the panic is caught in the CLI and HTTP clients +// and we have no good way atm to skip it. 
+/*func TestQueryOneToOne_WithVersionOnOuter(t *testing.T) { + test := testUtils.TestCase{ + Description: "Embedded commits query within one-one query", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + author: Author + } + + type Author { + name: String + published: Book @primary + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "فارسی دوم دبستان" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "name": "نمی دانم", + "published": "bae-c052eade-23f6-5ee3-8067-20004e746be3" + }`, + }, + testUtils.Request{ + Request: ` + query { + Book { + name + _version { + docID + } + author { + name + } + } + } + `, + Results: []map[string]any{ + { + "name": "نمی دانم", + "_version": []map[string]any{ + { + "docID": "bae-c052eade-23f6-5ee3-8067-20004e746be3", + }, + }, + "author": map[string]any{ + "name": "فارسی دوم دبستان", + }, + }, + }, + }, + }, + } + + require.Panics(t, + func() { testUtils.ExecuteTestCase(t, test) }, + ) +} +*/ diff --git a/tests/integration/query/simple/with_cid_doc_id_test.go b/tests/integration/query/simple/with_cid_doc_id_test.go index 4b40c4d76d..f19bc4e9fa 100644 --- a/tests/integration/query/simple/with_cid_doc_id_test.go +++ b/tests/integration/query/simple/with_cid_doc_id_test.go @@ -17,256 +17,279 @@ import ( ) func TestQuerySimpleWithInvalidCidAndInvalidDocID(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Simple query with invalid cid and invalid docID", - Request: `query { + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.Request{ + Request: `query { Users ( cid: "any non-nil string value - this will be ignored", docID: "invalid docID" ) { - Name + name } }`, - Docs: map[int][]string{ - 0: { - `{ - "Name": "John", - "Age": 21 - }`, + ExpectedError: "invalid cid: selected encoding not supported", }, }, - ExpectedError: "invalid cid: selected encoding not supported", } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } // This test is for documentation reasons only. This is not // desired behaviour (should just return empty). 
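+// A cid that parses successfully but is unknown to the node gets past cid +// validation and only fails at the blockstore lookup, hence the blockstore +// error expected below.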
func TestQuerySimpleWithUnknownCidAndInvalidDocID(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Simple query with unknown cid and invalid docID", - Request: `query { + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.Request{ + Request: `query { Users ( cid: "bafybeid57gpbwi4i6bg7g357vwwyzsmr4bjo22rmhoxrwqvdxlqxcgaqvu", docID: "invalid docID" ) { - Name + name } }`, - Docs: map[int][]string{ - 0: { - `{ - "Name": "John", - "Age": 21 - }`, + ExpectedError: "failed to get block in blockstore: ipld: could not find", }, }, - ExpectedError: "failed to get block in blockstore: ipld: could not find", } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQuerySimpleWithCidAndDocID(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Simple query with cid and docID", - Request: `query { + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.Request{ + Request: `query { Users ( - cid: "bafybeigwxfw2nfcwelqxzgjsmm5okrt7dctzvzml4tm7i7q7fsdit3ihz4", - docID: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" + cid: "bafybeidzstxabh7qktq7pkmmxvpjbnwklxz3h5l6d425ldvjy65xvvuxu4", + docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" ) { - Name + name } }`, - Docs: map[int][]string{ - 0: { - `{ - "Name": "John", - "Age": 21 - }`, - }, - }, - Results: []map[string]any{ - { - "Name": "John", + Results: []map[string]any{ + { + "name": "John", + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQuerySimpleWithUpdateAndFirstCidAndDocID(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Simple query with (first) cid and docID", - Request: `query { - Users ( - cid: "bafybeigwxfw2nfcwelqxzgjsmm5okrt7dctzvzml4tm7i7q7fsdit3ihz4", - docID: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" - ) { - Name - Age + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" }`, - Docs: map[int][]string{ - 0: { - `{ - "Name": "John", - "Age": 21 + }, + testUtils.UpdateDoc{ + Doc: `{ + "name": "Johnn" }`, }, - }, - Updates: map[int]map[int][]string{ - 0: { - 0: { - // update to change age to 22 on document 0 - `{"Age": 22}`, - // then update it again to change age to 23 on document 0 - `{"Age": 23}`, + testUtils.Request{ + Request: `query { + Users ( + cid: "bafybeidzstxabh7qktq7pkmmxvpjbnwklxz3h5l6d425ldvjy65xvvuxu4", + docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + ) { + name + } + }`, + Results: []map[string]any{ + { + "name": "John", + }, }, }, }, - Results: []map[string]any{ - { - "Name": "John", - "Age": int64(21), - }, - }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQuerySimpleWithUpdateAndLastCidAndDocID(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Simple query with (last) cid and docID", - Request: `query { - Users ( - cid: "bafybeigotwnjltl5y5ou5yqxujdayoqet4axspaclbvzustjhinzqx77ym" - docID: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" - ) { - Name - Age + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" }`, - Docs: map[int][]string{ - 0: { - `{ - "Name": "John", - "Age": 21 + }, + 
testUtils.UpdateDoc{ + Doc: `{ + "name": "Johnn" }`, }, - }, - Updates: map[int]map[int][]string{ - 0: { - 0: { - // update to change age to 22 on document 0 - `{"Age": 22}`, - // then update it again to change age to 23 on document 0 - `{"Age": 23}`, + testUtils.Request{ + Request: `query { + Users ( + cid: "bafybeickytibhqnqtwhpjfi7ponnu5756ifo76oxb2ksxrz4iiqaywg3lu", + docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + ) { + name + } + }`, + Results: []map[string]any{ + { + "name": "Johnn", + }, }, }, }, - Results: []map[string]any{ - { - "Name": "John", - "Age": int64(23), - }, - }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQuerySimpleWithUpdateAndMiddleCidAndDocID(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Simple query with (middle) cid and docID", - Request: `query { - Users ( - cid: "bafybeib4cdjv4dxmayzgf242hx2r3v5tq5ib5z6oyyrzk3dtddt3wsyyhi", - docID: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" - ) { - Name - Age + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" }`, - Docs: map[int][]string{ - 0: { - `{ - "Name": "John", - "Age": 21 + }, + testUtils.UpdateDoc{ + Doc: `{ + "name": "Johnn" }`, }, - }, - Updates: map[int]map[int][]string{ - 0: { - 0: { - // update to change age to 22 on document 0 - `{"Age": 22}`, - // then update it again to change age to 23 on document 0 - `{"Age": 23}`, - }, + testUtils.UpdateDoc{ + Doc: `{ + "name": "Johnnn" + }`, }, - }, - Results: []map[string]any{ - { - "Name": "John", - "Age": int64(22), + testUtils.Request{ + Request: `query { + Users ( + cid: "bafybeickytibhqnqtwhpjfi7ponnu5756ifo76oxb2ksxrz4iiqaywg3lu", + docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + ) { + name + } + }`, + Results: []map[string]any{ + { + "name": "Johnn", + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQuerySimpleWithUpdateAndFirstCidAndDocIDAndSchemaVersion(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Simple query with (first) cid and docID and yielded schema version", - Request: `query { - Users ( - cid: "bafybeigwxfw2nfcwelqxzgjsmm5okrt7dctzvzml4tm7i7q7fsdit3ihz4", - docID: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.UpdateDoc{ + Doc: `{ + "name": "Johnn" + }`, + }, + testUtils.Request{ + Request: `query { + Users ( + cid: "bafybeidzstxabh7qktq7pkmmxvpjbnwklxz3h5l6d425ldvjy65xvvuxu4", + docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" ) { - Name - Age + name _version { schemaVersionId } } }`, - Docs: map[int][]string{ - 0: { - `{ - "Name": "John", - "Age": 21 - }`, - }, - }, - Updates: map[int]map[int][]string{ - 0: { - 0: { - // update to change age to 22 on document 0 - `{"Age": 22}`, - // then update it again to change age to 23 on document 0 - `{"Age": 23}`, - }, - }, - }, - Results: []map[string]any{ - { - "Name": "John", - "Age": int64(21), - "_version": []map[string]any{ + Results: []map[string]any{ { - "schemaVersionId": "bafkreidvd63bawkelxe3wtf7a65klkq4x3dvenqafyasndyal6fvffkeam", + "name": "John", + "_version": []map[string]any{ + { + "schemaVersionId": "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + }, + }, }, }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } 
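+// The Update tests above pin the read to a single commit: querying with the +// first, middle, or last cid is expected to return the document state as of +// that commit rather than the latest state.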
// Note: Only the first CID is reproducible given the added entropy to the Counter CRDT type. @@ -301,7 +324,7 @@ func TestCidAndDocIDQuery_ContainsPNCounterWithIntKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafybeiepi2gpoyshdj2ekdsydhw5itxqmipsh7f6pd6iyoiu6sqsdlj2se", + cid: "bafybeiebqzqml6nn3laarr7yekakrsdnkn4nbgrl4xc5rshljp3in6au2m", docID: "bae-a688789e-d8a6-57a7-be09-22e005ab79e0" ) { name @@ -353,7 +376,7 @@ func TestCidAndDocIDQuery_ContainsPNCounterWithFloatKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafybeihjdntxsc75hpnyakog4nnaxakljer7zf7pjybpgntcsg45qmisau", + cid: "bafybeifzuh74aq47vjngkwipjne4r2gi3v2clewgsruspqirihnps4vcmu", docID: "bae-fa6a97e9-e0e9-5826-8a8c-57775d35e07c" ) { name diff --git a/tests/integration/query/simple/with_filter/with_like_string_test.go b/tests/integration/query/simple/with_filter/with_like_string_test.go index 00e53aed82..95ebabc5de 100644 --- a/tests/integration/query/simple/with_filter/with_like_string_test.go +++ b/tests/integration/query/simple/with_filter/with_like_string_test.go @@ -46,6 +46,36 @@ func TestQuerySimpleWithLikeStringContainsFilterBlockContainsString(t *testing.T executeTestCase(t, test) } +func TestQuerySimple_WithCaseInsensitiveLike_ShouldMatchString(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "Simple query with basic case insensitive like-string filter contains string", + Request: `query { + Users(filter: {Name: {_ilike: "%stormborn%"}}) { + Name + } + }`, + Docs: map[int][]string{ + 0: { + `{ + "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", + "HeightM": 1.65 + }`, + `{ + "Name": "Viserys I Targaryen, King of the Andals", + "HeightM": 1.82 + }`, + }, + }, + Results: []map[string]any{ + { + "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", + }, + }, + } + + executeTestCase(t, test) +} + func TestQuerySimpleWithLikeStringContainsFilterBlockAsPrefixString(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple query with basic like-string filter with string as prefix", @@ -76,6 +106,36 @@ func TestQuerySimpleWithLikeStringContainsFilterBlockAsPrefixString(t *testing.T executeTestCase(t, test) } +func TestQuerySimple_WithCaseInsensitiveLikeString_ShouldMatchPrefixString(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "Simple query with basic case insensitive like-string filter with string as prefix", + Request: `query { + Users(filter: {Name: {_ilike: "viserys%"}}) { + Name + } + }`, + Docs: map[int][]string{ + 0: { + `{ + "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", + "HeightM": 1.65 + }`, + `{ + "Name": "Viserys I Targaryen, King of the Andals", + "HeightM": 1.82 + }`, + }, + }, + Results: []map[string]any{ + { + "Name": "Viserys I Targaryen, King of the Andals", + }, + }, + } + + executeTestCase(t, test) +} + func TestQuerySimpleWithLikeStringContainsFilterBlockAsSuffixString(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple query with basic like-string filter with string as suffix", @@ -106,6 +166,36 @@ func TestQuerySimpleWithLikeStringContainsFilterBlockAsSuffixString(t *testing.T executeTestCase(t, test) } +func TestQuerySimple_WithCaseInsensitiveLikeString_ShouldMatchSuffixString(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "Simple query with basic case insensitive like-string filter with string as suffix", + Request: `query { + Users(filter: {Name: {_ilike: "%andals"}}) { 
+ Name + } + }`, + Docs: map[int][]string{ + 0: { + `{ + "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", + "HeightM": 1.65 + }`, + `{ + "Name": "Viserys I Targaryen, King of the Andals", + "HeightM": 1.82 + }`, + }, + }, + Results: []map[string]any{ + { + "Name": "Viserys I Targaryen, King of the Andals", + }, + }, + } + + executeTestCase(t, test) +} + func TestQuerySimpleWithLikeStringContainsFilterBlockExactString(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple query with basic like-string filter with string as suffix", @@ -136,6 +226,36 @@ executeTestCase(t, test) } +func TestQuerySimple_WithCaseInsensitiveLikeString_ShouldMatchExactString(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "Simple query with basic case insensitive like-string filter with exact string", + Request: `query { + Users(filter: {Name: {_ilike: "daenerys stormborn of house targaryen, the first of her name"}}) { + Name + } + }`, + Docs: map[int][]string{ + 0: { + `{ + "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", + "HeightM": 1.65 + }`, + `{ + "Name": "Viserys I Targaryen, King of the Andals", + "HeightM": 1.82 + }`, + }, + }, + Results: []map[string]any{ + { + "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", + }, + }, + } + + executeTestCase(t, test) +} +
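+// The _ilike operator appears to behave like _like after lower-casing both +// operands. A minimal sketch of the assumed semantics (matchesLike is a +// hypothetical helper implementing the plain _like wildcard match): +// +// func matchesILike(pattern, value string) bool { +// return matchesLike(strings.ToLower(pattern), strings.ToLower(value)) +// } +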
the First of Her Name", + "HeightM": 1.65 + }`, + `{ + "Name": "Viserys I Targaryen, King of the Andals", + "HeightM": 1.82 + }`, + }, + }, + Results: []map[string]any{ + { + "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", + }, + }, + } + + executeTestCase(t, test) +} + func TestQuerySimpleWithNotLikeStringContainsFilterBlockAsSuffixString(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple query with basic not like-string filter with string as suffix", @@ -106,6 +166,36 @@ func TestQuerySimpleWithNotLikeStringContainsFilterBlockAsSuffixString(t *testin executeTestCase(t, test) } +func TestQuerySimple_WithNotCaseInsensitiveLikeString_ShouldMatchSuffixString(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "Simple query with basic not case insensitive like-string filter with string as suffix", + Request: `query { + Users(filter: {Name: {_nilike: "%andals"}}) { + Name + } + }`, + Docs: map[int][]string{ + 0: { + `{ + "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", + "HeightM": 1.65 + }`, + `{ + "Name": "Viserys I Targaryen, King of the Andals", + "HeightM": 1.82 + }`, + }, + }, + Results: []map[string]any{ + { + "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", + }, + }, + } + + executeTestCase(t, test) +} + func TestQuerySimpleWithNotLikeStringContainsFilterBlockExactString(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple query with basic not like-string filter with string as suffix", @@ -136,6 +226,36 @@ func TestQuerySimpleWithNotLikeStringContainsFilterBlockExactString(t *testing.T executeTestCase(t, test) } +func TestQuerySimple_WithNotCaseInsensitiveLikeString_MatchExactString(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "Simple query with basic not case insensitive like-string filter with exact match string", + Request: `query { + Users(filter: {Name: {_nilike: "daenerys stormborn of house targaryen, the first of her name"}}) { + Name + } + }`, + Docs: map[int][]string{ + 0: { + `{ + "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", + "HeightM": 1.65 + }`, + `{ + "Name": "Viserys I Targaryen, King of the Andals", + "HeightM": 1.82 + }`, + }, + }, + Results: []map[string]any{ + { + "Name": "Viserys I Targaryen, King of the Andals", + }, + }, + } + + executeTestCase(t, test) +} + func TestQuerySimpleWithNotLikeStringContainsFilterBlockContainsStringMuplitpleResults(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple query with basic not like-string filter with contains string multiple results", diff --git a/tests/integration/query/simple/with_version_test.go b/tests/integration/query/simple/with_version_test.go index a40c4d660f..0f3866f910 100644 --- a/tests/integration/query/simple/with_version_test.go +++ b/tests/integration/query/simple/with_version_test.go @@ -46,14 +46,14 @@ func TestQuerySimpleWithEmbeddedLatestCommit(t *testing.T) { "Age": int64(21), "_version": []map[string]any{ { - "cid": "bafybeigwxfw2nfcwelqxzgjsmm5okrt7dctzvzml4tm7i7q7fsdit3ihz4", + "cid": "bafybeicojqe66grk564b2hns3zi6rhquqvugxj6wi4s6xk4e2gg65dzx5e", "links": []map[string]any{ { - "cid": "bafybeigcmjyt2ux4mzfckbsz5snkoqrr42vfkesgk7rdw6xzblrowrzfg4", + "cid": "bafybeic45t5rj54wx47fhaqm6dubwt2cf5fkqzwm2nea7ypam3f6s2zbk4", "name": "Age", }, { - "cid": "bafybeihkekm4kfn2ttx3wb33l2ps7aductuzd7hrmu6n7zloaicrj5n75u", + "cid": "bafybeifkcrogypyaq5iw7krgi5jd26s7jlfsy5u232e7e7y7dqe3wm2hcu", "name": "Name", }, }, @@ -90,7 +90,7 @@ func 
TestQuerySimpleWithEmbeddedLatestCommitWithSchemaVersionId(t *testing.T) { "Name": "John", "_version": []map[string]any{ { - "schemaVersionId": "bafkreidvd63bawkelxe3wtf7a65klkq4x3dvenqafyasndyal6fvffkeam", + "schemaVersionId": "bafkreiekkppcdl573ru624wh3kwkmy2nhqzjsvqpu6jv5dgq2kidpnon4u", }, }, }, @@ -171,14 +171,14 @@ func TestQuerySimpleWithMultipleAliasedEmbeddedLatestCommit(t *testing.T) { "Age": int64(21), "_version": []map[string]any{ { - "cid": "bafybeigwxfw2nfcwelqxzgjsmm5okrt7dctzvzml4tm7i7q7fsdit3ihz4", + "cid": "bafybeicojqe66grk564b2hns3zi6rhquqvugxj6wi4s6xk4e2gg65dzx5e", "L1": []map[string]any{ { - "cid": "bafybeigcmjyt2ux4mzfckbsz5snkoqrr42vfkesgk7rdw6xzblrowrzfg4", + "cid": "bafybeic45t5rj54wx47fhaqm6dubwt2cf5fkqzwm2nea7ypam3f6s2zbk4", "name": "Age", }, { - "cid": "bafybeihkekm4kfn2ttx3wb33l2ps7aductuzd7hrmu6n7zloaicrj5n75u", + "cid": "bafybeifkcrogypyaq5iw7krgi5jd26s7jlfsy5u232e7e7y7dqe3wm2hcu", "name": "Name", }, }, @@ -198,3 +198,174 @@ func TestQuerySimpleWithMultipleAliasedEmbeddedLatestCommit(t *testing.T) { executeTestCase(t, test) } + +func TestQuery_WithAllCommitFields_NoError(t *testing.T) { + const docID = "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" + + test := testUtils.TestCase{ + Description: "Embedded commits query within object query with document ID", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: userCollectionGQLSchema, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "Name": "John", + "Age": 21 + }`, + }, + testUtils.Request{ + Request: `query { + Users { + Name + _docID + _version { + cid + collectionID + delta + docID + fieldId + fieldName + height + links { + cid + name + } + schemaVersionId + } + } + }`, + Results: []map[string]any{ + { + "Name": "John", + "_docID": docID, + "_version": []map[string]any{ + { + "cid": "bafybeicojqe66grk564b2hns3zi6rhquqvugxj6wi4s6xk4e2gg65dzx5e", + "collectionID": int64(1), + "delta": nil, + "docID": "bae-52b9170d-b77a-5887-b877-cbdbb99b009f", + "fieldId": "C", + "fieldName": nil, + "height": int64(1), + "links": []map[string]any{ + { + "cid": "bafybeic45t5rj54wx47fhaqm6dubwt2cf5fkqzwm2nea7ypam3f6s2zbk4", + "name": "Age", + }, + { + "cid": "bafybeifkcrogypyaq5iw7krgi5jd26s7jlfsy5u232e7e7y7dqe3wm2hcu", + "name": "Name", + }, + }, + "schemaVersionId": "bafkreiekkppcdl573ru624wh3kwkmy2nhqzjsvqpu6jv5dgq2kidpnon4u", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQuery_WithAllCommitFieldsWithUpdate_NoError(t *testing.T) { + const docID = "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" + + test := testUtils.TestCase{ + Description: "Embedded commits query within object query with document ID", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: userCollectionGQLSchema, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "Name": "John", + "Age": 21 + }`, + }, + testUtils.UpdateDoc{ + CollectionID: 0, + DocID: 0, + Doc: `{"Age": 22}`, + }, + testUtils.Request{ + Request: `query { + Users { + Name + Age + _docID + _version { + cid + collectionID + delta + docID + fieldId + fieldName + height + links { + cid + name + } + schemaVersionId + } + } + }`, + Results: []map[string]any{ + { + "Name": "John", + "Age": int64(22), + "_docID": docID, + "_version": []map[string]any{ + { + "cid": "bafybeigcjabzlkuj4j35boczgcl4jmars7gz5a7dfvpq3m344bzth7ebqq", + "collectionID": int64(1), + "delta": nil, + "docID": "bae-52b9170d-b77a-5887-b877-cbdbb99b009f", + "fieldId": "C", + "fieldName": nil, + "height": int64(2), + "links": []map[string]any{ + { + "cid": 
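+ // Every cid and schemaVersionId in these fixtures is content-addressed
+ // (a hash of the underlying block bytes), so the wholesale churn of
+ // these values across this file reflects the new schema encoding rather
+ // than any behavioural change in commit creation.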
"bafybeihzra5nmcai4omdv2hkplrpexjsau62eaa2ndrf2b7ksxvl7hx3qm", + "name": "Age", + }, + { + "cid": "bafybeicojqe66grk564b2hns3zi6rhquqvugxj6wi4s6xk4e2gg65dzx5e", + "name": "_head", + }, + }, + "schemaVersionId": "bafkreiekkppcdl573ru624wh3kwkmy2nhqzjsvqpu6jv5dgq2kidpnon4u", + }, + { + "cid": "bafybeicojqe66grk564b2hns3zi6rhquqvugxj6wi4s6xk4e2gg65dzx5e", + "collectionID": int64(1), + "delta": nil, + "docID": "bae-52b9170d-b77a-5887-b877-cbdbb99b009f", + "fieldId": "C", + "fieldName": nil, + "height": int64(1), + "links": []map[string]any{ + { + "cid": "bafybeic45t5rj54wx47fhaqm6dubwt2cf5fkqzwm2nea7ypam3f6s2zbk4", + "name": "Age", + }, + { + "cid": "bafybeifkcrogypyaq5iw7krgi5jd26s7jlfsy5u232e7e7y7dqe3wm2hcu", + "name": "Name", + }, + }, + "schemaVersionId": "bafkreiekkppcdl573ru624wh3kwkmy2nhqzjsvqpu6jv5dgq2kidpnon4u", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/results.go b/tests/integration/results.go index df21acef30..20270af1c4 100644 --- a/tests/integration/results.go +++ b/tests/integration/results.go @@ -11,6 +11,7 @@ package tests import ( + "encoding/base64" "encoding/json" "testing" "time" @@ -116,6 +117,8 @@ func areResultsEqual(expected any, actual any) bool { return areResultOptionsEqual(expectedVal, actual) case immutable.Option[string]: return areResultOptionsEqual(expectedVal, actual) + case []uint8: + return areResultsEqual(base64.StdEncoding.EncodeToString(expectedVal), actual) case []int64: return areResultArraysEqual(expectedVal, actual) case []uint64: diff --git a/tests/integration/schema/aggregates/inline_array_test.go b/tests/integration/schema/aggregates/inline_array_test.go index 75c9d76414..1dfaa4a858 100644 --- a/tests/integration/schema/aggregates/inline_array_test.go +++ b/tests/integration/schema/aggregates/inline_array_test.go @@ -1386,6 +1386,12 @@ func TestSchemaAggregateInlineArrayCreatesUsersNillableStringCountFilter(t *test "name": "String", }, }, + map[string]any{ + "name": "_ilike", + "type": map[string]any{ + "name": "String", + }, + }, map[string]any{ "name": "_in", "type": map[string]any{ @@ -1404,6 +1410,12 @@ func TestSchemaAggregateInlineArrayCreatesUsersNillableStringCountFilter(t *test "name": "String", }, }, + map[string]any{ + "name": "_nilike", + "type": map[string]any{ + "name": "String", + }, + }, map[string]any{ "name": "_nin", "type": map[string]any{ @@ -1524,6 +1536,12 @@ func TestSchemaAggregateInlineArrayCreatesUsersStringCountFilter(t *testing.T) { "name": "String", }, }, + map[string]any{ + "name": "_ilike", + "type": map[string]any{ + "name": "String", + }, + }, map[string]any{ "name": "_in", "type": map[string]any{ @@ -1542,6 +1560,12 @@ func TestSchemaAggregateInlineArrayCreatesUsersStringCountFilter(t *testing.T) { "name": "String", }, }, + map[string]any{ + "name": "_nilike", + "type": map[string]any{ + "name": "String", + }, + }, map[string]any{ "name": "_nin", "type": map[string]any{ diff --git a/tests/integration/schema/crdt_type_test.go b/tests/integration/schema/crdt_type_test.go index 0df94edcf6..073a8e4e83 100644 --- a/tests/integration/schema/crdt_type_test.go +++ b/tests/integration/schema/crdt_type_test.go @@ -20,7 +20,7 @@ import ( ) func TestSchemaCreate_ContainsPNCounterTypeWithIntKind_NoError(t *testing.T) { - schemaVersionID := "bafkreig54q5pw7elljueepsyux4qgdspm3ozct5dqocr5b2kufpjwb2mae" + schemaVersionID := "bafkreib2rcnzkjrwabw6kx7qnncfuylugukoosilmb2dct5qylmgec7fdu" test := testUtils.TestCase{ Actions: []any{ @@ -38,15 +38,14 @@ func 
TestSchemaCreate_ContainsPNCounterTypeWithIntKind_NoError(t *testing.T) { Name: "Users", VersionID: schemaVersionID, Root: schemaVersionID, - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, }, { Name: "points", - ID: 1, - Kind: client.FieldKind_INT, + Kind: client.FieldKind_NILLABLE_INT, Typ: client.PN_COUNTER, }, }, @@ -60,7 +59,7 @@ func TestSchemaCreate_ContainsPNCounterTypeWithIntKind_NoError(t *testing.T) { } func TestSchemaCreate_ContainsPNCounterTypeWithFloatKind_NoError(t *testing.T) { - schemaVersionID := "bafkreibaeypr2i2eg3kozq3mlfsibgtolqlrcozo5ufqfb725dfq3hx43e" + schemaVersionID := "bafkreiddz4h2oqi3qzfeqfbjt3wpwrvtm62r4l6uche2nxyullmlmezrsq" test := testUtils.TestCase{ Actions: []any{ @@ -78,15 +77,14 @@ func TestSchemaCreate_ContainsPNCounterTypeWithFloatKind_NoError(t *testing.T) { Name: "Users", VersionID: schemaVersionID, Root: schemaVersionID, - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, }, { Name: "points", - ID: 1, - Kind: client.FieldKind_FLOAT, + Kind: client.FieldKind_NILLABLE_FLOAT, Typ: client.PN_COUNTER, }, }, diff --git a/tests/integration/schema/get_schema_test.go b/tests/integration/schema/get_schema_test.go index ae63d49812..f809b58627 100644 --- a/tests/integration/schema/get_schema_test.go +++ b/tests/integration/schema/get_schema_test.go @@ -71,9 +71,9 @@ func TestGetSchema_GivenNoSchemaGivenUnknownName(t *testing.T) { } func TestGetSchema_ReturnsAllSchema(t *testing.T) { - usersSchemaVersion1ID := "bafkreicavrlknsnfqey6nfwthyiguvv4dqcwhvywl5j6socx3vvjt4zqte" - usersSchemaVersion2ID := "bafkreiabmj6ypcc6alqswrscgpj6rqbhogsojgv7fopr5rgrluvxtwente" - booksSchemaVersion1ID := "bafkreiaiku34mjr2za5yo6yc4pzoupenwzjq7d5pclgfdiihdnjq33fn5y" + usersSchemaVersion1ID := "bafkreiegrxzoqa3mdgjsfz2vuatbpjbnqxub6yi23dvdumjpt4g3nhiwzq" + usersSchemaVersion2ID := "bafkreidic23paxtc5sannovwkpp6kmpg7xufufz4dgxjsiq2exk2wieh4a" + booksSchemaVersion1ID := "bafkreiakx6sdz3govsorfppdv2pru4fgjzt2qljgjhpkxnkyr7kl4vhdme" test := testUtils.TestCase{ Actions: []any{ @@ -97,11 +97,22 @@ func TestGetSchema_ReturnsAllSchema(t *testing.T) { }, testUtils.GetSchema{ ExpectedResults: []client.SchemaDescription{ + { + Name: "Books", + Root: booksSchemaVersion1ID, + VersionID: booksSchemaVersion1ID, + Fields: []client.SchemaFieldDescription{ + { + Name: "_docID", + Kind: client.FieldKind_DocID, + }, + }, + }, { Name: "Users", Root: usersSchemaVersion1ID, VersionID: usersSchemaVersion2ID, - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -109,28 +120,16 @@ func TestGetSchema_ReturnsAllSchema(t *testing.T) { }, { Name: "name", - ID: 1, - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, }, }, - { - Name: "Books", - Root: booksSchemaVersion1ID, - VersionID: booksSchemaVersion1ID, - Fields: []client.FieldDescription{ - { - Name: "_docID", - Kind: client.FieldKind_DocID, - }, - }, - }, { Name: "Users", Root: usersSchemaVersion1ID, VersionID: usersSchemaVersion1ID, - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -146,8 +145,8 @@ func TestGetSchema_ReturnsAllSchema(t *testing.T) { } func TestGetSchema_ReturnsSchemaForGivenRoot(t *testing.T) { - usersSchemaVersion1ID := 
"bafkreicavrlknsnfqey6nfwthyiguvv4dqcwhvywl5j6socx3vvjt4zqte" - usersSchemaVersion2ID := "bafkreiabmj6ypcc6alqswrscgpj6rqbhogsojgv7fopr5rgrluvxtwente" + usersSchemaVersion1ID := "bafkreiegrxzoqa3mdgjsfz2vuatbpjbnqxub6yi23dvdumjpt4g3nhiwzq" + usersSchemaVersion2ID := "bafkreidic23paxtc5sannovwkpp6kmpg7xufufz4dgxjsiq2exk2wieh4a" test := testUtils.TestCase{ Actions: []any{ @@ -176,7 +175,7 @@ func TestGetSchema_ReturnsSchemaForGivenRoot(t *testing.T) { Name: "Users", Root: usersSchemaVersion1ID, VersionID: usersSchemaVersion2ID, - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -184,8 +183,7 @@ func TestGetSchema_ReturnsSchemaForGivenRoot(t *testing.T) { }, { Name: "name", - ID: 1, - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, }, @@ -194,7 +192,7 @@ func TestGetSchema_ReturnsSchemaForGivenRoot(t *testing.T) { Name: "Users", Root: usersSchemaVersion1ID, VersionID: usersSchemaVersion1ID, - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -210,8 +208,8 @@ func TestGetSchema_ReturnsSchemaForGivenRoot(t *testing.T) { } func TestGetSchema_ReturnsSchemaForGivenName(t *testing.T) { - usersSchemaVersion1ID := "bafkreicavrlknsnfqey6nfwthyiguvv4dqcwhvywl5j6socx3vvjt4zqte" - usersSchemaVersion2ID := "bafkreiabmj6ypcc6alqswrscgpj6rqbhogsojgv7fopr5rgrluvxtwente" + usersSchemaVersion1ID := "bafkreiegrxzoqa3mdgjsfz2vuatbpjbnqxub6yi23dvdumjpt4g3nhiwzq" + usersSchemaVersion2ID := "bafkreidic23paxtc5sannovwkpp6kmpg7xufufz4dgxjsiq2exk2wieh4a" test := testUtils.TestCase{ Actions: []any{ @@ -240,7 +238,7 @@ func TestGetSchema_ReturnsSchemaForGivenName(t *testing.T) { Name: "Users", Root: usersSchemaVersion1ID, VersionID: usersSchemaVersion2ID, - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -248,8 +246,7 @@ func TestGetSchema_ReturnsSchemaForGivenName(t *testing.T) { }, { Name: "name", - ID: 1, - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, }, @@ -258,7 +255,7 @@ func TestGetSchema_ReturnsSchemaForGivenName(t *testing.T) { Name: "Users", Root: usersSchemaVersion1ID, VersionID: usersSchemaVersion1ID, - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, diff --git a/tests/integration/schema/migrations/query/simple_test.go b/tests/integration/schema/migrations/query/simple_test.go index b758356cac..c80b1386dd 100644 --- a/tests/integration/schema/migrations/query/simple_test.go +++ b/tests/integration/schema/migrations/query/simple_test.go @@ -45,8 +45,8 @@ func TestSchemaMigrationQuery(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -115,8 +115,8 @@ func TestSchemaMigrationQueryMultipleDocs(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: 
"bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -178,8 +178,8 @@ func TestSchemaMigrationQueryWithMigrationRegisteredBeforeSchemaPatch(t *testing }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -254,8 +254,8 @@ func TestSchemaMigrationQueryMigratesToIntermediaryVersion(t *testing.T) { // Register a migration from schema version 1 to schema version 2 **only** - // there should be no migration from version 2 to version 3. LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -325,8 +325,8 @@ func TestSchemaMigrationQueryMigratesFromIntermediaryVersion(t *testing.T) { // Register a migration from schema version 2 to schema version 3 **only** - // there should be no migration from version 1 to version 2. 
LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", - DestinationSchemaVersionID: "bafkreigrpkox3omi3c3sp5zoupcjg2b32mysztjozaqsceafsdtkadzufe", + SourceSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", + DestinationSchemaVersionID: "bafkreicyyn7ourjvr2o6bqa57z2bl5wz5u2ykdlmd5v7n53cw7l6xsdplm", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -394,8 +394,8 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersions(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -411,8 +411,8 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersions(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", - DestinationSchemaVersionID: "bafkreigrpkox3omi3c3sp5zoupcjg2b32mysztjozaqsceafsdtkadzufe", + SourceSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", + DestinationSchemaVersionID: "bafkreicyyn7ourjvr2o6bqa57z2bl5wz5u2ykdlmd5v7n53cw7l6xsdplm", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -448,6 +448,179 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersions(t *testing.T) { testUtils.ExecuteTestCase(t, test) } +func TestSchemaMigrationQueryMigratesAcrossMultipleVersionsBeforePatches(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, multiple migrations before patch", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", + DestinationSchemaVersionID: "bafkreicyyn7ourjvr2o6bqa57z2bl5wz5u2ykdlmd5v7n53cw7l6xsdplm", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "email", + "value": "ilovewasm@source.com", + }, + }, + }, + }, + }, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + email + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + "email": "ilovewasm@source.com", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func 
TestSchemaMigrationQueryMigratesAcrossMultipleVersionsBeforePatchesWrongOrder(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, multiple migrations before patch", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.ConfigureMigration{ + // Declare the migration from v2=>v3 before declaring the migration from v1=>v2 + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", + DestinationSchemaVersionID: "bafkreicyyn7ourjvr2o6bqa57z2bl5wz5u2ykdlmd5v7n53cw7l6xsdplm", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "email", + "value": "ilovewasm@source.com", + }, + }, + }, + }, + }, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + email + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + "email": "ilovewasm@source.com", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + // This test is important as it tests that orphan migrations do not block the fetcher(s) // from functioning. 
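// An orphan migration is one registered against schema version IDs that
// the node does not recognise; the expectation is that the fetcher simply
// skips such transforms instead of failing the whole query.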
// @@ -539,8 +712,8 @@ func TestSchemaMigrationQueryMigrationMutatesExistingScalarField(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -600,8 +773,8 @@ func TestSchemaMigrationQueryMigrationMutatesExistingInlineArrayField(t *testing }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreigjtl5r3lq6dkbod766let7ewqirc2ai6l2c5j5fxxc43zmvqqs24", - DestinationSchemaVersionID: "bafkreicwipnhoplttqy7spj2ksgk7vwmxmdtwt6g23os2kmqgvb22wfg3m", + SourceSchemaVersionID: "bafkreiasjk4ypvsmdiebxadvhdnpvq4eun6wielebzlcnipyqr357bz7ou", + DestinationSchemaVersionID: "bafkreie7zotytkhmsp7ro5dqyf75fwrafos4xowgatalicbcb3lu5lfade", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -663,8 +836,8 @@ func TestSchemaMigrationQueryMigrationRemovesExistingField(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreicnoqat3exmvikr36xu3hhrkvay3d3cif24tezgsyvrydpobk2nqm", - DestinationSchemaVersionID: "bafkreigamaevrkcknutb275x3uxpgc2sn73qsfvkjqli7fiqaxfnniunjy", + SourceSchemaVersionID: "bafkreiewca6o66mgkpbai2vtrupolvtf66wllbvouvtwo6fkc6alrybzfa", + DestinationSchemaVersionID: "bafkreibqzsrn3acwn7hkakm2ko5i4t5pdarmylvodi5tnpxunfcwmut2ua", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -724,8 +897,8 @@ func TestSchemaMigrationQueryMigrationPreservesExistingFieldWhenFieldNotRequeste }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreicnoqat3exmvikr36xu3hhrkvay3d3cif24tezgsyvrydpobk2nqm", - DestinationSchemaVersionID: "bafkreigamaevrkcknutb275x3uxpgc2sn73qsfvkjqli7fiqaxfnniunjy", + SourceSchemaVersionID: "bafkreiewca6o66mgkpbai2vtrupolvtf66wllbvouvtwo6fkc6alrybzfa", + DestinationSchemaVersionID: "bafkreibqzsrn3acwn7hkakm2ko5i4t5pdarmylvodi5tnpxunfcwmut2ua", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -798,8 +971,8 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcFieldNotRequeste }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreicnoqat3exmvikr36xu3hhrkvay3d3cif24tezgsyvrydpobk2nqm", - DestinationSchemaVersionID: "bafkreigamaevrkcknutb275x3uxpgc2sn73qsfvkjqli7fiqaxfnniunjy", + SourceSchemaVersionID: "bafkreiewca6o66mgkpbai2vtrupolvtf66wllbvouvtwo6fkc6alrybzfa", + DestinationSchemaVersionID: "bafkreicf3nvrorgv2v6czh2lkakibv4me2il5xxytqxfyof7jlmkkdkle4", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -860,8 +1033,8 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcAndDstFieldNotRe }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreicnoqat3exmvikr36xu3hhrkvay3d3cif24tezgsyvrydpobk2nqm", - DestinationSchemaVersionID: "bafkreigamaevrkcknutb275x3uxpgc2sn73qsfvkjqli7fiqaxfnniunjy", + SourceSchemaVersionID: "bafkreiewca6o66mgkpbai2vtrupolvtf66wllbvouvtwo6fkc6alrybzfa", + DestinationSchemaVersionID: "bafkreicf3nvrorgv2v6czh2lkakibv4me2il5xxytqxfyof7jlmkkdkle4", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_doc_id_test.go 
b/tests/integration/schema/migrations/query/with_doc_id_test.go index 2ce1fd8ac3..3acb7ab890 100644 --- a/tests/integration/schema/migrations/query/with_doc_id_test.go +++ b/tests/integration/schema/migrations/query/with_doc_id_test.go @@ -52,8 +52,8 @@ func TestSchemaMigrationQueryByDocID(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -158,8 +158,8 @@ func TestSchemaMigrationQueryMultipleQueriesByDocID(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_p2p_test.go b/tests/integration/schema/migrations/query/with_p2p_test.go index 4b06bf6586..2b22fba89d 100644 --- a/tests/integration/schema/migrations/query/with_p2p_test.go +++ b/tests/integration/schema/migrations/query/with_p2p_test.go @@ -46,8 +46,8 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtOlderSchemaVersion(t *testing testUtils.ConfigureMigration{ // Register the migration on both nodes. LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreibgg4ex7aya4w4x3dnrlyov4juyuffjjokzkjrpoupncfuvsyi6du", - DestinationSchemaVersionID: "bafkreidvp3xozpau2zanh7s5or4fhr7kchm6klznsyzd7fpcm3sh2xlgfm", + SourceSchemaVersionID: "bafkreiaqs2jvnjgddkkhxzhhfmrr6o4yohhqymbi55b7ltynxo4tmge4wu", + DestinationSchemaVersionID: "bafkreigc5whyvnmgqvdr6yk366ct4dddgmwnwrnbgbmu4f3edm3sfwerha", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -111,6 +111,123 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtOlderSchemaVersion(t *testing testUtils.ExecuteTestCase(t, test) } +func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchOlderSchemaVersion(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.RandomNetworkingConfig(), + testUtils.RandomNetworkingConfig(), + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + verified: Boolean + } + `, + }, + testUtils.SchemaPatch{ + // Patch node 1 only + NodeID: immutable.Some(1), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + }, + testUtils.SchemaPatch{ + // Patch node 1 only + NodeID: immutable.Some(1), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "address", "Kind": "String"} } + ] + `, + }, + testUtils.ConfigureMigration{ + // Register the migration on both nodes. 
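+ // Node 0 stays at schema version 1, so the document replicates in its
+ // original shape; node 1, patched twice above, lifts it through both
+ // lenses at query time, roughly:
+ //
+ //	v1: {"name": "John"}
+ //	v2: {"name": "John", "verified": true}
+ //	v3: {"name": "Fred", "verified": true}
+ //
+ // which is what the two Request actions at the end of this test assert.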
+ LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreiaqs2jvnjgddkkhxzhhfmrr6o4yohhqymbi55b7ltynxo4tmge4wu", + DestinationSchemaVersionID: "bafkreigc5whyvnmgqvdr6yk366ct4dddgmwnwrnbgbmu4f3edm3sfwerha", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.ConfigureMigration{ + // Register the migration on both nodes. + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreigc5whyvnmgqvdr6yk366ct4dddgmwnwrnbgbmu4f3edm3sfwerha", + DestinationSchemaVersionID: "bafkreidtw4d7bv57wmwwwxkejburwuktc2kiakkmzgiacyy5vl7gj2ih5i", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "name", + "value": "Fred", + }, + }, + }, + }, + }, + }, + testUtils.ConfigureReplicator{ + SourceNodeID: 0, + TargetNodeID: 1, + }, + testUtils.CreateDoc{ + // Create John on the first (source) node only, and allow the value to sync + NodeID: immutable.Some(0), + Doc: `{ + "name": "John" + }`, + }, + testUtils.WaitForSync{}, + testUtils.Request{ + // Node 0 should yield results as they were defined, as the newer schema version is + // unknown to this node. + NodeID: immutable.Some(0), + Request: `query { + Users { + name + } + }`, + Results: []map[string]any{ + { + "name": "John", + }, + }, + }, + testUtils.Request{ + // Node 1 should yield results migrated to the new schema version. + NodeID: immutable.Some(1), + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "Fred", + // John has been migrated up to the newer schema version on node 1 + "verified": true, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + func TestSchemaMigrationQueryWithP2PReplicatedDocAtNewerSchemaVersion(t *testing.T) { test := testUtils.TestCase{ Actions: []any{ @@ -136,8 +253,8 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtNewerSchemaVersion(t *testing testUtils.ConfigureMigration{ // Register the migration on both nodes. LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreibgg4ex7aya4w4x3dnrlyov4juyuffjjokzkjrpoupncfuvsyi6du", - DestinationSchemaVersionID: "bafkreidvp3xozpau2zanh7s5or4fhr7kchm6klznsyzd7fpcm3sh2xlgfm", + SourceSchemaVersionID: "bafkreiaqs2jvnjgddkkhxzhhfmrr6o4yohhqymbi55b7ltynxo4tmge4wu", + DestinationSchemaVersionID: "bafkreigc5whyvnmgqvdr6yk366ct4dddgmwnwrnbgbmu4f3edm3sfwerha", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -238,8 +355,8 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchNewerSchemaVersionWithSch // Register a migration from version 2 to version 3 on both nodes. // There is no migration from version 1 to 2, thus node 1 has no knowledge of schema version 2. 
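// From node 1's perspective the 2 -> 3 lens below therefore starts out as
// an orphan; the test exercises that this gap in the migration graph is
// tolerated rather than treated as an error.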
LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", - DestinationSchemaVersionID: "bafkreigrpkox3omi3c3sp5zoupcjg2b32mysztjozaqsceafsdtkadzufe", + SourceSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", + DestinationSchemaVersionID: "bafkreicyyn7ourjvr2o6bqa57z2bl5wz5u2ykdlmd5v7n53cw7l6xsdplm", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_restart_test.go b/tests/integration/schema/migrations/query/with_restart_test.go index 3b51c92ada..196b5cf57e 100644 --- a/tests/integration/schema/migrations/query/with_restart_test.go +++ b/tests/integration/schema/migrations/query/with_restart_test.go @@ -45,8 +45,8 @@ func TestSchemaMigrationQueryWithRestart(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -80,3 +80,64 @@ func TestSchemaMigrationQueryWithRestart(t *testing.T) { testUtils.ExecuteTestCase(t, test) } + +func TestSchemaMigrationQueryWithRestartAndMigrationBeforeSchemaPatch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, with migration and restart before patch", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.Restart{}, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/migrations/query/with_set_default_test.go b/tests/integration/schema/migrations/query/with_set_default_test.go index 55a3fc9968..d18f2f4092 100644 --- a/tests/integration/schema/migrations/query/with_set_default_test.go +++ b/tests/integration/schema/migrations/query/with_set_default_test.go @@ -22,7 +22,7 @@ import ( ) func TestSchemaMigrationQuery_WithSetDefaultToLatest_AppliesForwardMigration(t *testing.T) { - schemaVersionID2 := "bafkreidvp3xozpau2zanh7s5or4fhr7kchm6klznsyzd7fpcm3sh2xlgfm" + schemaVersionID2 := "bafkreigc5whyvnmgqvdr6yk366ct4dddgmwnwrnbgbmu4f3edm3sfwerha" test := testUtils.TestCase{ Description: "Test schema migration", @@ -47,25 +47,19 @@ func TestSchemaMigrationQuery_WithSetDefaultToLatest_AppliesForwardMigration(t * ] `, SetAsDefaultVersion: immutable.Some(false), - }, - testUtils.ConfigureMigration{ - LensConfig: client.LensConfig{ - SourceSchemaVersionID: 
"bafkreibgg4ex7aya4w4x3dnrlyov4juyuffjjokzkjrpoupncfuvsyi6du", - DestinationSchemaVersionID: schemaVersionID2, - Lens: model.Lens{ - Lenses: []model.LensModule{ - { - Path: lenses.SetDefaultModulePath, - Arguments: map[string]any{ - "dst": "verified", - "value": true, - }, + Lens: immutable.Some(model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, }, }, }, - }, + }), }, - testUtils.SetDefaultSchemaVersion{ + testUtils.SetActiveSchemaVersion{ SchemaVersionID: schemaVersionID2, }, testUtils.Request{ @@ -89,8 +83,8 @@ func TestSchemaMigrationQuery_WithSetDefaultToLatest_AppliesForwardMigration(t * } func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t *testing.T) { - schemaVersionID1 := "bafkreibgg4ex7aya4w4x3dnrlyov4juyuffjjokzkjrpoupncfuvsyi6du" - schemaVersionID2 := "bafkreidvp3xozpau2zanh7s5or4fhr7kchm6klznsyzd7fpcm3sh2xlgfm" + schemaVersionID1 := "bafkreiaqs2jvnjgddkkhxzhhfmrr6o4yohhqymbi55b7ltynxo4tmge4wu" + schemaVersionID2 := "bafkreigc5whyvnmgqvdr6yk366ct4dddgmwnwrnbgbmu4f3edm3sfwerha" test := testUtils.TestCase{ Description: "Test schema migration", @@ -111,7 +105,7 @@ func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t `, SetAsDefaultVersion: immutable.Some(false), }, - testUtils.SetDefaultSchemaVersion{ + testUtils.SetActiveSchemaVersion{ SchemaVersionID: schemaVersionID2, }, // Create John using the new schema version @@ -139,7 +133,7 @@ func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t }, }, // Set the schema version back to the original - testUtils.SetDefaultSchemaVersion{ + testUtils.SetActiveSchemaVersion{ SchemaVersionID: schemaVersionID1, }, testUtils.Request{ @@ -164,8 +158,8 @@ func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t } func TestSchemaMigrationQuery_WithSetDefaultToOriginalVersionThatDocWasCreatedAt_ClearsMigrations(t *testing.T) { - schemaVersionID1 := "bafkreibgg4ex7aya4w4x3dnrlyov4juyuffjjokzkjrpoupncfuvsyi6du" - schemaVersionID2 := "bafkreidvp3xozpau2zanh7s5or4fhr7kchm6klznsyzd7fpcm3sh2xlgfm" + schemaVersionID1 := "bafkreiaqs2jvnjgddkkhxzhhfmrr6o4yohhqymbi55b7ltynxo4tmge4wu" + schemaVersionID2 := "bafkreigc5whyvnmgqvdr6yk366ct4dddgmwnwrnbgbmu4f3edm3sfwerha" test := testUtils.TestCase{ Description: "Test schema migration", @@ -211,7 +205,7 @@ func TestSchemaMigrationQuery_WithSetDefaultToOriginalVersionThatDocWasCreatedAt }, }, // Set the schema version back to the original - testUtils.SetDefaultSchemaVersion{ + testUtils.SetActiveSchemaVersion{ SchemaVersionID: schemaVersionID1, }, testUtils.Request{ diff --git a/tests/integration/schema/migrations/query/with_txn_test.go b/tests/integration/schema/migrations/query/with_txn_test.go index 4bb0395365..a4cbba67f8 100644 --- a/tests/integration/schema/migrations/query/with_txn_test.go +++ b/tests/integration/schema/migrations/query/with_txn_test.go @@ -47,8 +47,8 @@ func TestSchemaMigrationQueryWithTxn(t *testing.T) { testUtils.ConfigureMigration{ TransactionID: immutable.Some(0), LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: 
[]model.LensModule{ { @@ -109,8 +109,8 @@ func TestSchemaMigrationQueryWithTxnAndCommit(t *testing.T) { testUtils.ConfigureMigration{ TransactionID: immutable.Some(0), LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_update_test.go b/tests/integration/schema/migrations/query/with_update_test.go index 1c5c8e87a9..b01c197c46 100644 --- a/tests/integration/schema/migrations/query/with_update_test.go +++ b/tests/integration/schema/migrations/query/with_update_test.go @@ -45,8 +45,8 @@ func TestSchemaMigrationQueryWithUpdateRequest(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -129,8 +129,8 @@ func TestSchemaMigrationQueryWithMigrationRegisteredAfterUpdate(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/simple_test.go b/tests/integration/schema/migrations/simple_test.go index 29769f1bac..07fa12ca53 100644 --- a/tests/integration/schema/migrations/simple_test.go +++ b/tests/integration/schema/migrations/simple_test.go @@ -14,6 +14,7 @@ import ( "testing" "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" testUtils "github.com/sourcenetwork/defradb/tests/integration" @@ -43,20 +44,34 @@ func TestSchemaMigrationDoesNotErrorGivenUnknownSchemaRoots(t *testing.T) { }, }, }, - testUtils.GetMigrations{ - ExpectedResults: []client.LensConfig{ + testUtils.GetCollections{ + FilterOptions: client.CollectionFetchOptions{ + IncludeInactive: immutable.Some(true), + }, + ExpectedResults: []client.CollectionDescription{ + { + ID: 1, + SchemaVersionID: "does not exist", + }, { - SourceSchemaVersionID: "does not exist", - DestinationSchemaVersionID: "also does not exist", - Lens: model.Lens{ - Lenses: []model.LensModule{ - { - Path: lenses.SetDefaultModulePath, - Arguments: map[string]any{ - "dst": "verified", - "value": false, + ID: 2, + SchemaVersionID: "also does not exist", + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + Transform: immutable.Some( + model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, }, - }, + ), }, }, }, @@ -91,8 
+106,8 @@ func TestSchemaMigrationGetMigrationsReturnsMultiple(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + DestinationSchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -106,35 +121,60 @@ func TestSchemaMigrationGetMigrationsReturnsMultiple(t *testing.T) { }, }, }, - testUtils.GetMigrations{ - ExpectedResults: []client.LensConfig{ + testUtils.GetCollections{ + FilterOptions: client.CollectionFetchOptions{ + IncludeInactive: immutable.Some(true), + }, + ExpectedResults: []client.CollectionDescription{ { - SourceSchemaVersionID: "does not exist", - DestinationSchemaVersionID: "also does not exist", - Lens: model.Lens{ - Lenses: []model.LensModule{ - { - Path: lenses.SetDefaultModulePath, - Arguments: map[string]any{ - "dst": "verified", - "value": false, + ID: 1, + SchemaVersionID: "does not exist", + }, + { + ID: 2, + SchemaVersionID: "also does not exist", + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + Transform: immutable.Some( + model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, }, - }, + ), }, }, }, { - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", - Lens: model.Lens{ - Lenses: []model.LensModule{ - { - Path: lenses.SetDefaultModulePath, - Arguments: map[string]any{ - "dst": "verified", - "value": true, + ID: 3, + SchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4", + }, + { + ID: 4, + SchemaVersionID: "bafkreiexwzcpjuz3eaghcanr3fnmyc6el5w6i5ovhop5zfrqctucwlraba", + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 3, + Transform: immutable.Some( + model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, }, - }, + ), }, }, }, @@ -185,20 +225,56 @@ func TestSchemaMigrationReplacesExistingMigationBasedOnSourceID(t *testing.T) { }, }, }, - testUtils.GetMigrations{ - ExpectedResults: []client.LensConfig{ + testUtils.GetCollections{ + FilterOptions: client.CollectionFetchOptions{ + IncludeInactive: immutable.Some(true), + }, + ExpectedResults: []client.CollectionDescription{ { - SourceSchemaVersionID: "a", - DestinationSchemaVersionID: "c", - Lens: model.Lens{ - Lenses: []model.LensModule{ - { - Path: lenses.SetDefaultModulePath, - Arguments: map[string]any{ - "dst": "age", - "value": 123, + ID: 1, + SchemaVersionID: "a", + }, + { + ID: 2, + SchemaVersionID: "b", + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + Transform: immutable.Some( + model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, }, - }, + ), + }, + }, + }, + { + ID: 3, + SchemaVersionID: "c", + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + Transform: immutable.Some( + model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: 
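+ // Go unmarshals JSON numbers as float64, so after the lens config
+ // round-trips through its JSON encoding the expected argument below is
+ // float64(123) rather than the int 123 the migration was declared with.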
map[string]any{ + "dst": "age", + "value": float64(123), + }, + }, + }, + }, + ), }, }, }, diff --git a/tests/integration/schema/migrations/with_txn_test.go b/tests/integration/schema/migrations/with_txn_test.go index 827f40de5e..7fe80263a7 100644 --- a/tests/integration/schema/migrations/with_txn_test.go +++ b/tests/integration/schema/migrations/with_txn_test.go @@ -43,23 +43,35 @@ func TestSchemaMigrationGetMigrationsWithTxn(t *testing.T) { }, }, }, - testUtils.GetMigrations{ + testUtils.GetCollections{ TransactionID: immutable.Some(0), - // This is the bug - although the GetMigrations call and migration are on the same transaction - // the migration is not returned in the results. - ExpectedResults: []client.LensConfig{ + FilterOptions: client.CollectionFetchOptions{ + IncludeInactive: immutable.Some(true), + }, + ExpectedResults: []client.CollectionDescription{ + { + ID: 1, + SchemaVersionID: "does not exist", + }, { - SourceSchemaVersionID: "does not exist", - DestinationSchemaVersionID: "also does not exist", - Lens: model.Lens{ - Lenses: []model.LensModule{ - { - Path: lenses.SetDefaultModulePath, - Arguments: map[string]any{ - "dst": "verified", - "value": false, + ID: 2, + SchemaVersionID: "also does not exist", + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + Transform: immutable.Some( + model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, }, - }, + ), }, }, }, diff --git a/tests/integration/schema/simple_test.go b/tests/integration/schema/simple_test.go index dccec9c4dd..b8ca9c71e7 100644 --- a/tests/integration/schema/simple_test.go +++ b/tests/integration/schema/simple_test.go @@ -20,7 +20,7 @@ import ( ) func TestSchemaSimpleCreatesSchemaGivenEmptyType(t *testing.T) { - schemaVersionID := "bafkreicavrlknsnfqey6nfwthyiguvv4dqcwhvywl5j6socx3vvjt4zqte" + schemaVersionID := "bafkreiegrxzoqa3mdgjsfz2vuatbpjbnqxub6yi23dvdumjpt4g3nhiwzq" test := testUtils.TestCase{ Actions: []any{ @@ -50,7 +50,7 @@ func TestSchemaSimpleCreatesSchemaGivenEmptyType(t *testing.T) { Name: "Users", VersionID: schemaVersionID, Root: schemaVersionID, - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, diff --git a/tests/integration/schema/updates/add/field/create_update_test.go b/tests/integration/schema/updates/add/field/create_update_test.go index 12cf973d59..0fa756891c 100644 --- a/tests/integration/schema/updates/add/field/create_update_test.go +++ b/tests/integration/schema/updates/add/field/create_update_test.go @@ -17,8 +17,8 @@ import ( ) func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoin(t *testing.T) { - initialSchemaVersionId := "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a" - updatedSchemaVersionId := "bafkreiclwd4nrvczrzy7aj52olojyzvgm4ht6jpktwpxuqej5wk3ocxpqi" + initialSchemaVersionId := "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4" + updatedSchemaVersionId := "bafkreidn4f3i52756wevi3sfpbqzijgy6v24zh565pmvtmpqr4ou52v2q4" test := testUtils.TestCase{ Description: "Test schema update, add field with update after schema update, version join", @@ -105,8 +105,8 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoi } func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndCommitQuery(t *testing.T) { - initialSchemaVersionId := 
"bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a" - updatedSchemaVersionId := "bafkreiclwd4nrvczrzy7aj52olojyzvgm4ht6jpktwpxuqej5wk3ocxpqi" + initialSchemaVersionId := "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4" + updatedSchemaVersionId := "bafkreidn4f3i52756wevi3sfpbqzijgy6v24zh565pmvtmpqr4ou52v2q4" test := testUtils.TestCase{ Description: "Test schema update, add field with update after schema update, commits query", diff --git a/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go b/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go index b0ee08bb80..95b19e1a59 100644 --- a/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go @@ -58,31 +58,7 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_InvalidSchemaJson(t *testin { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 17, "Schema": 123} } ] `, - ExpectedError: "json: cannot unmarshal number into Go struct field FieldDescription.Fields.Schema of type string", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestSchemaUpdatesAddFieldKindForeignObjectArray_MissingRelationType(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, add field with kind foreign object (17), missing relation type", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 17, "Schema": "Users"} } - ] - `, - ExpectedError: "invalid RelationType. Field: foo, Expected: 10, Actual: 0", + ExpectedError: "json: cannot unmarshal number into Go struct field SchemaFieldDescription.Fields.Schema of type string", }, }, } @@ -104,7 +80,7 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_MissingRelationName(t *test Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 17, "RelationType": 10, "Schema": "Users" + "Name": "foo", "Kind": 17, "Schema": "Users" }} ] `, @@ -130,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldMissingKind(t *testi Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id"} } ] @@ -157,7 +133,7 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldInvalidKind(t *testi Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 2} } ] @@ -169,60 +145,6 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldInvalidKind(t *testi testUtils.ExecuteTestCase(t, test) } -func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldMissingRelationType(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, add field with kind foreign object array (17), id field missing relation type", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: 
` - [ - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1} } - ] - `, - ExpectedError: "invalid RelationType. Field: foo_id, Expected: 64, Actual: 0", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldInvalidRelationType(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, add field with kind foreign object array (17), id field invalid RelationType", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 4} } - ] - `, - ExpectedError: "invalid RelationType. Field: foo_id, Expected: 64, Actual: 4", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldMissingRelationName(t *testing.T) { test := testUtils.TestCase{ Description: "Test schema update, add field with kind foreign object array (17), id field missing relation name", @@ -238,9 +160,9 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldMissingRelationName( Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 64} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1} } ] `, ExpectedError: "missing relation name. 
Field: foo_id", @@ -265,10 +187,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_OnlyHalfRelationDefined(t * Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }} ] `, @@ -294,13 +216,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_NoPrimaryDefined(t *testing Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 9, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 17, "RelationType": 10, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": 17, "Schema": "Users", "RelationName": "foo" }} ] `, @@ -326,13 +248,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_PrimaryDefinedOnManySide(t Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 9, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 17, "RelationType": 138, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": 17, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }} ] `, @@ -343,102 +265,6 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_PrimaryDefinedOnManySide(t testUtils.ExecuteTestCase(t, test) } -func TestSchemaUpdatesAddFieldKindForeignObjectArray_RelatedKindMismatch(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, add field with kind foreign object array (17), related kind mismatch", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 16, "RelationType": 10, "Schema": "Users", "RelationName": "foo" - }} - ] - `, - ExpectedError: "invalid Kind of the related field. 
RelationName: foo, Expected: 17, Actual: 16", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestSchemaUpdatesAddFieldKindForeignObjectArray_RelatedKindAndRelationTypeMismatch(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, add field with kind foreign object array (17), related kind mismatch", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 16, "RelationType": 9, "Schema": "Users", "RelationName": "foo" - }} - ] - `, - ExpectedError: "invalid Kind of the related field. RelationName: foo, Expected: 17, Actual: 16", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestSchemaUpdatesAddFieldKindForeignObjectArray_RelatedRelationTypeMismatch(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, add field with kind foreign object array (17), related relation type mismatch", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" - }} - ] - `, - ExpectedError: "invalid Kind of the related field. 
RelationName: foo, Expected: 17, Actual: 16", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - func TestSchemaUpdatesAddFieldKindForeignObjectArray_Succeeds(t *testing.T) { key1 := "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" @@ -456,13 +282,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_Succeeds(t *testing.T) { Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 17, "RelationType": 10, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": 17, "Schema": "Users", "RelationName": "foo" }} ] `, @@ -552,13 +378,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_SinglePrimaryObjectKindSubs Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", "RelationType": 137, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 17, "RelationType": 10, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": 17, "Schema": "Users", "RelationName": "foo" }} ] `, @@ -631,13 +457,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_SingleSecondaryObjectKindSu Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": "[Users]", "Schema": "Users", "RelationName": "foo" }} ] `, @@ -710,13 +536,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_ObjectKindSubstitution(t *t Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", "RelationType": 137, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": "[Users]", "Schema": "Users", "RelationName": "foo" }} ] `, @@ -789,13 +615,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_ObjectKindSubstitutionWithA Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", 
"RelationType": 137, "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "RelationName": "foo" + "Name": "foobar", "Kind": "[Users]", "RelationName": "foo" }} ] `, @@ -873,13 +699,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_PrimaryObjectKindAndSchemaM Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", "RelationType": 137, "Schema": "Dog", "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "Schema": "Dog", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": "[Users]", "Schema": "Users", "RelationName": "foo" }} ] `, @@ -912,13 +738,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_SecondaryObjectKindAndSchem Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", "RelationType": 137, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "Schema": "Dog", "RelationName": "foo" + "Name": "foobar", "Kind": "[Users]", "Schema": "Dog", "RelationName": "foo" }} ] `, @@ -946,10 +772,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_MissingPrimaryIDField(t *te Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", "RelationType": 137, "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "RelationName": "foo" + "Name": "foobar", "Kind": "[Users]", "RelationName": "foo" }} ] `, @@ -1023,10 +849,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_MissingPrimaryIDField_DoesN Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", "RelationType": 137, "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "RelationName": "foo" + "Name": "foobar", "Kind": "[Users]", "RelationName": "foo" }} ] `, diff --git a/tests/integration/schema/updates/add/field/kind/foreign_object_test.go b/tests/integration/schema/updates/add/field/kind/foreign_object_test.go index dc724d5af7..525c41d658 100644 --- a/tests/integration/schema/updates/add/field/kind/foreign_object_test.go +++ b/tests/integration/schema/updates/add/field/kind/foreign_object_test.go @@ -58,31 +58,7 @@ func 
TestSchemaUpdatesAddFieldKindForeignObject_InvalidSchemaJson(t *testing.T) { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 16, "Schema": 123} } ] `, - ExpectedError: "json: cannot unmarshal number into Go struct field FieldDescription.Fields.Schema of type string", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestSchemaUpdatesAddFieldKindForeignObject_MissingRelationType(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, add field with kind foreign object (16), missing relation type", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 16, "Schema": "Users"} } - ] - `, - ExpectedError: "invalid RelationType. Field: foo, Expected: 1 and 4 or 8, with optionally 128, Actual: 0", + ExpectedError: "json: cannot unmarshal number into Go struct field SchemaFieldDescription.Fields.Schema of type string", }, }, } @@ -104,7 +80,7 @@ func TestSchemaUpdatesAddFieldKindForeignObject_UnknownSchema(t *testing.T) { Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 5, "Schema": "Unknown" + "Name": "foo", "Kind": 16, "Schema": "Unknown" }} ] `, @@ -130,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindForeignObject_MissingRelationName(t *testing.T Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 5, "Schema": "Users" + "Name": "foo", "Kind": 16, "Schema": "Users" }} ] `, @@ -156,7 +132,7 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldMissingKind(t *testing.T) Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16,"IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id"} } ] @@ -183,7 +159,7 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldInvalidKind(t *testing.T) Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 2} } ] @@ -195,60 +171,6 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldInvalidKind(t *testing.T) testUtils.ExecuteTestCase(t, test) } -func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldMissingRelationType(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, add field with kind foreign object (16), id field missing relation type", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1} } - ] - `, - ExpectedError: "invalid RelationType. 
Field: foo_id, Expected: 64, Actual: 0", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldInvalidRelationType(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, add field with kind foreign object (16), id field invalid RelationType", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 4} } - ] - `, - ExpectedError: "invalid RelationType. Field: foo_id, Expected: 64, Actual: 4", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldMissingRelationName(t *testing.T) { test := testUtils.TestCase{ Description: "Test schema update, add field with kind foreign object (16), id field missing relation name", @@ -264,9 +186,9 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldMissingRelationName(t *te Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 64} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1} } ] `, ExpectedError: "missing relation name. Field: foo_id", @@ -291,10 +213,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_OnlyHalfRelationDefined(t *testi Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }} ] `, @@ -320,13 +242,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_NoPrimaryDefined(t *testing.T) { Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": 16, "Schema": "Users", "RelationName": "foo" }} ] `, @@ -352,16 +274,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_BothSidesPrimary(t *testing.T) { Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": 
"/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar_id", "Kind": 1, "RelationType": 64, "Schema": "Users", "RelationName": "foo" + "Name": "foobar_id", "Kind": 1, "Schema": "Users", "RelationName": "foo" }} ] `, @@ -372,70 +294,6 @@ func TestSchemaUpdatesAddFieldKindForeignObject_BothSidesPrimary(t *testing.T) { testUtils.ExecuteTestCase(t, test) } -func TestSchemaUpdatesAddFieldKindForeignObject_RelatedKindMismatch(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, add field with kind foreign object (16), related kind mismatch", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 17, "RelationType": 5, "Schema": "Users", "RelationName": "foo" - }} - ] - `, - ExpectedError: "invalid Kind of the related field. RelationName: foo, Expected: 16, Actual: 17", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestSchemaUpdatesAddFieldKindForeignObject_RelatedRelationTypeMismatch(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, add field with kind foreign object (16), related relation type mismatch", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" - }}, - { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 16, "RelationType": 9, "Schema": "Users", "RelationName": "foo" - }} - ] - `, - ExpectedError: "invalid RelationType of the related field. 
RelationName: foo, Expected: 4, Actual: 9", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - func TestSchemaUpdatesAddFieldKindForeignObject_Succeeds(t *testing.T) { key1 := "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" @@ -453,16 +311,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_Succeeds(t *testing.T) { Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": 16, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foobar_id", "Kind": 1, "RelationName": "foo" }} ] `, @@ -550,16 +408,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_SinglePrimaryObjectKindSubstitut Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", "RelationType": 133, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": 16, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foobar_id", "Kind": 1, "RelationName": "foo" }} ] `, @@ -630,16 +488,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_SingleSecondaryObjectKindSubstit Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": 16, "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "Users", "RelationType": 5, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": "Users", "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foobar_id", "Kind": 1, "RelationName": "foo" }} ] `, @@ -710,16 +568,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_ObjectKindSubstitution(t *testin Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", "RelationType": 133, "Schema": "Users", "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", 
"Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "Users", "RelationType": 5, "Schema": "Users", "RelationName": "foo" + "Name": "foobar", "Kind": "Users", "Schema": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foobar_id", "Kind": 1, "RelationName": "foo" }} ] `, @@ -790,16 +648,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_ObjectKindSubstitutionWithAutoSc Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", "RelationType": 133, "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "Users", "RelationType": 5, "RelationName": "foo" + "Name": "foobar", "Kind": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foobar_id", "Kind": 1, "RelationName": "foo" }} ] `, @@ -875,16 +733,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_ObjectKindAndSchemaMismatch(t *t Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", "RelationType": 133, "Schema": "Dog", "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "Schema": "Dog", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "Users", "RelationType": 5, "RelationName": "foo" + "Name": "foobar", "Kind": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foobar_id", "Kind": 1, "RelationName": "foo" }} ] `, @@ -912,13 +770,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_MissingPrimaryIDField(t *testing Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", "RelationType": 133, "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "Users", "RelationType": 5, "RelationName": "foo" + "Name": "foobar", "Kind": "Users", "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + "Name": "foobar_id", "Kind": 1, "RelationName": "foo" }} ] `, @@ -990,13 +848,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_MissingSecondaryIDField(t *testi Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo", "Kind": "Users", "RelationType": 133, "RelationName": "foo" + "Name": "foo", "Kind": "Users", "IsPrimaryRelation": true, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" + 
"Name": "foo_id", "Kind": 1, "RelationName": "foo" }}, { "op": "add", "path": "/Users/Fields/-", "value": { - "Name": "foobar", "Kind": "Users", "RelationType": 5, "RelationName": "foo" + "Name": "foobar", "Kind": "Users", "RelationName": "foo" }} ] `, diff --git a/tests/integration/schema/updates/add/field/kind/invalid_test.go b/tests/integration/schema/updates/add/field/kind/invalid_test.go index 98f026ecc2..b9c6dbbf31 100644 --- a/tests/integration/schema/updates/add/field/kind/invalid_test.go +++ b/tests/integration/schema/updates/add/field/kind/invalid_test.go @@ -64,30 +64,6 @@ func TestSchemaUpdatesAddFieldKind9(t *testing.T) { testUtils.ExecuteTestCase(t, test) } -func TestSchemaUpdatesAddFieldKind14(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, add field with kind deprecated (14)", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 14} } - ] - `, - ExpectedError: "no type found for given name. Type: 14", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - func TestSchemaUpdatesAddFieldKind15(t *testing.T) { test := testUtils.TestCase{ Description: "Test schema update, add field with kind deprecated (15)", diff --git a/tests/integration/schema/updates/add/field/kind/json_test.go b/tests/integration/schema/updates/add/field/kind/json_test.go new file mode 100644 index 0000000000..37e2886a58 --- /dev/null +++ b/tests/integration/schema/updates/add/field/kind/json_test.go @@ -0,0 +1,137 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package kind + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestSchemaUpdatesAddFieldKindJSON(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema update, add field with kind json (14)", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 14} } + ] + `, + }, + testUtils.Request{ + Request: `query { + Users { + name + foo + } + }`, + Results: []map[string]any{}, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaUpdatesAddFieldKindJSONWithCreate(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema update, add field with kind json (14) with create", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 14} } + ] + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "John", + "foo": "{}" + }`, + }, + testUtils.Request{ + Request: `query { + Users { + name + foo + } + }`, + Results: []map[string]any{ + { + "name": "John", + "foo": "{}", + }, + }, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaUpdatesAddFieldKindJSONSubstitutionWithCreate(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema update, add field with kind json substitution with create", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "JSON"} } + ] + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "John", + "foo": "{}" + }`, + }, + testUtils.Request{ + Request: `query { + Users { + name + foo + } + }`, + Results: []map[string]any{ + { + "name": "John", + "foo": "{}", + }, + }, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/updates/add/field/simple_test.go b/tests/integration/schema/updates/add/field/simple_test.go index 04bafb2694..c505668325 100644 --- a/tests/integration/schema/updates/add/field/simple_test.go +++ b/tests/integration/schema/updates/add/field/simple_test.go @@ -20,8 +20,8 @@ import ( ) func TestSchemaUpdatesAddFieldSimple(t *testing.T) { - schemaVersion1ID := "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a" - schemaVersion2ID := "bafkreiclwd4nrvczrzy7aj52olojyzvgm4ht6jpktwpxuqej5wk3ocxpqi" + schemaVersion1ID := "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4" + schemaVersion2ID := "bafkreidn4f3i52756wevi3sfpbqzijgy6v24zh565pmvtmpqr4ou52v2q4" test := testUtils.TestCase{ Description: "Test schema update, add field", @@ -56,7 +56,7 @@ func TestSchemaUpdatesAddFieldSimple(t *testing.T) { Name: "Users", VersionID: schemaVersion2ID, Root: schemaVersion1ID, - Fields: []client.FieldDescription{ + Fields: []client.SchemaFieldDescription{ { Name: "_docID", Kind: client.FieldKind_DocID, @@ -64,14 +64,12 @@ func TestSchemaUpdatesAddFieldSimple(t *testing.T) { }, { Name: "name", - ID: 1, - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, { Name: "email", - ID: 2, - Kind: client.FieldKind_STRING, + Kind: client.FieldKind_NILLABLE_STRING, Typ: client.LWW_REGISTER, }, }, @@ -117,8 +115,8 @@ func 
TestSchemaUpdates_AddFieldSimpleDoNotSetDefault_Errors(t *testing.T) {
 }
 
 func TestSchemaUpdates_AddFieldSimpleDoNotSetDefault_VersionIsQueryable(t *testing.T) {
-	schemaVersion1ID := "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a"
-	schemaVersion2ID := "bafkreiclwd4nrvczrzy7aj52olojyzvgm4ht6jpktwpxuqej5wk3ocxpqi"
+	schemaVersion1ID := "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4"
+	schemaVersion2ID := "bafkreidn4f3i52756wevi3sfpbqzijgy6v24zh565pmvtmpqr4ou52v2q4"
 
 	test := testUtils.TestCase{
 		Description: "Test schema update, add field",
@@ -147,7 +145,7 @@ func TestSchemaUpdates_AddFieldSimpleDoNotSetDefault_VersionIsQueryable(t *testi
 				// fetch it.
 				VersionID: schemaVersion2ID,
 				Root: schemaVersion1ID,
-				Fields: []client.FieldDescription{
+				Fields: []client.SchemaFieldDescription{
 					{
 						Name: "_docID",
 						Kind: client.FieldKind_DocID,
@@ -155,14 +153,12 @@ func TestSchemaUpdates_AddFieldSimpleDoNotSetDefault_VersionIsQueryable(t *testi
 					},
 					{
 						Name: "name",
-						ID: 1,
-						Kind: client.FieldKind_STRING,
+						Kind: client.FieldKind_NILLABLE_STRING,
 						Typ: client.LWW_REGISTER,
 					},
 					{
 						Name: "email",
-						ID: 2,
-						Kind: client.FieldKind_STRING,
+						Kind: client.FieldKind_NILLABLE_STRING,
 						Typ: client.LWW_REGISTER,
 					},
 				},
@@ -397,27 +393,3 @@ func TestSchemaUpdatesAddFieldSimpleDuplicateField(t *testing.T) {
 	}
 	testUtils.ExecuteTestCase(t, test)
 }
-
-func TestSchemaUpdatesAddFieldWithExplicitIDErrors(t *testing.T) {
-	test := testUtils.TestCase{
-		Description: "Test schema update, add field that already exists",
-		Actions: []any{
-			testUtils.SchemaUpdate{
-				Schema: `
-					type Users {
-						name: String
-					}
-				`,
-			},
-			testUtils.SchemaPatch{
-				Patch: `
-					[
-						{ "op": "add", "path": "/Users/Fields/-", "value": {"ID": 2, "Name": "email", "Kind": 11} }
-					]
-				`,
-				ExpectedError: "explicitly setting a field ID value is not supported. Field: email, ID: 2",
-			},
-		},
-	}
-	testUtils.ExecuteTestCase(t, test)
-}
diff --git a/tests/integration/schema/updates/copy/field/simple_test.go b/tests/integration/schema/updates/copy/field/simple_test.go
index f10569dabd..5721a9fb8b 100644
--- a/tests/integration/schema/updates/copy/field/simple_test.go
+++ b/tests/integration/schema/updates/copy/field/simple_test.go
@@ -50,9 +50,9 @@ func TestSchemaUpdatesCopyFieldErrors(t *testing.T) {
 	testUtils.ExecuteTestCase(t, test)
 }
 
-func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceName(t *testing.T) {
+func TestSchemaUpdatesCopyFieldAndReplaceName(t *testing.T) {
 	test := testUtils.TestCase{
-		Description: "Test schema update, copy field, rename and remove IDs",
+		Description: "Test schema update, copy field and rename",
 		Actions: []any{
 			testUtils.SchemaUpdate{
 				Schema: `
@@ -63,12 +63,11 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceName(t *testing.T) {
 				`,
 			},
 			testUtils.SchemaPatch{
-				// Here we esentially use Email as a template, copying it, clearing the ID, and renaming the
+				// Here we essentially use Email as a template, copying it and renaming the
 				// clone.
 				Patch: `
 					[
 						{ "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/3" },
-						{ "op": "remove", "path": "/Users/Fields/3/ID" },
 						{ "op": "replace", "path": "/Users/Fields/3/Name", "value": "fax" }
 					]
 				`,
@@ -89,9 +88,9 @@
 }
 
 // This is an odd test, but still a possibility and we should still cover it.
-func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndKindSubstitution(t *testing.T) {
+func TestSchemaUpdatesCopyFieldWithReplaceNameAndKindSubstitution(t *testing.T) {
 	test := testUtils.TestCase{
-		Description: "Test schema update, copy field, rename, re-type, and remove IDs",
+		Description: "Test schema update, copy field, rename, re-type",
 		Actions: []any{
 			testUtils.SchemaUpdate{
 				Schema: `
@@ -101,12 +100,11 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndKindSubstitution(t *
 				`,
 			},
 			testUtils.SchemaPatch{
-				// Here we esentially use Name as a template, copying it, clearing the ID, and renaming and
+				// Here we essentially use Name as a template, copying it, and renaming and
 				// re-typing the clone.
 				Patch: `
 					[
 						{ "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/2" },
-						{ "op": "remove", "path": "/Users/Fields/2/ID" },
 						{ "op": "replace", "path": "/Users/Fields/2/Name", "value": "age" },
 						{ "op": "replace", "path": "/Users/Fields/2/Kind", "value": "Int" }
 					]
@@ -140,9 +138,9 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndKindSubstitution(t *
 }
 
 // This is an odd test, but still a possibility and we should still cover it.
-func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndInvalidKindSubstitution(t *testing.T) {
+func TestSchemaUpdatesCopyFieldAndReplaceNameAndInvalidKindSubstitution(t *testing.T) {
 	test := testUtils.TestCase{
-		Description: "Test schema update, copy field, rename, re-type to invalid, and remove ID",
+		Description: "Test schema update, copy field, rename, re-type to invalid",
 		Actions: []any{
 			testUtils.SchemaUpdate{
 				Schema: `
@@ -152,12 +150,11 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndInvalidKindSubstitut
 				`,
 			},
 			testUtils.SchemaPatch{
-				// Here we esentially use Name as a template, copying it, clearing the ID, and renaming and
+				// Here we essentially use Name as a template, copying it and renaming and
 				// re-typing the clone.
 				Patch: `
 					[
 						{ "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Fields/2" },
-						{ "op": "remove", "path": "/Users/Fields/2/ID" },
 						{ "op": "replace", "path": "/Users/Fields/2/Name", "value": "Age" },
 						{ "op": "replace", "path": "/Users/Fields/2/Kind", "value": "NotAValidKind" }
 					]
diff --git a/tests/integration/schema/updates/copy/field/with_introspection_test.go b/tests/integration/schema/updates/copy/field/with_introspection_test.go
index 2106d22b1b..0cda7e1425 100644
--- a/tests/integration/schema/updates/copy/field/with_introspection_test.go
+++ b/tests/integration/schema/updates/copy/field/with_introspection_test.go
@@ -32,7 +32,6 @@ func TestSchemaUpdatesCopyFieldIntrospectionWithRemoveIDAndReplaceName(t *testin
 			Patch: `
 				[
 					{ "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/2" },
-					{ "op": "remove", "path": "/Users/Fields/2/ID" },
 					{ "op": "replace", "path": "/Users/Fields/2/Name", "value": "fax" }
 				]
 			`,
diff --git a/tests/integration/schema/updates/copy/simple_test.go b/tests/integration/schema/updates/copy/simple_test.go
index 96fc3a0025..206cd49b52 100644
--- a/tests/integration/schema/updates/copy/simple_test.go
+++ b/tests/integration/schema/updates/copy/simple_test.go
@@ -28,14 +28,13 @@ func TestSchemaUpdatesCopyCollectionWithRemoveIDAndReplaceName(t *testing.T) {
 				`,
 			},
 			testUtils.SchemaPatch{
-				// Here we esentially use Users as a template, copying it, clearing the IDs, and renaming the
+				// Here we essentially use Users as a template, copying it and renaming the
 				// clone. It is deliberately blocked for now, but should function at somepoint.
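+				// (The Root and VersionID entries are removed below because they still identify
+				// the Users schema version that the copy was taken from.)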
Patch: ` [ { "op": "copy", "from": "/Users", "path": "/Book" }, { "op": "remove", "path": "/Book/Root" }, { "op": "remove", "path": "/Book/VersionID" }, - { "op": "remove", "path": "/Book/Fields/1/ID" }, { "op": "replace", "path": "/Book/Name", "value": "Book" } ] `, diff --git a/tests/integration/schema/updates/move/simple_test.go b/tests/integration/schema/updates/move/simple_test.go index e16226c1cf..94ecfcf1bb 100644 --- a/tests/integration/schema/updates/move/simple_test.go +++ b/tests/integration/schema/updates/move/simple_test.go @@ -17,7 +17,7 @@ import ( ) func TestSchemaUpdatesMoveCollectionDoesNothing(t *testing.T) { - schemaVersionID := "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a" + schemaVersionID := "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4" test := testUtils.TestCase{ Description: "Test schema update, move collection", diff --git a/tests/integration/schema/updates/remove/fields/simple_test.go b/tests/integration/schema/updates/remove/fields/simple_test.go index 515a8736e5..ef2ed6f6db 100644 --- a/tests/integration/schema/updates/remove/fields/simple_test.go +++ b/tests/integration/schema/updates/remove/fields/simple_test.go @@ -84,32 +84,7 @@ func TestSchemaUpdatesRemoveFieldNameErrors(t *testing.T) { { "op": "remove", "path": "/Users/Fields/2/Name" } ] `, - ExpectedError: "mutating an existing field is not supported. ID: 2, ProposedName: ", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestSchemaUpdatesRemoveFieldIDErrors(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, remove field id", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - email: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "remove", "path": "/Users/Fields/2/ID" } - ] - `, - ExpectedError: "deleting an existing field is not supported. Name: name, ID: 2", + ExpectedError: "deleting an existing field is not supported. Name: name", }, }, } @@ -134,7 +109,7 @@ func TestSchemaUpdatesRemoveFieldKindErrors(t *testing.T) { { "op": "remove", "path": "/Users/Fields/2/Kind" } ] `, - ExpectedError: "mutating an existing field is not supported. ID: 2, ProposedName: ", + ExpectedError: "mutating an existing field is not supported. ProposedName: ", }, }, } @@ -159,7 +134,7 @@ func TestSchemaUpdatesRemoveFieldTypErrors(t *testing.T) { { "op": "remove", "path": "/Users/Fields/2/Typ" } ] `, - ExpectedError: "mutating an existing field is not supported. ID: 2, ProposedName: ", + ExpectedError: "mutating an existing field is not supported. ProposedName: name", }, }, } @@ -178,7 +153,7 @@ func TestSchemaUpdatesRemoveFieldSchemaErrors(t *testing.T) { } type Book { name: String - author: [Author] + author: Author } `, }, @@ -188,7 +163,7 @@ func TestSchemaUpdatesRemoveFieldSchemaErrors(t *testing.T) { { "op": "remove", "path": "/Author/Fields/1/Schema" } ] `, - ExpectedError: "mutating an existing field is not supported. ID: 1, ProposedName: book", + ExpectedError: "mutating an existing field is not supported. ProposedName: book", }, }, } @@ -207,7 +182,7 @@ func TestSchemaUpdatesRemoveFieldRelationNameErrors(t *testing.T) { } type Book { name: String - author: [Author] + author: Author } `, }, @@ -217,36 +192,7 @@ func TestSchemaUpdatesRemoveFieldRelationNameErrors(t *testing.T) { { "op": "remove", "path": "/Author/Fields/1/RelationName" } ] `, - ExpectedError: "mutating an existing field is not supported. 
ID: 1, ProposedName: book", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestSchemaUpdatesRemoveFieldRelationTypeErrors(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, remove field RelationType", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Author { - name: String - book: [Book] - } - type Book { - name: String - author: [Author] - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "remove", "path": "/Author/Fields/1/RelationType" } - ] - `, - ExpectedError: "mutating an existing field is not supported. ID: 1, ProposedName: book", + ExpectedError: "mutating an existing field is not supported. ProposedName: book", }, }, } diff --git a/tests/integration/schema/updates/replace/field/simple_test.go b/tests/integration/schema/updates/replace/field/simple_test.go index 057b8fe9b7..f7596c7c96 100644 --- a/tests/integration/schema/updates/replace/field/simple_test.go +++ b/tests/integration/schema/updates/replace/field/simple_test.go @@ -34,7 +34,7 @@ func TestSchemaUpdatesReplaceFieldErrors(t *testing.T) { { "op": "replace", "path": "/Users/Fields/2", "value": {"Name": "Fax", "Kind": 11} } ] `, - ExpectedError: "deleting an existing field is not supported. Name: name, ID: 2", + ExpectedError: "deleting an existing field is not supported. Name: name", }, }, } @@ -56,10 +56,10 @@ func TestSchemaUpdatesReplaceFieldWithIDErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "replace", "path": "/Users/Fields/2", "value": {"ID":2, "Name": "fax", "Kind": 11} } + { "op": "replace", "path": "/Users/Fields/2", "value": {"Name": "fax", "Kind": 11} } ] `, - ExpectedError: "mutating an existing field is not supported. ID: 2, ProposedName: fax", + ExpectedError: "deleting an existing field is not supported. Name: name", }, }, } diff --git a/tests/integration/schema/updates/test/field/simple_test.go b/tests/integration/schema/updates/test/field/simple_test.go index 414a472149..afde980f97 100644 --- a/tests/integration/schema/updates/test/field/simple_test.go +++ b/tests/integration/schema/updates/test/field/simple_test.go @@ -102,7 +102,7 @@ func TestSchemaUpdatesTestFieldPasses(t *testing.T) { Patch: ` [ { "op": "test", "path": "/Users/Fields/1", "value": { - "ID":1, "Name": "name", "Kind": 11, "Schema":"","RelationName":"","Typ":1,"RelationType":0 + "Name": "name", "Kind": 11, "Schema":"", "IsPrimaryRelation":false, "RelationName":"", "Typ":1 } } ] `, @@ -127,7 +127,7 @@ func TestSchemaUpdatesTestFieldPasses_UsingFieldNameAsIndex(t *testing.T) { Patch: ` [ { "op": "test", "path": "/Users/Fields/name", "value": { - "ID":1, "Kind": 11, "Schema":"","RelationName":"","Typ":1,"RelationType":0 + "Kind": 11, "Schema":"", "IsPrimaryRelation":false, "RelationName":"", "Typ":1 } } ] `, diff --git a/tests/integration/schema/updates/with_schema_branch_test.go b/tests/integration/schema/updates/with_schema_branch_test.go new file mode 100644 index 0000000000..e6e6e6e850 --- /dev/null +++ b/tests/integration/schema/updates/with_schema_branch_test.go @@ -0,0 +1,585 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package updates + +import ( + "testing" + + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestSchemaUpdates_WithBranchingSchema(t *testing.T) { + schemaVersion1ID := "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4" + schemaVersion2ID := "bafkreidn4f3i52756wevi3sfpbqzijgy6v24zh565pmvtmpqr4ou52v2q4" + schemaVersion3ID := "bafkreieilqyv4bydakul5tbikpysmzwhzvxdau4twcny5n46zvxhkv7oli" + + test := testUtils.TestCase{ + Description: "Test schema update, with branching schema", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + // The second schema version will not be set as the active version, leaving the initial version active + SetAsDefaultVersion: immutable.Some(false), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + }, + testUtils.SchemaPatch{ + // The third schema version will be set as the active version, going from version 1 to 3 + SetAsDefaultVersion: immutable.Some(true), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "phone", "Kind": 11} } + ] + `, + }, + testUtils.Request{ + Request: `query { + Users { + name + email + } + }`, + // The email field is not queriable + ExpectedError: `Cannot query field "email" on type "Users".`, + }, + testUtils.GetSchema{ + // The second schema version is present in the system, with the email field + VersionID: immutable.Some(schemaVersion2ID), + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + VersionID: schemaVersion2ID, + Root: schemaVersion1ID, + Fields: []client.SchemaFieldDescription{ + { + Name: "_docID", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, + }, + { + Name: "name", + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: "email", + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + }, + }, + }, + }, + testUtils.Request{ + // The phone field is queriable + Request: `query { + Users { + name + phone + } + }`, + Results: []map[string]any{}, + }, + testUtils.GetSchema{ + // The third schema version is present in the system, with the phone field + VersionID: immutable.Some(schemaVersion3ID), + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + VersionID: schemaVersion3ID, + Root: schemaVersion1ID, + Fields: []client.SchemaFieldDescription{ + { + Name: "_docID", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, + }, + { + Name: "name", + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: "phone", + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + }, + }, + }, + }, + testUtils.GetCollections{ + FilterOptions: client.CollectionFetchOptions{ + IncludeInactive: immutable.Some(true), + }, + ExpectedResults: []client.CollectionDescription{ + { + // The original collection version is present, it has no source and is inactive (has no name). + ID: 1, + SchemaVersionID: schemaVersion1ID, + }, + { + // The collection version for schema version 2 is present, it has the first collection as a source + // and is inactive. + ID: 2, + SchemaVersionID: schemaVersion2ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + { + // The collection version for schema version 3 is present and is active, it also has the first collection + // as source. 
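+					// Note: both branch versions record SourceCollectionID 1, i.e. versions 2 and 3
+					// were each patched directly from the original collection, not from each other.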
+ ID: 3, + Name: immutable.Some("Users"), + SchemaVersionID: schemaVersion3ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + }, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaUpdates_WithPatchOnBranchedSchema(t *testing.T) { + schemaVersion1ID := "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4" + schemaVersion2ID := "bafkreidn4f3i52756wevi3sfpbqzijgy6v24zh565pmvtmpqr4ou52v2q4" + schemaVersion3ID := "bafkreieilqyv4bydakul5tbikpysmzwhzvxdau4twcny5n46zvxhkv7oli" + schemaVersion4ID := "bafkreicy4llechrh44zwviafs2ptjnr7sloiajjvpp7buaknhwspfevnt4" + + test := testUtils.TestCase{ + Description: "Test schema update, with patch on branching schema", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + // The second schema version will not be set as the active version, leaving the initial version active + SetAsDefaultVersion: immutable.Some(false), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + }, + testUtils.SchemaPatch{ + // The third schema version will be set as the active version, going from version 1 to 3 + SetAsDefaultVersion: immutable.Some(true), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "phone", "Kind": 11} } + ] + `, + }, + testUtils.SchemaPatch{ + // The fourth schema version will be set as the active version, going from version 3 to 4 + SetAsDefaultVersion: immutable.Some(true), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "discordName", "Kind": 11} } + ] + `, + }, + testUtils.Request{ + // The phone and discordName fields are queriable + Request: `query { + Users { + name + phone + discordName + } + }`, + Results: []map[string]any{}, + }, + testUtils.GetSchema{ + // The fourth schema version is present in the system, with the phone and discordName field + VersionID: immutable.Some(schemaVersion4ID), + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + VersionID: schemaVersion4ID, + Root: schemaVersion1ID, + Fields: []client.SchemaFieldDescription{ + { + Name: "_docID", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, + }, + { + Name: "name", + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: "phone", + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: "discordName", + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + }, + }, + }, + }, + testUtils.GetCollections{ + FilterOptions: client.CollectionFetchOptions{ + IncludeInactive: immutable.Some(true), + }, + ExpectedResults: []client.CollectionDescription{ + { + // The original collection version is present, it has no source and is inactive (has no name). + ID: 1, + SchemaVersionID: schemaVersion1ID, + }, + { + // The collection version for schema version 2 is present, it has the first collection as a source + // and is inactive. + ID: 2, + SchemaVersionID: schemaVersion2ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + { + // The collection version for schema version 3 is present and inactive, it has the first collection + // as source. + ID: 3, + SchemaVersionID: schemaVersion3ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + { + // The collection version for schema version 4 is present and is active, it also has the third collection + // as source. 
+ ID: 4, + Name: immutable.Some("Users"), + SchemaVersionID: schemaVersion4ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 3, + }, + }, + }, + }, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaUpdates_WithBranchingSchemaAndSetActiveSchemaToOtherBranch(t *testing.T) { + schemaVersion1ID := "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4" + schemaVersion2ID := "bafkreidn4f3i52756wevi3sfpbqzijgy6v24zh565pmvtmpqr4ou52v2q4" + schemaVersion3ID := "bafkreieilqyv4bydakul5tbikpysmzwhzvxdau4twcny5n46zvxhkv7oli" + + test := testUtils.TestCase{ + Description: "Test schema update, with branching schema toggling between branches", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + // The second schema version will not be set as the active version, leaving the initial version active + SetAsDefaultVersion: immutable.Some(false), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + }, + testUtils.SchemaPatch{ + // The third schema version will be set as the active version, going from version 1 to 3 + SetAsDefaultVersion: immutable.Some(true), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "phone", "Kind": 11} } + ] + `, + }, + testUtils.SetActiveSchemaVersion{ + // Set the second schema version to be active + SchemaVersionID: schemaVersion2ID, + }, + testUtils.Request{ + Request: `query { + Users { + name + email + } + }`, + // The email field is queriable + Results: []map[string]any{}, + }, + testUtils.Request{ + Request: `query { + Users { + name + phone + } + }`, + // The phone field is not queriable + ExpectedError: `Cannot query field "phone" on type "Users".`, + }, + testUtils.GetCollections{ + FilterOptions: client.CollectionFetchOptions{ + IncludeInactive: immutable.Some(true), + }, + ExpectedResults: []client.CollectionDescription{ + { + // The original collection version is present, it has no source and is inactive (has no name). + ID: 1, + SchemaVersionID: schemaVersion1ID, + }, + { + // The collection version for schema version 2 is present and is active, it has the first collection as a source + ID: 2, + Name: immutable.Some("Users"), + SchemaVersionID: schemaVersion2ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + { + // The collection version for schema version 3 is present and is inactive, it also has the first collection + // as source. 
+						ID:              3,
+						SchemaVersionID: schemaVersion3ID,
+						Sources: []any{
+							&client.CollectionSource{
+								SourceCollectionID: 1,
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestSchemaUpdates_WithBranchingSchemaAndSetActiveSchemaToOtherBranchThenPatch(t *testing.T) {
+	schemaVersion1ID := "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4"
+	schemaVersion2ID := "bafkreidn4f3i52756wevi3sfpbqzijgy6v24zh565pmvtmpqr4ou52v2q4"
+	schemaVersion3ID := "bafkreieilqyv4bydakul5tbikpysmzwhzvxdau4twcny5n46zvxhkv7oli"
+	schemaVersion4ID := "bafkreict4nqhcurfkjskxlek3djpep2acwlfkztughoum4dsvuwigkfqzi"
+
+	test := testUtils.TestCase{
+		Description: "Test schema update, with branching schema toggling between branches then patch",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type Users {
+						name: String
+					}
+				`,
+			},
+			testUtils.SchemaPatch{
+				// The second schema version will not be set as the active version, leaving the initial version active
+				SetAsDefaultVersion: immutable.Some(false),
+				Patch: `
+					[
+						{ "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} }
+					]
+				`,
+			},
+			testUtils.SchemaPatch{
+				// The third schema version will be set as the active version, going from version 1 to 3
+				SetAsDefaultVersion: immutable.Some(true),
+				Patch: `
+					[
+						{ "op": "add", "path": "/Users/Fields/-", "value": {"Name": "phone", "Kind": 11} }
+					]
+				`,
+			},
+			testUtils.SetActiveSchemaVersion{
+				// Set the second schema version to be active
+				SchemaVersionID: schemaVersion2ID,
+			},
+			testUtils.SchemaPatch{
+				// The fourth schema version will be set as the active version, going from version 2 to 4
+				SetAsDefaultVersion: immutable.Some(true),
+				Patch: `
+					[
+						{ "op": "add", "path": "/Users/Fields/-", "value": {"Name": "discordName", "Kind": 11} }
+					]
+				`,
+			},
+			testUtils.Request{
+				// The email and discordName fields are queriable
+				Request: `query {
+					Users {
+						name
+						email
+						discordName
+					}
+				}`,
+				Results: []map[string]any{},
+			},
+			testUtils.GetSchema{
+				// The fourth schema version is present in the system, with the email and discordName fields
+				VersionID: immutable.Some(schemaVersion4ID),
+				ExpectedResults: []client.SchemaDescription{
+					{
+						Name:      "Users",
+						VersionID: schemaVersion4ID,
+						Root:      schemaVersion1ID,
+						Fields: []client.SchemaFieldDescription{
+							{
+								Name: "_docID",
+								Kind: client.FieldKind_DocID,
+								Typ:  client.LWW_REGISTER,
+							},
+							{
+								Name: "name",
+								Kind: client.FieldKind_NILLABLE_STRING,
+								Typ:  client.LWW_REGISTER,
+							},
+							{
+								Name: "email",
+								Kind: client.FieldKind_NILLABLE_STRING,
+								Typ:  client.LWW_REGISTER,
+							},
+							{
+								Name: "discordName",
+								Kind: client.FieldKind_NILLABLE_STRING,
+								Typ:  client.LWW_REGISTER,
+							},
+						},
+					},
+				},
+			},
+			testUtils.GetCollections{
+				FilterOptions: client.CollectionFetchOptions{
+					IncludeInactive: immutable.Some(true),
+				},
+				ExpectedResults: []client.CollectionDescription{
+					{
+						// The original collection version is present, it has no source and is inactive (has no name).
+						ID:              1,
+						SchemaVersionID: schemaVersion1ID,
+					},
+					{
+						// The collection version for schema version 2 is present, it has the first collection as a source
+						// and is inactive.
+						ID:              2,
+						SchemaVersionID: schemaVersion2ID,
+						Sources: []any{
+							&client.CollectionSource{
+								SourceCollectionID: 1,
+							},
+						},
+					},
+					{
+						// The collection version for schema version 3 is present and inactive, it has the first collection
+						// as source.
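+						// Versions 2 and 3 are sibling branches of version 1; version 4 (below)
+						// extends the branch that was re-activated before the final patch.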
+						ID:              3,
+						SchemaVersionID: schemaVersion3ID,
+						Sources: []any{
+							&client.CollectionSource{
+								SourceCollectionID: 1,
+							},
+						},
+					},
+					{
+						// The collection version for schema version 4 is present and is active, it also has the second collection
+						// as source.
+						ID:              4,
+						Name:            immutable.Some("Users"),
+						SchemaVersionID: schemaVersion4ID,
+						Sources: []any{
+							&client.CollectionSource{
+								SourceCollectionID: 2,
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestSchemaUpdates_WithBranchingSchemaAndGetCollectionAtVersion(t *testing.T) {
+	schemaVersion1ID := "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4"
+
+	test := testUtils.TestCase{
+		Description: `Test schema update, with branching schema, getting the
+collection at a specific version`,
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type Users {
+						name: String
+					}
+				`,
+			},
+			testUtils.SchemaPatch{
+				// The second schema version will be set as the active version, going from version 1 to 2
+				SetAsDefaultVersion: immutable.Some(true),
+				Patch: `
+					[
+						{ "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} }
+					]
+				`,
+			},
+			testUtils.GetCollections{
+				FilterOptions: client.CollectionFetchOptions{
+					SchemaVersionID: immutable.Some(schemaVersion1ID),
+				},
+				ExpectedResults: []client.CollectionDescription{
+					{
+						// The original collection version is present, it has no source and is inactive (has no name).
+						ID:              1,
+						SchemaVersionID: schemaVersion1ID,
+					},
+				},
+			},
+		},
+	}
+	testUtils.ExecuteTestCase(t, test)
+}
diff --git a/tests/integration/schema/with_update_set_default_test.go b/tests/integration/schema/with_update_set_default_test.go
index 602e6d48d6..e5179eb814 100644
--- a/tests/integration/schema/with_update_set_default_test.go
+++ b/tests/integration/schema/with_update_set_default_test.go
@@ -36,7 +36,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToEmptyString_Errors(t *testing.T)
 					]
 				`,
 			},
-			testUtils.SetDefaultSchemaVersion{
+			testUtils.SetActiveSchemaVersion{
 				SchemaVersionID: "",
 				ExpectedError:   "schema version ID can't be empty",
 			},
@@ -63,7 +63,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToUnknownVersion_Errors(t *testing
 					]
 				`,
 			},
-			testUtils.SetDefaultSchemaVersion{
+			testUtils.SetActiveSchemaVersion{
 				SchemaVersionID: "does not exist",
 				ExpectedError:   "datastore: key not found",
 			},
@@ -91,8 +91,8 @@ func TestSchema_WithUpdateAndSetDefaultVersionToOriginal_NewFieldIsNotQueriable(
 				`,
 				SetAsDefaultVersion: immutable.Some(false),
 			},
-			testUtils.SetDefaultSchemaVersion{
-				SchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a",
+			testUtils.SetActiveSchemaVersion{
+				SchemaVersionID: "bafkreiebcgze3rs6j3g7gu65dwskdg5fn3qby5c6nqffhbdkcy2l5bbvp4",
 			},
 			testUtils.Request{
 				Request: `query {
@@ -128,8 +128,8 @@ func TestSchema_WithUpdateAndSetDefaultVersionToNew_AllowsQueryingOfNewField(t *
 				`,
 				SetAsDefaultVersion: immutable.Some(false),
 			},
-			testUtils.SetDefaultSchemaVersion{
-				SchemaVersionID: "bafkreiclwd4nrvczrzy7aj52olojyzvgm4ht6jpktwpxuqej5wk3ocxpqi",
+			testUtils.SetActiveSchemaVersion{
+				SchemaVersionID: "bafkreidn4f3i52756wevi3sfpbqzijgy6v24zh565pmvtmpqr4ou52v2q4",
 			},
 			testUtils.Request{
 				Request: `query {
diff --git a/tests/integration/state.go b/tests/integration/state.go
index ca795a2492..25a248413b 100644
--- a/tests/integration/state.go
+++ b/tests/integration/state.go
@@ -14,12 +14,11 @@ import (
 	"context"
 	"testing"
 
-	"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/net" "github.com/sourcenetwork/defradb/tests/clients" ) @@ -53,14 +52,11 @@ type state struct { // These synchronisation channels allow async actions to track their completion. syncChans []chan struct{} - // The private keys for any nodes. - nodePrivateKeys []crypto.PrivKey - // The addresses of any nodes configured. nodeAddresses []peer.AddrInfo // The configurations for any nodes - nodeConfigs []config.Config + nodeConfigs [][]net.NodeOpt // The nodes active in this test. nodes []clients.Client @@ -108,9 +104,8 @@ func newState( allActionsDone: make(chan struct{}), subscriptionResultsChans: []chan func(){}, syncChans: []chan struct{}{}, - nodePrivateKeys: []crypto.PrivKey{}, nodeAddresses: []peer.AddrInfo{}, - nodeConfigs: []config.Config{}, + nodeConfigs: [][]net.NodeOpt{}, nodes: []clients.Client{}, dbPaths: []string{}, collections: [][]client.Collection{}, diff --git a/tests/integration/test_case.go b/tests/integration/test_case.go index 435f1cf9b4..ce6e456fbb 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -13,10 +13,11 @@ package tests import ( "testing" + "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/net" "github.com/sourcenetwork/defradb/tests/gen" "github.com/sourcenetwork/defradb/tests/predefined" ) @@ -54,7 +55,7 @@ type SetupComplete struct{} // Nodes may be explicitly referenced by index by other actions using `NodeID` properties. // If the action has a `NodeID` property and it is not specified, the action will be // effected on all nodes. -type ConfigureNode func() config.Config +type ConfigureNode func() []net.NodeOpt // Restart is an action that will close and then start all nodes. type Restart struct{} @@ -90,7 +91,10 @@ type SchemaPatch struct { // If SetAsDefaultVersion has a value, and that value is false then the schema version // resulting from this patch will not be made default. SetAsDefaultVersion immutable.Option[bool] - ExpectedError string + + Lens immutable.Option[model.Lens] + + ExpectedError string } // GetSchema is an action that fetches schema using the provided options. @@ -118,9 +122,37 @@ type GetSchema struct { ExpectedError string } -// SetDefaultSchemaVersion is an action that will set the default schema version to the +// GetCollections is an action that fetches collections using the provided options. +// +// ID, RootID and SchemaVersionID will only be asserted on if an expected value is provided. +type GetCollections struct { + // NodeID may hold the ID (index) of a node to apply this patch to. + // + // If a value is not provided the patch will be applied to all nodes. + NodeID immutable.Option[int] + + // Used to identify the transaction for this to run against. Optional. + TransactionID immutable.Option[int] + + // The expected results. + // + // Each item will be compared individually, if ID, RootID or SchemaVersionID on the + // expected item are default they will not be compared with the actual. + // + // Assertions on Indexes and Sources will not distinguish between nil and empty (in order + // to allow their ommission in most cases). + ExpectedResults []client.CollectionDescription + + // An optional set of fetch options for the collections. 
+	FilterOptions client.CollectionFetchOptions
+
+	// Any error expected from the action. Optional.
+	ExpectedError string
+}
+
+// SetActiveSchemaVersion is an action that will set the active schema version to the
 // given value.
-type SetDefaultSchemaVersion struct {
+type SetActiveSchemaVersion struct {
 	// NodeID may hold the ID (index) of a node to set the default schema version on.
 	//
 	// If a value is not provided the default will be set on all nodes.
@@ -143,6 +175,9 @@ type CreateView struct {
 	// The SDL containing all types used by the view output.
 	SDL string
 
+	// An optional Lens transform to add to the view.
+	Transform immutable.Option[model.Lens]
+
 	// Any error expected from the action. Optional.
 	//
 	// String can be a partial, and the test will pass if an error is returned that
@@ -226,6 +261,14 @@ type UpdateDoc struct {
 	DontSync bool
 }
 
+// IndexedField describes a field to be indexed.
+type IndexedField struct {
+	// Name contains the name of the field.
+	Name string
+	// Descending indicates whether the field is indexed in descending order.
+	Descending bool
+}
+
 // CreateIndex will attempt to create the given secondary index for the given collection
 // using the collection api.
 type CreateIndex struct {
@@ -243,10 +286,8 @@ type CreateIndex struct {
 	// The name of the field to index. Used only for single field indexes.
 	FieldName string
 
-	// The names of the fields to index. Used only for composite indexes.
-	FieldsNames []string
-	// The directions of the 'FieldsNames' to index. Used only for composite indexes.
-	Directions []client.IndexDirection
+	// The fields to index. Used only for composite indexes.
+	Fields []IndexedField
 
 	// If Unique is true, the index will be created as a unique index.
 	Unique bool
diff --git a/tests/integration/utils.go b/tests/integration/utils.go
index f15ac8f0ef..b530f45920 100644
@@ -22,10 +22,6 @@ type RequestTestCase struct {
 	// of docs in stringified JSON format
 	Docs map[int][]string
 
-	// updates is a map from document index, to a list
-	// of changes in strinigied JSON format
-	Updates map[int]map[int][]string
-
 	Results []map[string]any
 
 	// The expected content of an expected error
@@ -56,21 +52,6 @@ func ExecuteRequestTestCase(
 		}
 	}
 
-	for collectionIndex, docUpdates := range test.Updates {
-		for docIndex, docs := range docUpdates {
-			for _, doc := range docs {
-				actions = append(
-					actions,
-					UpdateDoc{
-						CollectionID: collectionIndex,
-						DocID:        docIndex,
-						Doc:          doc,
-					},
-				)
-			}
-		}
-	}
-
 	if test.Request != "" {
 		actions = append(
 			actions,
diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go
index dc344d49f7..d5cdcbd01d 100644
@@ -20,6 +20,7 @@ import (
 	"time"
 
 	"github.com/bxcodec/faker/support/slice"
+	"github.com/fxamacker/cbor/v2"
 	"github.com/libp2p/go-libp2p/core/crypto"
 	"github.com/sourcenetwork/immutable"
 	"github.com/stretchr/testify/assert"
@@ -80,6 +81,8 @@ const (
 	lensPoolSize = 2
 )
 
+const testJSONFile = "/test.json"
+
 func init() {
 	// We use environment variables instead of flags `go test ./...` throws for all packages
 	// that don't have the flag defined
@@ -260,8 +263,11 @@ func performAction(
 	case GetSchema:
 		getSchema(s, action)
 
-	case SetDefaultSchemaVersion:
-		setDefaultSchemaVersion(s, action)
+	case GetCollections:
+		getCollections(s, action)
+
+	case SetActiveSchemaVersion:
+		setActiveSchemaVersion(s, action)
 
 	case CreateView:
 		createView(s, action)
 
@@ -269,9 +275,6 @@ func performAction(
 	case ConfigureMigration:
 		configureMigration(s, action)
 
-	case GetMigrations:
-		getMigrations(s, action)
-
 	case CreateDoc:
 		createDoc(s, action)
 
@@ -344,7 +347,7 @@ func createGenerateDocs(s *state, docs []gen.GeneratedDoc, nodeID immutable.Opti
 		if err != nil {
 			s.t.Fatalf("Failed to generate docs %s", err)
 		}
-		createDoc(s, CreateDoc{CollectionID: nameToInd[doc.Col.Description.Name], Doc: docJSON, NodeID: nodeID})
+		createDoc(s, CreateDoc{CollectionID: nameToInd[doc.Col.Description.Name.Value()], Doc: docJSON, NodeID: nodeID})
 	}
 }
 
@@ -352,7 +355,7 @@ func generateDocs(s *state, action GenerateDocs) {
 	collections := getNodeCollections(action.NodeID, s.collections)
 	defs := make([]client.CollectionDefinition, 0, len(collections[0]))
 	for _, col := range collections[0] {
-		if len(action.ForCollections) == 0 || slice.Contains(action.ForCollections, col.Name()) {
+		if len(action.ForCollections) == 0 || slice.Contains(action.ForCollections, col.Name().Value()) {
 			defs = append(defs, col.Definition())
 		}
 	}
@@ -661,19 +664,18 @@ func restartNodes(
 			continue
 		}
 
-		key := s.nodePrivateKeys[i]
-		cfg := s.nodeConfigs[i]
 		// We need to make sure the node is configured with its old address, otherwise
 		// a new one may be selected and reconnection to it will fail.
-		cfg.Net.P2PAddress = s.nodeAddresses[i].Addrs[0].String()
+		var addresses []string
+		for _, addr := range s.nodeAddresses[i].Addrs {
+			addresses = append(addresses, addr.String())
+		}
+
+		nodeOpts := s.nodeConfigs[i]
+		nodeOpts = append(nodeOpts, net.WithListenAddresses(addresses...))
 
 		var n *net.Node
-		n, err = net.NewNode(
-			s.ctx,
-			db,
-			net.WithConfig(&cfg),
-			net.WithPrivateKey(key),
-		)
+		n, err = net.NewNode(s.ctx, db, nodeOpts...)
 		require.NoError(s.t, err)
 
 		if err := n.Start(); err != nil {
@@ -734,12 +736,12 @@ func refreshCollections(
 	for nodeID, node := range s.nodes {
 		s.collections[nodeID] = make([]client.Collection, len(s.collectionNames))
 
-		allCollections, err := node.GetAllCollections(s.ctx)
+		allCollections, err := node.GetCollections(s.ctx, client.CollectionFetchOptions{})
 		require.Nil(s.t, err)
 
 		for i, collectionName := range s.collectionNames {
 			for _, collection := range allCollections {
-				if collection.Name() == collectionName {
+				if collection.Name().Value() == collectionName {
 					s.collections[nodeID][i] = collection
 					break
 				}
@@ -762,21 +764,17 @@ func configureNode(
 		return
 	}
 
-	cfg := action()
 	db, path, err := setupDatabase(s) // disable change detector, or allow it?
 	require.NoError(s.t, err)
 
 	privateKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
 	require.NoError(s.t, err)
 
+	nodeOpts := action()
+	nodeOpts = append(nodeOpts, net.WithPrivateKey(privateKey))
+
 	var n *net.Node
-	log.Info(s.ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress))
-	n, err = net.NewNode(
-		s.ctx,
-		db,
-		net.WithConfig(&cfg),
-		net.WithPrivateKey(privateKey),
-	)
+	n, err = net.NewNode(s.ctx, db, nodeOpts...)
 	require.NoError(s.t, err)
 
 	log.Info(s.ctx, "Starting P2P node", logging.NewKV("P2P address", n.PeerInfo()))
@@ -786,8 +784,7 @@ func configureNode(
 	}
 
 	s.nodeAddresses = append(s.nodeAddresses, n.PeerInfo())
-	s.nodeConfigs = append(s.nodeConfigs, cfg)
-	s.nodePrivateKeys = append(s.nodePrivateKeys, privateKey)
+	s.nodeConfigs = append(s.nodeConfigs, nodeOpts)
 
 	c, err := setupClient(s, n)
 	require.NoError(s.t, err)
@@ -997,7 +994,7 @@ func patchSchema(
 		setAsDefaultVersion = true
 	}
 
-	err := node.PatchSchema(s.ctx, action.Patch, setAsDefaultVersion)
+	err := node.PatchSchema(s.ctx, action.Patch, action.Lens, setAsDefaultVersion)
 
 	expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError)
 	assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised)
@@ -1020,12 +1017,14 @@ func getSchema(
 		result, e := node.GetSchemaByVersionID(s.ctx, action.VersionID.Value())
 		err = e
 		results = []client.SchemaDescription{result}
-	case action.Root.HasValue():
-		results, err = node.GetSchemasByRoot(s.ctx, action.Root.Value())
-	case action.Name.HasValue():
-		results, err = node.GetSchemasByName(s.ctx, action.Name.Value())
 	default:
-		results, err = node.GetAllSchemas(s.ctx)
+		results, err = node.GetSchemas(
+			s.ctx,
+			client.SchemaFetchOptions{
+				Root: action.Root,
+				Name: action.Name,
+			},
+		)
 	}
 
 	expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError)
@@ -1037,12 +1036,56 @@ func getSchema(
 	}
 }
 
-func setDefaultSchemaVersion(
+func getCollections(
 	s *state,
-	action SetDefaultSchemaVersion,
+	action GetCollections,
 ) {
 	for _, node := range getNodes(action.NodeID, s.nodes) {
-		err := node.SetDefaultSchemaVersion(s.ctx, action.SchemaVersionID)
+		db := getStore(s, node, action.TransactionID, "")
+		results, err := db.GetCollections(s.ctx, action.FilterOptions)
+
+		expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError)
+		assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised)
+
+		if !expectedErrorRaised {
+			require.Equal(s.t, len(action.ExpectedResults), len(results))
+
+			for i, expected := range action.ExpectedResults {
+				actual := results[i].Description()
+				if expected.ID != 0 {
+					require.Equal(s.t, expected.ID, actual.ID)
+				}
+				if expected.RootID != 0 {
+					require.Equal(s.t, expected.RootID, actual.RootID)
+				}
+				if expected.SchemaVersionID != "" {
+					require.Equal(s.t, expected.SchemaVersionID, actual.SchemaVersionID)
+				}
+
+				require.Equal(s.t, expected.Name, actual.Name)
+
+				if expected.Indexes != nil || len(actual.Indexes) != 0 {
+					// Don't bother asserting this if the expected is nil and the actual is nil/empty.
+					// This is to save each test action from having to declare an empty slice (if there are no indexes).
+					require.Equal(s.t, expected.Indexes, actual.Indexes)
+				}
+
+				if expected.Sources != nil || len(actual.Sources) != 0 {
+					// Don't bother asserting this if the expected is nil and the actual is nil/empty.
+					// This is to save each test action from having to declare an empty slice (if there are no sources).
+					require.Equal(s.t, expected.Sources, actual.Sources)
+				}
+			}
+		}
+	}
+}
+
+func setActiveSchemaVersion(
+	s *state,
+	action SetActiveSchemaVersion,
+) {
+	for _, node := range getNodes(action.NodeID, s.nodes) {
+		err := node.SetActiveSchemaVersion(s.ctx, action.SchemaVersionID)
 
 		expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError)
 		assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised)
@@ -1057,7 +1100,7 @@ func createView(
 	action CreateView,
 ) {
 	for _, node := range getNodes(action.NodeID, s.nodes) {
-		_, err := node.AddView(s.ctx, action.Query, action.SDL)
+		_, err := node.AddView(s.ctx, action.Query, action.SDL, action.Transform)
 		expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError)
 		assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised)
@@ -1155,7 +1198,7 @@ func createDocViaGQL(
 				_docID
 			}
 		}`,
-		collection.Name(),
+		collection.Name().Value(),
 		input,
 	)
 
@@ -1302,7 +1345,7 @@ func updateDocViaGQL(
 				_docID
 			}
 		}`,
-		collection.Name(),
+		collection.Name().Value(),
 		doc.ID().String(),
 		input,
 	)
@@ -1337,11 +1380,11 @@ func createIndex(
 				Name: action.FieldName,
 			},
 		}
-	} else if len(action.FieldsNames) > 0 {
-		for i := range action.FieldsNames {
+	} else if len(action.Fields) > 0 {
+		for i := range action.Fields {
 			indexDesc.Fields = append(indexDesc.Fields, client.IndexedFieldDescription{
-				Name:      action.FieldsNames[i],
-				Direction: action.Directions[i],
+				Name:       action.Fields[i].Name,
+				Descending: action.Fields[i].Descending,
 			})
 		}
 	}
@@ -1399,7 +1442,7 @@ func backupExport(
 	action BackupExport,
 ) {
 	if action.Config.Filepath == "" {
-		action.Config.Filepath = s.t.TempDir() + "/test.json"
+		action.Config.Filepath = s.t.TempDir() + testJSONFile
 	}
 
 	var expectedErrorRaised bool
@@ -1425,7 +1468,7 @@ func backupImport(
 	action BackupImport,
 ) {
 	if action.Filepath == "" {
-		action.Filepath = s.t.TempDir() + "/test.json"
+		action.Filepath = s.t.TempDir() + testJSONFile
 	}
 
 	// we can avoid checking the error here as this would mean the filepath is invalid
@@ -1883,7 +1926,7 @@ func ParseSDL(gqlSDL string) (map[string]client.CollectionDefinition, error) {
 	}
 	result := make(map[string]client.CollectionDefinition)
 	for _, col := range cols {
-		result[col.Description.Name] = col
+		result[col.Description.Name.Value()] = col
 	}
 	return result, nil
 }
@@ -1895,3 +1938,11 @@ func MustParseTime(timeString string) time.Time {
 	}
 	return t
 }
+
+func CBORValue(value any) []byte {
+	enc, err := cbor.Marshal(value)
+	if err != nil {
+		panic(err)
+	}
+	return enc
+}
diff --git a/tests/integration/view/one_to_many/with_transform_test.go b/tests/integration/view/one_to_many/with_transform_test.go
new file mode 100644
index 0000000000..05b41516f4
--- /dev/null
+++ b/tests/integration/view/one_to_many/with_transform_test.go
@@ -0,0 +1,192 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+ +package one_to_many + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +func TestView_OneToManyWithTransformOnOuter(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to many view with transform on outer", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Author { + name: String + books: [Book] + } + type Book { + name: String + author: Author + } + `, + }, + testUtils.CreateView{ + Query: ` + Author { + name + books { + name + } + } + `, + SDL: ` + type AuthorView { + fullName: String + books: [BookView] + } + interface BookView { + name: String + } + `, + Transform: immutable.Some(model.Lens{ + // This transform will copy the value from `name` into the `fullName` field, + // like an overly-complicated alias + Lenses: []model.LensModule{ + { + Path: lenses.CopyModulePath, + Arguments: map[string]any{ + "src": "name", + "dst": "fullName", + }, + }, + }, + }), + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "Ferdowsi" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "name": "Shahnameh", + "author": "bae-db3c6923-c6a4-5386-8301-b20a5454bf1d" + }`, + }, + testUtils.Request{ + Request: ` + query { + AuthorView { + fullName + books { + name + } + } + } + `, + Results: []map[string]any{ + { + "fullName": "Ferdowsi", + "books": []any{ + map[string]any{ + "name": "Shahnameh", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestView_OneToManyWithTransformAddingInnerDocs(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to many view with transform adding inner docs", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Author { + name: String + } + `, + }, + testUtils.CreateView{ + Query: ` + Author { + name + } + `, + SDL: ` + type AuthorView { + name: String + books: [BookView] + } + interface BookView { + name: String + } + `, + Transform: immutable.Some(model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "books", + "value": []map[string]any{ + { + "name": "The Tragedy of Sohrab and Rostam", + }, + { + "name": "The Legend of Seyavash", + }, + }, + }, + }, + }, + }), + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "Ferdowsi" + }`, + }, + testUtils.Request{ + Request: ` + query { + AuthorView { + name + books { + name + } + } + } + `, + Results: []map[string]any{ + { + "name": "Ferdowsi", + "books": []any{ + map[string]any{ + "name": "The Tragedy of Sohrab and Rostam", + }, + map[string]any{ + "name": "The Legend of Seyavash", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/view/one_to_one/with_transform_test.go b/tests/integration/view/one_to_one/with_transform_test.go new file mode 100644 index 0000000000..cc638596e0 --- /dev/null +++ b/tests/integration/view/one_to_one/with_transform_test.go @@ -0,0 +1,108 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package one_to_one + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +func TestView_OneToOneWithTransformOnOuter(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to one view with transform on outer", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Author { + name: String + book: Book + } + type Book { + name: String + author: Author + } + `, + }, + testUtils.CreateView{ + Query: ` + Author { + name + book { + name + } + } + `, + SDL: ` + type AuthorView { + fullName: String + book: BookView + } + interface BookView { + name: String + } + `, + Transform: immutable.Some(model.Lens{ + // This transform will copy the value from `name` into the `fullName` field, + // like an overly-complicated alias + Lenses: []model.LensModule{ + { + Path: lenses.CopyModulePath, + Arguments: map[string]any{ + "src": "name", + "dst": "fullName", + }, + }, + }, + }), + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "Ferdowsi" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "name": "Shahnameh", + "author": "bae-db3c6923-c6a4-5386-8301-b20a5454bf1d" + }`, + }, + testUtils.Request{ + Request: ` + query { + AuthorView { + fullName + book { + name + } + } + } + `, + Results: []map[string]any{ + { + "fullName": "Ferdowsi", + "book": map[string]any{ + "name": "Shahnameh", + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/view/simple/with_transform_test.go b/tests/integration/view/simple/with_transform_test.go new file mode 100644 index 0000000000..fc148357e9 --- /dev/null +++ b/tests/integration/view/simple/with_transform_test.go @@ -0,0 +1,323 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package simple + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +func TestView_SimpleWithTransform(t *testing.T) { + test := testUtils.TestCase{ + Description: "Simple view with transform", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + } + `, + }, + testUtils.CreateView{ + Query: ` + User { + name + } + `, + SDL: ` + type UserView { + fullName: String + } + `, + Transform: immutable.Some(model.Lens{ + // This transform will copy the value from `name` into the `fullName` field, + // like an overly-complicated alias + Lenses: []model.LensModule{ + { + Path: lenses.CopyModulePath, + Arguments: map[string]any{ + "src": "name", + "dst": "fullName", + }, + }, + }, + }), + }, + testUtils.CreateDoc{ + // Set the `name` field only + Doc: `{ + "name": "John" + }`, + }, + testUtils.CreateDoc{ + // Set the `name` field only + Doc: `{ + "name": "Fred" + }`, + }, + testUtils.Request{ + Request: ` + query { + UserView { + fullName + } + } + `, + Results: []map[string]any{ + { + "fullName": "Fred", + }, + { + "fullName": "John", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestView_SimpleWithMultipleTransforms(t *testing.T) { + test := testUtils.TestCase{ + Description: "Simple view with multiple transforms", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + } + `, + }, + testUtils.CreateView{ + Query: ` + User { + name + } + `, + SDL: ` + type UserView { + fullName: String + age: Int + } + `, + Transform: immutable.Some(model.Lens{ + // This transform will copy the value from `name` into the `fullName` field, + // like an overly-complicated alias. It will then set `age` to 23. + // + // It is important that this test tests the returning of more fields than it is + // provided with, given the production code. 
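+					// (The input User documents only ever contain `name`, so the `age` value
+					// below exercises that extra-field path in the transform output.)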
+ Lenses: []model.LensModule{ + { + Path: lenses.CopyModulePath, + Arguments: map[string]any{ + "src": "name", + "dst": "fullName", + }, + }, + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "age", + "value": 23, + }, + }, + }, + }), + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "Fred" + }`, + }, + testUtils.Request{ + Request: ` + query { + UserView { + fullName + age + } + } + `, + Results: []map[string]any{ + { + "fullName": "Fred", + "age": 23, + }, + { + "fullName": "John", + "age": 23, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestView_SimpleWithTransformReturningMoreDocsThanInput(t *testing.T) { + test := testUtils.TestCase{ + Description: "Simple view with transform returning more docs than input", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + } + `, + }, + testUtils.CreateView{ + Query: ` + User { + name + } + `, + SDL: ` + type UserView { + name: String + } + `, + Transform: immutable.Some(model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.PrependModulePath, + Arguments: map[string]any{ + "values": []map[string]any{ + { + "name": "Fred", + }, + { + "name": "Shahzad", + }, + }, + }, + }, + }, + }), + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.Request{ + Request: ` + query { + UserView { + name + } + } + `, + Results: []map[string]any{ + { + "name": "Fred", + }, + { + "name": "Shahzad", + }, + { + "name": "John", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestView_SimpleWithTransformReturningFewerDocsThanInput(t *testing.T) { + test := testUtils.TestCase{ + Description: "Simple view with transform returning fewer docs than input", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + valid: Boolean + } + `, + }, + testUtils.CreateView{ + Query: ` + User { + name + valid + } + `, + SDL: ` + type UserView { + name: String + } + `, + Transform: immutable.Some(model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.FilterModulePath, + Arguments: map[string]any{ + "src": "valid", + "value": true, + }, + }, + }, + }), + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John", + "valid": true + }`, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "Fred", + "valid": false + }`, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "Shahzad", + "valid": true + }`, + }, + testUtils.Request{ + Request: ` + query { + UserView { + name + } + } + `, + Results: []map[string]any{ + { + "name": "Shahzad", + }, + { + "name": "John", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/lenses/Makefile b/tests/lenses/Makefile index 7370a04b80..5ebd3a0217 100644 --- a/tests/lenses/Makefile +++ b/tests/lenses/Makefile @@ -2,3 +2,5 @@ build: cargo build --target wasm32-unknown-unknown --manifest-path "./rust_wasm32_set_default/Cargo.toml" cargo build --target wasm32-unknown-unknown --manifest-path "./rust_wasm32_remove/Cargo.toml" cargo build --target wasm32-unknown-unknown --manifest-path "./rust_wasm32_copy/Cargo.toml" + cargo build --target wasm32-unknown-unknown --manifest-path "./rust_wasm32_prepend/Cargo.toml" + cargo build --target wasm32-unknown-unknown --manifest-path "./rust_wasm32_filter/Cargo.toml" diff --git a/tests/lenses/rust_wasm32_filter/Cargo.toml b/tests/lenses/rust_wasm32_filter/Cargo.toml new file mode 100644 index 0000000000..c3f815599f --- /dev/null +++ 
b/tests/lenses/rust_wasm32_filter/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "rust-wasm32-filter"
+version = "0.1.0"
+edition = "2018"
+
+[lib]
+crate-type = ["cdylib"]
+
+[dependencies]
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0.87"
+lens_sdk = "^0.5"
diff --git a/tests/lenses/rust_wasm32_filter/src/lib.rs b/tests/lenses/rust_wasm32_filter/src/lib.rs
new file mode 100644
index 0000000000..5736d867f9
--- /dev/null
+++ b/tests/lenses/rust_wasm32_filter/src/lib.rs
@@ -0,0 +1,106 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use std::collections::HashMap;
+use std::sync::RwLock;
+use std::error::Error;
+use std::{fmt, error};
+use serde::Deserialize;
+use lens_sdk::StreamOption;
+use lens_sdk::option::StreamOption::{Some, None, EndOfStream};
+
+#[link(wasm_import_module = "lens")]
+extern "C" {
+    fn next() -> *mut u8;
+}
+
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+enum ModuleError {
+    ParametersNotSetError,
+    PropertyNotFoundError{requested: String},
+}
+
+impl error::Error for ModuleError { }
+
+impl fmt::Display for ModuleError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match &*self {
+            ModuleError::ParametersNotSetError => f.write_str("Parameters have not been set."),
+            ModuleError::PropertyNotFoundError { requested } =>
+                write!(f, "The requested property was not found. Requested: {}", requested),
+        }
+    }
+}
+
+#[derive(Deserialize, Clone)]
+pub struct Parameters {
+    pub src: String,
+    pub value: serde_json::Value,
+}
+
+static PARAMETERS: RwLock<StreamOption<Parameters>> = RwLock::new(None);
+
+#[no_mangle]
+pub extern fn alloc(size: usize) -> *mut u8 {
+    lens_sdk::alloc(size)
+}
+
+#[no_mangle]
+pub extern fn set_param(ptr: *mut u8) -> *mut u8 {
+    match try_set_param(ptr) {
+        Ok(_) => lens_sdk::nil_ptr(),
+        Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes())
+    }
+}
+
+fn try_set_param(ptr: *mut u8) -> Result<(), Box<dyn Error>> {
+    let parameter = lens_sdk::try_from_mem::<Parameters>(ptr)?
+        .ok_or(ModuleError::ParametersNotSetError)?;
+
+    let mut dst = PARAMETERS.write()?;
+    *dst = Some(parameter);
+    Ok(())
+}
+
+#[no_mangle]
+pub extern fn transform() -> *mut u8 {
+    match try_transform() {
+        Ok(o) => match o {
+            Some(result_json) => lens_sdk::to_mem(lens_sdk::JSON_TYPE_ID, &result_json),
+            None => lens_sdk::nil_ptr(),
+            EndOfStream => lens_sdk::to_mem(lens_sdk::EOS_TYPE_ID, &[]),
+        },
+        Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes())
+    }
+}
+
+fn try_transform() -> Result<StreamOption<Vec<u8>>, Box<dyn Error>> {
+    let ptr = unsafe { next() };
+    let mut input = match lens_sdk::try_from_mem::<HashMap<String, serde_json::Value>>(ptr)? {
+        Some(v) => v,
+        // Implementations of `transform` are free to handle nil however they like. In this
+        // implementation we chose to return nil given a nil input.
+        None => return Ok(None),
+        EndOfStream => return Ok(EndOfStream)
+    };
+
+    let params = PARAMETERS.read()?
+        .clone()
+        .ok_or(ModuleError::ParametersNotSetError)?
+        .clone();
+
+    let value = input.get_mut(&params.src)
+        .ok_or(ModuleError::PropertyNotFoundError{requested: params.src.clone()})?
+        .clone();
+
+    if value != params.value {
+        return try_transform();
+    }
+
+    let result = input.clone();
+
+    let result_json = serde_json::to_vec(&result)?;
+    lens_sdk::free_transport_buffer(ptr)?;
+    Ok(Some(result_json))
+}
diff --git a/tests/lenses/rust_wasm32_prepend/Cargo.toml b/tests/lenses/rust_wasm32_prepend/Cargo.toml
new file mode 100644
index 0000000000..7038a68b9c
--- /dev/null
+++ b/tests/lenses/rust_wasm32_prepend/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "rust-wasm32-prepend"
+version = "0.1.0"
+edition = "2018"
+
+[lib]
+crate-type = ["cdylib"]
+
+[dependencies]
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0.87"
+lens_sdk = "^0.5"
diff --git a/tests/lenses/rust_wasm32_prepend/src/lib.rs b/tests/lenses/rust_wasm32_prepend/src/lib.rs
new file mode 100644
index 0000000000..30dcab58f8
--- /dev/null
+++ b/tests/lenses/rust_wasm32_prepend/src/lib.rs
@@ -0,0 +1,111 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use std::collections::HashMap;
+use std::sync::RwLock;
+use std::error::Error;
+use std::{fmt, error};
+use serde::Deserialize;
+use lens_sdk::StreamOption;
+use lens_sdk::option::StreamOption::{Some, None, EndOfStream};
+
+#[link(wasm_import_module = "lens")]
+extern "C" {
+    fn next() -> *mut u8;
+}
+
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+enum ModuleError {
+    ParametersNotSetError,
+}
+
+impl error::Error for ModuleError { }
+
+impl fmt::Display for ModuleError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match &*self {
+            ModuleError::ParametersNotSetError => f.write_str("Parameters have not been set."),
+        }
+    }
+}
+
+#[derive(Deserialize, Clone)]
+pub struct Parameters {
+    pub values: Vec<HashMap<String, serde_json::Value>>,
+}
+
+static PARAMETERS: RwLock<StreamOption<Parameters>> = RwLock::new(None);
+static PARAM_INDEX: RwLock<usize> = RwLock::new(0);
+
+#[no_mangle]
+pub extern fn alloc(size: usize) -> *mut u8 {
+    lens_sdk::alloc(size)
+}
+
+#[no_mangle]
+pub extern fn set_param(ptr: *mut u8) -> *mut u8 {
+    match try_set_param(ptr) {
+        Ok(_) => lens_sdk::nil_ptr(),
+        Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes())
+    }
+}
+
+fn try_set_param(ptr: *mut u8) -> Result<(), Box<dyn Error>> {
+    let parameter = lens_sdk::try_from_mem::<Parameters>(ptr)?
+        .ok_or(ModuleError::ParametersNotSetError)?
+        .clone();
+
+    let mut dst = PARAMETERS.write()?;
+    *dst = Some(parameter);
+    Ok(())
+}
+
+#[no_mangle]
+pub extern fn transform() -> *mut u8 {
+    match try_transform() {
+        Ok(o) => match o {
+            Some(result_json) => lens_sdk::to_mem(lens_sdk::JSON_TYPE_ID, &result_json),
+            None => lens_sdk::nil_ptr(),
+            EndOfStream => lens_sdk::to_mem(lens_sdk::EOS_TYPE_ID, &[]),
+        },
+        Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes())
+    }
+}
+
+fn try_transform() -> Result<StreamOption<Vec<u8>>, Box<dyn Error>> {
+    let params = PARAMETERS.read()?
+        .clone()
+        .ok_or(ModuleError::ParametersNotSetError)?
+        .clone();
+
+    let param_index = PARAM_INDEX.read()?
+        .clone();
+
+    if param_index < params.values.len() {
+        let result = &params.values[param_index];
+        let result_json = serde_json::to_vec(&result)?;
+
+        let mut dst = PARAM_INDEX.write()?;
+        *dst = param_index + 1;
+        return Ok(Some(result_json))
+    }
+
+    // Note: The following is a very unperformant but simple way of yielding the input documents;
+    // as this module is only used for testing, this is preferred.
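+    // Once every parameter document has been yielded, each call falls through to
+    // here and simply forwards the next input document unchanged.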
+
+    let ptr = unsafe { next() };
+    let input = match lens_sdk::try_from_mem::<HashMap<String, serde_json::Value>>(ptr)? {
+        Some(v) => v,
+        // Implementations of `transform` are free to handle nil however they like. In this
+        // implementation we chose to return nil given a nil input.
+        None => return Ok(None),
+        EndOfStream => return Ok(EndOfStream)
+    };
+
+    let result = input.clone();
+
+    let result_json = serde_json::to_vec(&result)?;
+    lens_sdk::free_transport_buffer(ptr)?;
+    Ok(Some(result_json))
+}
diff --git a/tests/lenses/utils.go b/tests/lenses/utils.go
index 132c7d33c4..0fab59d3a6 100644
--- a/tests/lenses/utils.go
+++ b/tests/lenses/utils.go
@@ -44,6 +44,25 @@ var CopyModulePath string = getPathRelativeToProjectRoot(
 	"/tests/lenses/rust_wasm32_copy/target/wasm32-unknown-unknown/debug/rust_wasm32_copy.wasm",
 )
 
+// PrependModulePath is the path to the `Prepend` lens module compiled to wasm.
+//
+// The module has one parameter:
+// - `values` is an array of `map[string]string`s; the module will yield these documents before
+// any documents fed to it (from Defra).
+var PrependModulePath string = getPathRelativeToProjectRoot(
+	"/tests/lenses/rust_wasm32_prepend/target/wasm32-unknown-unknown/debug/rust_wasm32_prepend.wasm",
+)
+
+// FilterModulePath is the path to the `Filter` lens module compiled to wasm.
+//
+// The module has two parameters:
+// - `src` is a string and is the name of the property you wish to evaluate
+// - `value` can be any valid JSON value and will be compared to the document value at the `src` location;
+// only documents with values that match this given value will be returned.
+var FilterModulePath string = getPathRelativeToProjectRoot(
+	"/tests/lenses/rust_wasm32_filter/target/wasm32-unknown-unknown/debug/rust_wasm32_filter.wasm",
+)
+
 func getPathRelativeToProjectRoot(relativePath string) string {
 	_, filename, _, _ := runtime.Caller(0)
 	root := path.Dir(path.Dir(path.Dir(filename)))
diff --git a/tests/predefined/gen_predefined.go b/tests/predefined/gen_predefined.go
index 76e143c896..8252156e55 100644
--- a/tests/predefined/gen_predefined.go
+++ b/tests/predefined/gen_predefined.go
@@ -31,7 +31,7 @@ func parseSDL(gqlSDL string) (map[string]client.CollectionDefinition, error) {
 	}
 	result := make(map[string]client.CollectionDefinition)
 	for _, col := range cols {
-		result[col.Description.Name] = col
+		result[col.Description.Name.Value()] = col
 	}
 	return result, nil
 }
@@ -85,7 +85,7 @@ func Create(defs []client.CollectionDefinition, docsList DocsList) ([]gen.Genera
 	resultDocs := make([]gen.GeneratedDoc, 0, len(docsList.Docs))
 	typeDefs := make(map[string]client.CollectionDefinition)
 	for _, col := range defs {
-		typeDefs[col.Description.Name] = col
+		typeDefs[col.Description.Name.Value()] = col
 	}
 	generator := docGenerator{types: typeDefs}
 	for _, doc := range docsList.Docs {
@@ -134,7 +134,7 @@ func (this *docGenerator) generatePrimary(
 	for _, secDocField := range secType.Schema.Fields {
 		if secDocField.IsRelation() {
 			if secDocMapField, hasField := secDocMap[secDocField.Name]; hasField {
-				if secDocField.IsPrimaryRelation() {
+				if secDocField.IsPrimaryRelation {
 					primType := this.types[secDocField.Schema]
 					primDocMap, subResult, err := this.generatePrimary(
 						secDocMap[secDocField.Name].(map[string]any), &primType)
@@ -151,7 +151,7 @@ func (this *docGenerator) generatePrimary(
 					result = append(result, subResult...)
secondaryDocs, err := this.generateSecondaryDocs( - secDocMapField.(map[string]any), docID, &primType, secType.Description.Name) + secDocMapField.(map[string]any), docID, &primType, secType.Description.Name.Value()) if err != nil { return nil, nil, err } @@ -199,10 +199,10 @@ func (this *docGenerator) generateSecondaryDocs( for _, field := range primaryType.Schema.Fields { if field.IsRelation() { if _, hasProp := primaryDocMap[field.Name]; hasProp { - if !field.IsPrimaryRelation() && + if !field.IsPrimaryRelation && (parentTypeName == "" || parentTypeName != field.Schema) { docs, err := this.generateSecondaryDocsForField( - primaryDocMap, primaryType.Description.Name, &field, docID) + primaryDocMap, primaryType.Description.Name.Value(), &field, docID) if err != nil { return nil, err } @@ -218,20 +218,20 @@ func (this *docGenerator) generateSecondaryDocs( func (this *docGenerator) generateSecondaryDocsForField( primaryDoc map[string]any, primaryTypeName string, - relField *client.FieldDescription, + relField *client.SchemaFieldDescription, primaryDocID string, ) ([]gen.GeneratedDoc, error) { result := []gen.GeneratedDoc{} relTypeDef := this.types[relField.Schema] primaryPropName := "" for _, relDocField := range relTypeDef.Schema.Fields { - if relDocField.Schema == primaryTypeName && relDocField.IsPrimaryRelation() { + if relDocField.Schema == primaryTypeName && relDocField.IsPrimaryRelation { primaryPropName = relDocField.Name + request.RelatedObjectID switch relVal := primaryDoc[relField.Name].(type) { case []map[string]any: for _, relDoc := range relVal { relDoc[primaryPropName] = primaryDocID - actions, err := this.generateRelatedDocs(relDoc, relTypeDef.Description.Name) + actions, err := this.generateRelatedDocs(relDoc, relTypeDef.Description.Name.Value()) if err != nil { return nil, err } @@ -239,7 +239,7 @@ func (this *docGenerator) generateSecondaryDocsForField( } case map[string]any: relVal[primaryPropName] = primaryDocID - actions, err := this.generateRelatedDocs(relVal, relTypeDef.Description.Name) + actions, err := this.generateRelatedDocs(relVal, relTypeDef.Description.Name.Value()) if err != nil { return nil, err } diff --git a/tests/predefined/gen_predefined_test.go b/tests/predefined/gen_predefined_test.go index ae68cf9804..c5e863a51c 100644 --- a/tests/predefined/gen_predefined_test.go +++ b/tests/predefined/gen_predefined_test.go @@ -508,7 +508,7 @@ func TestGeneratePredefinedFromSchema_TwoPrimaryToOneRoot(t *testing.T) { // }, // Schema: client.SchemaDescription{ // Name: "User", -// Fields: []client.FieldDescription{ +// Fields: []client.SchemaFieldDescription{ // { // Name: "name", // Kind: client.FieldKind_STRING, @@ -517,7 +517,6 @@ func TestGeneratePredefinedFromSchema_TwoPrimaryToOneRoot(t *testing.T) { // Name: "devices", // Kind: client.FieldKind_FOREIGN_OBJECT_ARRAY, // Schema: "Device", -// RelationType: client.Relation_Type_MANY | client.Relation_Type_ONEMANY, // }, // }, // }, @@ -529,7 +528,7 @@ func TestGeneratePredefinedFromSchema_TwoPrimaryToOneRoot(t *testing.T) { // }, // Schema: client.SchemaDescription{ // Name: "Device", -// Fields: []client.FieldDescription{ +// Fields: []client.SchemaFieldDescription{ // { // Name: "model", // Kind: client.FieldKind_STRING, @@ -538,9 +537,7 @@ func TestGeneratePredefinedFromSchema_TwoPrimaryToOneRoot(t *testing.T) { // Name: "owner", // Kind: client.FieldKind_FOREIGN_OBJECT, // Schema: "User", -// RelationType: client.Relation_Type_ONE | -// client.Relation_Type_ONEMANY | -// client.Relation_Type_Primary, +// 
IsPrimary: true, // }, // }, // }, diff --git a/tools/cloud/akash/deploy.yaml b/tools/cloud/akash/deploy.yaml index c6f7070f98..48d86fff8b 100644 --- a/tools/cloud/akash/deploy.yaml +++ b/tools/cloud/akash/deploy.yaml @@ -5,7 +5,7 @@ services: defradb: image: sourcenetwork/defradb:v0.6.0 args: - - start + - start - --url=0.0.0.0:9181 expose: - port: 9161 @@ -40,7 +40,7 @@ profiles: - "akash1365yvmc4s7awdyj3n2sav7xfx76adc6dnmlx63" - "akash18qa2a2ltfyvkyj0ggj3hkvuj6twzyumuaru9s4" pricing: - defradb: + defradb: denom: uakt amount: 10000 @@ -48,4 +48,4 @@ deployment: defradb: akash: profile: defradb - count: 1 \ No newline at end of file + count: 1 diff --git a/tools/cloud/aws/packer/build_aws_ami.pkr.hcl b/tools/cloud/aws/packer/build_aws_ami.pkr.hcl index 8afacfb339..4eb1579778 100644 --- a/tools/cloud/aws/packer/build_aws_ami.pkr.hcl +++ b/tools/cloud/aws/packer/build_aws_ami.pkr.hcl @@ -66,8 +66,8 @@ build { inline = [ "/usr/bin/cloud-init status --wait", "sudo apt-get update && sudo apt-get install make build-essential -y", - "curl -OL https://golang.org/dl/go1.20.6.linux-amd64.tar.gz", - "rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.20.6.linux-amd64.tar.gz", + "curl -OL https://golang.org/dl/go1.21.6.linux-amd64.tar.gz", + "rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.21.6.linux-amd64.tar.gz", "export PATH=$PATH:/usr/local/go/bin", "git clone \"https://git@$DEFRADB_GIT_REPO\"", "cd ./defradb || { printf \"\\\ncd into defradb failed.\\\n\" && exit 2; }", diff --git a/tools/configs/golangci.yaml b/tools/configs/golangci.yaml index c9d69b641e..e8fe63a1fc 100644 --- a/tools/configs/golangci.yaml +++ b/tools/configs/golangci.yaml @@ -57,7 +57,7 @@ run: # Define the Go version limit. # Default: use Go version from the go.mod file, fallback on the env var `GOVERSION`. - go: "1.20" + go: "1.21" #=====================================================================================[ Output Configuration Options ] output: @@ -149,7 +149,7 @@ issues: - errorlint # Exclude running header check in these paths - - path: "(net|datastore/badger/v4/compat_logger.go|datastore/badger/v4/datastore.go|connor)" + - path: "(net|datastore/badger/v4/compat_logger.go|datastore/badger/v4/datastore.go|connor|encoding)" linters: - goheader @@ -263,7 +263,7 @@ linters-settings: gosimple: # Select the Go version to target. - go: "1.20" + go: "1.21" # https://staticcheck.io/docs/options#checks checks: ["all", "-S1038"] # Turn on all except (these are disabled): @@ -287,6 +287,7 @@ linters-settings: # run `go tool vet help` to see all analyzers enable: - atomicalign + - nilness enable-all: false @@ -355,13 +356,13 @@ linters-settings: staticcheck: # Select the Go version to target. - go: "1.20" + go: "1.21" # https://staticcheck.io/docs/options#checks checks: ["all"] unused: # Select the Go version to target. - go: "1.20" + go: "1.21" whitespace: # Enforces newlines (or comments) after every multi-line if statement. diff --git a/tools/configs/yamllint.yaml b/tools/configs/yamllint.yaml new file mode 100644 index 0000000000..b7f98b844a --- /dev/null +++ b/tools/configs/yamllint.yaml @@ -0,0 +1,39 @@ +# This file contains our linter configurations that will be used for all Source Inc. projects. 
yaml-files:
+  - '*.yaml'
+  - '*.yml'
+  - '.yamllint'
+
+rules:
+  # Disabled lint rules
+  comments: disable
+  comments-indentation: disable
+  document-end: disable
+  document-start: disable
+  empty-values: disable
+  float-values: disable
+  key-ordering: disable
+  octal-values: disable
+  quoted-strings: disable
+  truthy: disable
+
+  # Enabled lint rules
+  anchors: enable
+  braces: enable
+  brackets: enable
+  colons: enable
+  commas: enable
+  empty-lines: enable
+  hyphens: enable
+  key-duplicates: enable
+  new-line-at-end-of-file: enable
+  new-lines: enable
+  trailing-spaces: enable
+  line-length:
+    max: 120
+    level: error
+  indentation:
+    indent-sequences: consistent
+    spaces: consistent
+    check-multi-line-strings: false
diff --git a/tools/defradb.containerfile b/tools/defradb.containerfile
index 272a7e67ba..53a849b6c6 100644
--- a/tools/defradb.containerfile
+++ b/tools/defradb.containerfile
@@ -11,7 +11,7 @@ RUN npm run build
 
 # Stage: BUILD
 # Several steps are involved to enable caching and because of the behavior of COPY regarding directories.
-FROM docker.io/golang:1.20 AS BUILD
+FROM docker.io/golang:1.21 AS BUILD
 WORKDIR /repo/
 COPY go.mod go.sum Makefile ./
 RUN make deps:modules
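Editor's note: for test authors migrating against this change set, the two integration-harness API shifts above compose as sketched below: ConfigureNode now returns []net.NodeOpt rather than a config.Config, and CreateIndex takes Fields []IndexedField in place of the FieldsNames/Directions pair. This is an illustrative sketch only, not part of the diff; the option constructors net.WithListenAddresses and net.WithPrivateKey appear in the diff itself, while the package name, the listen multiaddress, and the surrounding test are hypothetical, and collection-selector fields on CreateIndex are omitted for brevity.

package example_test // hypothetical package, for illustration only

import (
	"testing"

	"github.com/sourcenetwork/defradb/net"
	testUtils "github.com/sourcenetwork/defradb/tests/integration"
)

func TestMigratedConfigurationShapes(t *testing.T) {
	test := testUtils.TestCase{
		Actions: []any{
			// ConfigureNode is now a func() []net.NodeOpt; options replace the old config.Config.
			testUtils.ConfigureNode(func() []net.NodeOpt {
				return []net.NodeOpt{
					// Illustrative address value; the harness itself uses this option in
					// restartNodes to pin a node to its previous address.
					net.WithListenAddresses("/ip4/127.0.0.1/tcp/0"),
				}
			}),
			testUtils.SchemaUpdate{
				Schema: `
					type Users {
						name: String
						age: Int
					}
				`,
			},
			// Composite indexes now carry per-field ordering via IndexedField.Descending,
			// replacing the parallel FieldsNames/Directions slices.
			testUtils.CreateIndex{
				Fields: []testUtils.IndexedField{
					{Name: "name"},
					{Name: "age", Descending: true},
				},
			},
		},
	}
	testUtils.ExecuteTestCase(t, test)
}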