From 67339a39e41a6128e24868be40be1dddc4966cca Mon Sep 17 00:00:00 2001 From: Aus Lacroix Date: Fri, 30 Aug 2024 15:05:03 -0700 Subject: [PATCH] Initial commit --- LICENSE | 201 + README.md | 5 + Taskfile.yaml | 130 + cmd/cli/Taskfile.yml | 8 + cmd/cli/cmd/database/database.go | 17 + cmd/cli/cmd/database/database_create.go | 78 + cmd/cli/cmd/database/database_delete.go | 55 + cmd/cli/cmd/database/database_get.go | 86 + cmd/cli/cmd/database/doc.go | 2 + cmd/cli/cmd/errors.go | 23 + cmd/cli/cmd/group/doc.go | 2 + cmd/cli/cmd/group/group.go | 17 + cmd/cli/cmd/group/group_create.go | 76 + cmd/cli/cmd/group/group_delete.go | 55 + cmd/cli/cmd/group/group_get.go | 86 + cmd/cli/cmd/root.go | 312 + cmd/cli/cmd/version/doc.go | 2 + cmd/cli/cmd/version/version.go | 25 + cmd/cli/main.go | 15 + cmd/doc.go | 2 + cmd/root.go | 78 + cmd/serve.go | 98 + config/.env.example | 77 + config/config-dev.example.yaml | 20 + config/config.example.yaml | 93 + config/config.go | 155 + config/configmap.yaml | 89 + config/doc.go | 2 + db/README.md | 65 + db/Taskfile.yml | 45 + db/create_migrations.go | 71 + db/migrate.go | 16 + db/migrations-goose/20240414234138_init.sql | 23 + db/migrations-goose/atlas.sum | 2 + db/migrations/.gitkeep | 0 db/migrations/20240414234138_init.sql | 10 + db/migrations/atlas.sum | 2 + docker/Dockerfile | 19 + docker/Taskfile.yml | 79 + docker/docker-compose-fga.yml | 70 + docker/docker-compose-redis.yml | 16 + docker/docker-compose-tracing.yml | 31 + docker/docker-compose.yml | 24 + docker/files/otel-collector-config.yaml | 26 + gen_schema.go | 27 + generate.go | 13 + go.mod | 222 + go.sum | 766 + gqlgen.yml | 88 + gqlgenc.yml | 22 + internal/constants/doc.go | 2 + internal/constants/version.go | 76 + internal/ent/.gitkeep | 0 internal/ent/base/entinit.tmpl | 51 + internal/ent/entc.go | 133 + internal/ent/generated/auth_from_mutation.go | 5 + internal/ent/generated/authz_checks.go | 5 + internal/ent/generated/client.go | 599 + internal/ent/generated/database.go | 278 + internal/ent/generated/database/database.go | 242 + internal/ent/generated/database/where.go | 1018 ++ internal/ent/generated/database_create.go | 516 + internal/ent/generated/database_delete.go | 92 + internal/ent/generated/database_query.go | 630 + internal/ent/generated/database_update.go | 876 ++ internal/ent/generated/doc.go | 2 + internal/ent/generated/edge_cleanup.go | 27 + internal/ent/generated/ent.go | 610 + internal/ent/generated/entql.go | 340 + internal/ent/generated/enttest/enttest.go | 84 + internal/ent/generated/gql_collection.go | 371 + internal/ent/generated/gql_edge.go | 29 + internal/ent/generated/gql_mutation_input.go | 281 + internal/ent/generated/gql_node.go | 218 + internal/ent/generated/gql_pagination.go | 589 + internal/ent/generated/gql_transaction.go | 30 + internal/ent/generated/gql_where_input.go | 1572 ++ internal/ent/generated/group.go | 284 + internal/ent/generated/group/group.go | 207 + internal/ent/generated/group/where.go | 858 ++ internal/ent/generated/group_create.go | 475 + internal/ent/generated/group_delete.go | 92 + internal/ent/generated/group_query.go | 652 + internal/ent/generated/group_update.go | 897 ++ internal/ent/generated/hook/hook.go | 211 + internal/ent/generated/intercept/intercept.go | 179 + internal/ent/generated/internal/schema.go | 9 + .../ent/generated/internal/schemaconfig.go | 25 + internal/ent/generated/migrate/migrate.go | 96 + internal/ent/generated/migrate/schema.go | 103 + internal/ent/generated/mutation.go | 2484 +++ internal/ent/generated/openapi.json | 1038 
++ internal/ent/generated/predicate/predicate.go | 13 + internal/ent/generated/privacy/privacy.go | 215 + internal/ent/generated/runtime.go | 5 + internal/ent/generated/runtime/runtime.go | 95 + internal/ent/generated/tx.go | 213 + internal/ent/hooks/database.go | 140 + internal/ent/hooks/doc.go | 2 + internal/ent/hooks/group.go | 112 + internal/ent/mixin/doc.go | 2 + internal/ent/mixin/errors.go | 23 + internal/ent/mixin/softdelete_mixin.go | 113 + internal/ent/schema/database.go | 102 + internal/ent/schema/doc.go | 2 + internal/ent/schema/group.go | 92 + internal/ent/templates/.gitkeep | 0 internal/ent/templates/edge_cleanup.tmpl | 43 + internal/entdb/client.go | 216 + internal/entdb/doc.go | 2 + internal/graphapi/.gitkeep | 0 internal/graphapi/database.resolvers.go | 80 + internal/graphapi/database_test.go | 210 + internal/graphapi/doc.go | 2 + internal/graphapi/ent.resolvers.go | 39 + internal/graphapi/errors.go | 15 + internal/graphapi/gen_models.go | 43 + internal/graphapi/gen_server.go | 12491 ++++++++++++++++ internal/graphapi/group.resolvers.go | 97 + internal/graphapi/group_test.go | 202 + internal/graphapi/helpers.go | 23 + internal/graphapi/models_test.go | 109 + internal/graphapi/resolver.go | 163 + internal/graphapi/tools_test.go | 137 + internal/httpserve/config/config.go | 102 + internal/httpserve/config/configprovider.go | 7 + .../httpserve/config/configproviderrefresh.go | 92 + internal/httpserve/config/doc.go | 2 + internal/httpserve/handlers/doc.go | 2 + internal/httpserve/handlers/handlers.go | 29 + internal/httpserve/handlers/readiness.go | 57 + internal/httpserve/route/base.go | 46 + internal/httpserve/route/doc.go | 2 + internal/httpserve/route/openapi.json | 1038 ++ internal/httpserve/route/routes.go | 84 + internal/httpserve/server/doc.go | 2 + internal/httpserve/server/errors.go | 11 + internal/httpserve/server/server.go | 90 + internal/httpserve/server/validate.go | 14 + internal/httpserve/serveropts/doc.go | 2 + internal/httpserve/serveropts/option.go | 224 + internal/httpserve/serveropts/server.go | 37 + jsonschema/DOC.md | 286 + jsonschema/Taskfile.yml | 36 + jsonschema/api-docs.md | 287 + jsonschema/envparse/doc.go | 2 + jsonschema/envparse/parse.go | 126 + jsonschema/geodetic.config.json | 443 + jsonschema/schema_generator.go | 163 + jsonschema/templates/configmap.tmpl | 12 + main.go | 11 + pkg/enums/doc.go | 2 + pkg/enums/provider.go | 59 + pkg/enums/provider_test.go | 37 + pkg/enums/region.go | 62 + pkg/enums/region_test.go | 41 + pkg/enums/status.go | 65 + pkg/enums/status_test.go | 45 + pkg/geodeticclient/config.go | 44 + pkg/geodeticclient/doc.go | 2 + pkg/geodeticclient/graphclient.go | 747 + pkg/geodeticclient/interceptor.go | 55 + pkg/geodeticclient/models.go | 634 + query/.gitkeep | 0 query/database.graphql | 47 + query/group.graphql | 44 + renovate.json | 8 + schema.graphql | 971 ++ schema/.gitkeep | 0 schema/database.graphql | 75 + schema/ent.graphql | 833 ++ schema/group.graphql | 75 + sonar-project.properties | 16 + tools.go | 10 + 174 files changed, 41812 insertions(+) create mode 100644 LICENSE create mode 100644 README.md create mode 100644 Taskfile.yaml create mode 100644 cmd/cli/Taskfile.yml create mode 100644 cmd/cli/cmd/database/database.go create mode 100644 cmd/cli/cmd/database/database_create.go create mode 100644 cmd/cli/cmd/database/database_delete.go create mode 100644 cmd/cli/cmd/database/database_get.go create mode 100644 cmd/cli/cmd/database/doc.go create mode 100644 cmd/cli/cmd/errors.go create mode 100644 
cmd/cli/cmd/group/doc.go create mode 100644 cmd/cli/cmd/group/group.go create mode 100644 cmd/cli/cmd/group/group_create.go create mode 100644 cmd/cli/cmd/group/group_delete.go create mode 100644 cmd/cli/cmd/group/group_get.go create mode 100644 cmd/cli/cmd/root.go create mode 100644 cmd/cli/cmd/version/doc.go create mode 100644 cmd/cli/cmd/version/version.go create mode 100644 cmd/cli/main.go create mode 100644 cmd/doc.go create mode 100644 cmd/root.go create mode 100644 cmd/serve.go create mode 100644 config/.env.example create mode 100644 config/config-dev.example.yaml create mode 100644 config/config.example.yaml create mode 100644 config/config.go create mode 100644 config/configmap.yaml create mode 100644 config/doc.go create mode 100644 db/README.md create mode 100644 db/Taskfile.yml create mode 100644 db/create_migrations.go create mode 100644 db/migrate.go create mode 100644 db/migrations-goose/20240414234138_init.sql create mode 100644 db/migrations-goose/atlas.sum create mode 100644 db/migrations/.gitkeep create mode 100644 db/migrations/20240414234138_init.sql create mode 100644 db/migrations/atlas.sum create mode 100644 docker/Dockerfile create mode 100644 docker/Taskfile.yml create mode 100644 docker/docker-compose-fga.yml create mode 100644 docker/docker-compose-redis.yml create mode 100644 docker/docker-compose-tracing.yml create mode 100644 docker/docker-compose.yml create mode 100644 docker/files/otel-collector-config.yaml create mode 100644 gen_schema.go create mode 100644 generate.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 gqlgen.yml create mode 100644 gqlgenc.yml create mode 100644 internal/constants/doc.go create mode 100644 internal/constants/version.go create mode 100644 internal/ent/.gitkeep create mode 100644 internal/ent/base/entinit.tmpl create mode 100644 internal/ent/entc.go create mode 100644 internal/ent/generated/auth_from_mutation.go create mode 100644 internal/ent/generated/authz_checks.go create mode 100644 internal/ent/generated/client.go create mode 100644 internal/ent/generated/database.go create mode 100644 internal/ent/generated/database/database.go create mode 100644 internal/ent/generated/database/where.go create mode 100644 internal/ent/generated/database_create.go create mode 100644 internal/ent/generated/database_delete.go create mode 100644 internal/ent/generated/database_query.go create mode 100644 internal/ent/generated/database_update.go create mode 100644 internal/ent/generated/doc.go create mode 100644 internal/ent/generated/edge_cleanup.go create mode 100644 internal/ent/generated/ent.go create mode 100644 internal/ent/generated/entql.go create mode 100644 internal/ent/generated/enttest/enttest.go create mode 100644 internal/ent/generated/gql_collection.go create mode 100644 internal/ent/generated/gql_edge.go create mode 100644 internal/ent/generated/gql_mutation_input.go create mode 100644 internal/ent/generated/gql_node.go create mode 100644 internal/ent/generated/gql_pagination.go create mode 100644 internal/ent/generated/gql_transaction.go create mode 100644 internal/ent/generated/gql_where_input.go create mode 100644 internal/ent/generated/group.go create mode 100644 internal/ent/generated/group/group.go create mode 100644 internal/ent/generated/group/where.go create mode 100644 internal/ent/generated/group_create.go create mode 100644 internal/ent/generated/group_delete.go create mode 100644 internal/ent/generated/group_query.go create mode 100644 internal/ent/generated/group_update.go create mode 
100644 internal/ent/generated/hook/hook.go create mode 100644 internal/ent/generated/intercept/intercept.go create mode 100644 internal/ent/generated/internal/schema.go create mode 100644 internal/ent/generated/internal/schemaconfig.go create mode 100644 internal/ent/generated/migrate/migrate.go create mode 100644 internal/ent/generated/migrate/schema.go create mode 100644 internal/ent/generated/mutation.go create mode 100644 internal/ent/generated/openapi.json create mode 100644 internal/ent/generated/predicate/predicate.go create mode 100644 internal/ent/generated/privacy/privacy.go create mode 100644 internal/ent/generated/runtime.go create mode 100644 internal/ent/generated/runtime/runtime.go create mode 100644 internal/ent/generated/tx.go create mode 100644 internal/ent/hooks/database.go create mode 100644 internal/ent/hooks/doc.go create mode 100644 internal/ent/hooks/group.go create mode 100644 internal/ent/mixin/doc.go create mode 100644 internal/ent/mixin/errors.go create mode 100644 internal/ent/mixin/softdelete_mixin.go create mode 100644 internal/ent/schema/database.go create mode 100644 internal/ent/schema/doc.go create mode 100644 internal/ent/schema/group.go create mode 100644 internal/ent/templates/.gitkeep create mode 100644 internal/ent/templates/edge_cleanup.tmpl create mode 100644 internal/entdb/client.go create mode 100644 internal/entdb/doc.go create mode 100644 internal/graphapi/.gitkeep create mode 100644 internal/graphapi/database.resolvers.go create mode 100644 internal/graphapi/database_test.go create mode 100644 internal/graphapi/doc.go create mode 100644 internal/graphapi/ent.resolvers.go create mode 100644 internal/graphapi/errors.go create mode 100644 internal/graphapi/gen_models.go create mode 100644 internal/graphapi/gen_server.go create mode 100644 internal/graphapi/group.resolvers.go create mode 100644 internal/graphapi/group_test.go create mode 100644 internal/graphapi/helpers.go create mode 100644 internal/graphapi/models_test.go create mode 100644 internal/graphapi/resolver.go create mode 100644 internal/graphapi/tools_test.go create mode 100644 internal/httpserve/config/config.go create mode 100644 internal/httpserve/config/configprovider.go create mode 100644 internal/httpserve/config/configproviderrefresh.go create mode 100644 internal/httpserve/config/doc.go create mode 100644 internal/httpserve/handlers/doc.go create mode 100644 internal/httpserve/handlers/handlers.go create mode 100644 internal/httpserve/handlers/readiness.go create mode 100644 internal/httpserve/route/base.go create mode 100644 internal/httpserve/route/doc.go create mode 100644 internal/httpserve/route/openapi.json create mode 100644 internal/httpserve/route/routes.go create mode 100644 internal/httpserve/server/doc.go create mode 100644 internal/httpserve/server/errors.go create mode 100644 internal/httpserve/server/server.go create mode 100644 internal/httpserve/server/validate.go create mode 100644 internal/httpserve/serveropts/doc.go create mode 100644 internal/httpserve/serveropts/option.go create mode 100644 internal/httpserve/serveropts/server.go create mode 100644 jsonschema/DOC.md create mode 100644 jsonschema/Taskfile.yml create mode 100644 jsonschema/api-docs.md create mode 100644 jsonschema/envparse/doc.go create mode 100644 jsonschema/envparse/parse.go create mode 100644 jsonschema/geodetic.config.json create mode 100644 jsonschema/schema_generator.go create mode 100644 jsonschema/templates/configmap.tmpl create mode 100644 main.go create mode 100644 
pkg/enums/doc.go create mode 100644 pkg/enums/provider.go create mode 100644 pkg/enums/provider_test.go create mode 100644 pkg/enums/region.go create mode 100644 pkg/enums/region_test.go create mode 100644 pkg/enums/status.go create mode 100644 pkg/enums/status_test.go create mode 100644 pkg/geodeticclient/config.go create mode 100644 pkg/geodeticclient/doc.go create mode 100644 pkg/geodeticclient/graphclient.go create mode 100644 pkg/geodeticclient/interceptor.go create mode 100644 pkg/geodeticclient/models.go create mode 100644 query/.gitkeep create mode 100644 query/database.graphql create mode 100644 query/group.graphql create mode 100644 renovate.json create mode 100644 schema.graphql create mode 100644 schema/.gitkeep create mode 100644 schema/database.graphql create mode 100644 schema/ent.graphql create mode 100644 schema/group.graphql create mode 100644 sonar-project.properties create mode 100644 tools.go diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..44c06de --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2024 Datum Technology, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..0f150e2 --- /dev/null +++ b/README.md @@ -0,0 +1,5 @@ +[![Build status](https://badge.buildkite.com/6d265b36442938da91767d8f801e18c1119bea627591f03234.svg)](https://buildkite.com/datum/geodetic?branch=main) +[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=datumforge_geodetic&metric=alert_status)](https://sonarcloud.io/summary/new_code?id=datumforge_geodetic) + +# geodetic +A geo-distributed database scheduler diff --git a/Taskfile.yaml b/Taskfile.yaml new file mode 100644 index 0000000..1cbd47d --- /dev/null +++ b/Taskfile.yaml @@ -0,0 +1,130 @@ +version: '3' + +includes: + cli: + taskfile: ./cmd/cli + dir: ./cmd/cli + docker: + taskfile: ./docker + dir: ./docker + aliases: [compose] + db: + taskfile: ./db + dir: ./db + aliases: [atlas] + config: + taskfile: ./jsonschema + +env: + ATLAS_DB_URI: "sqlite://file?mode=memory&_fk=1" + TEST_DB_URL: "libsql://file::memory:?cache=shared" + TEST_FGA_URL: "localhost:8080" + ENV: config + +tasks: + install: + desc: installs tools and packages needed to develop against the geodetic repo + cmds: + - "brew install gomplate" + - "go install entgo.io/ent/cmd/ent@latest" + - "go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest" + - "curl -sSf https://atlasgo.sh | sh" + - "curl -sSL https://rover.apollo.dev/nix/latest | sh" + - "go get -u github.com/openfga/go-sdk" + - "brew install openfga/tap/fga" + - go install github.com/vektra/mockery/v2@v2.40.1 + - task: config:install + - defer: { task: go:tidy } + + generate: + desc: a combination of the ent, graph, and gqlgen tasks which are required to fully generate the necessary graph, server, resolvers, client, etc. + cmds: + - go generate ./... + - cp ./internal/ent/generated/openapi.json ./internal/httpserve/route + + ## Go tasks + go:lint: + desc: runs golangci-lint, the most annoying opinionated linter ever + cmds: + - golangci-lint run --config=.golangci.yaml --verbose + + go:test: + desc: runs and outputs results of created go tests + cmds: + - go test -v ./... + + go:test:cover: + desc: runs and outputs results of created go tests with coverage + aliases: [cover] + cmds: + - go test -v ./... -coverprofile=coverage.out + - go tool cover -html=coverage.out + + go:fmt: + desc: format all go code + cmds: + - go fmt ./...
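+ + # example local dev loop built from the tasks defined in this file + # (assumes the Task runner that consumes this Taskfile is installed): + # task install (one-time tool bootstrap) + # task generate (regenerate ent/graph/gqlgen code and sync the openapi spec) + # task go:all (tidy, fmt, lint, and test in one pass)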
+ + go:tidy: + desc: Runs go mod tidy on the backend + aliases: [tidy] + cmds: + - go mod tidy + + go:build: + desc: Runs go build for the geodetic server + cmds: + - go build -mod=mod -o geodetic + + go:build-cli: + aliases: [buildcli] + desc: Runs go build for the geodetic cli + cmds: + - go build -mod=mod -o geodetic-cli ./cmd/cli + + go:all: + aliases: [go] + desc: Runs all go test and lint related tasks + cmds: + - task: go:tidy + - task: go:fmt + - task: go:lint + - task: go:test + + # dev tasks + run-dev: + dotenv: ['{{.ENV}}/.env-dev'] + desc: runs the geodetic server in dev mode + cmds: + - task: compose:redis + - task: compose:fga + - go run main.go serve --debug --pretty + + pr: + desc: runs the comprehensive roll-up tasks required to ensure all files are being committed / pushed as a part of opening a PR + cmds: + - task: generate + - task: config:generate + - task: config:docs + - task: atlas:all + - task: go + + ci: + desc: a task that runs during CI + cmds: + - task: generate + - task: tidy + - "git config --global --add safe.directory /workdir" + - | + status=$(git status --porcelain) + if [ -n "$status" ]; then + echo "detected git diff after running generate; please re-run tasks" + echo "$status" + exit 1 + fi + + clean:local: + desc: cleans up the local geodetic.db and geodetic-cli files + cmds: + - "rm -f geodetic.db" + - "rm -f geodetic-cli" diff --git a/cmd/cli/Taskfile.yml b/cmd/cli/Taskfile.yml new file mode 100644 index 0000000..3b6d461 --- /dev/null +++ b/cmd/cli/Taskfile.yml @@ -0,0 +1,8 @@ +version: "3" + +tasks: + version: + desc: gets the cli version + cmds: + - go run main.go version + diff --git a/cmd/cli/cmd/database/database.go b/cmd/cli/cmd/database/database.go new file mode 100644 index 0000000..7e54ba0 --- /dev/null +++ b/cmd/cli/cmd/database/database.go @@ -0,0 +1,17 @@ +package geodeticdatabase + +import ( + "github.com/spf13/cobra" + + geodetic "github.com/datumforge/geodetic/cmd/cli/cmd" +) + +// databaseCmd represents the base database command when called without any subcommands +var databaseCmd = &cobra.Command{ + Use: "database", + Short: "The subcommands for working with geodetic databases", +} + +func init() { + geodetic.RootCmd.AddCommand(databaseCmd) +} diff --git a/cmd/cli/cmd/database/database_create.go b/cmd/cli/cmd/database/database_create.go new file mode 100644 index 0000000..463b343 --- /dev/null +++ b/cmd/cli/cmd/database/database_create.go @@ -0,0 +1,78 @@ +package geodeticdatabase + +import ( + "context" + "encoding/json" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + geodetic "github.com/datumforge/geodetic/cmd/cli/cmd" + "github.com/datumforge/geodetic/pkg/enums" + "github.com/datumforge/geodetic/pkg/geodeticclient" +) + +var databaseCreateCmd = &cobra.Command{ + Use: "create", + Short: "Create a new geodetic database", + RunE: func(cmd *cobra.Command, args []string) error { + return createDatabase(cmd.Context()) + }, +} + +func init() { + databaseCmd.AddCommand(databaseCreateCmd) + + databaseCreateCmd.Flags().StringP("org-id", "o", "", "owning organization id of the database") + geodetic.ViperBindFlag("database.create.orgid", databaseCreateCmd.Flags().Lookup("org-id")) + + databaseCreateCmd.Flags().StringP("provider", "p", "turso", "provider of the database (local, turso)") + geodetic.ViperBindFlag("database.create.provider", databaseCreateCmd.Flags().Lookup("provider")) + + databaseCreateCmd.Flags().StringP("group-id", "g", "", "group id to assign to the database") + geodetic.ViperBindFlag("database.create.groupid",
databaseCreateCmd.Flags().Lookup("group-id")) +} + +func createDatabase(ctx context.Context) error { + cli, err := geodetic.GetGraphClient() + if err != nil { + return err + } + + orgID := viper.GetString("database.create.orgid") + if orgID == "" { + return geodetic.NewRequiredFieldMissingError("organization_id") + } + + provider := viper.GetString("database.create.provider") + if provider == "" { + return geodetic.NewRequiredFieldMissingError("provider") + } + + groupID := viper.GetString("database.create.groupid") + if groupID == "" { + return geodetic.NewRequiredFieldMissingError("group_id") + } + + input := geodeticclient.CreateDatabaseInput{ + OrganizationID: orgID, + Provider: enums.ToDatabaseProvider(provider), + GroupID: groupID, + } + + d, err := cli.Client.CreateDatabase(ctx, input, cli.Interceptor) + if err != nil { + return err + } + + if viper.GetString("output.format") == "json" { + s, err := json.Marshal(d.CreateDatabase.Database) + if err != nil { + return err + } + + return geodetic.JSONPrint(s) + } + + return geodetic.SingleRowTablePrint(d.CreateDatabase.Database) +} diff --git a/cmd/cli/cmd/database/database_delete.go b/cmd/cli/cmd/database/database_delete.go new file mode 100644 index 0000000..8e86fe9 --- /dev/null +++ b/cmd/cli/cmd/database/database_delete.go @@ -0,0 +1,55 @@ +package geodeticdatabase + +import ( + "context" + "encoding/json" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + geodetic "github.com/datumforge/geodetic/cmd/cli/cmd" +) + +var databaseDeleteCmd = &cobra.Command{ + Use: "delete", + Short: "Delete an existing geodetic database", + RunE: func(cmd *cobra.Command, args []string) error { + return deleteDatabase(cmd.Context()) + }, +} + +func init() { + databaseCmd.AddCommand(databaseDeleteCmd) + + databaseDeleteCmd.Flags().StringP("name", "n", "", "database name to delete") + geodetic.ViperBindFlag("database.delete.name", databaseDeleteCmd.Flags().Lookup("name")) +} + +func deleteDatabase(ctx context.Context) error { + // setup geodetic http client + cli, err := geodetic.GetGraphClient() + if err != nil { + return err + } + + dName := viper.GetString("database.delete.name") + if dName == "" { + return geodetic.NewRequiredFieldMissingError("name") + } + + d, err := cli.Client.DeleteDatabase(ctx, dName, cli.Interceptor) + if err != nil { + return err + } + + if viper.GetString("output.format") == "json" { + s, err := json.Marshal(d.DeleteDatabase) + if err != nil { + return err + } + + return geodetic.JSONPrint(s) + } + + return geodetic.SingleRowTablePrint(d.DeleteDatabase) +} diff --git a/cmd/cli/cmd/database/database_get.go b/cmd/cli/cmd/database/database_get.go new file mode 100644 index 0000000..e401f47 --- /dev/null +++ b/cmd/cli/cmd/database/database_get.go @@ -0,0 +1,86 @@ +package geodeticdatabase + +import ( + "context" + "encoding/json" + "log" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + geodetic "github.com/datumforge/geodetic/cmd/cli/cmd" +) + +var databaseGetCmd = &cobra.Command{ + Use: "get", + Short: "get an existing geodetic database", + RunE: func(cmd *cobra.Command, args []string) error { + return getDatabase(cmd.Context()) + }, +} + +func init() { + databaseCmd.AddCommand(databaseGetCmd) + + databaseGetCmd.Flags().StringP("name", "n", "", "database name to query") + geodetic.ViperBindFlag("database.get.name", databaseGetCmd.Flags().Lookup("name")) +} + +func getDatabase(ctx context.Context) error { + // setup geodetic http client + cli, err := geodetic.GetGraphClient() + if err != nil { + return err
+ } + + if cli.Client == nil { + log.Fatal("client is nil") + } + + // filter options + dName := viper.GetString("database.get.name") + + // if a db name is provided, filter on that db, otherwise get all + if dName != "" { + db, err := cli.Client.GetDatabase(ctx, dName, cli.Interceptor) + if err != nil { + return err + } + + if viper.GetString("output.format") == "json" { + s, err := json.Marshal(db.Database) + if err != nil { + return err + } + + return geodetic.JSONPrint(s) + } + + return geodetic.SingleRowTablePrint(db.Database) + } + + dbs, err := cli.Client.GetAllDatabases(ctx, cli.Interceptor) + if err != nil { + return err + } + + s, err := json.Marshal(dbs.Databases) + if err != nil { + return err + } + + // print json output + if viper.GetString("output.format") == "json" { + return geodetic.JSONPrint(s) + } + + // print table output + var resp geodetic.GraphResponse + + err = json.Unmarshal(s, &resp) + if err != nil { + return err + } + + return geodetic.RowsTablePrint(resp) +} diff --git a/cmd/cli/cmd/database/doc.go b/cmd/cli/cmd/database/doc.go new file mode 100644 index 0000000..0fa61d0 --- /dev/null +++ b/cmd/cli/cmd/database/doc.go @@ -0,0 +1,2 @@ +// Package geodeticdatabase is our cobra/viper cli for database endpoints +package geodeticdatabase diff --git a/cmd/cli/cmd/errors.go b/cmd/cli/cmd/errors.go new file mode 100644 index 0000000..0426283 --- /dev/null +++ b/cmd/cli/cmd/errors.go @@ -0,0 +1,23 @@ +package datum + +import ( + "fmt" +) + +// RequiredFieldMissingError is returned when a field is required but not provided +type RequiredFieldMissingError struct { + // Field contains the required field that was missing from the input + Field string +} + +// Error returns the RequiredFieldMissingError in string format +func (e *RequiredFieldMissingError) Error() string { + return fmt.Sprintf("%s is required", e.Field) +} + +// NewRequiredFieldMissingError returns an error for a missing required field +func NewRequiredFieldMissingError(f string) *RequiredFieldMissingError { + return &RequiredFieldMissingError{ + Field: f, + } +} diff --git a/cmd/cli/cmd/group/doc.go b/cmd/cli/cmd/group/doc.go new file mode 100644 index 0000000..f67c3b8 --- /dev/null +++ b/cmd/cli/cmd/group/doc.go @@ -0,0 +1,2 @@ +// Package geodeticgroup is our cobra/viper cli for group endpoints +package geodeticgroup diff --git a/cmd/cli/cmd/group/group.go b/cmd/cli/cmd/group/group.go new file mode 100644 index 0000000..bcbcada --- /dev/null +++ b/cmd/cli/cmd/group/group.go @@ -0,0 +1,17 @@ +package geodeticgroup + +import ( + "github.com/spf13/cobra" + + geodetic "github.com/datumforge/geodetic/cmd/cli/cmd" +) + +// groupCmd represents the base group command when called without any subcommands +var groupCmd = &cobra.Command{ + Use: "group", + Short: "The subcommands for working with geodetic groups", +} + +func init() { + geodetic.RootCmd.AddCommand(groupCmd) +} diff --git a/cmd/cli/cmd/group/group_create.go b/cmd/cli/cmd/group/group_create.go new file mode 100644 index 0000000..7f0495c --- /dev/null +++ b/cmd/cli/cmd/group/group_create.go @@ -0,0 +1,76 @@ +package geodeticgroup + +import ( + "context" + "encoding/json" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + geodetic "github.com/datumforge/geodetic/cmd/cli/cmd" + "github.com/datumforge/geodetic/pkg/enums" + "github.com/datumforge/geodetic/pkg/geodeticclient" +) + +var groupCreateCmd = &cobra.Command{ + Use: "create", + Short: "Create a new geodetic group", + RunE: func(cmd *cobra.Command, args []string) error { + return
createGroup(cmd.Context()) + }, +} + +func init() { + groupCmd.AddCommand(groupCreateCmd) + + groupCreateCmd.Flags().StringP("name", "n", "", "name of the group") + geodetic.ViperBindFlag("group.create.name", groupCreateCmd.Flags().Lookup("name")) + + groupCreateCmd.Flags().StringP("description", "d", "", "description of the group") + geodetic.ViperBindFlag("group.create.description", groupCreateCmd.Flags().Lookup("description")) + + groupCreateCmd.Flags().StringP("region", "r", "", "region of the group (AMER, EMEA, APAC)") + geodetic.ViperBindFlag("group.create.region", groupCreateCmd.Flags().Lookup("region")) + + groupCreateCmd.Flags().StringP("primary-location", "l", "", "primary location of the group") + geodetic.ViperBindFlag("group.create.location", groupCreateCmd.Flags().Lookup("primary-location")) +} + +func createGroup(ctx context.Context) error { + cli, err := geodetic.GetGraphClient() + if err != nil { + return err + } + + name := viper.GetString("group.create.name") + if name == "" { + return geodetic.NewRequiredFieldMissingError("name") + } + + description := viper.GetString("group.create.description") + location := viper.GetString("group.create.location") + region := viper.GetString("group.create.region") + + input := geodeticclient.CreateGroupInput{ + Name: name, + Description: &description, + PrimaryLocation: location, + Region: enums.ToRegion(region), + } + + g, err := cli.Client.CreateGroup(ctx, input, cli.Interceptor) + if err != nil { + return err + } + + if viper.GetString("output.format") == "json" { + s, err := json.Marshal(g.CreateGroup.Group) + if err != nil { + return err + } + + return geodetic.JSONPrint(s) + } + + return geodetic.SingleRowTablePrint(g.CreateGroup.Group) +} diff --git a/cmd/cli/cmd/group/group_delete.go b/cmd/cli/cmd/group/group_delete.go new file mode 100644 index 0000000..771888b --- /dev/null +++ b/cmd/cli/cmd/group/group_delete.go @@ -0,0 +1,55 @@ +package geodeticgroup + +import ( + "context" + "encoding/json" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + geodetic "github.com/datumforge/geodetic/cmd/cli/cmd" +) + +var groupDeleteCmd = &cobra.Command{ + Use: "delete", + Short: "Delete an existing geodetic group", + RunE: func(cmd *cobra.Command, args []string) error { + return deleteGroup(cmd.Context()) + }, +} + +func init() { + groupCmd.AddCommand(groupDeleteCmd) + + groupDeleteCmd.Flags().StringP("name", "n", "", "group name to delete") + geodetic.ViperBindFlag("group.delete.name", groupDeleteCmd.Flags().Lookup("name")) +} + +func deleteGroup(ctx context.Context) error { + // setup geodetic http client + cli, err := geodetic.GetGraphClient() + if err != nil { + return err + } + + gName := viper.GetString("group.delete.name") + if gName == "" { + return geodetic.NewRequiredFieldMissingError("name") + } + + g, err := cli.Client.DeleteGroup(ctx, gName, cli.Interceptor) + if err != nil { + return err + } + + if viper.GetString("output.format") == "json" { + s, err := json.Marshal(g.DeleteGroup) + if err != nil { + return err + } + + return geodetic.JSONPrint(s) + } + + return geodetic.SingleRowTablePrint(g.DeleteGroup) +} diff --git a/cmd/cli/cmd/group/group_get.go b/cmd/cli/cmd/group/group_get.go new file mode 100644 index 0000000..d787ba9 --- /dev/null +++ b/cmd/cli/cmd/group/group_get.go @@ -0,0 +1,86 @@ +package geodeticgroup + +import ( + "context" + "encoding/json" + "log" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + geodetic "github.com/datumforge/geodetic/cmd/cli/cmd" +) + +var groupGetCmd = &cobra.Command{ 
+ Use: "get", + Short: "get an existing geodetic group", + RunE: func(cmd *cobra.Command, args []string) error { + return getGroup(cmd.Context()) + }, +} + +func init() { + groupCmd.AddCommand(groupGetCmd) + + groupGetCmd.Flags().StringP("name", "n", "", "group name to query") + geodetic.ViperBindFlag("group.get.name", groupGetCmd.Flags().Lookup("name")) +} + +func getGroup(ctx context.Context) error { + // setup geodetic http client + cli, err := geodetic.GetGraphClient() + if err != nil { + return err + } + + if cli.Client == nil { + log.Fatal("client is nil") + } + + // filter options + gName := viper.GetString("group.get.name") + + // if an group name is provided, filter on that group, otherwise get all + if gName != "" { + group, err := cli.Client.GetGroup(ctx, gName, cli.Interceptor) + if err != nil { + return err + } + + if viper.GetString("output.format") == "json" { + s, err := json.Marshal(group.Group) + if err != nil { + return err + } + + return geodetic.JSONPrint(s) + } + + return geodetic.SingleRowTablePrint(group.Group) + } + + groups, err := cli.Client.GetAllGroups(ctx, cli.Interceptor) + if err != nil { + return err + } + + s, err := json.Marshal(groups.Groups) + if err != nil { + return err + } + + // print json output + if viper.GetString("output.format") == "json" { + return geodetic.JSONPrint(s) + } + + // print table output + var resp geodetic.GraphResponse + + err = json.Unmarshal(s, &resp) + if err != nil { + return err + } + + return geodetic.RowsTablePrint(resp) +} diff --git a/cmd/cli/cmd/root.go b/cmd/cli/cmd/root.go new file mode 100644 index 0000000..068863c --- /dev/null +++ b/cmd/cli/cmd/root.go @@ -0,0 +1,312 @@ +// Package datum is our cobra/viper cli implementation +package datum + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "strings" + "text/tabwriter" + + "github.com/TylerBrock/colorjson" + "github.com/Yamashou/gqlgenc/clientv2" + "github.com/datumforge/datum/pkg/utils/cli/rows" + homedir "github.com/mitchellh/go-homedir" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/spf13/viper" + "go.uber.org/zap" + + "github.com/datumforge/geodetic/pkg/geodeticclient" +) + +const ( + appName = "geodetic" + defaultRootHost = "http://localhost:1337/" + graphEndpoint = "query" +) + +var ( + cfgFile string + Logger *zap.SugaredLogger +) + +var ( + // RootHost contains the root url for the Datum API + RootHost string + // GraphAPIHost contains the url for the Datum graph api + GraphAPIHost string +) + +type CLI struct { + Client geodeticclient.GeodeticClient + Interceptor clientv2.RequestInterceptor + AccessToken string +} + +// RootCmd represents the base command when called without any subcommands +var RootCmd = &cobra.Command{ + Use: appName, + Short: fmt.Sprintf("a %s cli", appName), +} + +// Execute adds all child commands to the root command and sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. 
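+// +// As a sketch of the wiring, mirroring cmd/cli/main.go: each subcommand package registers itself against RootCmd from its init function, so the entrypoint only needs blank imports plus a call to Execute: +// +//	import ( +//		geodetic "github.com/datumforge/geodetic/cmd/cli/cmd" +// +//		_ "github.com/datumforge/geodetic/cmd/cli/cmd/database" +//		_ "github.com/datumforge/geodetic/cmd/cli/cmd/group" +//		_ "github.com/datumforge/geodetic/cmd/cli/cmd/version" +//	) +// +//	func main() { +//		geodetic.Execute() +//	}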
+func Execute() { + cobra.CheckErr(RootCmd.Execute()) +} + +func init() { + cobra.OnInitialize(initConfig) + + RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/."+appName+".yaml)") + ViperBindFlag("config", RootCmd.PersistentFlags().Lookup("config")) + + RootCmd.PersistentFlags().StringVar(&RootHost, "host", defaultRootHost, "api host url") + ViperBindFlag(appName+".host", RootCmd.PersistentFlags().Lookup("host")) + + RootCmd.PersistentFlags().StringP("format", "f", "table", "output format (json, table)") + ViperBindFlag("output.format", RootCmd.PersistentFlags().Lookup("format")) + + // Logging flags + RootCmd.PersistentFlags().Bool("debug", false, "enable debug logging") + ViperBindFlag("logging.debug", RootCmd.PersistentFlags().Lookup("debug")) + + RootCmd.PersistentFlags().Bool("pretty", false, "enable pretty (human readable) logging output") + ViperBindFlag("logging.pretty", RootCmd.PersistentFlags().Lookup("pretty")) +} + +// initConfig reads in config file and ENV variables if set. +func initConfig() { + if cfgFile != "" { + // Use config file from the flag. + viper.SetConfigFile(cfgFile) + } else { + // Find home directory. + home, err := homedir.Dir() + cobra.CheckErr(err) + + // Search config in home directory with name ".appName" (without extension). + viper.AddConfigPath(home) + viper.SetConfigName("." + appName) + } + + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")) + viper.SetEnvPrefix(appName) + viper.AutomaticEnv() // read in environment variables that match + + err := viper.ReadInConfig() + + GraphAPIHost = fmt.Sprintf("%s%s", RootHost, graphEndpoint) + + setupLogging() + + if err == nil { + Logger.Infow("using config file", "file", viper.ConfigFileUsed()) + } +} + +func setupLogging() { + cfg := zap.NewProductionConfig() + if viper.GetBool("logging.pretty") { + cfg = zap.NewDevelopmentConfig() + } + + if viper.GetBool("logging.debug") { + cfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel) + } else { + cfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel) + } + + l, err := cfg.Build() + if err != nil { + panic(err) + } + + Logger = l.Sugar().With("app", appName) + defer Logger.Sync() //nolint:errcheck +} + +// ViperBindFlag provides a wrapper around the viper bindings that panics if an error occurs +func ViperBindFlag(name string, flag *pflag.Flag) { + err := viper.BindPFlag(name, flag) + if err != nil { + panic(err) + } +} + +func createClient(baseURL string) (*CLI, error) { + cli := CLI{} + + c := geodeticclient.Config{ + BaseURL: baseURL, + Debug: viper.GetBool("logging.debug"), + } + + i := geodeticclient.WithEmptyInterceptor() + interceptors := []clientv2.RequestInterceptor{i} + + if viper.GetBool("logging.debug") { + interceptors = append(interceptors, geodeticclient.WithLoggingInterceptor()) + } + + cli.Client = c.NewClientWithInterceptors(interceptors) + cli.Interceptor = i + + // new client with params + return &cli, nil +} + +func GetGraphClient() (*CLI, error) { + return createClient(GraphAPIHost) +} + +func JSONPrint(s []byte) error { + var obj map[string]interface{} + + err := json.Unmarshal(s, &obj) + if err != nil { + return err + } + + f := colorjson.NewFormatter() + f.Indent = 2 + + o, err := f.Marshal(obj) + if err != nil { + return err + } + + fmt.Println(string(o)) + + return nil +} + +// TablePrint prints a table to the console +func TablePrint(header []string, data [][]string) error { + w := rows.NewTabRowWriter(tabwriter.NewWriter(os.Stdout, 1, 0, 4, ' ', 0)) //nolint:gomnd + defer 
w.(*rows.TabRowWriter).Flush() + + if err := w.Write(header); err != nil { + return err + } + + for _, r := range data { + if err := w.Write(r); err != nil { + return err + } + } + + return nil +} + +// GetHeaders returns the name of each field in a struct, or the keys when given a map +func GetHeaders(s interface{}, prefix string) []string { + headers := []string{} + val := reflect.Indirect(reflect.ValueOf(s)) + + // ensure we have a struct otherwise this will panic + if val.Kind() == reflect.Struct { + for i := range val.NumField() { //nolint:typecheck // go 1.22+ allows this, linter is wrong + if val.Type().Field(i).Type.Kind() == reflect.Struct { + continue + } + + headers = append(headers, fmt.Sprintf("%s%s", prefix, val.Type().Field(i).Name)) + } + } else { + // otherwise, if the value is a map, use its keys as the headers + for k := range val.Interface().(map[string]interface{}) { + headers = append(headers, fmt.Sprintf("%s%s", prefix, k)) + } + } + + return headers +} + +// GetFields returns the value of each field in a struct +func GetFields(i interface{}) (res []string) { + v := reflect.ValueOf(i) + + // ensure we have a struct otherwise this will panic + if v.Kind() == reflect.Struct { + for j := range v.NumField() { //nolint:typecheck // go 1.22+ allows this, linter is wrong + t := v.Field(j).Type() + if t.Kind() == reflect.Struct { + continue + } + + var val string + + switch t.Kind() { + case reflect.Ptr: + val = v.Field(j).Elem().String() + case reflect.Slice: + val = fmt.Sprintf("%v", v.Field(j).Interface()) + default: + val = v.Field(j).String() + } + + res = append(res, val) + } + } + + return +} + +// GraphResponse is the response from the graph api containing a list of edges +type GraphResponse struct { + Edges []Edge `json:"edges"` +} + +// Edge is a single edge in the graph response +type Edge struct { + Node interface{} `json:"node"` +} + +// RowsTablePrint prints a table to the console with multiple rows using a map[string]interface{} as the row data +func RowsTablePrint(resp GraphResponse) error { + // check if there are any edges, otherwise we have nothing to print + if len(resp.Edges) > 0 { + rows := resp.Edges + + data := [][]string{} + + headers := GetHeaders(rows[0].Node, "") + + // get the field values using the header names as the key to ensure the order is correct + for _, r := range rows { + rowMap := r.Node.(map[string]interface{}) + row := []string{} + + for _, h := range headers { + row = append(row, rowMap[h].(string)) + } + + data = append(data, row) + } + + // print ze data + return TablePrint(headers, data) + } + + return nil +} + +// SingleRowTablePrint prints a single row table to the console +func SingleRowTablePrint(r interface{}) error { + // get the headers for the table for each struct + header := GetHeaders(r, "") + + data := [][]string{} + + // get the field values for each struct + fields := GetFields(r) + + // append the fields to the data slice + data = append(data, fields) + + // print ze data + return TablePrint(header, data) +} diff --git a/cmd/cli/cmd/version/doc.go b/cmd/cli/cmd/version/doc.go new file mode 100644 index 0000000..9fee35a --- /dev/null +++ b/cmd/cli/cmd/version/doc.go @@ -0,0 +1,2 @@ +// Package version contains the version information for the CLI +package version diff --git a/cmd/cli/cmd/version/version.go b/cmd/cli/cmd/version/version.go new file mode 100644 index 0000000..2831bda --- /dev/null +++ b/cmd/cli/cmd/version/version.go @@ -0,0 +1,25 @@ +package version + +import ( + "github.com/spf13/cobra" + + "github.com/datumforge/datum/pkg/utils/cli/useragent" + +
geodetic "github.com/datumforge/geodetic/cmd/cli/cmd" + "github.com/datumforge/geodetic/internal/constants" +) + +// VersionCmd is the version command +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print geodetic CLI version", + Long: `The version command prints the version of the geodetic CLI`, + Run: func(cmd *cobra.Command, _ []string) { + cmd.Println(constants.VerboseCLIVersion) + cmd.Printf("User Agent: %s\n", useragent.GetUserAgent()) + }, +} + +func init() { + geodetic.RootCmd.AddCommand(versionCmd) +} diff --git a/cmd/cli/main.go b/cmd/cli/main.go new file mode 100644 index 0000000..af527d3 --- /dev/null +++ b/cmd/cli/main.go @@ -0,0 +1,15 @@ +package main + +import ( + geodetic "github.com/datumforge/geodetic/cmd/cli/cmd" + + // since the cmds are no longer part of the same package + // they must all be imported in main + _ "github.com/datumforge/geodetic/cmd/cli/cmd/database" + _ "github.com/datumforge/geodetic/cmd/cli/cmd/group" + _ "github.com/datumforge/geodetic/cmd/cli/cmd/version" +) + +func main() { + geodetic.Execute() +} diff --git a/cmd/doc.go b/cmd/doc.go new file mode 100644 index 0000000..227ac2e --- /dev/null +++ b/cmd/doc.go @@ -0,0 +1,2 @@ +// Package cmd is our cobra/viper cli implementation +package cmd diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 0000000..73e4550 --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,78 @@ +package cmd + +import ( + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +const appName = "geodetic" + +var ( + logger *zap.SugaredLogger +) + +// rootCmd represents the base command when called without any subcommands +var rootCmd = &cobra.Command{ + Use: appName, + Short: "A server for scheduling geographically distributed databases", +} + +// Execute adds all child commands to the root command and sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. 
+func Execute() { + cobra.CheckErr(rootCmd.Execute()) +} + +func init() { + cobra.OnInitialize(initConfig) + + rootCmd.PersistentFlags().Bool("pretty", false, "enable pretty (human readable) logging output") + viperBindFlag("pretty", rootCmd.PersistentFlags().Lookup("pretty")) + + rootCmd.PersistentFlags().Bool("debug", false, "debug logging output") + viperBindFlag("debug", rootCmd.PersistentFlags().Lookup("debug")) +} + +// initConfig reads in flags set for server startup +// all other configuration is done by the server with koanf +// refer to the README.md for more information +func initConfig() { + err := viper.ReadInConfig() + + logger = newLogger() + + if err == nil { + logger.Infow("using config file", "file", viper.ConfigFileUsed()) + } +} + +// viperBindFlag provides a wrapper around the viper bindings that panics if an error occurs +func viperBindFlag(name string, flag *pflag.Flag) { + err := viper.BindPFlag(name, flag) + if err != nil { + panic(err) + } +} + +// newLogger creates a new zap logger with the appropriate configuration based on the viper settings for pretty and debug +func newLogger() *zap.SugaredLogger { + cfg := zap.NewProductionConfig() + if viper.GetBool("pretty") { + cfg = zap.NewDevelopmentConfig() + } + + if viper.GetBool("debug") { + cfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel) + } else { + cfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel) + } + + logger, err := cfg.Build() + if err != nil { + panic(err) + } + + return logger.Sugar() +} diff --git a/cmd/serve.go b/cmd/serve.go new file mode 100644 index 0000000..05870b4 --- /dev/null +++ b/cmd/serve.go @@ -0,0 +1,98 @@ +package cmd + +import ( + "context" + + "github.com/datumforge/datum/pkg/otelx" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "go.uber.org/zap" + + "github.com/datumforge/datum/pkg/cache" + "github.com/datumforge/go-turso" + + ent "github.com/datumforge/geodetic/internal/ent/generated" + "github.com/datumforge/geodetic/internal/entdb" + "github.com/datumforge/geodetic/internal/httpserve/config" + "github.com/datumforge/geodetic/internal/httpserve/server" + "github.com/datumforge/geodetic/internal/httpserve/serveropts" +) + +var serveCmd = &cobra.Command{ + Use: "serve", + Short: "start the server", + RunE: func(cmd *cobra.Command, args []string) error { + return serve(cmd.Context()) + }, +} + +func init() { + rootCmd.AddCommand(serveCmd) + + serveCmd.PersistentFlags().String("config", "./config/.config.yaml", "config file location") + viperBindFlag("config", serveCmd.PersistentFlags().Lookup("config")) +} + +func serve(ctx context.Context) error { + // setup db connection for server + var ( + err error + ) + + serverOpts := []serveropts.ServerOption{} + serverOpts = append(serverOpts, + serveropts.WithConfigProvider(&config.ConfigProviderWithRefresh{}), + serveropts.WithLogger(logger), + serveropts.WithHTTPS(), + serveropts.WithMiddleware(), + ) + + so := serveropts.NewServerOptions(serverOpts, viper.GetString("config")) + + err = otelx.NewTracer(so.Config.Settings.Tracer, appName, logger) + if err != nil { + logger.Fatalw("failed to initialize tracer", "error", err) + } + + tursoClient, err := turso.NewClient(so.Config.Settings.Turso) + if err != nil { + logger.Fatalw("failed to initialize turso client", "error", err) + } + + // create ent dependency injection + entOpts := []ent.Option{ent.Logger(*logger), ent.Turso(tursoClient)} + + // Setup DB connection + entdbClient, dbConfig, err := entdb.NewMultiDriverDBClient(ctx, so.Config.Settings.DB, logger, entOpts) + if err != nil { + 
diff --git a/cmd/serve.go b/cmd/serve.go
new file mode 100644
index 0000000..05870b4
--- /dev/null
+++ b/cmd/serve.go
@@ -0,0 +1,98 @@
+package cmd
+
+import (
+	"context"
+
+	"github.com/datumforge/datum/pkg/otelx"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+	"go.uber.org/zap"
+
+	"github.com/datumforge/datum/pkg/cache"
+	"github.com/datumforge/go-turso"
+
+	ent "github.com/datumforge/geodetic/internal/ent/generated"
+	"github.com/datumforge/geodetic/internal/entdb"
+	"github.com/datumforge/geodetic/internal/httpserve/config"
+	"github.com/datumforge/geodetic/internal/httpserve/server"
+	"github.com/datumforge/geodetic/internal/httpserve/serveropts"
+)
+
+var serveCmd = &cobra.Command{
+	Use:   "serve",
+	Short: "start the server",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return serve(cmd.Context())
+	},
+}
+
+func init() {
+	rootCmd.AddCommand(serveCmd)
+
+	serveCmd.PersistentFlags().String("config", "./config/.config.yaml", "config file location")
+	viperBindFlag("config", serveCmd.PersistentFlags().Lookup("config"))
+}
+
+func serve(ctx context.Context) error {
+	// setup db connection for server
+	var (
+		err error
+	)
+
+	serverOpts := []serveropts.ServerOption{}
+	serverOpts = append(serverOpts,
+		serveropts.WithConfigProvider(&config.ConfigProviderWithRefresh{}),
+		serveropts.WithLogger(logger),
+		serveropts.WithHTTPS(),
+		serveropts.WithMiddleware(),
+	)
+
+	so := serveropts.NewServerOptions(serverOpts, viper.GetString("config"))
+
+	err = otelx.NewTracer(so.Config.Settings.Tracer, appName, logger)
+	if err != nil {
+		logger.Fatalw("failed to initialize tracer", "error", err)
+	}
+
+	tursoClient, err := turso.NewClient(so.Config.Settings.Turso)
+	if err != nil {
+		logger.Fatalw("failed to initialize turso client", "error", err)
+	}
+
+	// create ent dependency injection
+	entOpts := []ent.Option{ent.Logger(*logger), ent.Turso(tursoClient)}
+
+	// Setup DB connection
+	entdbClient, dbConfig, err := entdb.NewMultiDriverDBClient(ctx, so.Config.Settings.DB, logger, entOpts)
+	if err != nil {
+		return err
+	}
+
+	defer entdbClient.Close()
+
+	// Setup Redis connection
+	redisClient := cache.New(so.Config.Settings.Redis)
+	defer redisClient.Close()
+
+	// Add Driver to the Handlers Config
+	so.Config.Handler.DBClient = entdbClient
+
+	// Add redis client to Handlers Config
+	so.Config.Handler.RedisClient = redisClient
+
+	// add ready checks
+	so.AddServerOptions(
+		serveropts.WithReadyChecks(dbConfig, redisClient),
+	)
+
+	srv := server.NewServer(so.Config, so.Config.Logger)
+
+	// Setup Graph API Handlers
+	so.AddServerOptions(serveropts.WithGraphRoute(srv, entdbClient))
+
+	if err := srv.StartEchoServer(ctx); err != nil {
+		logger.Errorw("failed to run server", zap.Error(err))
+	}
+
+	return nil
+}
diff --git a/config/.env.example b/config/.env.example
new file mode 100644
index 0000000..f34ec31
--- /dev/null
+++ b/config/.env.example
@@ -0,0 +1,77 @@
+GEODETIC_REFRESH_INTERVAL="10m"
+GEODETIC_SERVER_DEBUG="false"
+GEODETIC_SERVER_DEV="false"
+GEODETIC_SERVER_LISTEN=":1337"
+GEODETIC_SERVER_SHUTDOWN_GRACE_PERIOD="10s"
+GEODETIC_SERVER_READ_TIMEOUT="15s"
+GEODETIC_SERVER_WRITE_TIMEOUT="15s"
+GEODETIC_SERVER_IDLE_TIMEOUT="30s"
+GEODETIC_SERVER_READ_HEADER_TIMEOUT="2s"
+GEODETIC_SERVER_TLS_ENABLED="false"
+GEODETIC_SERVER_TLS_CERT_FILE="server.crt"
+GEODETIC_SERVER_TLS_CERT_KEY="server.key"
+GEODETIC_SERVER_TLS_AUTO_CERT="false"
+GEODETIC_SERVER_CORS_ENABLED="true"
+GEODETIC_SERVER_CORS_PREFIXES=""
+GEODETIC_SERVER_CORS_ALLOWORIGINS=""
+GEODETIC_SERVER_CORS_COOKIEINSECURE=""
+GEODETIC_SERVER_SECURE_ENABLED="true"
+GEODETIC_SERVER_SECURE_XSSPROTECTION="1; mode=block"
+GEODETIC_SERVER_SECURE_CONTENTTYPENOSNIFF="nosniff"
+GEODETIC_SERVER_SECURE_XFRAMEOPTIONS="SAMEORIGIN"
+GEODETIC_SERVER_SECURE_HSTSPRELOADENABLED="false"
+GEODETIC_SERVER_SECURE_HSTSMAXAGE="31536000"
+GEODETIC_SERVER_SECURE_CONTENTSECURITYPOLICY="default-src 'self'"
+GEODETIC_SERVER_SECURE_REFERRERPOLICY="same-origin"
+GEODETIC_SERVER_SECURE_CSPREPORTONLY="false"
+GEODETIC_SERVER_REDIRECTS_ENABLED="true"
+GEODETIC_SERVER_REDIRECTS_REDIRECTS=""
+GEODETIC_SERVER_REDIRECTS_CODE=""
+GEODETIC_SERVER_CACHECONTROL_ENABLED="true"
+GEODETIC_SERVER_CACHECONTROL_NOCACHEHEADERS=""
+GEODETIC_SERVER_CACHECONTROL_ETAGHEADERS=""
+GEODETIC_SERVER_MIME_ENABLED="true"
+GEODETIC_SERVER_MIME_MIMETYPESFILE=""
+GEODETIC_SERVER_MIME_DEFAULTCONTENTTYPE="application/data"
+GEODETIC_DB_DEBUG="false"
+GEODETIC_DB_DATABASENAME="datum"
+GEODETIC_DB_DRIVERNAME="libsql"
+GEODETIC_DB_MULTIWRITE="false"
+GEODETIC_DB_PRIMARYDBSOURCE="file:datum.db"
+GEODETIC_DB_SECONDARYDBSOURCE="file:backup.db"
+GEODETIC_DB_CACHETTL="1s"
+GEODETIC_DB_RUNMIGRATIONS="true"
+GEODETIC_DB_MIGRATIONPROVIDER="atlas"
+GEODETIC_TURSO_TOKEN=""
+GEODETIC_TURSO_BASEURL="https://api.turso.tech"
+GEODETIC_TURSO_ORGNAME=""
+GEODETIC_REDIS_ENABLED="true"
+GEODETIC_REDIS_ADDRESS="localhost:6379"
+GEODETIC_REDIS_NAME="datum"
+GEODETIC_REDIS_USERNAME=""
+GEODETIC_REDIS_PASSWORD=""
+GEODETIC_REDIS_DB="0"
+GEODETIC_REDIS_DIALTIMEOUT="5s"
+GEODETIC_REDIS_READTIMEOUT="0"
+GEODETIC_REDIS_WRITETIMEOUT="0"
+GEODETIC_REDIS_MAXRETRIES="3"
+GEODETIC_REDIS_MINIDLECONNS="0"
+GEODETIC_REDIS_MAXIDLECONNS="0"
+GEODETIC_REDIS_MAXACTIVECONNS="0"
+GEODETIC_TRACER_ENABLED="false"
+GEODETIC_TRACER_PROVIDER="stdout"
+GEODETIC_TRACER_ENVIRONMENT="development"
+GEODETIC_TRACER_STDOUT_PRETTY="true"
+GEODETIC_TRACER_STDOUT_DISABLETIMESTAMP="false"
+GEODETIC_TRACER_OTLP_ENDPOINT="localhost:4317"
+GEODETIC_TRACER_OTLP_INSECURE="true"
+GEODETIC_TRACER_OTLP_CERTIFICATE=""
+GEODETIC_TRACER_OTLP_HEADERS=""
+GEODETIC_TRACER_OTLP_COMPRESSION=""
+GEODETIC_TRACER_OTLP_TIMEOUT="10s"
+GEODETIC_SESSIONS_SIGNINGKEY="my-signing-secret"
+GEODETIC_SESSIONS_ENCRYPTIONKEY="encryptionsecret"
+GEODETIC_RATELIMIT_ENABLED="false"
+GEODETIC_RATELIMIT_LIMIT="10"
+GEODETIC_RATELIMIT_BURST="30"
+GEODETIC_RATELIMIT_EXPIRES="10m"
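Every variable in this file follows the same naming rule: the `GEODETIC_` prefix, then the koanf key path with underscores standing in for dots. A small sketch of the transformation, mirroring the `env.Provider` callback in `config/config.go` further down (the helper name `envToKey` is ours, for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// envToKey mirrors the env.Provider callback in config/config.go:
// strip the GEODETIC_ prefix, lowercase, and replace underscores with dots.
func envToKey(s string) string {
	return strings.ReplaceAll(strings.ToLower(strings.TrimPrefix(s, "GEODETIC_")), "_", ".")
}

func main() {
	fmt.Println(envToKey("GEODETIC_SERVER_DEBUG"))  // server.debug
	fmt.Println(envToKey("GEODETIC_REDIS_ADDRESS")) // redis.address
}
```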
diff --git a/config/config-dev.example.yaml b/config/config-dev.example.yaml
new file mode 100644
index 0000000..97a3443
--- /dev/null
+++ b/config/config-dev.example.yaml
@@ -0,0 +1,20 @@
+# server settings
+server:
+  debug: true
+  dev: true
+
+  # tls settings
+  tls:
+    enabled: false
+
+# db settings
+db:
+  debug: true
+  driverName: libsql
+  primaryDbSource: "file:geodetic.db"
+  multiWrite: false
+
+turso:
+  baseUrl: https://api.turso.tech
+  orgName: "datum"
+  token: ""
diff --git a/config/config.example.yaml b/config/config.example.yaml
new file mode 100644
index 0000000..b016515
--- /dev/null
+++ b/config/config.example.yaml
@@ -0,0 +1,93 @@
+db:
+  cacheTTL: 1000000000
+  databaseName: datum
+  debug: false
+  driverName: libsql
+  migrationProvider: atlas
+  multiWrite: false
+  primaryDbSource: file:datum.db
+  runMigrations: true
+  secondaryDbSource: file:backup.db
+ratelimit:
+  burst: 30
+  enabled: false
+  expires: 600000000000
+  limit: 10
+redis:
+  address: localhost:6379
+  db: 0
+  dialTimeout: 5000000000
+  enabled: true
+  maxActiveConns: 0
+  maxIdleConns: 0
+  maxRetries: 3
+  minIdleConns: 0
+  name: datum
+  password: ""
+  readTimeout: 0
+  username: ""
+  writeTimeout: 0
+refresh_interval: 600000000000
+server:
+  cacheControl:
+    enabled: true
+    etagHeaders: null
+    noCacheHeaders: null
+  cors:
+    allowOrigins: null
+    cookieInsecure: false
+    enabled: true
+    prefixes: null
+  debug: false
+  dev: false
+  idle_timeout: 30000000000
+  listen: :1337
+  mime:
+    defaultContentType: application/data
+    enabled: true
+    mimeTypesFile: ""
+  read_header_timeout: 2000000000
+  read_timeout: 15000000000
+  redirect:
+    code: 0
+    enabled: true
+    redirects: null
+  secure:
+    contentsecuritypolicy: default-src 'self'
+    contenttypenosniff: nosniff
+    cspreportonly: false
+    enabled: true
+    hstsmaxage: 31536000
+    hstspreloadenabled: false
+    referrerpolicy: same-origin
+    xframeoptions: SAMEORIGIN
+    xssprotection: 1; mode=block
+  shutdown_grace_period: 10000000000
+  tls:
+    auto_cert: false
+    cert_file: server.crt
+    cert_key: server.key
+    config: null
+    enabled: false
+  write_timeout: 15000000000
+sessions:
+  encryptionKey: encryptionsecret
+  signingKey: my-signing-secret
+tracer:
+  enabled: false
+  environment: development
+  otlp:
+    certificate: ""
+    compression: ""
+    endpoint: localhost:4317
+    headers: null
+    insecure: true
+    timeout: 10000000000
+  provider: stdout
+  stdout:
+    disableTimestamp: false
+    pretty: true
+turso:
+  baseUrl: https://api.turso.tech
+  orgName: ""
+  token: ""
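The large integers in `config.example.yaml` are `time.Duration` values serialized as nanosecond counts. A quick sketch confirming the arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// the nanosecond counts used in config.example.yaml
	fmt.Println(int64(10 * time.Minute)) // 600000000000 (refresh_interval, ratelimit expires)
	fmt.Println(int64(15 * time.Second)) // 15000000000  (read_timeout, write_timeout)
	fmt.Println(int64(2 * time.Second))  // 2000000000   (read_header_timeout)
}
```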
"github.com/datumforge/datum/pkg/middleware/redirect" + "github.com/datumforge/datum/pkg/middleware/secure" + "github.com/datumforge/datum/pkg/otelx" + "github.com/datumforge/datum/pkg/sessions" +) + +var ( + DefaultConfigFilePath = "./config/.config.yaml" +) + +// Config contains the configuration for the datum server +type Config struct { + // RefreshInterval determines how often to reload the config + RefreshInterval time.Duration `json:"refresh_interval" koanf:"refresh_interval" default:"10m"` + + // Server contains the echo server settings + Server Server `json:"server" koanf:"server"` + + // DB contains the database configuration for the ent client + DB entx.Config `json:"db" koanf:"db"` + + // Turso contains the configuration for the turso client + Turso turso.Config `json:"turso" koanf:"turso"` + + // Redis contains the redis configuration for the key-value store + Redis cache.Config `json:"redis" koanf:"redis"` + + // Tracer contains the tracing config for opentelemetry + Tracer otelx.Config `json:"tracer" koanf:"tracer"` + + // Sessions config for user sessions and cookies + Sessions sessions.Config `json:"sessions" koanf:"sessions"` + + // Ratelimit contains the configuration for the rate limiter + Ratelimit ratelimit.Config `json:"ratelimit" koanf:"ratelimit"` +} + +// Server settings for the echo server +type Server struct { + // Debug enables debug mode for the server + Debug bool `json:"debug" koanf:"debug" default:"false"` + // Dev enables echo's dev mode options + Dev bool `json:"dev" koanf:"dev" default:"false"` + // Listen sets the listen address to serve the echo server on + Listen string `json:"listen" koanf:"listen" jsonschema:"required" default:":1337"` + // ShutdownGracePeriod sets the grace period for in flight requests before shutting down + ShutdownGracePeriod time.Duration `json:"shutdown_grace_period" koanf:"shutdown_grace_period" default:"10s"` + // ReadTimeout sets the maximum duration for reading the entire request including the body + ReadTimeout time.Duration `json:"read_timeout" koanf:"read_timeout" default:"15s"` + // WriteTimeout sets the maximum duration before timing out writes of the response + WriteTimeout time.Duration `json:"write_timeout" koanf:"write_timeout" default:"15s"` + // IdleTimeout sets the maximum amount of time to wait for the next request when keep-alives are enabled + IdleTimeout time.Duration `json:"idle_timeout" koanf:"idle_timeout" default:"30s"` + // ReadHeaderTimeout sets the amount of time allowed to read request headers + ReadHeaderTimeout time.Duration `json:"read_header_timeout" koanf:"read_header_timeout" default:"2s"` + // TLS contains the tls configuration settings + TLS TLS `json:"tls" koanf:"tls"` + // CORS contains settings to allow cross origin settings and insecure cookies + CORS cors.Config `json:"cors" koanf:"cors"` + // Secure contains settings for the secure middleware + Secure secure.Config `json:"secure" koanf:"secure"` + // Redirect contains settings for the redirect middleware + Redirects redirect.Config `json:"redirect" koanf:"redirects"` + // CacheControl contains settings for the cache control middleware + CacheControl cachecontrol.Config `json:"cacheControl" koanf:"cacheControl"` + // Mime contains settings for the mime middleware + Mime mime.Config `json:"mime" koanf:"mime"` +} + +// CORS settings for the server to allow cross origin requests +type CORS struct { + // AllowOrigins is a list of allowed origin to indicate whether the response can be shared with + // requesting code from the given origin + 
+	AllowOrigins []string `json:"allow_origins" koanf:"allow_origins"`
+	// CookieInsecure allows CSRF cookie to be sent to servers that the browser considers
+	// unsecured. Useful for cases where the connection is secured via VPN rather than
+	// HTTPS directly.
+	CookieInsecure bool `json:"cookie_insecure" koanf:"cookie_insecure"`
+}
+
+// TLS settings for the server for secure connections
+type TLS struct {
+	// Config contains the tls.Config settings
+	Config *tls.Config `json:"config" koanf:"config" jsonschema:"-"`
+	// Enabled turns on TLS settings for the server
+	Enabled bool `json:"enabled" koanf:"enabled" default:"false"`
+	// CertFile location for the TLS server
+	CertFile string `json:"cert_file" koanf:"cert_file" default:"server.crt"`
+	// CertKey file location for the TLS server
+	CertKey string `json:"cert_key" koanf:"cert_key" default:"server.key"`
+	// AutoCert generates the cert with letsencrypt, this does not work on localhost
+	AutoCert bool `json:"auto_cert" koanf:"auto_cert" default:"false"`
+}
+
+// Load is responsible for loading the configuration from a YAML file and environment variables.
+// If `cfgFile` is nil or empty, it falls back to the default configuration file path.
+// Config settings are taken from default values, then from the config file, and finally from
+// environment variables, with later sources overwriting earlier ones.
+func Load(cfgFile *string) (*Config, error) {
+	k := koanf.New(".")
+
+	// fall back to the default path, guarding against a nil pointer
+	cfgPath := DefaultConfigFilePath
+	if cfgFile != nil && *cfgFile != "" {
+		cfgPath = *cfgFile
+	}
+
+	// load defaults
+	conf := &Config{}
+	defaults.SetDefaults(conf)
+
+	// parse yaml config
+	if err := k.Load(file.Provider(cfgPath), yaml.Parser()); err != nil {
+		return nil, err
+	}
+
+	// unmarshal the config
+	if err := k.Unmarshal("", &conf); err != nil {
+		return nil, err
+	}
+
+	// load env vars
+	if err := k.Load(env.Provider("GEODETIC_", ".", func(s string) string {
+		return strings.ReplaceAll(strings.ToLower(
+			strings.TrimPrefix(s, "GEODETIC_")), "_", ".")
+	}), nil); err != nil {
+		return nil, err
+	}
+
+	// unmarshal the env vars
+	if err := k.Unmarshal("", &conf); err != nil {
+		return nil, err
+	}
+
+	return conf, nil
+}
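A minimal sketch of calling `Load` and of the precedence it implements (defaults, then file, then `GEODETIC_` env vars); the file path and the override below are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/datumforge/geodetic/config"
)

func main() {
	// illustrative env override: env vars win over the YAML file and the defaults
	os.Setenv("GEODETIC_SERVER_DEBUG", "true")

	path := "./config/.config.yaml" // same as config.DefaultConfigFilePath
	cfg, err := config.Load(&path)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(cfg.Server.Listen) // ":1337" from the defaults unless the file overrides it
	fmt.Println(cfg.Server.Debug)  // true, from the env override
}
```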
diff --git a/config/configmap.yaml b/config/configmap.yaml
new file mode 100644
index 0000000..2d67d14
--- /dev/null
+++ b/config/configmap.yaml
@@ -0,0 +1,89 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . -}}-config
+  {{ $labels := include "common.tplvalues.merge" (dict "values" ( list .Values.api.commonLabels (include "common.labels.standard" .) ) "context" . ) }}
+  labels: {{- include "common.tplvalues.render" ( dict "value" $labels "context" $) | nindent 4 }}
+  {{- if .Values.api.commonAnnotations }}
+  {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.api.commonAnnotations ) "context" . ) }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+  {{- end }}
+data:
+  GEODETIC_REFRESH_INTERVAL: {{ .Values.geodetic.refresh_interval | default "10m" }}
+  GEODETIC_SERVER_DEBUG: {{ .Values.geodetic.server.debug | default false }}
+  GEODETIC_SERVER_DEV: {{ .Values.geodetic.server.dev | default false }}
+  GEODETIC_SERVER_LISTEN: {{ .Values.geodetic.server.listen | default ":1337" }}
+  GEODETIC_SERVER_SHUTDOWN_GRACE_PERIOD: {{ .Values.geodetic.server.shutdown_grace_period | default "10s" }}
+  GEODETIC_SERVER_READ_TIMEOUT: {{ .Values.geodetic.server.read_timeout | default "15s" }}
+  GEODETIC_SERVER_WRITE_TIMEOUT: {{ .Values.geodetic.server.write_timeout | default "15s" }}
+  GEODETIC_SERVER_IDLE_TIMEOUT: {{ .Values.geodetic.server.idle_timeout | default "30s" }}
+  GEODETIC_SERVER_READ_HEADER_TIMEOUT: {{ .Values.geodetic.server.read_header_timeout | default "2s" }}
+  GEODETIC_SERVER_TLS_ENABLED: {{ .Values.geodetic.server.tls.enabled | default false }}
+  GEODETIC_SERVER_TLS_CERT_FILE: {{ .Values.geodetic.server.tls.cert_file | default "server.crt" }}
+  GEODETIC_SERVER_TLS_CERT_KEY: {{ .Values.geodetic.server.tls.cert_key | default "server.key" }}
+  GEODETIC_SERVER_TLS_AUTO_CERT: {{ .Values.geodetic.server.tls.auto_cert | default false }}
+  GEODETIC_SERVER_CORS_ENABLED: {{ .Values.geodetic.server.cors.enabled | default true }}
+  GEODETIC_SERVER_CORS_PREFIXES: {{ .Values.geodetic.server.cors.prefixes }}
+  GEODETIC_SERVER_CORS_ALLOWORIGINS: {{ .Values.geodetic.server.cors.allowOrigins }}
+  GEODETIC_SERVER_CORS_COOKIEINSECURE: {{ .Values.geodetic.server.cors.cookieInsecure }}
+  GEODETIC_SERVER_SECURE_ENABLED: {{ .Values.geodetic.server.secure.enabled | default true }}
+  GEODETIC_SERVER_SECURE_XSSPROTECTION: {{ .Values.geodetic.server.secure.xssprotection | default "1; mode=block" }}
+  GEODETIC_SERVER_SECURE_CONTENTTYPENOSNIFF: {{ .Values.geodetic.server.secure.contenttypenosniff | default "nosniff" }}
+  GEODETIC_SERVER_SECURE_XFRAMEOPTIONS: {{ .Values.geodetic.server.secure.xframeoptions | default "SAMEORIGIN" }}
+  GEODETIC_SERVER_SECURE_HSTSPRELOADENABLED: {{ .Values.geodetic.server.secure.hstspreloadenabled | default false }}
+  GEODETIC_SERVER_SECURE_HSTSMAXAGE: {{ .Values.geodetic.server.secure.hstsmaxage | default 31536000 }}
+  GEODETIC_SERVER_SECURE_CONTENTSECURITYPOLICY: {{ .Values.geodetic.server.secure.contentsecuritypolicy | default "default-src 'self'" }}
+  GEODETIC_SERVER_SECURE_REFERRERPOLICY: {{ .Values.geodetic.server.secure.referrerpolicy | default "same-origin" }}
+  GEODETIC_SERVER_SECURE_CSPREPORTONLY: {{ .Values.geodetic.server.secure.cspreportonly | default false }}
+  GEODETIC_SERVER_REDIRECTS_ENABLED: {{ .Values.geodetic.server.redirects.enabled | default true }}
+  GEODETIC_SERVER_REDIRECTS_REDIRECTS: {{ .Values.geodetic.server.redirects.redirects }}
+  GEODETIC_SERVER_REDIRECTS_CODE: {{ .Values.geodetic.server.redirects.code }}
+  GEODETIC_SERVER_CACHECONTROL_ENABLED: {{ .Values.geodetic.server.cachecontrol.enabled | default true }}
+  GEODETIC_SERVER_CACHECONTROL_NOCACHEHEADERS: {{ .Values.geodetic.server.cachecontrol.noCacheHeaders }}
+  GEODETIC_SERVER_CACHECONTROL_ETAGHEADERS: {{ .Values.geodetic.server.cachecontrol.etagHeaders }}
+  GEODETIC_SERVER_MIME_ENABLED: {{ .Values.geodetic.server.mime.enabled | default true }}
+  GEODETIC_SERVER_MIME_MIMETYPESFILE: {{ .Values.geodetic.server.mime.mimeTypesFile }}
+  GEODETIC_SERVER_MIME_DEFAULTCONTENTTYPE: {{ .Values.geodetic.server.mime.defaultContentType | default "application/data" }}
+  GEODETIC_DB_DEBUG: {{ .Values.geodetic.db.debug | default false }}
+  GEODETIC_DB_DATABASENAME: {{ .Values.geodetic.db.databaseName | default "datum" }}
+  GEODETIC_DB_DRIVERNAME: {{ .Values.geodetic.db.driverName | default "libsql" }}
+  GEODETIC_DB_MULTIWRITE: {{ .Values.geodetic.db.multiWrite | default false }}
+  GEODETIC_DB_PRIMARYDBSOURCE: {{ .Values.geodetic.db.primaryDbSource | default "file:datum.db" }}
+  GEODETIC_DB_SECONDARYDBSOURCE: {{ .Values.geodetic.db.secondaryDbSource | default "file:backup.db" }}
+  GEODETIC_DB_CACHETTL: {{ .Values.geodetic.db.cacheTTL | default "1s" }}
+  GEODETIC_DB_RUNMIGRATIONS: {{ .Values.geodetic.db.runMigrations | default true }}
+  GEODETIC_DB_MIGRATIONPROVIDER: {{ .Values.geodetic.db.migrationProvider | default "atlas" }}
+  GEODETIC_TURSO_TOKEN: {{ .Values.geodetic.turso.token }}
+  GEODETIC_TURSO_BASEURL: {{ .Values.geodetic.turso.baseUrl | default "https://api.turso.tech" }}
+  GEODETIC_TURSO_ORGNAME: {{ .Values.geodetic.turso.orgName }}
+  GEODETIC_REDIS_ENABLED: {{ .Values.geodetic.redis.enabled | default true }}
+  GEODETIC_REDIS_ADDRESS: {{ .Values.geodetic.redis.address | default "localhost:6379" }}
+  GEODETIC_REDIS_NAME: {{ .Values.geodetic.redis.name | default "datum" }}
+  GEODETIC_REDIS_USERNAME: {{ .Values.geodetic.redis.username }}
+  GEODETIC_REDIS_PASSWORD: {{ .Values.geodetic.redis.password }}
+  GEODETIC_REDIS_DB: {{ .Values.geodetic.redis.db | default 0 }}
+  GEODETIC_REDIS_DIALTIMEOUT: {{ .Values.geodetic.redis.dialTimeout | default "5s" }}
+  GEODETIC_REDIS_READTIMEOUT: {{ .Values.geodetic.redis.readTimeout | default 0 }}
+  GEODETIC_REDIS_WRITETIMEOUT: {{ .Values.geodetic.redis.writeTimeout | default 0 }}
+  GEODETIC_REDIS_MAXRETRIES: {{ .Values.geodetic.redis.maxRetries | default 3 }}
+  GEODETIC_REDIS_MINIDLECONNS: {{ .Values.geodetic.redis.minIdleConns | default 0 }}
+  GEODETIC_REDIS_MAXIDLECONNS: {{ .Values.geodetic.redis.maxIdleConns | default 0 }}
+  GEODETIC_REDIS_MAXACTIVECONNS: {{ .Values.geodetic.redis.maxActiveConns | default 0 }}
+  GEODETIC_TRACER_ENABLED: {{ .Values.geodetic.tracer.enabled | default false }}
+  GEODETIC_TRACER_PROVIDER: {{ .Values.geodetic.tracer.provider | default "stdout" }}
+  GEODETIC_TRACER_ENVIRONMENT: {{ .Values.geodetic.tracer.environment | default "development" }}
+  GEODETIC_TRACER_STDOUT_PRETTY: {{ .Values.geodetic.tracer.stdout.pretty | default true }}
+  GEODETIC_TRACER_STDOUT_DISABLETIMESTAMP: {{ .Values.geodetic.tracer.stdout.disableTimestamp | default false }}
+  GEODETIC_TRACER_OTLP_ENDPOINT: {{ .Values.geodetic.tracer.otlp.endpoint | default "localhost:4317" }}
+  GEODETIC_TRACER_OTLP_INSECURE: {{ .Values.geodetic.tracer.otlp.insecure | default true }}
+  GEODETIC_TRACER_OTLP_CERTIFICATE: {{ .Values.geodetic.tracer.otlp.certificate }}
+  GEODETIC_TRACER_OTLP_HEADERS: {{ .Values.geodetic.tracer.otlp.headers }}
+  GEODETIC_TRACER_OTLP_COMPRESSION: {{ .Values.geodetic.tracer.otlp.compression }}
+  GEODETIC_TRACER_OTLP_TIMEOUT: {{ .Values.geodetic.tracer.otlp.timeout | default "10s" }}
+  GEODETIC_SESSIONS_SIGNINGKEY: {{ .Values.geodetic.sessions.signingKey | default "my-signing-secret" }}
+  GEODETIC_SESSIONS_ENCRYPTIONKEY: {{ .Values.geodetic.sessions.encryptionKey | default "encryptionsecret" }}
+  GEODETIC_RATELIMIT_ENABLED: {{ .Values.geodetic.ratelimit.enabled | default false }}
+  GEODETIC_RATELIMIT_LIMIT: {{ .Values.geodetic.ratelimit.limit | default 10 }}
+  GEODETIC_RATELIMIT_BURST: {{ .Values.geodetic.ratelimit.burst | default 30 }}
+  GEODETIC_RATELIMIT_EXPIRES: {{ .Values.geodetic.ratelimit.expires | default "10m" }}
diff --git a/config/doc.go b/config/doc.go
new file mode 100644
index 0000000..6ccae48
--- /dev/null
+++ b/config/doc.go
@@ -0,0 +1,2 @@
+// Package config holds the configuration settings and loading logic for the geodetic server
+package config
diff --git a/db/README.md b/db/README.md
new file mode 100644
index 0000000..ce15222
--- /dev/null
+++ b/db/README.md
@@ -0,0 +1,65 @@
+# Database Support
+
+## Dependencies
+
+1. [ent](https://entgo.io/) - insane entity mapping tool, definitely not an ORM but kind of an ORM
+1. [atlas](https://atlasgo.io/) - Schema generation and migration
+1. [entx](https://github.com/datumforge/entx) - Wrapper to interact with ent
+
+## Supported Drivers
+
+1. [libsql](https://github.com/tursodatabase/libsql)
+1. [sqlite](https://gitlab.com/cznic/sqlite)
+1. [postgres](https://github.com/lib/pq)
+
+## Local Development
+
+### Config Examples
+
+#### Libsql
+
+1. This will write to a local file `geodetic.db`, already included in `.gitignore`
+
+```yaml
+db:
+  debug: true
+  driverName: libsql
+  primaryDbSource: "file:geodetic.db"
+  runMigrations: true
+```
+
+#### Sqlite
+
+1. This will write to a local file `geodetic.db`, already included in `.gitignore`
+
+```yaml
+db:
+  debug: true
+  driverName: sqlite3
+  primaryDbSource: "geodetic.db"
+  runMigrations: true
+```
+
+#### Postgres
+
+1. Postgres is included in `docker/docker-compose-fga.yml` and the same instance can be used for development. The following connection string should work when using `task docker:all:up`
+
+```yaml
+db:
+  debug: true
+  driverName: postgres
+  primaryDbSource: "postgres://postgres:password@postgres:5432?sslmode=disable"
+  runMigrations: true
+```
+
+#### Turso
+
+1. Replace the url with your turso database url and token
+
+```yaml
+db:
+  debug: true
+  driverName: libsql
+  primaryDbSource: "https://datum-golanglemonade.turso.io?authToken=$TURSO_TOKEN" # set TURSO_TOKEN to value
+  runMigrations: false
+```
diff --git a/db/Taskfile.yml b/db/Taskfile.yml
new file mode 100644
index 0000000..ff559e0
--- /dev/null
+++ b/db/Taskfile.yml
@@ -0,0 +1,45 @@
+version: '3'
+
+tasks:
+  create-and-lint:
+    desc: runs the atlas create and lint commands
+    aliases: [all]
+    cmds:
+      - task: create
+      - task: lint
+
+  create:
+    desc: creates an atlas migration if one is needed based on the ent schema definitions
+    cmds:
+      - |
+        echo "If there is no schema to generate, this will not create a file (hint: name it your branch name if you're not sure) - enter the name of the migration:"
+        read migration;
+        go run create_migrations.go ${migration};
+
+  lint:
+    desc: lints the pushed migration files
+    ignore_error: true
+    cmds:
+      - atlas migrate lint --dev-url "sqlite://file?mode=memory&_fk=1" --dir "file://migrations" -w
+
+  migrate:
+    desc: pushes the generated migration files to atlas cloud
+    cmds:
+      - atlas migrate push geodetic --dev-url "sqlite://dev?mode=memory&_fk=1" --dir "file://migrations"
+
+  resethash:
+    desc: re-sets the checksum created by the atlas package so that a complete migration can be re-created if deleted
+    cmds:
+      - atlas migrate hash --dir="file://migrations"
+      - atlas migrate hash --dir="file://migrations-goose"
+
+  console:
+    desc: launches an interactive terminal to the local geodetic db with some tasty options
+    cmds:
+      - sqlite3 -column -header -box ../geodetic.db
+
+  newschema:
+    desc: generate a new ent schema for geodetic
+    silent: true
+    cmds:
+      - go run -mod=mod entgo.io/ent/cmd/ent new --template ../internal/ent/base/entinit.tmpl --target ../internal/ent/schema {{.CLI_ARGS}}
\ No newline at end of file
diff --git a/db/create_migrations.go b/db/create_migrations.go
new file mode 100644
index 0000000..13a9a90
--- /dev/null
+++ b/db/create_migrations.go
@@ -0,0 +1,71 @@
+//go:build ignore
+
+package main
+
+import (
+	"context"
+	"log"
+	"os"
+
+	// supported ent database drivers
+	_ "github.com/datumforge/entx" // overlay for sqlite
+	_ "github.com/lib/pq"          // postgres driver
"github.com/tursodatabase/libsql-client-go/libsql" // libsql driver + _ "modernc.org/sqlite" // sqlite driver (non-cgo) + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql/schema" + + atlas "ariga.io/atlas/sql/migrate" + "ariga.io/atlas/sql/sqltool" + "github.com/datumforge/geodetic/internal/ent/generated/migrate" +) + +func main() { + ctx := context.Background() + + // Create a local migration directory able to understand Atlas migration file format for replay. + atlasDir, err := atlas.NewLocalDir("migrations") + if err != nil { + log.Fatalf("failed creating atlas migration directory: %v", err) + } + + gooseDir, err := sqltool.NewGooseDir("migrations-goose") + if err != nil { + log.Fatalf("failed creating goose migration directory: %v", err) + } + + // Migrate diff options. + baseOpts := []schema.MigrateOption{ + schema.WithMigrationMode(schema.ModeReplay), // provide migration mode + schema.WithDialect(dialect.SQLite), // Ent dialect to use + schema.WithDropColumn(true), + schema.WithDropIndex(true), + } + + atlasOpts := append(baseOpts, + schema.WithDir(atlasDir), + schema.WithFormatter(atlas.DefaultFormatter), + ) + + if len(os.Args) != 2 { + log.Fatalln("migration name is required. Use: 'go run -mod=mod create_migration.go '") + } + + dbURI, ok := os.LookupEnv("ATLAS_DB_URI") + if !ok { + log.Fatalln("failed to load the ATLAS_DB_URI env var") + } + + // Generate migrations using Atlas support for sqlite (note the Ent dialect option passed above). + if err := migrate.NamedDiff(ctx, dbURI, os.Args[1], atlasOpts...); err != nil { + log.Fatalf("failed generating atlas migration file: %v", err) + } + + // Generate migrations using Goose support for sqlite + gooseOpts := append(baseOpts, schema.WithDir(gooseDir)) + + if err = migrate.NamedDiff(ctx, dbURI, os.Args[1], gooseOpts...); err != nil { + log.Fatalf("failed generating goose migration file: %v", err) + } +} diff --git a/db/migrate.go b/db/migrate.go new file mode 100644 index 0000000..5db6a93 --- /dev/null +++ b/db/migrate.go @@ -0,0 +1,16 @@ +// Package db provides an embedded filesystem containing all the database migrations +package db + +import ( + "embed" +) + +// Migrations contain an embedded filesystem with all the sql migration files +// +//go:embed migrations/*.sql +var Migrations embed.FS + +// GooseMigrations contain an embedded filesystem with all the goose migration files +// +//go:embed migrations-goose/*.sql +var GooseMigrations embed.FS diff --git a/db/migrations-goose/20240414234138_init.sql b/db/migrations-goose/20240414234138_init.sql new file mode 100644 index 0000000..35c168d --- /dev/null +++ b/db/migrations-goose/20240414234138_init.sql @@ -0,0 +1,23 @@ +-- +goose Up +-- create "databases" table +CREATE TABLE `databases` (`id` text NOT NULL, `created_at` datetime NULL, `updated_at` datetime NULL, `created_by` text NULL, `updated_by` text NULL, `deleted_at` datetime NULL, `deleted_by` text NULL, `organization_id` text NOT NULL, `name` text NOT NULL, `geo` text NULL, `dsn` text NOT NULL, `token` text NULL, `status` text NOT NULL DEFAULT ('CREATING'), `provider` text NOT NULL DEFAULT ('LOCAL'), `group_id` text NOT NULL, PRIMARY KEY (`id`), CONSTRAINT `databases_groups_databases` FOREIGN KEY (`group_id`) REFERENCES `groups` (`id`) ON DELETE NO ACTION); +-- create index "database_organization_id" to table: "databases" +CREATE UNIQUE INDEX `database_organization_id` ON `databases` (`organization_id`) WHERE deleted_at is NULL; +-- create index "database_name" to table: "databases" +CREATE UNIQUE INDEX 
diff --git a/db/migrations-goose/20240414234138_init.sql b/db/migrations-goose/20240414234138_init.sql
new file mode 100644
index 0000000..35c168d
--- /dev/null
+++ b/db/migrations-goose/20240414234138_init.sql
@@ -0,0 +1,23 @@
+-- +goose Up
+-- create "databases" table
+CREATE TABLE `databases` (`id` text NOT NULL, `created_at` datetime NULL, `updated_at` datetime NULL, `created_by` text NULL, `updated_by` text NULL, `deleted_at` datetime NULL, `deleted_by` text NULL, `organization_id` text NOT NULL, `name` text NOT NULL, `geo` text NULL, `dsn` text NOT NULL, `token` text NULL, `status` text NOT NULL DEFAULT ('CREATING'), `provider` text NOT NULL DEFAULT ('LOCAL'), `group_id` text NOT NULL, PRIMARY KEY (`id`), CONSTRAINT `databases_groups_databases` FOREIGN KEY (`group_id`) REFERENCES `groups` (`id`) ON DELETE NO ACTION);
+-- create index "database_organization_id" to table: "databases"
+CREATE UNIQUE INDEX `database_organization_id` ON `databases` (`organization_id`) WHERE deleted_at is NULL;
+-- create index "database_name" to table: "databases"
+CREATE UNIQUE INDEX `database_name` ON `databases` (`name`) WHERE deleted_at is NULL;
+-- create "groups" table
+CREATE TABLE `groups` (`id` text NOT NULL, `created_at` datetime NULL, `updated_at` datetime NULL, `created_by` text NULL, `updated_by` text NULL, `deleted_at` datetime NULL, `deleted_by` text NULL, `name` text NOT NULL, `description` text NULL, `primary_location` text NOT NULL, `locations` json NULL, `token` text NULL, `region` text NOT NULL DEFAULT ('AMER'), PRIMARY KEY (`id`));
+-- create index "group_name" to table: "groups"
+CREATE UNIQUE INDEX `group_name` ON `groups` (`name`) WHERE deleted_at is NULL;
+
+-- +goose Down
+-- reverse: create index "group_name" to table: "groups"
+DROP INDEX `group_name`;
+-- reverse: create "groups" table
+DROP TABLE `groups`;
+-- reverse: create index "database_name" to table: "databases"
+DROP INDEX `database_name`;
+-- reverse: create index "database_organization_id" to table: "databases"
+DROP INDEX `database_organization_id`;
+-- reverse: create "databases" table
+DROP TABLE `databases`;
diff --git a/db/migrations-goose/atlas.sum b/db/migrations-goose/atlas.sum
new file mode 100644
index 0000000..e90fe47
--- /dev/null
+++ b/db/migrations-goose/atlas.sum
@@ -0,0 +1,2 @@
+h1:MGGzDOQ+INpZtq+dhzYi6B5dfa8uRKaOCp5Ndw0l1Ag=
+20240414234138_init.sql h1:86tpeK+Jz9Z9DChwB4x4i1cavajTMl9YEM3JfDiJoxg=
diff --git a/db/migrations/.gitkeep b/db/migrations/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/db/migrations/20240414234138_init.sql b/db/migrations/20240414234138_init.sql
new file mode 100644
index 0000000..aa6c0a3
--- /dev/null
+++ b/db/migrations/20240414234138_init.sql
@@ -0,0 +1,10 @@
+-- Create "databases" table
+CREATE TABLE `databases` (`id` text NOT NULL, `created_at` datetime NULL, `updated_at` datetime NULL, `created_by` text NULL, `updated_by` text NULL, `deleted_at` datetime NULL, `deleted_by` text NULL, `organization_id` text NOT NULL, `name` text NOT NULL, `geo` text NULL, `dsn` text NOT NULL, `token` text NULL, `status` text NOT NULL DEFAULT ('CREATING'), `provider` text NOT NULL DEFAULT ('LOCAL'), `group_id` text NOT NULL, PRIMARY KEY (`id`), CONSTRAINT `databases_groups_databases` FOREIGN KEY (`group_id`) REFERENCES `groups` (`id`) ON DELETE NO ACTION);
+-- Create index "database_organization_id" to table: "databases"
+CREATE UNIQUE INDEX `database_organization_id` ON `databases` (`organization_id`) WHERE deleted_at is NULL;
+-- Create index "database_name" to table: "databases"
+CREATE UNIQUE INDEX `database_name` ON `databases` (`name`) WHERE deleted_at is NULL;
+-- Create "groups" table
+CREATE TABLE `groups` (`id` text NOT NULL, `created_at` datetime NULL, `updated_at` datetime NULL, `created_by` text NULL, `updated_by` text NULL, `deleted_at` datetime NULL, `deleted_by` text NULL, `name` text NOT NULL, `description` text NULL, `primary_location` text NOT NULL, `locations` json NULL, `token` text NULL, `region` text NOT NULL DEFAULT ('AMER'), PRIMARY KEY (`id`));
+-- Create index "group_name" to table: "groups"
+CREATE UNIQUE INDEX `group_name` ON `groups` (`name`) WHERE deleted_at is NULL;
diff --git a/db/migrations/atlas.sum b/db/migrations/atlas.sum
new file mode 100644
index 0000000..4ce2bab
--- /dev/null
+++ b/db/migrations/atlas.sum
@@ -0,0 +1,2 @@
+h1:W+LDZz+A04VpsK4uI4t6GBY8HG/s7E7D7iAyF6kK4p4=
+20240414234138_init.sql h1:S3xEc9qS7rxoGFJ36jNnrnPWND29/orLh3OhUhh2YlQ=
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 0000000..c5f96a9
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,19 @@
+FROM golang:1.22 AS builder
+
+WORKDIR /go/src/app
+COPY . .
+
+RUN go mod download
+RUN CGO_ENABLED=1 GOOS=linux go build -o /go/bin/geodetic -a -ldflags '-linkmode external -extldflags "-static"' .
+
+FROM gcr.io/distroless/static:nonroot
+
+# `nonroot` coming from distroless
+USER 65532:65532
+
+# Copy the statically linked binary from the builder stage
+COPY --from=builder /go/bin/geodetic /bin/geodetic
+
+# Run the web service on container startup.
+ENTRYPOINT [ "/bin/geodetic" ]
+CMD ["serve"]
diff --git a/docker/Taskfile.yml b/docker/Taskfile.yml
new file mode 100644
index 0000000..d000be7
--- /dev/null
+++ b/docker/Taskfile.yml
@@ -0,0 +1,79 @@
+version: "3"
+
+tasks:
+  build:
+    dir: ..
+    desc: builds the geodetic docker image
+    cmds:
+      - "docker build -f docker/Dockerfile . -t geodetic:dev"
+
+  geodetic:
+    dir: ..
+    desc: brings up the compose environment for the geodetic server configured with auth
+    deps: [build]
+    cmds:
+      - "docker compose -f ./docker/docker-compose-redis.yml -f ./docker/docker-compose.yml -f ./docker/docker-compose-fga.yml -p geodetic up -d"
+
+  geodetic:down:
+    dir: ..
+    desc: brings the geodetic compose environment down
+    cmds:
+      - "docker compose -p geodetic down"
+
+  all:up:
+    dir: ..
+    desc: brings up the full docker compose development environment including geodetic server, fga, and rover
+    cmds:
+      - task: geodetic
+      - task: rover
+
+  all:down:
+    dir: ..
+    desc: brings down both fga and geodetic server compose environments
+    cmds:
+      - task: geodetic:down
+
+  redis:
+    dir: ..
+    desc: brings up the compose environment for redis
+    cmds:
+      - "docker compose -f ./docker/docker-compose-redis.yml -p redis up -d"
+
+  redis:down:
+    dir: ..
+    desc: brings the redis compose environment down
+    cmds:
+      - "docker compose -p redis down"
+
+  fga:
+    dir: ..
+    desc: brings up the compose environment for openfga development
+    cmds:
+      - "docker compose -f ./docker/docker-compose-fga.yml -p fga up -d"
+
+  fga:down:
+    dir: ..
+    desc: brings the fga compose environment down
+    cmds:
+      - docker compose -p fga down
+
+  fga:open:
+    dir: ..
+    desc: opens the fga playground in a browser
+    cmds:
+      - 'open "http://localhost:3000/playground"'
+
+  fga:up:
+    dir: ..
+    desc: brings the fga compose environment up and opens the fga playground
+    aliases: [fgaup]
+    cmds:
+      - task: fga
+      - task: fga:open
+
+  rover:
+    dir: ..
+    desc: launches an interactive browser to navigate the configured graph schema
+    cmds:
+      - 'open "http://localhost:4000"'
+      - rover dev --skip-update-check --skip-update -u http://localhost:1337/query -s schema.graphql -n geodetic --elv2-license=accept
diff --git a/docker/docker-compose-fga.yml b/docker/docker-compose-fga.yml
new file mode 100644
index 0000000..7626f4a
--- /dev/null
+++ b/docker/docker-compose-fga.yml
@@ -0,0 +1,70 @@
+version: "3.9"
+services:
+  postgres:
+    image: postgres:16
+    container_name: postgres
+    command: postgres -c 'max_connections=100'
+    networks:
+      - default
+    ports:
+      - "5432:5432"
+    environment:
+      - POSTGRES_USER=postgres
+      - POSTGRES_PASSWORD=password
+    healthcheck:
+      test: [ "CMD-SHELL", "pg_isready -U postgres" ]
+      interval: 5s
+      timeout: 5s
+      retries: 5
+
+  migrate:
+    depends_on:
+      postgres:
+        condition: service_healthy
+    image: openfga/openfga:latest
+    container_name: migrate
+    command: migrate
+    environment:
+      - OPENFGA_DATASTORE_ENGINE=postgres
+      - OPENFGA_DATASTORE_URI=postgres://postgres:password@postgres:5432/postgres?sslmode=disable
+    networks:
+      - default
+
+  openfga:
+    depends_on:
+      migrate:
+        condition: service_completed_successfully
+    image: openfga/openfga:latest
+    container_name: openfga
+    environment:
+      - OPENFGA_DATASTORE_ENGINE=postgres
+      - OPENFGA_DATASTORE_URI=postgres://postgres:password@postgres:5432/postgres?sslmode=disable
+      - OPENFGA_LOG_FORMAT=json
+      - OPENFGA_DATASTORE_MAX_OPEN_CONNS=100 #see postgres container
+      - OPENFGA_PLAYGROUND_ENABLED=true
+    command:
+      - run
+      - --experimentals
+      - check-query-cache
+      - --check-query-cache-enabled
+    healthcheck:
+      test:
+        [
+          "CMD",
+          "/usr/local/bin/grpc_health_probe",
+          "-addr=openfga:8081"
+        ]
+      interval: 5s
+      timeout: 30s
+      retries: 3
+    networks:
+      - default
+    ports:
+      # Needed for the http server
+      - "8080:8080"
+      # Needed for the grpc server (if used)
+      - "8081:8081"
+      # Needed for the playground (Do not enable in prod!)
+      - "3000:3000"
+      # Needed for the prometheus metrics
+      - "2112:2112" #prometheus metrics
+ - "3000:3000" + # Needed for the prometheus metrics + - "2112:2112" #prometheus metrics diff --git a/docker/docker-compose-redis.yml b/docker/docker-compose-redis.yml new file mode 100644 index 0000000..8239ca1 --- /dev/null +++ b/docker/docker-compose-redis.yml @@ -0,0 +1,16 @@ +version: "3.9" +services: + + redis: + image: redis:7.2.4-alpine + container_name: redis + restart: always + ports: + - '6379:6379' + command: redis-server --save 20 1 --loglevel warning + volumes: + - 'redis_data:/data' + +volumes: + redis_data: + driver: local diff --git a/docker/docker-compose-tracing.yml b/docker/docker-compose-tracing.yml new file mode 100644 index 0000000..6d3c6da --- /dev/null +++ b/docker/docker-compose-tracing.yml @@ -0,0 +1,31 @@ +version: "3.9" +services: + + jaeger: + image: jaegertracing/all-in-one:1.56 + container_name: jaeger + restart: always + ports: + - "16686:16686" #jaeger UI + + # Collector + otel-collector: + image: "otel/opentelemetry-collector:0.98.0" + restart: always + command: + [ + "--config=/etc/otel-collector-config.yaml", + "${OTELCOL_ARGS}" + ] + volumes: + - ./files/otel-collector-config.yaml:/etc/otel-collector-config.yaml + ports: + - "1888:1888" # pprof extension + - "8888:8888" # Prometheus metrics exposed by the collector + - "8889:8889" # Prometheus exporter metrics + - "13133:13133" # health_check extension + - "4317:4317" # OTLP gRPC receiver + - "4318:4318" # OTLP HTTP receiver + - "55679:55679" # zpages extension + depends_on: + - jaeger diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml new file mode 100644 index 0000000..00f6027 --- /dev/null +++ b/docker/docker-compose.yml @@ -0,0 +1,24 @@ +version: "3.9" + +services: + api: + image: geodetic:dev + depends_on: + - openfga + command: + - serve + - --debug + - --pretty + - --config=/config/.config.yaml + volumes: + - type: bind + source: ../config/.config.yaml + target: /config/.config.yaml + ports: + - "1337:1337" + restart: unless-stopped + environment: + - DATUM_REDIS_ADDRESS=redis:6379 + - DATUM_AUTHZ_HOST_URL=openfga:8080 + networks: + - default diff --git a/docker/files/otel-collector-config.yaml b/docker/files/otel-collector-config.yaml new file mode 100644 index 0000000..ecae65e --- /dev/null +++ b/docker/files/otel-collector-config.yaml @@ -0,0 +1,26 @@ +receivers: + otlp: + protocols: + http: + cors: + allowed_origins: + - "*" + +processors: + batch: + +exporters: + logging: + loglevel: debug + otlp: + endpoint: "jaeger:4317" + tls: + insecure: true + + +service: + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [logging, otlp] diff --git a/gen_schema.go b/gen_schema.go new file mode 100644 index 0000000..ccd6f86 --- /dev/null +++ b/gen_schema.go @@ -0,0 +1,27 @@ +//go:build ignore + +package main + +import ( + "log" + "os" + + "github.com/vektah/gqlparser/v2/formatter" + + "github.com/datumforge/geodetic/internal/graphapi" +) + +// read in schema from internal package and save it to the schema file +func main() { + execSchema := graphapi.NewExecutableSchema(graphapi.Config{}) + schema := execSchema.Schema() + + f, err := os.Create("schema.graphql") + if err != nil { + log.Fatal(err) + } + defer f.Close() + fmtr := formatter.NewFormatter(f) + + fmtr.FormatSchema(schema) +} diff --git a/generate.go b/generate.go new file mode 100644 index 0000000..c48865e --- /dev/null +++ b/generate.go @@ -0,0 +1,13 @@ +package main + +//go:generate echo "------> Generating code - running entc.go... 
<------" +//go:generate go run -mod=mod ./internal/ent/entc.go +//go:generate echo "------> Generating code - running gqlgen... <------" +//go:generate go run -mod=mod github.com/99designs/gqlgen generate --verbose +//go:generate echo "------> Generating code - running gen_schema.go... <------" +//go:generate go run -mod=mod ./gen_schema.go +//go:generate echo "------> Generating code - running gqlgenc... <------" +//go:generate go run -mod=mod github.com/Yamashou/gqlgenc generate --configdir schema +//go:generate echo "------> Tidying up... <------" +//go:generate go mod tidy +//go:generate echo "------> Code generation process completed! <------" diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..94dfe0d --- /dev/null +++ b/go.mod @@ -0,0 +1,222 @@ +module github.com/datumforge/geodetic + +go 1.22.2 + +require ( + ariga.io/entcache v0.1.0 + entgo.io/contrib v0.4.5 + entgo.io/ent v0.13.1 + github.com/99designs/gqlgen v0.17.45 + github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 + github.com/Yamashou/gqlgenc v0.19.3 + github.com/brianvoe/gofakeit/v7 v7.0.2 + github.com/datumforge/datum v0.4.7 + github.com/datumforge/echo-prometheus/v5 v5.0.0-20231205192725-e697eaa86d58 + github.com/datumforge/echox v0.0.0-20240312185605-fdb5a150410e + github.com/datumforge/echozap v0.0.0-20231205193458-b29cc54cd34c + github.com/datumforge/entx v0.0.8 + github.com/datumforge/fgax v0.1.5 + github.com/datumforge/go-turso v0.0.3 + github.com/gorilla/websocket v1.5.1 + github.com/hashicorp/go-multierror v1.1.1 + github.com/invopop/jsonschema v0.12.0 + github.com/invopop/yaml v0.3.1 + github.com/knadh/koanf/parsers/yaml v0.1.0 + github.com/knadh/koanf/providers/env v0.1.0 + github.com/knadh/koanf/providers/file v0.1.0 + github.com/knadh/koanf/v2 v2.1.1 + github.com/mcuadros/go-defaults v1.2.0 + github.com/mitchellh/go-homedir v1.1.0 + github.com/pressly/goose/v3 v3.19.2 + github.com/prometheus/client_golang v1.19.0 + github.com/ravilushqa/otelgqlgen v0.15.0 + github.com/redis/go-redis/v9 v9.5.1 + github.com/samber/lo v1.39.0 + github.com/spf13/cobra v1.8.0 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.18.2 + github.com/stretchr/testify v1.9.0 + github.com/vektah/gqlparser/v2 v2.5.11 + github.com/wundergraph/graphql-go-tools v1.67.2 + go.uber.org/zap v1.27.0 + gocloud.dev v0.37.0 + golang.org/x/crypto v0.22.0 +) + +require ( + ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 // indirect + dario.cat/mergo v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/hcsshim v0.11.4 // indirect + github.com/XSAM/otelsql v0.29.0 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/agnivade/levenshtein v1.1.1 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/bahlo/generic-list-go v0.2.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect + github.com/buger/jsonparser v1.1.1 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/containerd/containerd v1.7.12 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/cpuguy83/dockercfg v0.3.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + 
+	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+	github.com/distribution/reference v0.5.0 // indirect
+	github.com/dlclark/regexp2 v1.11.0 // indirect
+	github.com/docker/docker v25.0.5+incompatible // indirect
+	github.com/docker/go-connections v0.5.0 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
+	github.com/dustin/go-humanize v1.0.1 // indirect
+	github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
+	github.com/fatih/color v1.16.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/fsnotify/fsnotify v1.7.0 // indirect
+	github.com/ghodss/yaml v1.0.0 // indirect
+	github.com/go-faster/errors v0.7.1 // indirect
+	github.com/go-faster/jx v1.1.0 // indirect
+	github.com/go-faster/yaml v0.4.6 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-ole/go-ole v1.2.6 // indirect
+	github.com/go-openapi/inflect v0.19.0 // indirect
+	github.com/go-redis/redis/v8 v8.11.5 // indirect
+	github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect
+	github.com/goccy/go-json v0.10.2 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
+	github.com/google/go-cmp v0.6.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.3 // indirect
+	github.com/gorilla/securecookie v1.1.2 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/hashicorp/hcl/v2 v2.19.1 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/klauspost/compress v1.17.6 // indirect
+	github.com/knadh/koanf/maps v0.1.1 // indirect
+	github.com/lestrrat-go/blackmagic v1.0.2 // indirect
+	github.com/lestrrat-go/httpcc v1.0.1 // indirect
+	github.com/lestrrat-go/httprc v1.0.5 // indirect
+	github.com/lestrrat-go/iter v1.0.2 // indirect
+	github.com/lestrrat-go/jwx/v2 v2.0.21 // indirect
+	github.com/lestrrat-go/option v1.0.1 // indirect
+	github.com/lib/pq v1.10.9 // indirect
+	github.com/libsql/sqlite-antlr4-parser v0.0.0-20240327125255-dbf53b6cbf06 // indirect
+	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mfridman/interpolate v0.0.2 // indirect
+	github.com/mitchellh/copystructure v1.2.0 // indirect
+	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
+	github.com/mitchellh/hashstructure v1.1.0 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/mitchellh/reflectwalk v1.0.2 // indirect
+	github.com/moby/patternmatcher v0.6.0 // indirect
+	github.com/moby/sys/sequential v0.5.0 // indirect
+	github.com/moby/sys/user v0.1.0 // indirect
+	github.com/moby/term v0.5.0 // indirect
+	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/ncruces/go-strftime v0.1.9 // indirect
+	github.com/ogen-go/ogen v1.0.0 // indirect
+	github.com/oklog/ulid/v2 v2.1.0 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/image-spec v1.1.0 // indirect
+	github.com/openfga/api/proto v0.0.0-20240201160513-05de9d8be3ee // indirect
+	github.com/openfga/go-sdk v0.3.5 // indirect
+	github.com/openfga/language/pkg/go v0.0.0-20240328133052-aabba86a664b // indirect
+	github.com/pelletier/go-toml/v2 v2.1.1 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+	github.com/posthog/posthog-go v0.0.0-20240315130956-036dfa9f3555 // indirect
+	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+	github.com/pquerna/otp v1.4.0 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/common v0.48.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/sagikazarmark/locafero v0.4.0 // indirect
+	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
+	github.com/segmentio/asm v1.2.0 // indirect
+	github.com/sendgrid/rest v2.6.9+incompatible // indirect
+	github.com/sendgrid/sendgrid-go v3.14.0+incompatible // indirect
+	github.com/sethvargo/go-retry v0.2.4 // indirect
+	github.com/shirou/gopsutil/v3 v3.23.12 // indirect
+	github.com/shoenig/go-m1cpu v0.1.6 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
+	github.com/sosodev/duration v1.2.0 // indirect
+	github.com/sourcegraph/conc v0.3.0 // indirect
+	github.com/spf13/afero v1.11.0 // indirect
+	github.com/spf13/cast v1.6.0 // indirect
+	github.com/stoewer/go-strcase v1.3.0 // indirect
+	github.com/stretchr/objx v0.5.2 // indirect
+	github.com/subosito/gotenv v1.6.0 // indirect
+	github.com/testcontainers/testcontainers-go v0.29.1 // indirect
+	github.com/testcontainers/testcontainers-go/modules/postgres v0.29.1 // indirect
+	github.com/tklauser/go-sysconf v0.3.12 // indirect
+	github.com/tklauser/numcpus v0.6.1 // indirect
+	github.com/tursodatabase/libsql-client-go v0.0.0-20240401075953-8e79a99d828a // indirect
+	github.com/urfave/cli/v2 v2.27.1 // indirect
+	github.com/valyala/bytebufferpool v1.0.0 // indirect
+	github.com/valyala/fasttemplate v1.2.2 // indirect
+	github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
+	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
+	github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
+	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
+	github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e // indirect
+	github.com/yusufpapurcu/wmi v1.2.3 // indirect
+	github.com/zclconf/go-cty v1.14.1 // indirect
+	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/contrib v1.21.1 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
+	go.opentelemetry.io/otel v1.24.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 // indirect
+	go.opentelemetry.io/otel/metric v1.24.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.24.0 // indirect
+	go.opentelemetry.io/otel/trace v1.24.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.1.0 // indirect
+	go.uber.org/multierr v1.11.0 // indirect
+	golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect
+	golang.org/x/mod v0.16.0 // indirect
+	golang.org/x/net v0.22.0 // indirect
+	golang.org/x/oauth2 v0.18.0 // indirect
+	golang.org/x/sync v0.6.0 // indirect
+	golang.org/x/sys v0.19.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/time v0.5.0 // indirect
+	golang.org/x/tools v0.19.0 // indirect
+	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
+	google.golang.org/api v0.171.0 // indirect
+	google.golang.org/appengine v1.6.8 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
+	google.golang.org/grpc v1.62.1 // indirect
+	google.golang.org/protobuf v1.33.0 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	gotest.tools/v3 v3.5.1 // indirect
+	modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect
+	modernc.org/libc v1.41.0 // indirect
+	modernc.org/mathutil v1.6.0 // indirect
+	modernc.org/memory v1.7.2 // indirect
+	modernc.org/sqlite v1.29.5 // indirect
+	modernc.org/strutil v1.2.0 // indirect
+	modernc.org/token v1.1.0 // indirect
+	nhooyr.io/websocket v1.8.10 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..00d3594
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,766 @@
+ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 h1:GwdJbXydHCYPedeeLt4x/lrlIISQ4JTH1mRWuE5ZZ14=
+ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43/go.mod h1:uj3pm+hUTVN/X5yfdBexHlZv+1Xu5u5ZbZx7+CDavNU=
+ariga.io/entcache v0.1.0 h1:nfJXzjB5CEvAK6SmjupHREMJrKLakeqU5tG3s4TO6JA=
+ariga.io/entcache v0.1.0/go.mod h1:3Z1Sql5bcqPA1YV/jvMlZyh9T+ntSFOclaASAm1TiKQ=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM=
+cloud.google.com/go/compute v1.25.0 h1:H1/4SqSUhjPFE7L5ddzHOfY2bCAvjwNRZPNl6Ni5oYU=
+cloud.google.com/go/compute v1.25.0/go.mod h1:GR7F0ZPZH8EhChlMo9FkLd7eUTwEymjqQagxzilIxIE=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
+cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
+cloud.google.com/go/kms v1.15.7 h1:7caV9K3yIxvlQPAcaFffhlT7d1qpxjB1wHBtjWa13SM=
+cloud.google.com/go/kms v1.15.7/go.mod h1:ub54lbsa6tDkUwnu4W7Yt1aAIFLnspgh0kPGToDukeI=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+entgo.io/contrib v0.4.5 h1:BFaOHwFLE8WZjVJadP0XHCIaxgcC1BAtUvAyw7M/GHk=
+entgo.io/contrib v0.4.5/go.mod h1:wpZyq2DJgthugFvDBlaqMXj9mV4/9ebyGEn7xlTVQqE=
+entgo.io/ent v0.13.1 h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=
+entgo.io/ent v0.13.1/go.mod h1:qCEmo+biw3ccBn9OyL4ZK5dfpwg++l1Gxwac5B1206A=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/99designs/gqlgen v0.17.45 h1:bH0AH67vIJo8JKNKPJP+pOPpQhZeuVRQLf53dKIpDik=
+github.com/99designs/gqlgen v0.17.45/go.mod h1:Bas0XQ+Jiu/Xm5E33jC8sES3G+iC2esHBMXcq0fUPs0=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/ClickHouse/ch-go v0.58.2 h1:jSm2szHbT9MCAB1rJ3WuCJqmGLi5UTjlNu+f530UTS0=
+github.com/ClickHouse/ch-go v0.58.2/go.mod h1:Ap/0bEmiLa14gYjCiRkYGbXvbe8vwdrfTYWhsuQ99aw=
+github.com/ClickHouse/clickhouse-go/v2 v2.17.1 h1:ZCmAYWpu75IyEi7+Yrs/uaAjiCGY5wfW5kXo64exkX4=
+github.com/ClickHouse/clickhouse-go/v2 v2.17.1/go.mod h1:rkGTvFDTLqLIm0ma+13xmcCfr/08Gvs7KmFt1tgiWHQ=
+github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
+github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=
+github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
+github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
+github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
+github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 h1:ZBbLwSJqkHBuFDA6DUhhse0IGJ7T5bemHyNILUjvOq4=
+github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2/go.mod h1:VSw57q4QFiWDbRnjdX8Cb3Ow0SFncRw+bA/ofY6Q83w=
+github.com/XSAM/otelsql v0.29.0 h1:pEw9YXXs8ZrGRYfDc0cmArIz9lci5b42gmP5+tA1Huc=
+github.com/XSAM/otelsql v0.29.0/go.mod h1:d3/0xGIGC5RVEE+Ld7KotwaLy6zDeaF3fLJHOPpdN2w=
+github.com/Yamashou/gqlgenc v0.19.3 h1:StpiNvNDGjDh2gdN3s+9HTirLtFKzTrybciqnTWGybQ=
+github.com/Yamashou/gqlgenc v0.19.3/go.mod h1:oMc4EQBQeDwLIODvgcvpaSp6rO+KMf47FuOhplv5D3A=
+github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
+github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
+github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
+github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
+github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
+github.com/alicebob/miniredis/v2 v2.32.1 h1:Bz7CciDnYSaa0mX5xODh6GUITRSx+cVhjNoOR4JssBo=
+github.com/alicebob/miniredis/v2 v2.32.1/go.mod h1:AqkLNAfUm0K07J28hnAyyQKf/x0YkCY/g5DCtuL01Mw=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
+github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
+github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
+github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
+github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
+github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
+github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/brianvoe/gofakeit/v7 v7.0.2 h1:jzYT7Ge3RDHw7J1CM1kwu0OQywV9vbf2qSGxBS72TCY=
+github.com/brianvoe/gofakeit/v7 v7.0.2/go.mod h1:QXuPeBw164PJCzCUZVmgpgHJ3Llj49jSLVkKPMtxtxA=
+github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
+github.com/bytedance/sonic v1.9.2 h1:GDaNjuWSGu09guE9Oql0MSTNhNCLlWwO8y/xM5BzcbM=
+github.com/bytedance/sonic v1.9.2/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0=
+github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk=
+github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
+github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
+github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
+github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.20 h1:VIPb/a2s17qNeQgDnkfZC35RScx+blkKF8GV68n80J4= +github.com/creack/pty v1.1.20/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/datumforge/datum v0.4.7 h1:D/vcl9yvnJWmZb4rqFtVZZt6kVtKNlrZ07v0uAscADA= +github.com/datumforge/datum v0.4.7/go.mod h1:M6PiiPWYjUS6BPKp25yMJ8IbjcnCI3YTySn0a8+3fx4= +github.com/datumforge/echo-prometheus/v5 v5.0.0-20231205192725-e697eaa86d58 h1:/coVSf+8DmRkPRYaH2pWn/DhvcMNbHSb1IgqjFb5GUg= +github.com/datumforge/echo-prometheus/v5 v5.0.0-20231205192725-e697eaa86d58/go.mod h1:nXVOsuzExmWX51klS2y/qQe0EUee4gALCOe3CP6BIjE= +github.com/datumforge/echox v0.0.0-20240312185605-fdb5a150410e h1:anHN8Bsa8x80oCYCuC3EC5kY+GO8ymIQOWvgAKZujlo= +github.com/datumforge/echox v0.0.0-20240312185605-fdb5a150410e/go.mod h1:ZCbNvIShUhAHKlURvgedaGUAghCNZli2I87uLJSPiZ0= +github.com/datumforge/echozap v0.0.0-20231205193458-b29cc54cd34c h1:4ycAoM6MJ8PBRCAXbE29GW+HJXm8a99x0mGtSSE8iHM= +github.com/datumforge/echozap v0.0.0-20231205193458-b29cc54cd34c/go.mod h1:Tp6J3q+sRxvsMLHCxARnBouLUiwWnquxmDgRjqJaLHw= +github.com/datumforge/entx v0.0.8 h1:qTlvWRxnoZwXMX2mf1NHvSASVqWZYzs7tgBnY1u6j0s= +github.com/datumforge/entx v0.0.8/go.mod h1:7eiomY5fsbc8HzO+pbydDaMUftV0PqMftUZ0BziuhJc= +github.com/datumforge/fgax v0.1.5 h1:cU7MTw+/7STl6KUBACfvvuPO5gbPLW7P4RlYXGLfxgw= +github.com/datumforge/fgax v0.1.5/go.mod h1:hOcVFfzYI7jI3jQu1KV+iEZzTJ5YzPbv5ptKlb9Vjf0= +github.com/datumforge/go-turso v0.0.3 h1:DVAfRgowxl5n5BbFktpYtFcF6AkI1avDXs8MXug8SNg= +github.com/datumforge/go-turso v0.0.3/go.mod h1:s5AAVKbvEjJzMg/8ZlSZxCG/RTL+w4K0Ycwd98cZ2YE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= +github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= +github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/elastic/go-sysinfo v1.11.2 h1:mcm4OSYVMyws6+n2HIVMGkln5HOpo5Ie1ZmbbNn0jg4= +github.com/elastic/go-sysinfo v1.11.2/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ= +github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= +github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= +github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= +github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= +github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-faster/jx v1.1.0 h1:ZsW3wD+snOdmTDy9eIVgQdjUpXRRV4rqW8NS3t+20bg= +github.com/go-faster/jx v1.1.0/go.mod h1:vKDNikrKoyUmpzaJ0OkIkRQClNHFX/nF3dnTJZb3skg= +github.com/go-faster/yaml v0.4.6 
h1:lOK/EhI04gCpPgPhgt0bChS6bvw7G3WwI8xxVe0sw9I= +github.com/go-faster/yaml v0.4.6/go.mod h1:390dRIvV4zbnO7qC9FGo6YYutc+wyyUSHBgbXL52eXk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= +github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k= +github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-redis/redismock/v8 v8.0.6 h1:rtuijPgGynsRB2Y7KDACm09WvjHWS4RaG44Nm7rcj4Y= +github.com/go-redis/redismock/v8 v8.0.6/go.mod h1:sDIF73OVsmaKzYe/1FJXGiCQ4+oHYbzjpaL9Vor0sS4= +github.com/go-sql-driver/mysql v1.8.0 h1:UtktXaU2Nb64z/pLiGIxY4431SJ4/dR5cjMmlVHgnT4= +github.com/go-sql-driver/mysql v1.8.0/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI= +github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA= 
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= +github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= +github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= +github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= +github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f h1:7LYC+Yfkj3CTRcShK0KOL/w6iTiKyqqBA9a41Wnggw8= +github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= +github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= +github.com/invopop/yaml v0.3.1 h1:f0+ZpmhfBSS4MhG+4HYseMdJhoeeopbSKbq5Rpeelso= +github.com/invopop/yaml v0.3.1/go.mod h1:PMOp3nn4/12yEZUFfmOuNHJsZToEEOwoWsT+D81KkeA= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw= +github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= +github.com/jackc/puddle/v2 v2.2.1 
h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= +github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= +github.com/jensneuse/diffview v1.0.0 h1:4b6FQJ7y3295JUHU3tRko6euyEboL825ZsXeZZM47Z4= +github.com/jensneuse/diffview v1.0.0/go.mod h1:i6IacuD8LnEaPuiyzMHA+Wfz5mAuycMOf3R/orUY9y4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/parsers/yaml v0.1.0 h1:ZZ8/iGfRLvKSaMEECEBPM1HQslrZADk8fP1XFUxVI5w= +github.com/knadh/koanf/parsers/yaml v0.1.0/go.mod h1:cvbUDC7AL23pImuQP0oRw/hPuccrNBS2bps8asS0CwY= +github.com/knadh/koanf/providers/env v0.1.0 h1:LqKteXqfOWyx5Ab9VfGHmjY9BvRXi+clwyZozgVRiKg= +github.com/knadh/koanf/providers/env v0.1.0/go.mod h1:RE8K9GbACJkeEnkl8L/Qcj8p4ZyPXZIQ191HJi44ZaQ= +github.com/knadh/koanf/providers/file v0.1.0 h1:fs6U7nrV58d3CFAFh8VTde8TM262ObYf3ODrc//Lp+c= +github.com/knadh/koanf/providers/file v0.1.0/go.mod h1:rjJ/nHQl64iYCtAW2QQnF0eSmDEX/YZ/eNFj5yR6BvA= +github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= +github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/lestrrat-go/blackmagic v1.0.2 
h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k= +github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/httprc v1.0.5 h1:bsTfiH8xaKOJPrg1R+E3iE/AWZr/x0Phj9PBTG/OLUk= +github.com/lestrrat-go/httprc v1.0.5/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo= +github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= +github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= +github.com/lestrrat-go/jwx/v2 v2.0.21 h1:jAPKupy4uHgrHFEdjVjNkUgoBKtVDgrQPB/h55FHrR0= +github.com/lestrrat-go/jwx/v2 v2.0.21/go.mod h1:09mLW8zto6bWL9GbwnqAli+ArLf+5M33QLQPDggkUWM= +github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= +github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libsql/sqlite-antlr4-parser v0.0.0-20240327125255-dbf53b6cbf06 h1:JLvn7D+wXjH9g4Jsjo+VqmzTUpl/LX7vfr6VOfSWTdM= +github.com/libsql/sqlite-antlr4-parser v0.0.0-20240327125255-dbf53b6cbf06/go.mod h1:FUkZ5OHjlGPjnM2UyGJz9TypXQFgYqw6AFNO1UiROTM= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mcuadros/go-defaults v1.2.0 h1:FODb8WSf0uGaY8elWJAkoLL0Ri6AlZ1bFlenk56oZtc= +github.com/mcuadros/go-defaults v1.2.0/go.mod h1:WEZtHEVIGYVDqkKSWBdWKUVdRyKlMfulPaGDWIVeCWY= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod 
h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= +github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/ogen-go/ogen v1.0.0 h1:n1hkgOnLtA1Xn369KAzJhqzphQzNo/wAI82NIaFQNXA= +github.com/ogen-go/ogen v1.0.0/go.mod h1:NFn616zR+/DPsq8rPoezaHlhKcNQzlYfo5gUieW8utI= +github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU= +github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= +github.com/opencontainers/runc v1.1.12/go.mod 
h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= +github.com/openfga/api/proto v0.0.0-20240201160513-05de9d8be3ee h1:ZqeLB0dxo4XgRLRpaF9dDt3S3BShbrGE9NO6L2eUDDE= +github.com/openfga/api/proto v0.0.0-20240201160513-05de9d8be3ee/go.mod h1:XF/4W9je/FGBZQ2M5pbQnrzdKF4VcEEtds3ole9sW5E= +github.com/openfga/go-sdk v0.3.5 h1:KQXhMREh+g/K7HNuZ/YmXuHkREkq0VMKteua4bYr3Uw= +github.com/openfga/go-sdk v0.3.5/go.mod h1:u1iErzj5E9/bhe+8nsMv0gigcYbJtImcdgcE5DmpbBg= +github.com/openfga/language/pkg/go v0.0.0-20240328133052-aabba86a664b h1:u741ckN7yfFib1uAMtkTaFYYIx761GmMf18Ou87pnOs= +github.com/openfga/language/pkg/go v0.0.0-20240328133052-aabba86a664b/go.mod h1:uuXSPj7C3ImG5UF3rAupq+aC8mZQ3pbR52GRU/DXjLU= +github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= +github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= +github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s= +github.com/paulmach/orb v0.10.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= +github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= +github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posthog/posthog-go v0.0.0-20240315130956-036dfa9f3555 h1:RqJZxk2VAaZYCCk4ZVo7iLqp4a03LWitjE0tNIMyvMU= +github.com/posthog/posthog-go v0.0.0-20240315130956-036dfa9f3555/go.mod h1:QjlpryJtfYLrZF2GUkAhejH4E7WlDbdKkvOi5hLmkdg= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pquerna/otp v1.4.0 h1:wZvl1TIVxKRThZIBiwOOHOGP/1+nZyWBil9Y2XNEDzg= +github.com/pquerna/otp v1.4.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= +github.com/pressly/goose/v3 v3.19.2 h1:z1yuD41jS4iaqLkyjkzGkKBz4rgyz/BYtCyMMGHlgzQ= +github.com/pressly/goose/v3 v3.19.2/go.mod h1:BHkf3LzSBmO8E5FTMPupUYIpMTIh/ZuQVy+YTfhZLD4= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs 
v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/ravilushqa/otelgqlgen v0.15.0 h1:U85nrlweMXTGaMChUViYM39/MXBZVeVVlpuHq+6eECQ= +github.com/ravilushqa/otelgqlgen v0.15.0/go.mod h1:o+1Eju0VySmgq2BP8Vupz2YrN21Bj7D7imBqu3m2uB8= +github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8= +github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= +github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= +github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= +github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sendgrid/rest v2.6.9+incompatible h1:1EyIcsNdn9KIisLW50MKwmSRSK+ekueiEMJ7NEoxJo0= +github.com/sendgrid/rest v2.6.9+incompatible/go.mod h1:kXX7q3jZtJXK5c5qK83bSGMdV6tsOE70KbHoqJls4lE= +github.com/sendgrid/sendgrid-go v3.14.0+incompatible h1:KDSasSTktAqMJCYClHVE94Fcif2i7P7wzISv1sU6DUA= +github.com/sendgrid/sendgrid-go v3.14.0+incompatible/go.mod h1:QRQt+LX/NmgVEvmdRw0VT/QgUn499+iza2FnDca9fg8= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= +github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= 
+github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sosodev/duration v1.2.0 h1:pqK/FLSjsAADWY74SyWDCjOcd5l7H8GSnnOGEB9A1Us= +github.com/sosodev/duration v1.2.0/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/testcontainers/testcontainers-go v0.29.1 h1:z8kxdFlovA2y97RWx98v/TQ+tR+SXZm6p35M+xB92zk= +github.com/testcontainers/testcontainers-go v0.29.1/go.mod h1:SnKnKQav8UcgtKqjp/AD8bE1MqZm+3TDb/B8crE3XnI= +github.com/testcontainers/testcontainers-go/modules/postgres v0.29.1 h1:hTn3MzhR9w4btwfzr/NborGCaeNZG0MPBpufeDj10KA= +github.com/testcontainers/testcontainers-go/modules/postgres v0.29.1/go.mod h1:YsWyy+pHDgvGdi0axGOx6CGXWsE6eqSaApyd1FYYSSc= +github.com/tklauser/go-sysconf v0.3.12 
h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/trisacrypto/directory v1.7.6 h1:SOa1uynpHgcENjZwvVtkfp6qjyvCZBs/X9C0T3nQ0LA= +github.com/trisacrypto/directory v1.7.6/go.mod h1:JSIMzr3J79h3+Y+17ghtvsb0hUEHzo+islfpecrQfpk= +github.com/tursodatabase/libsql-client-go v0.0.0-20240401075953-8e79a99d828a h1:LMz5RmEKz1epPZgUO3MtA5/X9Peudqm4rUoteSjLkho= +github.com/tursodatabase/libsql-client-go v0.0.0-20240401075953-8e79a99d828a/go.mod h1:2Fu26tjM011BLeR5+jwTfs6DX/fNMEWV/3CBZvggrA4= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= +github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/vektah/gqlparser/v2 v2.5.11 h1:JJxLtXIoN7+3x6MBdtIP59TP1RANnY7pXOaDnADQSf8= +github.com/vektah/gqlparser/v2 v2.5.11/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc= +github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw= +github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= +github.com/wundergraph/graphql-go-tools v1.67.2 h1:79cKWyzL6oiYPePORqDgc00eugUK4HI9YuA79rHWMJM= +github.com/wundergraph/graphql-go-tools v1.67.2/go.mod h1:XPiFH1mHduFuQTYiGpQe6ZtNI7/BX4EJQVMXr6oWxw0= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xhit/go-str2duration/v2 v2.1.0 
h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e h1:+SOyEddqYF09QP7vr7CgJ1eti3pY9Fn3LHO1M1r/0sI= +github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20240126124512-dbb0e1720dbf h1:ckwNHVo4bv2tqNkgx3W3HANh3ta1j6TR5qw08J1A7Tw= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20240126124512-dbb0e1720dbf/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= +github.com/ydb-platform/ydb-go-sdk/v3 v3.55.1 h1:Ebo6J5AMXgJ3A438ECYotA0aK7ETqjQx9WoZvVxzKBE= +github.com/ydb-platform/ydb-go-sdk/v3 v3.55.1/go.mod h1:udNPW8eupyH/EZocecFmaSNJacKKYjzQa7cVgX5U2nc= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= +github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= +github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib v1.21.1 h1:/U05KZ31iqMqAowhtW10cDPAViNY0tnpAacUgYBmuj8= +go.opentelemetry.io/contrib v1.21.1/go.mod h1:usW9bPlrjHiJFbK0a6yK/M5wNHs3nLmtrT3vzhoD3co= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 h1:Mw5xcxMwlqoJd97vwPxA8isEaIoxsta9/Q51+TTJLGE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0/go.mod h1:CQNu9bj7o7mC6U7+CA/schKEYakYXWr79ucDHTMGhCM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 h1:s0PHtIkN+3xrbDOpt2M8OTG92cWqUESvzh2MxiR5xY8= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0/go.mod 
h1:hZlFbDbRt++MMPCCfSJfmhkGIWnX1h3XjkfxZUjLrIA= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8= +go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= +go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro= +gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +google.golang.org/api v0.171.0 h1:w174hnBPqut76FzW5Qaupt7zY8Kql6fiVjgys4f58sU= +google.golang.org/api v0.171.0/go.mod h1:Hnq5AHm4OTMt2BUVjael2CWZFD6vksJdWCWiUAmjC9o= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s= +google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7/go.mod h1:/3XmxOjePkvmKrHuBy4zNFw7IzxJXtAgdpXi8Ll990U= +google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E= +google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= +modernc.org/libc v1.41.0 h1:g9YAc6BkKlgORsUWj+JwqoB1wU3o4DE3bM3yvA3k+Gk= +modernc.org/libc v1.41.0/go.mod h1:w0eszPsiXoOnoMJgrXjglgLuDy/bt5RR4y3QzUUeodY= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= +modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= +modernc.org/sqlite v1.29.5 h1:8l/SQKAjDtZFo9lkJLdk8g9JEOeYRG4/ghStDCCTiTE= +modernc.org/sqlite v1.29.5/go.mod h1:S02dvcmm7TnTRvGhv8IGYyLnIt7AS2KPaB1F/71p75U= +modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= +nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= diff --git a/gqlgen.yml b/gqlgen.yml new file mode 100644 index 0000000..488215a --- /dev/null +++ b/gqlgen.yml @@ -0,0 +1,88 @@ +# Where are all the schema files located? globs are supported eg src/**/*.graphqls +schema: + - schema/*.graphql + - subscriptions/*.graphql + +# Where should the generated server code go? +exec: + filename: internal/graphapi/gen_server.go + package: graphapi + +# Where should any generated models go? +model: + filename: internal/graphapi/gen_models.go + package: graphapi + +# Where should the resolver implementations go? 
+resolver:
+  layout: follow-schema
+  dir: internal/graphapi
+  package: graphapi
+  filename_template: "{name}.resolvers.go"
+  # Optional: turn on to not generate template comments above resolvers
+  # omit_template_comment: false
+
+# Optional: turn on to use `gqlgen:"fieldName"` tags in your models
+# struct_tag: json
+
+# Optional: turn on to use []Thing instead of []*Thing
+# omit_slice_element_pointers: false
+
+# Optional: turn on to omit Is() methods to interface and unions
+# omit_interface_checks: true
+
+# Optional: turn on to skip generation of ComplexityRoot struct content and Complexity function
+# omit_complexity: false
+
+# Optional: turn on to not generate any file notice comments in generated files
+# omit_gqlgen_file_notice: false
+
+# Optional: turn on to exclude the gqlgen version in the generated file notice. No effect if `omit_gqlgen_file_notice` is true.
+omit_gqlgen_version_in_file_notice: true
+
+# Optional: turn off to make struct-type struct fields not use pointers
+# e.g. type Thing struct { FieldA OtherThing } instead of { FieldA *OtherThing }
+# struct_fields_always_pointers: true
+
+# Optional: turn off to make resolvers return values instead of pointers for structs
+# resolvers_always_return_pointers: true
+
+# Optional: turn on to return pointers instead of values in unmarshalInput
+# return_pointers_in_unmarshalinput: false
+
+# Optional: wrap nullable input fields with Omittable
+# nullable_input_omittable: true
+
+# Optional: set to speed up generation time by not performing a final validation pass.
+# skip_validation: true
+
+# Optional: set to skip running `go mod tidy` when generating server code
+# skip_mod_tidy: true
+
+# gqlgen will search for any type names in the schema in these go packages
+# if they match it will use them, otherwise it will generate them.
+autobind:
+  - "github.com/datumforge/geodetic/internal/graphapi"
+  - "github.com/datumforge/geodetic/internal/ent/generated"
+
+
+# This section declares type mapping between the GraphQL and go type systems
+#
+# The first line in each type will be used as defaults for resolver arguments and
+# modelgen, the others will be allowed when binding to fields.
Configure them to +# your liking +models: + ID: + model: + - github.com/99designs/gqlgen/graphql.String + Int: + model: + - github.com/99designs/gqlgen/graphql.Int + - github.com/99designs/gqlgen/graphql.Int64 + - github.com/99designs/gqlgen/graphql.Int32 + Node: + model: + - github.com/datumforge/geodetic/internal/ent/generated.Noder + JSON: + model: + - github.com/datumforge/entx.json.RawMessage \ No newline at end of file diff --git a/gqlgenc.yml b/gqlgenc.yml new file mode 100644 index 0000000..fb0c6a1 --- /dev/null +++ b/gqlgenc.yml @@ -0,0 +1,22 @@ +model: + package: geodeticclient + filename: ./pkg/geodeticclient/models.go +client: + package: geodeticclient + filename: ./pkg/geodeticclient/graphclient.go +models: + Int: + model: github.com/99designs/gqlgen/graphql.Int64 + ID: + model: + - github.com/99designs/gqlgen/graphql.String + JSON: + model: + - github.com/datumforge/entx.json.RawMessage +schema: ["schema.graphql" ] +query: ["query/*.graphql"] +generate: + clientV2: true + clientInterfaceName: "GeodeticClient" + query: false + mutation: false \ No newline at end of file diff --git a/internal/constants/doc.go b/internal/constants/doc.go new file mode 100644 index 0000000..4a36ed7 --- /dev/null +++ b/internal/constants/doc.go @@ -0,0 +1,2 @@ +// Package constants contains constants used throughout the application +package constants diff --git a/internal/constants/version.go b/internal/constants/version.go new file mode 100644 index 0000000..92562a1 --- /dev/null +++ b/internal/constants/version.go @@ -0,0 +1,76 @@ +package constants + +import ( + "runtime/debug" + "strings" + "text/template" +) + +var ( + // CLIVersion is the version of the application. Note that this is + // set at compile time using ldflags. + CLIVersion = "no-info" + // VerboseCLIVersion is the verbose version of the application. + // Note that this is set up at init time. + VerboseCLIVersion = "" +) + +type versionStruct struct { + Version string + GoVersion string + Time string + Commit string + OS string + Arch string + Modified bool +} + +const ( + verboseTemplate = `Version: {{.Version}} +Go Version: {{.GoVersion}} +Git Commit: {{.Commit}} +Commit Date: {{.Time}} +OS/Arch: {{.OS}}/{{.Arch}} +Dirty: {{.Modified}}` +) + +func init() { + bi, ok := debug.ReadBuildInfo() + if !ok { + return + } + + var vvs versionStruct + + vvs.Version = CLIVersion + vvs.GoVersion = bi.GoVersion + + for _, kv := range bi.Settings { + switch kv.Key { + case "vcs.time": + vvs.Time = kv.Value + case "vcs.revision": + vvs.Commit = kv.Value + case "vcs.modified": + vvs.Modified = kv.Value == "true" + case "GOOS": + vvs.OS = kv.Value + case "GOARCH": + vvs.Arch = kv.Value + } + } + + VerboseCLIVersion = vvs.String() +} + +func (vvs *versionStruct) String() string { + stringBuilder := &strings.Builder{} + tmpl := template.Must(template.New("version").Parse(verboseTemplate)) + err := tmpl.Execute(stringBuilder, vvs) + + if err != nil { + panic(err) + } + + return stringBuilder.String() +} diff --git a/internal/ent/.gitkeep b/internal/ent/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/internal/ent/base/entinit.tmpl b/internal/ent/base/entinit.tmpl new file mode 100644 index 0000000..8821d8d --- /dev/null +++ b/internal/ent/base/entinit.tmpl @@ -0,0 +1,51 @@ +package schema + +import ( + "entgo.io/contrib/entgql" + "entgo.io/ent" + "entgo.io/ent/schema" + + emixin "github.com/datumforge/entx/mixin" + + "github.com/datumforge/geodetic/internal/ent/mixin" +) + +// {{ . }} holds the schema definition for the {{ . 
}} entity +type {{ . }} struct { + ent.Schema +} + +// Fields of the {{ . }} +func ({{ . }}) Fields() []ent.Field { + return []ent.Field{ + // Fields go here + } +} + +// Mixin of the {{ . }} +func ({{ . }}) Mixin() []ent.Mixin { + return []ent.Mixin{ + emixin.AuditMixin{}, + emixin.IDMixin{}, + mixin.SoftDeleteMixin{}, + } +} + +// Edges of the {{ . }} +func ({{ . }}) Edges() []ent.Edge { + return []ent.Edge{ + // Edges go here + } +} + +// Annotations of the {{ . }} +func ({{ . }}) Annotations() []schema.Annotation { + return []schema.Annotation{ + entgql.QueryField(), + entgql.RelayConnection(), + entgql.Mutations(entgql.MutationCreate(), (entgql.MutationUpdate())), + // the above annotations create all the graphQL goodness; if you need the schema only and not the endpoints, use the below annotation instead + // if you do not need the graphql bits, also be certain to add an exclusion to scripts/files_to_skip.txt + entgql.Skip(entgql.SkipAll), + } +} \ No newline at end of file diff --git a/internal/ent/entc.go b/internal/ent/entc.go new file mode 100644 index 0000000..71b7889 --- /dev/null +++ b/internal/ent/entc.go @@ -0,0 +1,133 @@ +//go:build ignore + +// See Upstream docs for more details: https://entgo.io/docs/code-gen/#use-entc-as-a-package + +package main + +import ( + "log" + "net/http" + "os" + + "entgo.io/contrib/entgql" + "entgo.io/contrib/entoas" + "entgo.io/ent/entc" + "entgo.io/ent/entc/gen" + "github.com/datumforge/entx" + "github.com/datumforge/fgax" + "github.com/datumforge/fgax/entfga" + "github.com/datumforge/go-turso" + "github.com/ogen-go/ogen" + "go.uber.org/zap" + "gocloud.dev/secrets" +) + +var ( + graphSchemaDir = "./schema/" +) + +func main() { + xExt, err := entx.NewExtension( + entx.WithJSONScalar(), + ) + if err != nil { + log.Fatalf("creating entx extension: %v", err) + } + + // Ensure the schema directory exists before running entc. + _ = os.Mkdir("schema", 0755) + + ex, err := entoas.NewExtension( + entoas.SimpleModels(), + entoas.Mutations(func(graph *gen.Graph, spec *ogen.Spec) error { + spec.SetOpenAPI("3.1.0") + spec.SetServers([]ogen.Server{ + { + URL: "https://api.datum.net/v1", + Description: "Datum Production API Endpoint", + }, + { + URL: "http://localhost:17608/v1", + Description: "http localhost endpoint for testing purposes", + }}) + spec.Info.SetTitle("Datum OpenAPI 3.1.0 Specifications"). + SetDescription("Programmatic interfaces for interacting with Datum Services"). + SetVersion("1.0.1") + spec.Info.SetContact(&ogen.Contact{ + Name: "Datum Support", + URL: "https://datum.net/support", + Email: "support@datum.net", + }) + spec.Info.SetLicense(&ogen.License{ + Name: "Apache 2.0", + URL: "https://www.apache.org/licenses/LICENSE-2.0", + }) + spec.Info.SetTermsOfService("https://datum.net/tos") + + return nil + }), + ) + + if err != nil { + log.Fatalf("creating entoas extension: %v", err) + } + + gqlExt, err := entgql.NewExtension( + // Tell Ent to generate a GraphQL schema for + // the Ent schema in a file named ent.graphql. 
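+		// Note: WithSchemaPath below writes the generated schema to
+		// schema/ent.graphql, which falls under the schema/*.graphql glob
+		// that gqlgen.yml scans, so gqlgen picks it up automatically.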
+ entgql.WithSchemaGenerator(), + entgql.WithSchemaPath("schema/ent.graphql"), + entgql.WithConfigPath("gqlgen.yml"), + entgql.WithWhereInputs(true), + entgql.WithSchemaHook(xExt.GQLSchemaHooks()...), + ) + if err != nil { + log.Fatalf("creating entgql extension: %v", err) + } + + if err := entc.Generate("./internal/ent/schema", &gen.Config{ + Target: "./internal/ent/generated", + Templates: entgql.AllTemplates, + Hooks: []gen.Hook{ + entx.GenSchema(graphSchemaDir), + }, + Package: "github.com/datumforge/geodetic/internal/ent/generated", + Features: []gen.Feature{ + gen.FeatureVersionedMigration, + gen.FeaturePrivacy, + gen.FeatureSnapshot, + gen.FeatureEntQL, + gen.FeatureNamedEdges, + gen.FeatureSchemaConfig, + gen.FeatureIntercept, + }, + }, + entc.Dependency( + entc.DependencyType(&secrets.Keeper{}), + ), + entc.Dependency( + entc.DependencyName("Authz"), + entc.DependencyType(fgax.Client{}), + ), + entc.Dependency( + entc.DependencyName("Logger"), + entc.DependencyType(zap.SugaredLogger{}), + ), + entc.Dependency( + entc.DependencyName("Turso"), + entc.DependencyType(&turso.Client{}), + ), + entc.Dependency( + entc.DependencyType(&http.Client{}), + ), + entc.TemplateDir("./internal/ent/templates"), + entc.Extensions( + gqlExt, + ex, + entfga.NewFGAExtension( + entfga.WithSoftDeletes(), + ), + )); err != nil { + log.Fatalf("running ent codegen: %v", err) + } +} diff --git a/internal/ent/generated/auth_from_mutation.go b/internal/ent/generated/auth_from_mutation.go new file mode 100644 index 0000000..48965e1 --- /dev/null +++ b/internal/ent/generated/auth_from_mutation.go @@ -0,0 +1,5 @@ +// Code generated by entfga, DO NOT EDIT. + +// Code generated by ent, DO NOT EDIT. + +package generated diff --git a/internal/ent/generated/authz_checks.go b/internal/ent/generated/authz_checks.go new file mode 100644 index 0000000..48965e1 --- /dev/null +++ b/internal/ent/generated/authz_checks.go @@ -0,0 +1,5 @@ +// Code generated by entfga, DO NOT EDIT. + +// Code generated by ent, DO NOT EDIT. + +package generated diff --git a/internal/ent/generated/client.go b/internal/ent/generated/client.go new file mode 100644 index 0000000..0e3a917 --- /dev/null +++ b/internal/ent/generated/client.go @@ -0,0 +1,599 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + + "github.com/datumforge/geodetic/internal/ent/generated/migrate" + + "net/http" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/datumforge/fgax" + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/go-turso" + "go.uber.org/zap" + "gocloud.dev/secrets" + + "github.com/datumforge/geodetic/internal/ent/generated/internal" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // Database is the client for interacting with the Database builders. + Database *DatabaseClient + // Group is the client for interacting with the Group builders. + Group *GroupClient + + // authzActivated determines if the authz hooks have already been activated + authzActivated bool +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) 
+ client := &Client{config: cfg} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.Database = NewDatabaseClient(c.config) + c.Group = NewGroupClient(c.config) +} + +// WithAuthz adds the authz hooks to the appropriate schemas - generated by entfga +func (c *Client) WithAuthz() { + if !c.authzActivated { + + c.authzActivated = true + } +} + +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + SecretsKeeper *secrets.Keeper + Authz fgax.Client + Logger zap.SugaredLogger + Turso *turso.Client + HTTPClient *http.Client + // schemaConfig contains alternative names for all tables. + schemaConfig SchemaConfig + } + // Option function to configure the client. + Option func(*config) +) + +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + +// SecretsKeeper configures the SecretsKeeper. +func SecretsKeeper(v *secrets.Keeper) Option { + return func(c *config) { + c.SecretsKeeper = v + } +} + +// Authz configures the Authz. +func Authz(v fgax.Client) Option { + return func(c *config) { + c.Authz = v + } +} + +// Logger configures the Logger. +func Logger(v zap.SugaredLogger) Option { + return func(c *config) { + c.Logger = v + } +} + +// Turso configures the Turso. +func Turso(v *turso.Client) Option { + return func(c *config) { + c.Turso = v + } +} + +// HTTPClient configures the HTTPClient. +func HTTPClient(v *http.Client) Option { + return func(c *config) { + c.HTTPClient = v + } +} + +// Open opens a database/sql.DB specified by the driver name and +// the data source name, and returns a new client attached to it. +// Optional parameters can be added for configuring the client. +func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { + switch driverName { + case dialect.MySQL, dialect.Postgres, dialect.SQLite: + drv, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return NewClient(append(options, Driver(drv))...), nil + default: + return nil, fmt.Errorf("unsupported driver: %q", driverName) + } +} + +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. +var ErrTxStarted = errors.New("generated: cannot start a transaction within a transaction") + +// Tx returns a new transactional client. The provided context +// is used until the transaction is committed or rolled back. 
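+//
+// An illustrative sketch (assumes ctx and client are in scope, and that the
+// Group schema defines a name field):
+//
+//	tx, err := client.Tx(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := tx.Group.Create().SetName("example").Save(ctx); err != nil {
+//		_ = tx.Rollback()
+//		return err
+//	}
+//	return tx.Commit()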
+func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, ErrTxStarted + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("generated: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = tx + return &Tx{ + ctx: ctx, + config: cfg, + Database: NewDatabaseClient(cfg), + Group: NewGroupClient(cfg), + }, nil +} + +// BeginTx returns a transactional client with specified options. +func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("ent: cannot start a transaction within a transaction") + } + tx, err := c.driver.(interface { + BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) + }).BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = &txDriver{tx: tx, drv: c.driver} + return &Tx{ + ctx: ctx, + config: cfg, + Database: NewDatabaseClient(cfg), + Group: NewGroupClient(cfg), + }, nil +} + +// Debug returns a new debug-client. It's used to get verbose logging on specific operations. +// +// client.Debug(). +// Database. +// Query(). +// Count(ctx) +func (c *Client) Debug() *Client { + if c.debug { + return c + } + cfg := c.config + cfg.driver = dialect.Debug(c.driver, c.log) + client := &Client{config: cfg} + client.init() + return client +} + +// Close closes the database connection and prevents new queries from starting. +func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. +// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + c.Database.Use(hooks...) + c.Group.Use(hooks...) +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. +func (c *Client) Intercept(interceptors ...Interceptor) { + c.Database.Intercept(interceptors...) + c.Group.Intercept(interceptors...) +} + +// Mutate implements the ent.Mutator interface. +func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *DatabaseMutation: + return c.Database.mutate(ctx, m) + case *GroupMutation: + return c.Group.mutate(ctx, m) + default: + return nil, fmt.Errorf("generated: unknown mutation type %T", m) + } +} + +// DatabaseClient is a client for the Database schema. +type DatabaseClient struct { + config +} + +// NewDatabaseClient returns a client for the Database from the given config. +func NewDatabaseClient(c config) *DatabaseClient { + return &DatabaseClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `database.Hooks(f(g(h())))`. +func (c *DatabaseClient) Use(hooks ...Hook) { + c.hooks.Database = append(c.hooks.Database, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `database.Intercept(f(g(h())))`. +func (c *DatabaseClient) Intercept(interceptors ...Interceptor) { + c.inters.Database = append(c.inters.Database, interceptors...) +} + +// Create returns a builder for creating a Database entity. 
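+//
+// An illustrative sketch (assumes ctx and client are in scope; setter names
+// follow the Database schema fields):
+//
+//	db, err := client.Database.Create().
+//		SetOrganizationID("org-id").
+//		SetName("my-database").
+//		Save(ctx)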
+func (c *DatabaseClient) Create() *DatabaseCreate { + mutation := newDatabaseMutation(c.config, OpCreate) + return &DatabaseCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Database entities. +func (c *DatabaseClient) CreateBulk(builders ...*DatabaseCreate) *DatabaseCreateBulk { + return &DatabaseCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *DatabaseClient) MapCreateBulk(slice any, setFunc func(*DatabaseCreate, int)) *DatabaseCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &DatabaseCreateBulk{err: fmt.Errorf("calling to DatabaseClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*DatabaseCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &DatabaseCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Database. +func (c *DatabaseClient) Update() *DatabaseUpdate { + mutation := newDatabaseMutation(c.config, OpUpdate) + return &DatabaseUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *DatabaseClient) UpdateOne(d *Database) *DatabaseUpdateOne { + mutation := newDatabaseMutation(c.config, OpUpdateOne, withDatabase(d)) + return &DatabaseUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *DatabaseClient) UpdateOneID(id string) *DatabaseUpdateOne { + mutation := newDatabaseMutation(c.config, OpUpdateOne, withDatabaseID(id)) + return &DatabaseUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Database. +func (c *DatabaseClient) Delete() *DatabaseDelete { + mutation := newDatabaseMutation(c.config, OpDelete) + return &DatabaseDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *DatabaseClient) DeleteOne(d *Database) *DatabaseDeleteOne { + return c.DeleteOneID(d.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *DatabaseClient) DeleteOneID(id string) *DatabaseDeleteOne { + builder := c.Delete().Where(database.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &DatabaseDeleteOne{builder} +} + +// Query returns a query builder for Database. +func (c *DatabaseClient) Query() *DatabaseQuery { + return &DatabaseQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeDatabase}, + inters: c.Interceptors(), + } +} + +// Get returns a Database entity by its id. +func (c *DatabaseClient) Get(ctx context.Context, id string) (*Database, error) { + return c.Query().Where(database.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *DatabaseClient) GetX(ctx context.Context, id string) *Database { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryGroup queries the group edge of a Database. 
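+//
+// An illustrative sketch (assumes d is a previously fetched *Database):
+//
+//	grp, err := client.Database.QueryGroup(d).Only(ctx)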
+func (c *DatabaseClient) QueryGroup(d *Database) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := d.ID + step := sqlgraph.NewStep( + sqlgraph.From(database.Table, database.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, database.GroupTable, database.GroupColumn), + ) + schemaConfig := d.schemaConfig + step.To.Schema = schemaConfig.Group + step.Edge.Schema = schemaConfig.Database + fromV = sqlgraph.Neighbors(d.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *DatabaseClient) Hooks() []Hook { + hooks := c.hooks.Database + return append(hooks[:len(hooks):len(hooks)], database.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *DatabaseClient) Interceptors() []Interceptor { + return c.inters.Database +} + +func (c *DatabaseClient) mutate(ctx context.Context, m *DatabaseMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&DatabaseCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&DatabaseUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&DatabaseUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&DatabaseDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("generated: unknown Database mutation op: %q", m.Op()) + } +} + +// GroupClient is a client for the Group schema. +type GroupClient struct { + config +} + +// NewGroupClient returns a client for the Group from the given config. +func NewGroupClient(c config) *GroupClient { + return &GroupClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `group.Hooks(f(g(h())))`. +func (c *GroupClient) Use(hooks ...Hook) { + c.hooks.Group = append(c.hooks.Group, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `group.Intercept(f(g(h())))`. +func (c *GroupClient) Intercept(interceptors ...Interceptor) { + c.inters.Group = append(c.inters.Group, interceptors...) +} + +// Create returns a builder for creating a Group entity. +func (c *GroupClient) Create() *GroupCreate { + mutation := newGroupMutation(c.config, OpCreate) + return &GroupCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Group entities. +func (c *GroupClient) CreateBulk(builders ...*GroupCreate) *GroupCreateBulk { + return &GroupCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *GroupClient) MapCreateBulk(slice any, setFunc func(*GroupCreate, int)) *GroupCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &GroupCreateBulk{err: fmt.Errorf("calling to GroupClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*GroupCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &GroupCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Group. 
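+//
+// An illustrative sketch (assumes the Group schema defines a name field):
+//
+//	n, err := client.Group.Update().
+//		Where(group.NameEQ("old-name")).
+//		SetName("new-name").
+//		Save(ctx)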
+func (c *GroupClient) Update() *GroupUpdate { + mutation := newGroupMutation(c.config, OpUpdate) + return &GroupUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *GroupClient) UpdateOne(gr *Group) *GroupUpdateOne { + mutation := newGroupMutation(c.config, OpUpdateOne, withGroup(gr)) + return &GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *GroupClient) UpdateOneID(id string) *GroupUpdateOne { + mutation := newGroupMutation(c.config, OpUpdateOne, withGroupID(id)) + return &GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Group. +func (c *GroupClient) Delete() *GroupDelete { + mutation := newGroupMutation(c.config, OpDelete) + return &GroupDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *GroupClient) DeleteOne(gr *Group) *GroupDeleteOne { + return c.DeleteOneID(gr.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *GroupClient) DeleteOneID(id string) *GroupDeleteOne { + builder := c.Delete().Where(group.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &GroupDeleteOne{builder} +} + +// Query returns a query builder for Group. +func (c *GroupClient) Query() *GroupQuery { + return &GroupQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeGroup}, + inters: c.Interceptors(), + } +} + +// Get returns a Group entity by its id. +func (c *GroupClient) Get(ctx context.Context, id string) (*Group, error) { + return c.Query().Where(group.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *GroupClient) GetX(ctx context.Context, id string) *Group { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryDatabases queries the databases edge of a Group. +func (c *GroupClient) QueryDatabases(gr *Group) *DatabaseQuery { + query := (&DatabaseClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := gr.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(database.Table, database.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.DatabasesTable, group.DatabasesColumn), + ) + schemaConfig := gr.schemaConfig + step.To.Schema = schemaConfig.Database + step.Edge.Schema = schemaConfig.Database + fromV = sqlgraph.Neighbors(gr.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *GroupClient) Hooks() []Hook { + hooks := c.hooks.Group + return append(hooks[:len(hooks):len(hooks)], group.Hooks[:]...) +} + +// Interceptors returns the client interceptors. 
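+//
+// Interceptors are registered with Intercept; an illustrative sketch using
+// the generated intercept package (function body assumed):
+//
+//	client.Group.Intercept(intercept.TraverseGroup(func(ctx context.Context, q *GroupQuery) error {
+//		// inspect or narrow the query before it runs
+//		return nil
+//	}))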
+func (c *GroupClient) Interceptors() []Interceptor { + return c.inters.Group +} + +func (c *GroupClient) mutate(ctx context.Context, m *GroupMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&GroupCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&GroupUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&GroupDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("generated: unknown Group mutation op: %q", m.Op()) + } +} + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + Database, Group []ent.Hook + } + inters struct { + Database, Group []ent.Interceptor + } +) + +// SchemaConfig represents alternative schema names for all tables +// that can be passed at runtime. +type SchemaConfig = internal.SchemaConfig + +// AlternateSchemas allows alternate schema names to be +// passed into ent operations. +func AlternateSchema(schemaConfig SchemaConfig) Option { + return func(c *config) { + c.schemaConfig = schemaConfig + } +} diff --git a/internal/ent/generated/database.go b/internal/ent/generated/database.go new file mode 100644 index 0000000..f05d03c --- /dev/null +++ b/internal/ent/generated/database.go @@ -0,0 +1,278 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/geodetic/pkg/enums" +) + +// Database is the model entity for the Database schema. +type Database struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // CreatedBy holds the value of the "created_by" field. + CreatedBy string `json:"created_by,omitempty"` + // UpdatedBy holds the value of the "updated_by" field. + UpdatedBy string `json:"updated_by,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt time.Time `json:"deleted_at,omitempty"` + // DeletedBy holds the value of the "deleted_by" field. + DeletedBy string `json:"deleted_by,omitempty"` + // the ID of the organization + OrganizationID string `json:"organization_id,omitempty"` + // the name to the database + Name string `json:"name,omitempty"` + // the geo location of the database + Geo string `json:"geo,omitempty"` + // the DSN to the database + Dsn string `json:"dsn,omitempty"` + // the ID of the group + GroupID string `json:"group_id,omitempty"` + // the auth token used to connect to the database + Token string `json:"-"` + // status of the database + Status enums.DatabaseStatus `json:"status,omitempty"` + // provider of the database + Provider enums.DatabaseProvider `json:"provider,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the DatabaseQuery when eager-loading is set. + Edges DatabaseEdges `json:"edges"` + selectValues sql.SelectValues +} + +// DatabaseEdges holds the relations/edges for other nodes in the graph. 
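+// Edges are only populated when eager-loading is requested, e.g.
+// (illustrative): client.Database.Query().WithGroup().All(ctx).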
+type DatabaseEdges struct { + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool + // totalCount holds the count of the edges above. + totalCount [1]map[string]int +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e DatabaseEdges) GroupOrErr() (*Group, error) { + if e.Group != nil { + return e.Group, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: group.Label} + } + return nil, &NotLoadedError{edge: "group"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Database) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case database.FieldID, database.FieldCreatedBy, database.FieldUpdatedBy, database.FieldDeletedBy, database.FieldOrganizationID, database.FieldName, database.FieldGeo, database.FieldDsn, database.FieldGroupID, database.FieldToken, database.FieldStatus, database.FieldProvider: + values[i] = new(sql.NullString) + case database.FieldCreatedAt, database.FieldUpdatedAt, database.FieldDeletedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Database fields. +func (d *Database) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case database.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + d.ID = value.String + } + case database.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + d.CreatedAt = value.Time + } + case database.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + d.UpdatedAt = value.Time + } + case database.FieldCreatedBy: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field created_by", values[i]) + } else if value.Valid { + d.CreatedBy = value.String + } + case database.FieldUpdatedBy: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field updated_by", values[i]) + } else if value.Valid { + d.UpdatedBy = value.String + } + case database.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + d.DeletedAt = value.Time + } + case database.FieldDeletedBy: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field deleted_by", values[i]) + } else if value.Valid { + d.DeletedBy = value.String + } + case database.FieldOrganizationID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field organization_id", values[i]) + } else if value.Valid { + d.OrganizationID = value.String + } + case database.FieldName: + if value, 
ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + d.Name = value.String + } + case database.FieldGeo: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field geo", values[i]) + } else if value.Valid { + d.Geo = value.String + } + case database.FieldDsn: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field dsn", values[i]) + } else if value.Valid { + d.Dsn = value.String + } + case database.FieldGroupID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value.Valid { + d.GroupID = value.String + } + case database.FieldToken: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field token", values[i]) + } else if value.Valid { + d.Token = value.String + } + case database.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + d.Status = enums.DatabaseStatus(value.String) + } + case database.FieldProvider: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field provider", values[i]) + } else if value.Valid { + d.Provider = enums.DatabaseProvider(value.String) + } + default: + d.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Database. +// This includes values selected through modifiers, order, etc. +func (d *Database) Value(name string) (ent.Value, error) { + return d.selectValues.Get(name) +} + +// QueryGroup queries the "group" edge of the Database entity. +func (d *Database) QueryGroup() *GroupQuery { + return NewDatabaseClient(d.config).QueryGroup(d) +} + +// Update returns a builder for updating this Database. +// Note that you need to call Database.Unwrap() before calling this method if this Database +// was returned from a transaction, and the transaction was committed or rolled back. +func (d *Database) Update() *DatabaseUpdateOne { + return NewDatabaseClient(d.config).UpdateOne(d) +} + +// Unwrap unwraps the Database entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (d *Database) Unwrap() *Database { + _tx, ok := d.config.driver.(*txDriver) + if !ok { + panic("generated: Database is not a transactional entity") + } + d.config.driver = _tx.drv + return d +} + +// String implements the fmt.Stringer. 
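+// The sensitive token value is omitted from the formatted output.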
+func (d *Database) String() string { + var builder strings.Builder + builder.WriteString("Database(") + builder.WriteString(fmt.Sprintf("id=%v, ", d.ID)) + builder.WriteString("created_at=") + builder.WriteString(d.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(d.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("created_by=") + builder.WriteString(d.CreatedBy) + builder.WriteString(", ") + builder.WriteString("updated_by=") + builder.WriteString(d.UpdatedBy) + builder.WriteString(", ") + builder.WriteString("deleted_at=") + builder.WriteString(d.DeletedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("deleted_by=") + builder.WriteString(d.DeletedBy) + builder.WriteString(", ") + builder.WriteString("organization_id=") + builder.WriteString(d.OrganizationID) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(d.Name) + builder.WriteString(", ") + builder.WriteString("geo=") + builder.WriteString(d.Geo) + builder.WriteString(", ") + builder.WriteString("dsn=") + builder.WriteString(d.Dsn) + builder.WriteString(", ") + builder.WriteString("group_id=") + builder.WriteString(d.GroupID) + builder.WriteString(", ") + builder.WriteString("token=") + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(fmt.Sprintf("%v", d.Status)) + builder.WriteString(", ") + builder.WriteString("provider=") + builder.WriteString(fmt.Sprintf("%v", d.Provider)) + builder.WriteByte(')') + return builder.String() +} + +// Databases is a parsable slice of Database. +type Databases []*Database diff --git a/internal/ent/generated/database/database.go b/internal/ent/generated/database/database.go new file mode 100644 index 0000000..1ade17e --- /dev/null +++ b/internal/ent/generated/database/database.go @@ -0,0 +1,242 @@ +// Code generated by ent, DO NOT EDIT. + +package database + +import ( + "fmt" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/99designs/gqlgen/graphql" + "github.com/datumforge/geodetic/pkg/enums" +) + +const ( + // Label holds the string label denoting the database type in the database. + Label = "database" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldCreatedBy holds the string denoting the created_by field in the database. + FieldCreatedBy = "created_by" + // FieldUpdatedBy holds the string denoting the updated_by field in the database. + FieldUpdatedBy = "updated_by" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldDeletedBy holds the string denoting the deleted_by field in the database. + FieldDeletedBy = "deleted_by" + // FieldOrganizationID holds the string denoting the organization_id field in the database. + FieldOrganizationID = "organization_id" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldGeo holds the string denoting the geo field in the database. + FieldGeo = "geo" + // FieldDsn holds the string denoting the dsn field in the database. + FieldDsn = "dsn" + // FieldGroupID holds the string denoting the group_id field in the database. 
+ FieldGroupID = "group_id" + // FieldToken holds the string denoting the token field in the database. + FieldToken = "token" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldProvider holds the string denoting the provider field in the database. + FieldProvider = "provider" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // Table holds the table name of the database in the database. + Table = "databases" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "databases" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" +) + +// Columns holds all SQL columns for database fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldCreatedBy, + FieldUpdatedBy, + FieldDeletedAt, + FieldDeletedBy, + FieldOrganizationID, + FieldName, + FieldGeo, + FieldDsn, + FieldGroupID, + FieldToken, + FieldStatus, + FieldProvider, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/datumforge/geodetic/internal/ent/generated/runtime" +var ( + Hooks [3]ent.Hook + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // OrganizationIDValidator is a validator for the "organization_id" field. It is called by the builders before save. + OrganizationIDValidator func(string) error + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // DsnValidator is a validator for the "dsn" field. It is called by the builders before save. + DsnValidator func(string) error + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() string +) + +const DefaultStatus enums.DatabaseStatus = "CREATING" + +// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save. +func StatusValidator(s enums.DatabaseStatus) error { + switch s.String() { + case "ACTIVE", "CREATING", "DELETING", "DELETED": + return nil + default: + return fmt.Errorf("database: invalid enum value for status field: %q", s) + } +} + +const DefaultProvider enums.DatabaseProvider = "LOCAL" + +// ProviderValidator is a validator for the "provider" field enum values. It is called by the builders before save. +func ProviderValidator(pr enums.DatabaseProvider) error { + switch pr.String() { + case "LOCAL", "TURSO": + return nil + default: + return fmt.Errorf("database: invalid enum value for provider field: %q", pr) + } +} + +// OrderOption defines the ordering options for the Database queries. 
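+//
+// An illustrative sketch (assumes ctx and client are in scope):
+//
+//	client.Database.Query().
+//		Order(database.ByCreatedAt(sql.OrderDesc())).
+//		All(ctx)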
+type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByCreatedBy orders the results by the created_by field. +func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedBy, opts...).ToFunc() +} + +// ByUpdatedBy orders the results by the updated_by field. +func ByUpdatedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedBy, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByDeletedBy orders the results by the deleted_by field. +func ByDeletedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedBy, opts...).ToFunc() +} + +// ByOrganizationID orders the results by the organization_id field. +func ByOrganizationID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOrganizationID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByGeo orders the results by the geo field. +func ByGeo(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGeo, opts...).ToFunc() +} + +// ByDsn orders the results by the dsn field. +func ByDsn(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDsn, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// ByToken orders the results by the token field. +func ByToken(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldToken, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByProvider orders the results by the provider field. +func ByProvider(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldProvider, opts...).ToFunc() +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} + +var ( + // enums.DatabaseStatus must implement graphql.Marshaler. + _ graphql.Marshaler = (*enums.DatabaseStatus)(nil) + // enums.DatabaseStatus must implement graphql.Unmarshaler. + _ graphql.Unmarshaler = (*enums.DatabaseStatus)(nil) +) + +var ( + // enums.DatabaseProvider must implement graphql.Marshaler. 
+ _ graphql.Marshaler = (*enums.DatabaseProvider)(nil) + // enums.DatabaseProvider must implement graphql.Unmarshaler. + _ graphql.Unmarshaler = (*enums.DatabaseProvider)(nil) +) diff --git a/internal/ent/generated/database/where.go b/internal/ent/generated/database/where.go new file mode 100644 index 0000000..96290d0 --- /dev/null +++ b/internal/ent/generated/database/where.go @@ -0,0 +1,1018 @@ +// Code generated by ent, DO NOT EDIT. + +package database + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/datumforge/geodetic/internal/ent/generated/predicate" + "github.com/datumforge/geodetic/pkg/enums" + + "github.com/datumforge/geodetic/internal/ent/generated/internal" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.Database { + return predicate.Database(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.Database { + return predicate.Database(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.Database { + return predicate.Database(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.Database { + return predicate.Database(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.Database { + return predicate.Database(sql.FieldContainsFold(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ. +func CreatedBy(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldCreatedBy, v)) +} + +// UpdatedBy applies equality check predicate on the "updated_by" field. It's identical to UpdatedByEQ. +func UpdatedBy(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldUpdatedBy, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. 
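+//
+// Predicates compose inside Where, e.g. an illustrative sketch (assumes ctx
+// and client are in scope):
+//
+//	client.Database.Query().
+//		Where(database.DeletedAtIsNil()).
+//		All(ctx)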
+func DeletedAt(v time.Time) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedBy applies equality check predicate on the "deleted_by" field. It's identical to DeletedByEQ. +func DeletedBy(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldDeletedBy, v)) +} + +// OrganizationID applies equality check predicate on the "organization_id" field. It's identical to OrganizationIDEQ. +func OrganizationID(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldOrganizationID, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldName, v)) +} + +// Geo applies equality check predicate on the "geo" field. It's identical to GeoEQ. +func Geo(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldGeo, v)) +} + +// Dsn applies equality check predicate on the "dsn" field. It's identical to DsnEQ. +func Dsn(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldDsn, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldGroupID, v)) +} + +// Token applies equality check predicate on the "token" field. It's identical to TokenEQ. +func Token(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldToken, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Database { + return predicate.Database(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Database { + return predicate.Database(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Database { + return predicate.Database(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldCreatedAt, v)) +} + +// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. +func CreatedAtIsNil() predicate.Database { + return predicate.Database(sql.FieldIsNull(FieldCreatedAt)) +} + +// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. +func CreatedAtNotNil() predicate.Database { + return predicate.Database(sql.FieldNotNull(FieldCreatedAt)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. 
+func UpdatedAtEQ(v time.Time) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Database { + return predicate.Database(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Database { + return predicate.Database(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Database { + return predicate.Database(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. +func UpdatedAtIsNil() predicate.Database { + return predicate.Database(sql.FieldIsNull(FieldUpdatedAt)) +} + +// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. +func UpdatedAtNotNil() predicate.Database { + return predicate.Database(sql.FieldNotNull(FieldUpdatedAt)) +} + +// CreatedByEQ applies the EQ predicate on the "created_by" field. +func CreatedByEQ(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldCreatedBy, v)) +} + +// CreatedByNEQ applies the NEQ predicate on the "created_by" field. +func CreatedByNEQ(v string) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldCreatedBy, v)) +} + +// CreatedByIn applies the In predicate on the "created_by" field. +func CreatedByIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldIn(FieldCreatedBy, vs...)) +} + +// CreatedByNotIn applies the NotIn predicate on the "created_by" field. +func CreatedByNotIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldCreatedBy, vs...)) +} + +// CreatedByGT applies the GT predicate on the "created_by" field. +func CreatedByGT(v string) predicate.Database { + return predicate.Database(sql.FieldGT(FieldCreatedBy, v)) +} + +// CreatedByGTE applies the GTE predicate on the "created_by" field. +func CreatedByGTE(v string) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldCreatedBy, v)) +} + +// CreatedByLT applies the LT predicate on the "created_by" field. +func CreatedByLT(v string) predicate.Database { + return predicate.Database(sql.FieldLT(FieldCreatedBy, v)) +} + +// CreatedByLTE applies the LTE predicate on the "created_by" field. +func CreatedByLTE(v string) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldCreatedBy, v)) +} + +// CreatedByContains applies the Contains predicate on the "created_by" field. 
+func CreatedByContains(v string) predicate.Database { + return predicate.Database(sql.FieldContains(FieldCreatedBy, v)) +} + +// CreatedByHasPrefix applies the HasPrefix predicate on the "created_by" field. +func CreatedByHasPrefix(v string) predicate.Database { + return predicate.Database(sql.FieldHasPrefix(FieldCreatedBy, v)) +} + +// CreatedByHasSuffix applies the HasSuffix predicate on the "created_by" field. +func CreatedByHasSuffix(v string) predicate.Database { + return predicate.Database(sql.FieldHasSuffix(FieldCreatedBy, v)) +} + +// CreatedByIsNil applies the IsNil predicate on the "created_by" field. +func CreatedByIsNil() predicate.Database { + return predicate.Database(sql.FieldIsNull(FieldCreatedBy)) +} + +// CreatedByNotNil applies the NotNil predicate on the "created_by" field. +func CreatedByNotNil() predicate.Database { + return predicate.Database(sql.FieldNotNull(FieldCreatedBy)) +} + +// CreatedByEqualFold applies the EqualFold predicate on the "created_by" field. +func CreatedByEqualFold(v string) predicate.Database { + return predicate.Database(sql.FieldEqualFold(FieldCreatedBy, v)) +} + +// CreatedByContainsFold applies the ContainsFold predicate on the "created_by" field. +func CreatedByContainsFold(v string) predicate.Database { + return predicate.Database(sql.FieldContainsFold(FieldCreatedBy, v)) +} + +// UpdatedByEQ applies the EQ predicate on the "updated_by" field. +func UpdatedByEQ(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldUpdatedBy, v)) +} + +// UpdatedByNEQ applies the NEQ predicate on the "updated_by" field. +func UpdatedByNEQ(v string) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldUpdatedBy, v)) +} + +// UpdatedByIn applies the In predicate on the "updated_by" field. +func UpdatedByIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldIn(FieldUpdatedBy, vs...)) +} + +// UpdatedByNotIn applies the NotIn predicate on the "updated_by" field. +func UpdatedByNotIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldUpdatedBy, vs...)) +} + +// UpdatedByGT applies the GT predicate on the "updated_by" field. +func UpdatedByGT(v string) predicate.Database { + return predicate.Database(sql.FieldGT(FieldUpdatedBy, v)) +} + +// UpdatedByGTE applies the GTE predicate on the "updated_by" field. +func UpdatedByGTE(v string) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldUpdatedBy, v)) +} + +// UpdatedByLT applies the LT predicate on the "updated_by" field. +func UpdatedByLT(v string) predicate.Database { + return predicate.Database(sql.FieldLT(FieldUpdatedBy, v)) +} + +// UpdatedByLTE applies the LTE predicate on the "updated_by" field. +func UpdatedByLTE(v string) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldUpdatedBy, v)) +} + +// UpdatedByContains applies the Contains predicate on the "updated_by" field. +func UpdatedByContains(v string) predicate.Database { + return predicate.Database(sql.FieldContains(FieldUpdatedBy, v)) +} + +// UpdatedByHasPrefix applies the HasPrefix predicate on the "updated_by" field. +func UpdatedByHasPrefix(v string) predicate.Database { + return predicate.Database(sql.FieldHasPrefix(FieldUpdatedBy, v)) +} + +// UpdatedByHasSuffix applies the HasSuffix predicate on the "updated_by" field. +func UpdatedByHasSuffix(v string) predicate.Database { + return predicate.Database(sql.FieldHasSuffix(FieldUpdatedBy, v)) +} + +// UpdatedByIsNil applies the IsNil predicate on the "updated_by" field. 
+func UpdatedByIsNil() predicate.Database { + return predicate.Database(sql.FieldIsNull(FieldUpdatedBy)) +} + +// UpdatedByNotNil applies the NotNil predicate on the "updated_by" field. +func UpdatedByNotNil() predicate.Database { + return predicate.Database(sql.FieldNotNull(FieldUpdatedBy)) +} + +// UpdatedByEqualFold applies the EqualFold predicate on the "updated_by" field. +func UpdatedByEqualFold(v string) predicate.Database { + return predicate.Database(sql.FieldEqualFold(FieldUpdatedBy, v)) +} + +// UpdatedByContainsFold applies the ContainsFold predicate on the "updated_by" field. +func UpdatedByContainsFold(v string) predicate.Database { + return predicate.Database(sql.FieldContainsFold(FieldUpdatedBy, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.Database { + return predicate.Database(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.Database { + return predicate.Database(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.Database { + return predicate.Database(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.Database { + return predicate.Database(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.Database { + return predicate.Database(sql.FieldNotNull(FieldDeletedAt)) +} + +// DeletedByEQ applies the EQ predicate on the "deleted_by" field. +func DeletedByEQ(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldDeletedBy, v)) +} + +// DeletedByNEQ applies the NEQ predicate on the "deleted_by" field. +func DeletedByNEQ(v string) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldDeletedBy, v)) +} + +// DeletedByIn applies the In predicate on the "deleted_by" field. +func DeletedByIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldIn(FieldDeletedBy, vs...)) +} + +// DeletedByNotIn applies the NotIn predicate on the "deleted_by" field. +func DeletedByNotIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldDeletedBy, vs...)) +} + +// DeletedByGT applies the GT predicate on the "deleted_by" field. 
+func DeletedByGT(v string) predicate.Database { + return predicate.Database(sql.FieldGT(FieldDeletedBy, v)) +} + +// DeletedByGTE applies the GTE predicate on the "deleted_by" field. +func DeletedByGTE(v string) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldDeletedBy, v)) +} + +// DeletedByLT applies the LT predicate on the "deleted_by" field. +func DeletedByLT(v string) predicate.Database { + return predicate.Database(sql.FieldLT(FieldDeletedBy, v)) +} + +// DeletedByLTE applies the LTE predicate on the "deleted_by" field. +func DeletedByLTE(v string) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldDeletedBy, v)) +} + +// DeletedByContains applies the Contains predicate on the "deleted_by" field. +func DeletedByContains(v string) predicate.Database { + return predicate.Database(sql.FieldContains(FieldDeletedBy, v)) +} + +// DeletedByHasPrefix applies the HasPrefix predicate on the "deleted_by" field. +func DeletedByHasPrefix(v string) predicate.Database { + return predicate.Database(sql.FieldHasPrefix(FieldDeletedBy, v)) +} + +// DeletedByHasSuffix applies the HasSuffix predicate on the "deleted_by" field. +func DeletedByHasSuffix(v string) predicate.Database { + return predicate.Database(sql.FieldHasSuffix(FieldDeletedBy, v)) +} + +// DeletedByIsNil applies the IsNil predicate on the "deleted_by" field. +func DeletedByIsNil() predicate.Database { + return predicate.Database(sql.FieldIsNull(FieldDeletedBy)) +} + +// DeletedByNotNil applies the NotNil predicate on the "deleted_by" field. +func DeletedByNotNil() predicate.Database { + return predicate.Database(sql.FieldNotNull(FieldDeletedBy)) +} + +// DeletedByEqualFold applies the EqualFold predicate on the "deleted_by" field. +func DeletedByEqualFold(v string) predicate.Database { + return predicate.Database(sql.FieldEqualFold(FieldDeletedBy, v)) +} + +// DeletedByContainsFold applies the ContainsFold predicate on the "deleted_by" field. +func DeletedByContainsFold(v string) predicate.Database { + return predicate.Database(sql.FieldContainsFold(FieldDeletedBy, v)) +} + +// OrganizationIDEQ applies the EQ predicate on the "organization_id" field. +func OrganizationIDEQ(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldOrganizationID, v)) +} + +// OrganizationIDNEQ applies the NEQ predicate on the "organization_id" field. +func OrganizationIDNEQ(v string) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldOrganizationID, v)) +} + +// OrganizationIDIn applies the In predicate on the "organization_id" field. +func OrganizationIDIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldIn(FieldOrganizationID, vs...)) +} + +// OrganizationIDNotIn applies the NotIn predicate on the "organization_id" field. +func OrganizationIDNotIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldOrganizationID, vs...)) +} + +// OrganizationIDGT applies the GT predicate on the "organization_id" field. +func OrganizationIDGT(v string) predicate.Database { + return predicate.Database(sql.FieldGT(FieldOrganizationID, v)) +} + +// OrganizationIDGTE applies the GTE predicate on the "organization_id" field. +func OrganizationIDGTE(v string) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldOrganizationID, v)) +} + +// OrganizationIDLT applies the LT predicate on the "organization_id" field. 
+func OrganizationIDLT(v string) predicate.Database { + return predicate.Database(sql.FieldLT(FieldOrganizationID, v)) +} + +// OrganizationIDLTE applies the LTE predicate on the "organization_id" field. +func OrganizationIDLTE(v string) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldOrganizationID, v)) +} + +// OrganizationIDContains applies the Contains predicate on the "organization_id" field. +func OrganizationIDContains(v string) predicate.Database { + return predicate.Database(sql.FieldContains(FieldOrganizationID, v)) +} + +// OrganizationIDHasPrefix applies the HasPrefix predicate on the "organization_id" field. +func OrganizationIDHasPrefix(v string) predicate.Database { + return predicate.Database(sql.FieldHasPrefix(FieldOrganizationID, v)) +} + +// OrganizationIDHasSuffix applies the HasSuffix predicate on the "organization_id" field. +func OrganizationIDHasSuffix(v string) predicate.Database { + return predicate.Database(sql.FieldHasSuffix(FieldOrganizationID, v)) +} + +// OrganizationIDEqualFold applies the EqualFold predicate on the "organization_id" field. +func OrganizationIDEqualFold(v string) predicate.Database { + return predicate.Database(sql.FieldEqualFold(FieldOrganizationID, v)) +} + +// OrganizationIDContainsFold applies the ContainsFold predicate on the "organization_id" field. +func OrganizationIDContainsFold(v string) predicate.Database { + return predicate.Database(sql.FieldContainsFold(FieldOrganizationID, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Database { + return predicate.Database(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Database { + return predicate.Database(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Database { + return predicate.Database(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Database { + return predicate.Database(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Database { + return predicate.Database(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. 
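+// EqualFold matches case-insensitively, so a sketch like the following
+// (client and ctx assumed in scope) matches "PRIMARY" as well as "primary":
+//
+//	db, err := client.Database.Query().
+//		Where(database.NameEqualFold("primary")).
+//		Only(ctx)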
+func NameEqualFold(v string) predicate.Database { + return predicate.Database(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Database { + return predicate.Database(sql.FieldContainsFold(FieldName, v)) +} + +// GeoEQ applies the EQ predicate on the "geo" field. +func GeoEQ(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldGeo, v)) +} + +// GeoNEQ applies the NEQ predicate on the "geo" field. +func GeoNEQ(v string) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldGeo, v)) +} + +// GeoIn applies the In predicate on the "geo" field. +func GeoIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldIn(FieldGeo, vs...)) +} + +// GeoNotIn applies the NotIn predicate on the "geo" field. +func GeoNotIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldGeo, vs...)) +} + +// GeoGT applies the GT predicate on the "geo" field. +func GeoGT(v string) predicate.Database { + return predicate.Database(sql.FieldGT(FieldGeo, v)) +} + +// GeoGTE applies the GTE predicate on the "geo" field. +func GeoGTE(v string) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldGeo, v)) +} + +// GeoLT applies the LT predicate on the "geo" field. +func GeoLT(v string) predicate.Database { + return predicate.Database(sql.FieldLT(FieldGeo, v)) +} + +// GeoLTE applies the LTE predicate on the "geo" field. +func GeoLTE(v string) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldGeo, v)) +} + +// GeoContains applies the Contains predicate on the "geo" field. +func GeoContains(v string) predicate.Database { + return predicate.Database(sql.FieldContains(FieldGeo, v)) +} + +// GeoHasPrefix applies the HasPrefix predicate on the "geo" field. +func GeoHasPrefix(v string) predicate.Database { + return predicate.Database(sql.FieldHasPrefix(FieldGeo, v)) +} + +// GeoHasSuffix applies the HasSuffix predicate on the "geo" field. +func GeoHasSuffix(v string) predicate.Database { + return predicate.Database(sql.FieldHasSuffix(FieldGeo, v)) +} + +// GeoIsNil applies the IsNil predicate on the "geo" field. +func GeoIsNil() predicate.Database { + return predicate.Database(sql.FieldIsNull(FieldGeo)) +} + +// GeoNotNil applies the NotNil predicate on the "geo" field. +func GeoNotNil() predicate.Database { + return predicate.Database(sql.FieldNotNull(FieldGeo)) +} + +// GeoEqualFold applies the EqualFold predicate on the "geo" field. +func GeoEqualFold(v string) predicate.Database { + return predicate.Database(sql.FieldEqualFold(FieldGeo, v)) +} + +// GeoContainsFold applies the ContainsFold predicate on the "geo" field. +func GeoContainsFold(v string) predicate.Database { + return predicate.Database(sql.FieldContainsFold(FieldGeo, v)) +} + +// DsnEQ applies the EQ predicate on the "dsn" field. +func DsnEQ(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldDsn, v)) +} + +// DsnNEQ applies the NEQ predicate on the "dsn" field. +func DsnNEQ(v string) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldDsn, v)) +} + +// DsnIn applies the In predicate on the "dsn" field. +func DsnIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldIn(FieldDsn, vs...)) +} + +// DsnNotIn applies the NotIn predicate on the "dsn" field. 
+func DsnNotIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldDsn, vs...)) +} + +// DsnGT applies the GT predicate on the "dsn" field. +func DsnGT(v string) predicate.Database { + return predicate.Database(sql.FieldGT(FieldDsn, v)) +} + +// DsnGTE applies the GTE predicate on the "dsn" field. +func DsnGTE(v string) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldDsn, v)) +} + +// DsnLT applies the LT predicate on the "dsn" field. +func DsnLT(v string) predicate.Database { + return predicate.Database(sql.FieldLT(FieldDsn, v)) +} + +// DsnLTE applies the LTE predicate on the "dsn" field. +func DsnLTE(v string) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldDsn, v)) +} + +// DsnContains applies the Contains predicate on the "dsn" field. +func DsnContains(v string) predicate.Database { + return predicate.Database(sql.FieldContains(FieldDsn, v)) +} + +// DsnHasPrefix applies the HasPrefix predicate on the "dsn" field. +func DsnHasPrefix(v string) predicate.Database { + return predicate.Database(sql.FieldHasPrefix(FieldDsn, v)) +} + +// DsnHasSuffix applies the HasSuffix predicate on the "dsn" field. +func DsnHasSuffix(v string) predicate.Database { + return predicate.Database(sql.FieldHasSuffix(FieldDsn, v)) +} + +// DsnEqualFold applies the EqualFold predicate on the "dsn" field. +func DsnEqualFold(v string) predicate.Database { + return predicate.Database(sql.FieldEqualFold(FieldDsn, v)) +} + +// DsnContainsFold applies the ContainsFold predicate on the "dsn" field. +func DsnContainsFold(v string) predicate.Database { + return predicate.Database(sql.FieldContainsFold(FieldDsn, v)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. +func GroupIDEQ(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldGroupID, v)) +} + +// GroupIDNEQ applies the NEQ predicate on the "group_id" field. +func GroupIDNEQ(v string) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldGroupID, v)) +} + +// GroupIDIn applies the In predicate on the "group_id" field. +func GroupIDIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldIn(FieldGroupID, vs...)) +} + +// GroupIDNotIn applies the NotIn predicate on the "group_id" field. +func GroupIDNotIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldGroupID, vs...)) +} + +// GroupIDGT applies the GT predicate on the "group_id" field. +func GroupIDGT(v string) predicate.Database { + return predicate.Database(sql.FieldGT(FieldGroupID, v)) +} + +// GroupIDGTE applies the GTE predicate on the "group_id" field. +func GroupIDGTE(v string) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldGroupID, v)) +} + +// GroupIDLT applies the LT predicate on the "group_id" field. +func GroupIDLT(v string) predicate.Database { + return predicate.Database(sql.FieldLT(FieldGroupID, v)) +} + +// GroupIDLTE applies the LTE predicate on the "group_id" field. +func GroupIDLTE(v string) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldGroupID, v)) +} + +// GroupIDContains applies the Contains predicate on the "group_id" field. +func GroupIDContains(v string) predicate.Database { + return predicate.Database(sql.FieldContains(FieldGroupID, v)) +} + +// GroupIDHasPrefix applies the HasPrefix predicate on the "group_id" field. 
+func GroupIDHasPrefix(v string) predicate.Database { + return predicate.Database(sql.FieldHasPrefix(FieldGroupID, v)) +} + +// GroupIDHasSuffix applies the HasSuffix predicate on the "group_id" field. +func GroupIDHasSuffix(v string) predicate.Database { + return predicate.Database(sql.FieldHasSuffix(FieldGroupID, v)) +} + +// GroupIDEqualFold applies the EqualFold predicate on the "group_id" field. +func GroupIDEqualFold(v string) predicate.Database { + return predicate.Database(sql.FieldEqualFold(FieldGroupID, v)) +} + +// GroupIDContainsFold applies the ContainsFold predicate on the "group_id" field. +func GroupIDContainsFold(v string) predicate.Database { + return predicate.Database(sql.FieldContainsFold(FieldGroupID, v)) +} + +// TokenEQ applies the EQ predicate on the "token" field. +func TokenEQ(v string) predicate.Database { + return predicate.Database(sql.FieldEQ(FieldToken, v)) +} + +// TokenNEQ applies the NEQ predicate on the "token" field. +func TokenNEQ(v string) predicate.Database { + return predicate.Database(sql.FieldNEQ(FieldToken, v)) +} + +// TokenIn applies the In predicate on the "token" field. +func TokenIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldIn(FieldToken, vs...)) +} + +// TokenNotIn applies the NotIn predicate on the "token" field. +func TokenNotIn(vs ...string) predicate.Database { + return predicate.Database(sql.FieldNotIn(FieldToken, vs...)) +} + +// TokenGT applies the GT predicate on the "token" field. +func TokenGT(v string) predicate.Database { + return predicate.Database(sql.FieldGT(FieldToken, v)) +} + +// TokenGTE applies the GTE predicate on the "token" field. +func TokenGTE(v string) predicate.Database { + return predicate.Database(sql.FieldGTE(FieldToken, v)) +} + +// TokenLT applies the LT predicate on the "token" field. +func TokenLT(v string) predicate.Database { + return predicate.Database(sql.FieldLT(FieldToken, v)) +} + +// TokenLTE applies the LTE predicate on the "token" field. +func TokenLTE(v string) predicate.Database { + return predicate.Database(sql.FieldLTE(FieldToken, v)) +} + +// TokenContains applies the Contains predicate on the "token" field. +func TokenContains(v string) predicate.Database { + return predicate.Database(sql.FieldContains(FieldToken, v)) +} + +// TokenHasPrefix applies the HasPrefix predicate on the "token" field. +func TokenHasPrefix(v string) predicate.Database { + return predicate.Database(sql.FieldHasPrefix(FieldToken, v)) +} + +// TokenHasSuffix applies the HasSuffix predicate on the "token" field. +func TokenHasSuffix(v string) predicate.Database { + return predicate.Database(sql.FieldHasSuffix(FieldToken, v)) +} + +// TokenIsNil applies the IsNil predicate on the "token" field. +func TokenIsNil() predicate.Database { + return predicate.Database(sql.FieldIsNull(FieldToken)) +} + +// TokenNotNil applies the NotNil predicate on the "token" field. +func TokenNotNil() predicate.Database { + return predicate.Database(sql.FieldNotNull(FieldToken)) +} + +// TokenEqualFold applies the EqualFold predicate on the "token" field. +func TokenEqualFold(v string) predicate.Database { + return predicate.Database(sql.FieldEqualFold(FieldToken, v)) +} + +// TokenContainsFold applies the ContainsFold predicate on the "token" field. +func TokenContainsFold(v string) predicate.Database { + return predicate.Database(sql.FieldContainsFold(FieldToken, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. 
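+// The status and provider predicates below take the typed enum values from
+// pkg/enums; a sketch, with an illustrative status value (client and ctx
+// assumed in scope):
+//
+//	dbs, err := client.Database.Query().
+//		Where(database.StatusEQ(enums.DatabaseStatus("ACTIVE"))).
+//		All(ctx)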
+func StatusEQ(v enums.DatabaseStatus) predicate.Database {
+	vc := v
+	return predicate.Database(sql.FieldEQ(FieldStatus, vc))
+}
+
+// StatusNEQ applies the NEQ predicate on the "status" field.
+func StatusNEQ(v enums.DatabaseStatus) predicate.Database {
+	vc := v
+	return predicate.Database(sql.FieldNEQ(FieldStatus, vc))
+}
+
+// StatusIn applies the In predicate on the "status" field.
+func StatusIn(vs ...enums.DatabaseStatus) predicate.Database {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Database(sql.FieldIn(FieldStatus, v...))
+}
+
+// StatusNotIn applies the NotIn predicate on the "status" field.
+func StatusNotIn(vs ...enums.DatabaseStatus) predicate.Database {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Database(sql.FieldNotIn(FieldStatus, v...))
+}
+
+// ProviderEQ applies the EQ predicate on the "provider" field.
+func ProviderEQ(v enums.DatabaseProvider) predicate.Database {
+	vc := v
+	return predicate.Database(sql.FieldEQ(FieldProvider, vc))
+}
+
+// ProviderNEQ applies the NEQ predicate on the "provider" field.
+func ProviderNEQ(v enums.DatabaseProvider) predicate.Database {
+	vc := v
+	return predicate.Database(sql.FieldNEQ(FieldProvider, vc))
+}
+
+// ProviderIn applies the In predicate on the "provider" field.
+func ProviderIn(vs ...enums.DatabaseProvider) predicate.Database {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Database(sql.FieldIn(FieldProvider, v...))
+}
+
+// ProviderNotIn applies the NotIn predicate on the "provider" field.
+func ProviderNotIn(vs ...enums.DatabaseProvider) predicate.Database {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Database(sql.FieldNotIn(FieldProvider, v...))
+}
+
+// HasGroup applies the HasEdge predicate on the "group" edge.
+func HasGroup() predicate.Database {
+	return predicate.Database(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+		)
+		schemaConfig := internal.SchemaConfigFromContext(s.Context())
+		step.To.Schema = schemaConfig.Group
+		step.Edge.Schema = schemaConfig.Database
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasGroupWith applies the HasEdge predicate on the "group" edge with the given conditions (other predicates).
+func HasGroupWith(preds ...predicate.Group) predicate.Database {
+	return predicate.Database(func(s *sql.Selector) {
+		step := newGroupStep()
+		schemaConfig := internal.SchemaConfigFromContext(s.Context())
+		step.To.Schema = schemaConfig.Group
+		step.Edge.Schema = schemaConfig.Database
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.Database) predicate.Database {
+	return predicate.Database(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.Database) predicate.Database {
+	return predicate.Database(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
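+// And, Or, and Not compose with the field predicates above; for example, a
+// sketch selecting undeleted databases outside one organization (client and
+// ctx assumed in scope, the ID illustrative):
+//
+//	dbs, err := client.Database.Query().
+//		Where(database.And(
+//			database.DeletedAtIsNil(),
+//			database.Not(database.OrganizationIDEQ("org-id")),
+//		)).
+//		All(ctx)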
+func Not(p predicate.Database) predicate.Database { + return predicate.Database(sql.NotPredicates(p)) +} diff --git a/internal/ent/generated/database_create.go b/internal/ent/generated/database_create.go new file mode 100644 index 0000000..a276fc7 --- /dev/null +++ b/internal/ent/generated/database_create.go @@ -0,0 +1,516 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/geodetic/pkg/enums" +) + +// DatabaseCreate is the builder for creating a Database entity. +type DatabaseCreate struct { + config + mutation *DatabaseMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (dc *DatabaseCreate) SetCreatedAt(t time.Time) *DatabaseCreate { + dc.mutation.SetCreatedAt(t) + return dc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (dc *DatabaseCreate) SetNillableCreatedAt(t *time.Time) *DatabaseCreate { + if t != nil { + dc.SetCreatedAt(*t) + } + return dc +} + +// SetUpdatedAt sets the "updated_at" field. +func (dc *DatabaseCreate) SetUpdatedAt(t time.Time) *DatabaseCreate { + dc.mutation.SetUpdatedAt(t) + return dc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (dc *DatabaseCreate) SetNillableUpdatedAt(t *time.Time) *DatabaseCreate { + if t != nil { + dc.SetUpdatedAt(*t) + } + return dc +} + +// SetCreatedBy sets the "created_by" field. +func (dc *DatabaseCreate) SetCreatedBy(s string) *DatabaseCreate { + dc.mutation.SetCreatedBy(s) + return dc +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (dc *DatabaseCreate) SetNillableCreatedBy(s *string) *DatabaseCreate { + if s != nil { + dc.SetCreatedBy(*s) + } + return dc +} + +// SetUpdatedBy sets the "updated_by" field. +func (dc *DatabaseCreate) SetUpdatedBy(s string) *DatabaseCreate { + dc.mutation.SetUpdatedBy(s) + return dc +} + +// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil. +func (dc *DatabaseCreate) SetNillableUpdatedBy(s *string) *DatabaseCreate { + if s != nil { + dc.SetUpdatedBy(*s) + } + return dc +} + +// SetDeletedAt sets the "deleted_at" field. +func (dc *DatabaseCreate) SetDeletedAt(t time.Time) *DatabaseCreate { + dc.mutation.SetDeletedAt(t) + return dc +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (dc *DatabaseCreate) SetNillableDeletedAt(t *time.Time) *DatabaseCreate { + if t != nil { + dc.SetDeletedAt(*t) + } + return dc +} + +// SetDeletedBy sets the "deleted_by" field. +func (dc *DatabaseCreate) SetDeletedBy(s string) *DatabaseCreate { + dc.mutation.SetDeletedBy(s) + return dc +} + +// SetNillableDeletedBy sets the "deleted_by" field if the given value is not nil. +func (dc *DatabaseCreate) SetNillableDeletedBy(s *string) *DatabaseCreate { + if s != nil { + dc.SetDeletedBy(*s) + } + return dc +} + +// SetOrganizationID sets the "organization_id" field. +func (dc *DatabaseCreate) SetOrganizationID(s string) *DatabaseCreate { + dc.mutation.SetOrganizationID(s) + return dc +} + +// SetName sets the "name" field. +func (dc *DatabaseCreate) SetName(s string) *DatabaseCreate { + dc.mutation.SetName(s) + return dc +} + +// SetGeo sets the "geo" field. 
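+// Setters return the builder, so a create chains into one call; a minimal
+// sketch with illustrative values (status and provider fall back to their
+// schema defaults when unset):
+//
+//	db, err := client.Database.Create().
+//		SetOrganizationID("org-id").
+//		SetName("primary").
+//		SetDsn("file:primary.db").
+//		SetGroupID("group-id").
+//		Save(ctx)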
+func (dc *DatabaseCreate) SetGeo(s string) *DatabaseCreate { + dc.mutation.SetGeo(s) + return dc +} + +// SetNillableGeo sets the "geo" field if the given value is not nil. +func (dc *DatabaseCreate) SetNillableGeo(s *string) *DatabaseCreate { + if s != nil { + dc.SetGeo(*s) + } + return dc +} + +// SetDsn sets the "dsn" field. +func (dc *DatabaseCreate) SetDsn(s string) *DatabaseCreate { + dc.mutation.SetDsn(s) + return dc +} + +// SetGroupID sets the "group_id" field. +func (dc *DatabaseCreate) SetGroupID(s string) *DatabaseCreate { + dc.mutation.SetGroupID(s) + return dc +} + +// SetToken sets the "token" field. +func (dc *DatabaseCreate) SetToken(s string) *DatabaseCreate { + dc.mutation.SetToken(s) + return dc +} + +// SetNillableToken sets the "token" field if the given value is not nil. +func (dc *DatabaseCreate) SetNillableToken(s *string) *DatabaseCreate { + if s != nil { + dc.SetToken(*s) + } + return dc +} + +// SetStatus sets the "status" field. +func (dc *DatabaseCreate) SetStatus(es enums.DatabaseStatus) *DatabaseCreate { + dc.mutation.SetStatus(es) + return dc +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (dc *DatabaseCreate) SetNillableStatus(es *enums.DatabaseStatus) *DatabaseCreate { + if es != nil { + dc.SetStatus(*es) + } + return dc +} + +// SetProvider sets the "provider" field. +func (dc *DatabaseCreate) SetProvider(ep enums.DatabaseProvider) *DatabaseCreate { + dc.mutation.SetProvider(ep) + return dc +} + +// SetNillableProvider sets the "provider" field if the given value is not nil. +func (dc *DatabaseCreate) SetNillableProvider(ep *enums.DatabaseProvider) *DatabaseCreate { + if ep != nil { + dc.SetProvider(*ep) + } + return dc +} + +// SetID sets the "id" field. +func (dc *DatabaseCreate) SetID(s string) *DatabaseCreate { + dc.mutation.SetID(s) + return dc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (dc *DatabaseCreate) SetNillableID(s *string) *DatabaseCreate { + if s != nil { + dc.SetID(*s) + } + return dc +} + +// SetGroup sets the "group" edge to the Group entity. +func (dc *DatabaseCreate) SetGroup(g *Group) *DatabaseCreate { + return dc.SetGroupID(g.ID) +} + +// Mutation returns the DatabaseMutation object of the builder. +func (dc *DatabaseCreate) Mutation() *DatabaseMutation { + return dc.mutation +} + +// Save creates the Database in the database. +func (dc *DatabaseCreate) Save(ctx context.Context) (*Database, error) { + if err := dc.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, dc.sqlSave, dc.mutation, dc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (dc *DatabaseCreate) SaveX(ctx context.Context) *Database { + v, err := dc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dc *DatabaseCreate) Exec(ctx context.Context) error { + _, err := dc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dc *DatabaseCreate) ExecX(ctx context.Context) { + if err := dc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
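+// Builders that leave created_at, updated_at, status, provider, or id unset
+// receive the schema defaults (database.DefaultCreatedAt, database.DefaultStatus,
+// and so on, checked for nil below); fields without defaults, such as name and
+// dsn, must be set explicitly before Save.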
+func (dc *DatabaseCreate) defaults() error { + if _, ok := dc.mutation.CreatedAt(); !ok { + if database.DefaultCreatedAt == nil { + return fmt.Errorf("generated: uninitialized database.DefaultCreatedAt (forgotten import generated/runtime?)") + } + v := database.DefaultCreatedAt() + dc.mutation.SetCreatedAt(v) + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + if database.DefaultUpdatedAt == nil { + return fmt.Errorf("generated: uninitialized database.DefaultUpdatedAt (forgotten import generated/runtime?)") + } + v := database.DefaultUpdatedAt() + dc.mutation.SetUpdatedAt(v) + } + if _, ok := dc.mutation.Status(); !ok { + v := database.DefaultStatus + dc.mutation.SetStatus(v) + } + if _, ok := dc.mutation.Provider(); !ok { + v := database.DefaultProvider + dc.mutation.SetProvider(v) + } + if _, ok := dc.mutation.ID(); !ok { + if database.DefaultID == nil { + return fmt.Errorf("generated: uninitialized database.DefaultID (forgotten import generated/runtime?)") + } + v := database.DefaultID() + dc.mutation.SetID(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (dc *DatabaseCreate) check() error { + if _, ok := dc.mutation.OrganizationID(); !ok { + return &ValidationError{Name: "organization_id", err: errors.New(`generated: missing required field "Database.organization_id"`)} + } + if v, ok := dc.mutation.OrganizationID(); ok { + if err := database.OrganizationIDValidator(v); err != nil { + return &ValidationError{Name: "organization_id", err: fmt.Errorf(`generated: validator failed for field "Database.organization_id": %w`, err)} + } + } + if _, ok := dc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`generated: missing required field "Database.name"`)} + } + if v, ok := dc.mutation.Name(); ok { + if err := database.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`generated: validator failed for field "Database.name": %w`, err)} + } + } + if _, ok := dc.mutation.Dsn(); !ok { + return &ValidationError{Name: "dsn", err: errors.New(`generated: missing required field "Database.dsn"`)} + } + if v, ok := dc.mutation.Dsn(); ok { + if err := database.DsnValidator(v); err != nil { + return &ValidationError{Name: "dsn", err: fmt.Errorf(`generated: validator failed for field "Database.dsn": %w`, err)} + } + } + if _, ok := dc.mutation.GroupID(); !ok { + return &ValidationError{Name: "group_id", err: errors.New(`generated: missing required field "Database.group_id"`)} + } + if _, ok := dc.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`generated: missing required field "Database.status"`)} + } + if v, ok := dc.mutation.Status(); ok { + if err := database.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`generated: validator failed for field "Database.status": %w`, err)} + } + } + if _, ok := dc.mutation.Provider(); !ok { + return &ValidationError{Name: "provider", err: errors.New(`generated: missing required field "Database.provider"`)} + } + if v, ok := dc.mutation.Provider(); ok { + if err := database.ProviderValidator(v); err != nil { + return &ValidationError{Name: "provider", err: fmt.Errorf(`generated: validator failed for field "Database.provider": %w`, err)} + } + } + if _, ok := dc.mutation.GroupID(); !ok { + return &ValidationError{Name: "group", err: errors.New(`generated: missing required edge "Database.group"`)} + } + return nil +} + +func (dc *DatabaseCreate) sqlSave(ctx context.Context) 
(*Database, error) { + if err := dc.check(); err != nil { + return nil, err + } + _node, _spec := dc.createSpec() + if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected Database.ID type: %T", _spec.ID.Value) + } + } + dc.mutation.id = &_node.ID + dc.mutation.done = true + return _node, nil +} + +func (dc *DatabaseCreate) createSpec() (*Database, *sqlgraph.CreateSpec) { + var ( + _node = &Database{config: dc.config} + _spec = sqlgraph.NewCreateSpec(database.Table, sqlgraph.NewFieldSpec(database.FieldID, field.TypeString)) + ) + _spec.Schema = dc.schemaConfig.Database + if id, ok := dc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := dc.mutation.CreatedAt(); ok { + _spec.SetField(database.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := dc.mutation.UpdatedAt(); ok { + _spec.SetField(database.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := dc.mutation.CreatedBy(); ok { + _spec.SetField(database.FieldCreatedBy, field.TypeString, value) + _node.CreatedBy = value + } + if value, ok := dc.mutation.UpdatedBy(); ok { + _spec.SetField(database.FieldUpdatedBy, field.TypeString, value) + _node.UpdatedBy = value + } + if value, ok := dc.mutation.DeletedAt(); ok { + _spec.SetField(database.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = value + } + if value, ok := dc.mutation.DeletedBy(); ok { + _spec.SetField(database.FieldDeletedBy, field.TypeString, value) + _node.DeletedBy = value + } + if value, ok := dc.mutation.OrganizationID(); ok { + _spec.SetField(database.FieldOrganizationID, field.TypeString, value) + _node.OrganizationID = value + } + if value, ok := dc.mutation.Name(); ok { + _spec.SetField(database.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := dc.mutation.Geo(); ok { + _spec.SetField(database.FieldGeo, field.TypeString, value) + _node.Geo = value + } + if value, ok := dc.mutation.Dsn(); ok { + _spec.SetField(database.FieldDsn, field.TypeString, value) + _node.Dsn = value + } + if value, ok := dc.mutation.Token(); ok { + _spec.SetField(database.FieldToken, field.TypeString, value) + _node.Token = value + } + if value, ok := dc.mutation.Status(); ok { + _spec.SetField(database.FieldStatus, field.TypeEnum, value) + _node.Status = value + } + if value, ok := dc.mutation.Provider(); ok { + _spec.SetField(database.FieldProvider, field.TypeEnum, value) + _node.Provider = value + } + if nodes := dc.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: database.GroupTable, + Columns: []string{database.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeString), + }, + } + edge.Schema = dc.schemaConfig.Database + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.GroupID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// DatabaseCreateBulk is the builder for creating many Database entities in bulk. +type DatabaseCreateBulk struct { + config + err error + builders []*DatabaseCreate +} + +// Save creates the Database entities in the database. 
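+// A sketch of creating several databases in one batch, assuming client, ctx,
+// and a names slice are in scope:
+//
+//	builders := make([]*DatabaseCreate, len(names))
+//	for i, name := range names {
+//		// plus the other required setters (organization, dsn, group, ...)
+//		builders[i] = client.Database.Create().SetName(name)
+//	}
+//	dbs, err := client.Database.CreateBulk(builders...).Save(ctx)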
+func (dcb *DatabaseCreateBulk) Save(ctx context.Context) ([]*Database, error) {
+	if dcb.err != nil {
+		return nil, dcb.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(dcb.builders))
+	nodes := make([]*Database, len(dcb.builders))
+	mutators := make([]Mutator, len(dcb.builders))
+	for i := range dcb.builders {
+		func(i int, root context.Context) {
+			builder := dcb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*DatabaseMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, dcb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, dcb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (dcb *DatabaseCreateBulk) SaveX(ctx context.Context) []*Database {
+	v, err := dcb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (dcb *DatabaseCreateBulk) Exec(ctx context.Context) error {
+	_, err := dcb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dcb *DatabaseCreateBulk) ExecX(ctx context.Context) {
+	if err := dcb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/internal/ent/generated/database_delete.go b/internal/ent/generated/database_delete.go
new file mode 100644
index 0000000..025457b
--- /dev/null
+++ b/internal/ent/generated/database_delete.go
@@ -0,0 +1,92 @@
+// Code generated by ent, DO NOT EDIT.
+
+package generated
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/datumforge/geodetic/internal/ent/generated/predicate"
+
+	"github.com/datumforge/geodetic/internal/ent/generated/database"
+	"github.com/datumforge/geodetic/internal/ent/generated/internal"
+)
+
+// DatabaseDelete is the builder for deleting a Database entity.
+type DatabaseDelete struct {
+	config
+	hooks    []Hook
+	mutation *DatabaseMutation
+}
+
+// Where appends a list of predicates to the DatabaseDelete builder.
+func (dd *DatabaseDelete) Where(ps ...predicate.Database) *DatabaseDelete {
+	dd.mutation.Where(ps...)
+	return dd
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (dd *DatabaseDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, dd.sqlExec, dd.mutation, dd.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
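+// A sketch of a predicate-scoped delete (client and ctx assumed in scope,
+// the ID illustrative):
+//
+//	n, err := client.Database.Delete().
+//		Where(database.OrganizationIDEQ("org-id")).
+//		Exec(ctx)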
+func (dd *DatabaseDelete) ExecX(ctx context.Context) int {
+	n, err := dd.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (dd *DatabaseDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(database.Table, sqlgraph.NewFieldSpec(database.FieldID, field.TypeString))
+	_spec.Node.Schema = dd.schemaConfig.Database
+	ctx = internal.NewSchemaConfigContext(ctx, dd.schemaConfig)
+	if ps := dd.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, dd.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	dd.mutation.done = true
+	return affected, err
+}
+
+// DatabaseDeleteOne is the builder for deleting a single Database entity.
+type DatabaseDeleteOne struct {
+	dd *DatabaseDelete
+}
+
+// Where appends a list of predicates to the DatabaseDelete builder.
+func (ddo *DatabaseDeleteOne) Where(ps ...predicate.Database) *DatabaseDeleteOne {
+	ddo.dd.mutation.Where(ps...)
+	return ddo
+}
+
+// Exec executes the deletion query.
+func (ddo *DatabaseDeleteOne) Exec(ctx context.Context) error {
+	n, err := ddo.dd.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{database.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ddo *DatabaseDeleteOne) ExecX(ctx context.Context) {
+	if err := ddo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/internal/ent/generated/database_query.go b/internal/ent/generated/database_query.go
new file mode 100644
index 0000000..4d71154
--- /dev/null
+++ b/internal/ent/generated/database_query.go
@@ -0,0 +1,630 @@
+// Code generated by ent, DO NOT EDIT.
+
+package generated
+
+import (
+	"context"
+	"fmt"
+	"math"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/datumforge/geodetic/internal/ent/generated/database"
+	"github.com/datumforge/geodetic/internal/ent/generated/group"
+	"github.com/datumforge/geodetic/internal/ent/generated/predicate"
+
+	"github.com/datumforge/geodetic/internal/ent/generated/internal"
+)
+
+// DatabaseQuery is the builder for querying Database entities.
+type DatabaseQuery struct {
+	config
+	ctx        *QueryContext
+	order      []database.OrderOption
+	inters     []Interceptor
+	predicates []predicate.Database
+	withGroup  *GroupQuery
+	modifiers  []func(*sql.Selector)
+	loadTotal  []func(context.Context, []*Database) error
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the DatabaseQuery builder.
+func (dq *DatabaseQuery) Where(ps ...predicate.Database) *DatabaseQuery {
+	dq.predicates = append(dq.predicates, ps...)
+	return dq
+}
+
+// Limit the number of records to be returned by this query.
+func (dq *DatabaseQuery) Limit(limit int) *DatabaseQuery {
+	dq.ctx.Limit = &limit
+	return dq
+}
+
+// Offset to start from.
+func (dq *DatabaseQuery) Offset(offset int) *DatabaseQuery {
+	dq.ctx.Offset = &offset
+	return dq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (dq *DatabaseQuery) Unique(unique bool) *DatabaseQuery {
+	dq.ctx.Unique = &unique
+	return dq
+}
+
+// Order specifies how the records should be ordered.
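+// For example, newest first (a sketch assuming the generated ByCreatedAt
+// ordering helper in the database package and the dialect/sql import on the
+// caller's side):
+//
+//	dbs, err := client.Database.Query().
+//		Order(database.ByCreatedAt(sql.OrderDesc())).
+//		All(ctx)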
+func (dq *DatabaseQuery) Order(o ...database.OrderOption) *DatabaseQuery { + dq.order = append(dq.order, o...) + return dq +} + +// QueryGroup chains the current query on the "group" edge. +func (dq *DatabaseQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: dq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(database.Table, database.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, database.GroupTable, database.GroupColumn), + ) + schemaConfig := dq.schemaConfig + step.To.Schema = schemaConfig.Group + step.Edge.Schema = schemaConfig.Database + fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Database entity from the query. +// Returns a *NotFoundError when no Database was found. +func (dq *DatabaseQuery) First(ctx context.Context) (*Database, error) { + nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{database.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (dq *DatabaseQuery) FirstX(ctx context.Context) *Database { + node, err := dq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Database ID from the query. +// Returns a *NotFoundError when no Database ID was found. +func (dq *DatabaseQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{database.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (dq *DatabaseQuery) FirstIDX(ctx context.Context) string { + id, err := dq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Database entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Database entity is found. +// Returns a *NotFoundError when no Database entities are found. +func (dq *DatabaseQuery) Only(ctx context.Context) (*Database, error) { + nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{database.Label} + default: + return nil, &NotSingularError{database.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (dq *DatabaseQuery) OnlyX(ctx context.Context) *Database { + node, err := dq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Database ID in the query. +// Returns a *NotSingularError when more than one Database ID is found. +// Returns a *NotFoundError when no entities are found. 
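+// A sketch of a unique lookup by name (client and ctx assumed in scope):
+//
+//	id, err := client.Database.Query().
+//		Where(database.NameEQ("primary")).
+//		OnlyID(ctx)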
+func (dq *DatabaseQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{database.Label} + default: + err = &NotSingularError{database.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (dq *DatabaseQuery) OnlyIDX(ctx context.Context) string { + id, err := dq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Databases. +func (dq *DatabaseQuery) All(ctx context.Context) ([]*Database, error) { + ctx = setContextOp(ctx, dq.ctx, "All") + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Database, *DatabaseQuery]() + return withInterceptors[[]*Database](ctx, dq, qr, dq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (dq *DatabaseQuery) AllX(ctx context.Context) []*Database { + nodes, err := dq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Database IDs. +func (dq *DatabaseQuery) IDs(ctx context.Context) (ids []string, err error) { + if dq.ctx.Unique == nil && dq.path != nil { + dq.Unique(true) + } + ctx = setContextOp(ctx, dq.ctx, "IDs") + if err = dq.Select(database.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (dq *DatabaseQuery) IDsX(ctx context.Context) []string { + ids, err := dq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (dq *DatabaseQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, dq.ctx, "Count") + if err := dq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, dq, querierCount[*DatabaseQuery](), dq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (dq *DatabaseQuery) CountX(ctx context.Context) int { + count, err := dq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (dq *DatabaseQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, dq.ctx, "Exist") + switch _, err := dq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("generated: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (dq *DatabaseQuery) ExistX(ctx context.Context) bool { + exist, err := dq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the DatabaseQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (dq *DatabaseQuery) Clone() *DatabaseQuery { + if dq == nil { + return nil + } + return &DatabaseQuery{ + config: dq.config, + ctx: dq.ctx.Clone(), + order: append([]database.OrderOption{}, dq.order...), + inters: append([]Interceptor{}, dq.inters...), + predicates: append([]predicate.Database{}, dq.predicates...), + withGroup: dq.withGroup.Clone(), + // clone intermediate query. + sql: dq.sql.Clone(), + path: dq.path, + } +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. 
The optional arguments are used to configure the query builder of the edge.
+func (dq *DatabaseQuery) WithGroup(opts ...func(*GroupQuery)) *DatabaseQuery {
+	query := (&GroupClient{config: dq.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	dq.withGroup = query
+	return dq
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//		Count     int       `json:"count,omitempty"`
+//	}
+//
+//	client.Database.Query().
+//		GroupBy(database.FieldCreatedAt).
+//		Aggregate(generated.Count()).
+//		Scan(ctx, &v)
+func (dq *DatabaseQuery) GroupBy(field string, fields ...string) *DatabaseGroupBy {
+	dq.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &DatabaseGroupBy{build: dq}
+	grbuild.flds = &dq.ctx.Fields
+	grbuild.label = database.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//	}
+//
+//	client.Database.Query().
+//		Select(database.FieldCreatedAt).
+//		Scan(ctx, &v)
+func (dq *DatabaseQuery) Select(fields ...string) *DatabaseSelect {
+	dq.ctx.Fields = append(dq.ctx.Fields, fields...)
+	sbuild := &DatabaseSelect{DatabaseQuery: dq}
+	sbuild.label = database.Label
+	sbuild.flds, sbuild.scan = &dq.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns a DatabaseSelect configured with the given aggregations.
+func (dq *DatabaseQuery) Aggregate(fns ...AggregateFunc) *DatabaseSelect {
+	return dq.Select().Aggregate(fns...)
+} + +func (dq *DatabaseQuery) prepareQuery(ctx context.Context) error { + for _, inter := range dq.inters { + if inter == nil { + return fmt.Errorf("generated: uninitialized interceptor (forgotten import generated/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, dq); err != nil { + return err + } + } + } + for _, f := range dq.ctx.Fields { + if !database.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("generated: invalid field %q for query", f)} + } + } + if dq.path != nil { + prev, err := dq.path(ctx) + if err != nil { + return err + } + dq.sql = prev + } + return nil +} + +func (dq *DatabaseQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Database, error) { + var ( + nodes = []*Database{} + _spec = dq.querySpec() + loadedTypes = [1]bool{ + dq.withGroup != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Database).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Database{config: dq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + _spec.Node.Schema = dq.schemaConfig.Database + ctx = internal.NewSchemaConfigContext(ctx, dq.schemaConfig) + if len(dq.modifiers) > 0 { + _spec.Modifiers = dq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, dq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := dq.withGroup; query != nil { + if err := dq.loadGroup(ctx, query, nodes, nil, + func(n *Database, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + for i := range dq.loadTotal { + if err := dq.loadTotal[i](ctx, nodes); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (dq *DatabaseQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Database, init func(*Database), assign func(*Database, *Group)) error { + ids := make([]string, 0, len(nodes)) + nodeids := make(map[string][]*Database) + for i := range nodes { + fk := nodes[i].GroupID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (dq *DatabaseQuery) sqlCount(ctx context.Context) (int, error) { + _spec := dq.querySpec() + _spec.Node.Schema = dq.schemaConfig.Database + ctx = internal.NewSchemaConfigContext(ctx, dq.schemaConfig) + if len(dq.modifiers) > 0 { + _spec.Modifiers = dq.modifiers + } + _spec.Node.Columns = dq.ctx.Fields + if len(dq.ctx.Fields) > 0 { + _spec.Unique = dq.ctx.Unique != nil && *dq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, dq.driver, _spec) +} + +func (dq *DatabaseQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(database.Table, database.Columns, sqlgraph.NewFieldSpec(database.FieldID, field.TypeString)) + _spec.From = dq.sql + if unique := dq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if dq.path != nil { + _spec.Unique = true + } + if fields := dq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = 
append(_spec.Node.Columns, database.FieldID) + for i := range fields { + if fields[i] != database.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if dq.withGroup != nil { + _spec.Node.AddColumnOnce(database.FieldGroupID) + } + } + if ps := dq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := dq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := dq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := dq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (dq *DatabaseQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(dq.driver.Dialect()) + t1 := builder.Table(database.Table) + columns := dq.ctx.Fields + if len(columns) == 0 { + columns = database.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if dq.sql != nil { + selector = dq.sql + selector.Select(selector.Columns(columns...)...) + } + if dq.ctx.Unique != nil && *dq.ctx.Unique { + selector.Distinct() + } + t1.Schema(dq.schemaConfig.Database) + ctx = internal.NewSchemaConfigContext(ctx, dq.schemaConfig) + selector.WithContext(ctx) + for _, p := range dq.predicates { + p(selector) + } + for _, p := range dq.order { + p(selector) + } + if offset := dq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := dq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// DatabaseGroupBy is the group-by builder for Database entities. +type DatabaseGroupBy struct { + selector + build *DatabaseQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (dgb *DatabaseGroupBy) Aggregate(fns ...AggregateFunc) *DatabaseGroupBy { + dgb.fns = append(dgb.fns, fns...) + return dgb +} + +// Scan applies the selector query and scans the result into the given value. +func (dgb *DatabaseGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, dgb.build.ctx, "GroupBy") + if err := dgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DatabaseQuery, *DatabaseGroupBy](ctx, dgb.build, dgb, dgb.build.inters, v) +} + +func (dgb *DatabaseGroupBy) sqlScan(ctx context.Context, root *DatabaseQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(dgb.fns)) + for _, fn := range dgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*dgb.flds)+len(dgb.fns)) + for _, f := range *dgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*dgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// DatabaseSelect is the builder for selecting fields of Database entities. +type DatabaseSelect struct { + *DatabaseQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. 
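(Editorial aside: the GroupBy and Select builders above terminate in Scan, and the embedded selector also exposes scalar helpers such as Int. A sketch under the same assumptions as the earlier asides.)

// countPerGeo buckets databases by the "geo" column and counts each bucket.
func countPerGeo(ctx context.Context, client *generated.Client) error {
	var rows []struct {
		Geo   string `json:"geo,omitempty"`
		Count int    `json:"count,omitempty"`
	}
	if err := client.Database.Query().
		GroupBy(database.FieldGeo).
		Aggregate(generated.Count()).
		Scan(ctx, &rows); err != nil {
		return err
	}
	// A lone aggregate can be read back directly as a scalar.
	total, err := client.Database.Query().Aggregate(generated.Count()).Int(ctx)
	log.Printf("%d databases across %d geos", total, len(rows))
	return err
}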
+func (ds *DatabaseSelect) Aggregate(fns ...AggregateFunc) *DatabaseSelect { + ds.fns = append(ds.fns, fns...) + return ds +} + +// Scan applies the selector query and scans the result into the given value. +func (ds *DatabaseSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ds.ctx, "Select") + if err := ds.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DatabaseQuery, *DatabaseSelect](ctx, ds.DatabaseQuery, ds, ds.inters, v) +} + +func (ds *DatabaseSelect) sqlScan(ctx context.Context, root *DatabaseQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ds.fns)) + for _, fn := range ds.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ds.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ds.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/internal/ent/generated/database_update.go b/internal/ent/generated/database_update.go new file mode 100644 index 0000000..15b5870 --- /dev/null +++ b/internal/ent/generated/database_update.go @@ -0,0 +1,876 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/geodetic/internal/ent/generated/predicate" + "github.com/datumforge/geodetic/pkg/enums" + + "github.com/datumforge/geodetic/internal/ent/generated/internal" +) + +// DatabaseUpdate is the builder for updating Database entities. +type DatabaseUpdate struct { + config + hooks []Hook + mutation *DatabaseMutation +} + +// Where appends a list predicates to the DatabaseUpdate builder. +func (du *DatabaseUpdate) Where(ps ...predicate.Database) *DatabaseUpdate { + du.mutation.Where(ps...) + return du +} + +// SetUpdatedAt sets the "updated_at" field. +func (du *DatabaseUpdate) SetUpdatedAt(t time.Time) *DatabaseUpdate { + du.mutation.SetUpdatedAt(t) + return du +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (du *DatabaseUpdate) ClearUpdatedAt() *DatabaseUpdate { + du.mutation.ClearUpdatedAt() + return du +} + +// SetUpdatedBy sets the "updated_by" field. +func (du *DatabaseUpdate) SetUpdatedBy(s string) *DatabaseUpdate { + du.mutation.SetUpdatedBy(s) + return du +} + +// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil. +func (du *DatabaseUpdate) SetNillableUpdatedBy(s *string) *DatabaseUpdate { + if s != nil { + du.SetUpdatedBy(*s) + } + return du +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (du *DatabaseUpdate) ClearUpdatedBy() *DatabaseUpdate { + du.mutation.ClearUpdatedBy() + return du +} + +// SetDeletedAt sets the "deleted_at" field. +func (du *DatabaseUpdate) SetDeletedAt(t time.Time) *DatabaseUpdate { + du.mutation.SetDeletedAt(t) + return du +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. 
+func (du *DatabaseUpdate) SetNillableDeletedAt(t *time.Time) *DatabaseUpdate { + if t != nil { + du.SetDeletedAt(*t) + } + return du +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (du *DatabaseUpdate) ClearDeletedAt() *DatabaseUpdate { + du.mutation.ClearDeletedAt() + return du +} + +// SetDeletedBy sets the "deleted_by" field. +func (du *DatabaseUpdate) SetDeletedBy(s string) *DatabaseUpdate { + du.mutation.SetDeletedBy(s) + return du +} + +// SetNillableDeletedBy sets the "deleted_by" field if the given value is not nil. +func (du *DatabaseUpdate) SetNillableDeletedBy(s *string) *DatabaseUpdate { + if s != nil { + du.SetDeletedBy(*s) + } + return du +} + +// ClearDeletedBy clears the value of the "deleted_by" field. +func (du *DatabaseUpdate) ClearDeletedBy() *DatabaseUpdate { + du.mutation.ClearDeletedBy() + return du +} + +// SetOrganizationID sets the "organization_id" field. +func (du *DatabaseUpdate) SetOrganizationID(s string) *DatabaseUpdate { + du.mutation.SetOrganizationID(s) + return du +} + +// SetNillableOrganizationID sets the "organization_id" field if the given value is not nil. +func (du *DatabaseUpdate) SetNillableOrganizationID(s *string) *DatabaseUpdate { + if s != nil { + du.SetOrganizationID(*s) + } + return du +} + +// SetName sets the "name" field. +func (du *DatabaseUpdate) SetName(s string) *DatabaseUpdate { + du.mutation.SetName(s) + return du +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (du *DatabaseUpdate) SetNillableName(s *string) *DatabaseUpdate { + if s != nil { + du.SetName(*s) + } + return du +} + +// SetGeo sets the "geo" field. +func (du *DatabaseUpdate) SetGeo(s string) *DatabaseUpdate { + du.mutation.SetGeo(s) + return du +} + +// SetNillableGeo sets the "geo" field if the given value is not nil. +func (du *DatabaseUpdate) SetNillableGeo(s *string) *DatabaseUpdate { + if s != nil { + du.SetGeo(*s) + } + return du +} + +// ClearGeo clears the value of the "geo" field. +func (du *DatabaseUpdate) ClearGeo() *DatabaseUpdate { + du.mutation.ClearGeo() + return du +} + +// SetDsn sets the "dsn" field. +func (du *DatabaseUpdate) SetDsn(s string) *DatabaseUpdate { + du.mutation.SetDsn(s) + return du +} + +// SetNillableDsn sets the "dsn" field if the given value is not nil. +func (du *DatabaseUpdate) SetNillableDsn(s *string) *DatabaseUpdate { + if s != nil { + du.SetDsn(*s) + } + return du +} + +// SetGroupID sets the "group_id" field. +func (du *DatabaseUpdate) SetGroupID(s string) *DatabaseUpdate { + du.mutation.SetGroupID(s) + return du +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (du *DatabaseUpdate) SetNillableGroupID(s *string) *DatabaseUpdate { + if s != nil { + du.SetGroupID(*s) + } + return du +} + +// SetToken sets the "token" field. +func (du *DatabaseUpdate) SetToken(s string) *DatabaseUpdate { + du.mutation.SetToken(s) + return du +} + +// SetNillableToken sets the "token" field if the given value is not nil. +func (du *DatabaseUpdate) SetNillableToken(s *string) *DatabaseUpdate { + if s != nil { + du.SetToken(*s) + } + return du +} + +// ClearToken clears the value of the "token" field. +func (du *DatabaseUpdate) ClearToken() *DatabaseUpdate { + du.mutation.ClearToken() + return du +} + +// SetStatus sets the "status" field. +func (du *DatabaseUpdate) SetStatus(es enums.DatabaseStatus) *DatabaseUpdate { + du.mutation.SetStatus(es) + return du +} + +// SetNillableStatus sets the "status" field if the given value is not nil. 
+func (du *DatabaseUpdate) SetNillableStatus(es *enums.DatabaseStatus) *DatabaseUpdate { + if es != nil { + du.SetStatus(*es) + } + return du +} + +// SetProvider sets the "provider" field. +func (du *DatabaseUpdate) SetProvider(ep enums.DatabaseProvider) *DatabaseUpdate { + du.mutation.SetProvider(ep) + return du +} + +// SetNillableProvider sets the "provider" field if the given value is not nil. +func (du *DatabaseUpdate) SetNillableProvider(ep *enums.DatabaseProvider) *DatabaseUpdate { + if ep != nil { + du.SetProvider(*ep) + } + return du +} + +// SetGroup sets the "group" edge to the Group entity. +func (du *DatabaseUpdate) SetGroup(g *Group) *DatabaseUpdate { + return du.SetGroupID(g.ID) +} + +// Mutation returns the DatabaseMutation object of the builder. +func (du *DatabaseUpdate) Mutation() *DatabaseMutation { + return du.mutation +} + +// ClearGroup clears the "group" edge to the Group entity. +func (du *DatabaseUpdate) ClearGroup() *DatabaseUpdate { + du.mutation.ClearGroup() + return du +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (du *DatabaseUpdate) Save(ctx context.Context) (int, error) { + if err := du.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, du.sqlSave, du.mutation, du.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (du *DatabaseUpdate) SaveX(ctx context.Context) int { + affected, err := du.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (du *DatabaseUpdate) Exec(ctx context.Context) error { + _, err := du.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (du *DatabaseUpdate) ExecX(ctx context.Context) { + if err := du.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (du *DatabaseUpdate) defaults() error { + if _, ok := du.mutation.UpdatedAt(); !ok && !du.mutation.UpdatedAtCleared() { + if database.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("generated: uninitialized database.UpdateDefaultUpdatedAt (forgotten import generated/runtime?)") + } + v := database.UpdateDefaultUpdatedAt() + du.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
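(Editorial aside: the bulk-update builder above is driven by predicates, and Save reports the number of affected rows. A sketch under the same assumptions; the "ACTIVE" literal is a placeholder, since the real DatabaseStatus values live in pkg/enums and are not shown in this hunk, and the enums import is assumed added to the illustrative file.)

// activateOrg marks every database in one organization as active.
func activateOrg(ctx context.Context, client *generated.Client, orgID string) (int, error) {
	status := enums.DatabaseStatus("ACTIVE") // placeholder value
	return client.Database.Update().
		Where(database.OrganizationIDEQ(orgID)).
		SetNillableStatus(&status). // a nil pointer would leave the field untouched
		Save(ctx)
}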
+func (du *DatabaseUpdate) check() error { + if v, ok := du.mutation.OrganizationID(); ok { + if err := database.OrganizationIDValidator(v); err != nil { + return &ValidationError{Name: "organization_id", err: fmt.Errorf(`generated: validator failed for field "Database.organization_id": %w`, err)} + } + } + if v, ok := du.mutation.Name(); ok { + if err := database.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`generated: validator failed for field "Database.name": %w`, err)} + } + } + if v, ok := du.mutation.Dsn(); ok { + if err := database.DsnValidator(v); err != nil { + return &ValidationError{Name: "dsn", err: fmt.Errorf(`generated: validator failed for field "Database.dsn": %w`, err)} + } + } + if v, ok := du.mutation.Status(); ok { + if err := database.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`generated: validator failed for field "Database.status": %w`, err)} + } + } + if v, ok := du.mutation.Provider(); ok { + if err := database.ProviderValidator(v); err != nil { + return &ValidationError{Name: "provider", err: fmt.Errorf(`generated: validator failed for field "Database.provider": %w`, err)} + } + } + if _, ok := du.mutation.GroupID(); du.mutation.GroupCleared() && !ok { + return errors.New(`generated: clearing a required unique edge "Database.group"`) + } + return nil +} + +func (du *DatabaseUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := du.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(database.Table, database.Columns, sqlgraph.NewFieldSpec(database.FieldID, field.TypeString)) + if ps := du.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if du.mutation.CreatedAtCleared() { + _spec.ClearField(database.FieldCreatedAt, field.TypeTime) + } + if value, ok := du.mutation.UpdatedAt(); ok { + _spec.SetField(database.FieldUpdatedAt, field.TypeTime, value) + } + if du.mutation.UpdatedAtCleared() { + _spec.ClearField(database.FieldUpdatedAt, field.TypeTime) + } + if du.mutation.CreatedByCleared() { + _spec.ClearField(database.FieldCreatedBy, field.TypeString) + } + if value, ok := du.mutation.UpdatedBy(); ok { + _spec.SetField(database.FieldUpdatedBy, field.TypeString, value) + } + if du.mutation.UpdatedByCleared() { + _spec.ClearField(database.FieldUpdatedBy, field.TypeString) + } + if value, ok := du.mutation.DeletedAt(); ok { + _spec.SetField(database.FieldDeletedAt, field.TypeTime, value) + } + if du.mutation.DeletedAtCleared() { + _spec.ClearField(database.FieldDeletedAt, field.TypeTime) + } + if value, ok := du.mutation.DeletedBy(); ok { + _spec.SetField(database.FieldDeletedBy, field.TypeString, value) + } + if du.mutation.DeletedByCleared() { + _spec.ClearField(database.FieldDeletedBy, field.TypeString) + } + if value, ok := du.mutation.OrganizationID(); ok { + _spec.SetField(database.FieldOrganizationID, field.TypeString, value) + } + if value, ok := du.mutation.Name(); ok { + _spec.SetField(database.FieldName, field.TypeString, value) + } + if value, ok := du.mutation.Geo(); ok { + _spec.SetField(database.FieldGeo, field.TypeString, value) + } + if du.mutation.GeoCleared() { + _spec.ClearField(database.FieldGeo, field.TypeString) + } + if value, ok := du.mutation.Dsn(); ok { + _spec.SetField(database.FieldDsn, field.TypeString, value) + } + if value, ok := du.mutation.Token(); ok { + _spec.SetField(database.FieldToken, field.TypeString, value) + } + 
if du.mutation.TokenCleared() { + _spec.ClearField(database.FieldToken, field.TypeString) + } + if value, ok := du.mutation.Status(); ok { + _spec.SetField(database.FieldStatus, field.TypeEnum, value) + } + if value, ok := du.mutation.Provider(); ok { + _spec.SetField(database.FieldProvider, field.TypeEnum, value) + } + if du.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: database.GroupTable, + Columns: []string{database.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeString), + }, + } + edge.Schema = du.schemaConfig.Database + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: database.GroupTable, + Columns: []string{database.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeString), + }, + } + edge.Schema = du.schemaConfig.Database + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _spec.Node.Schema = du.schemaConfig.Database + ctx = internal.NewSchemaConfigContext(ctx, du.schemaConfig) + if n, err = sqlgraph.UpdateNodes(ctx, du.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{database.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + du.mutation.done = true + return n, nil +} + +// DatabaseUpdateOne is the builder for updating a single Database entity. +type DatabaseUpdateOne struct { + config + fields []string + hooks []Hook + mutation *DatabaseMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (duo *DatabaseUpdateOne) SetUpdatedAt(t time.Time) *DatabaseUpdateOne { + duo.mutation.SetUpdatedAt(t) + return duo +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (duo *DatabaseUpdateOne) ClearUpdatedAt() *DatabaseUpdateOne { + duo.mutation.ClearUpdatedAt() + return duo +} + +// SetUpdatedBy sets the "updated_by" field. +func (duo *DatabaseUpdateOne) SetUpdatedBy(s string) *DatabaseUpdateOne { + duo.mutation.SetUpdatedBy(s) + return duo +} + +// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil. +func (duo *DatabaseUpdateOne) SetNillableUpdatedBy(s *string) *DatabaseUpdateOne { + if s != nil { + duo.SetUpdatedBy(*s) + } + return duo +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (duo *DatabaseUpdateOne) ClearUpdatedBy() *DatabaseUpdateOne { + duo.mutation.ClearUpdatedBy() + return duo +} + +// SetDeletedAt sets the "deleted_at" field. +func (duo *DatabaseUpdateOne) SetDeletedAt(t time.Time) *DatabaseUpdateOne { + duo.mutation.SetDeletedAt(t) + return duo +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (duo *DatabaseUpdateOne) SetNillableDeletedAt(t *time.Time) *DatabaseUpdateOne { + if t != nil { + duo.SetDeletedAt(*t) + } + return duo +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (duo *DatabaseUpdateOne) ClearDeletedAt() *DatabaseUpdateOne { + duo.mutation.ClearDeletedAt() + return duo +} + +// SetDeletedBy sets the "deleted_by" field. 
+func (duo *DatabaseUpdateOne) SetDeletedBy(s string) *DatabaseUpdateOne { + duo.mutation.SetDeletedBy(s) + return duo +} + +// SetNillableDeletedBy sets the "deleted_by" field if the given value is not nil. +func (duo *DatabaseUpdateOne) SetNillableDeletedBy(s *string) *DatabaseUpdateOne { + if s != nil { + duo.SetDeletedBy(*s) + } + return duo +} + +// ClearDeletedBy clears the value of the "deleted_by" field. +func (duo *DatabaseUpdateOne) ClearDeletedBy() *DatabaseUpdateOne { + duo.mutation.ClearDeletedBy() + return duo +} + +// SetOrganizationID sets the "organization_id" field. +func (duo *DatabaseUpdateOne) SetOrganizationID(s string) *DatabaseUpdateOne { + duo.mutation.SetOrganizationID(s) + return duo +} + +// SetNillableOrganizationID sets the "organization_id" field if the given value is not nil. +func (duo *DatabaseUpdateOne) SetNillableOrganizationID(s *string) *DatabaseUpdateOne { + if s != nil { + duo.SetOrganizationID(*s) + } + return duo +} + +// SetName sets the "name" field. +func (duo *DatabaseUpdateOne) SetName(s string) *DatabaseUpdateOne { + duo.mutation.SetName(s) + return duo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (duo *DatabaseUpdateOne) SetNillableName(s *string) *DatabaseUpdateOne { + if s != nil { + duo.SetName(*s) + } + return duo +} + +// SetGeo sets the "geo" field. +func (duo *DatabaseUpdateOne) SetGeo(s string) *DatabaseUpdateOne { + duo.mutation.SetGeo(s) + return duo +} + +// SetNillableGeo sets the "geo" field if the given value is not nil. +func (duo *DatabaseUpdateOne) SetNillableGeo(s *string) *DatabaseUpdateOne { + if s != nil { + duo.SetGeo(*s) + } + return duo +} + +// ClearGeo clears the value of the "geo" field. +func (duo *DatabaseUpdateOne) ClearGeo() *DatabaseUpdateOne { + duo.mutation.ClearGeo() + return duo +} + +// SetDsn sets the "dsn" field. +func (duo *DatabaseUpdateOne) SetDsn(s string) *DatabaseUpdateOne { + duo.mutation.SetDsn(s) + return duo +} + +// SetNillableDsn sets the "dsn" field if the given value is not nil. +func (duo *DatabaseUpdateOne) SetNillableDsn(s *string) *DatabaseUpdateOne { + if s != nil { + duo.SetDsn(*s) + } + return duo +} + +// SetGroupID sets the "group_id" field. +func (duo *DatabaseUpdateOne) SetGroupID(s string) *DatabaseUpdateOne { + duo.mutation.SetGroupID(s) + return duo +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (duo *DatabaseUpdateOne) SetNillableGroupID(s *string) *DatabaseUpdateOne { + if s != nil { + duo.SetGroupID(*s) + } + return duo +} + +// SetToken sets the "token" field. +func (duo *DatabaseUpdateOne) SetToken(s string) *DatabaseUpdateOne { + duo.mutation.SetToken(s) + return duo +} + +// SetNillableToken sets the "token" field if the given value is not nil. +func (duo *DatabaseUpdateOne) SetNillableToken(s *string) *DatabaseUpdateOne { + if s != nil { + duo.SetToken(*s) + } + return duo +} + +// ClearToken clears the value of the "token" field. +func (duo *DatabaseUpdateOne) ClearToken() *DatabaseUpdateOne { + duo.mutation.ClearToken() + return duo +} + +// SetStatus sets the "status" field. +func (duo *DatabaseUpdateOne) SetStatus(es enums.DatabaseStatus) *DatabaseUpdateOne { + duo.mutation.SetStatus(es) + return duo +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (duo *DatabaseUpdateOne) SetNillableStatus(es *enums.DatabaseStatus) *DatabaseUpdateOne { + if es != nil { + duo.SetStatus(*es) + } + return duo +} + +// SetProvider sets the "provider" field. 
+func (duo *DatabaseUpdateOne) SetProvider(ep enums.DatabaseProvider) *DatabaseUpdateOne { + duo.mutation.SetProvider(ep) + return duo +} + +// SetNillableProvider sets the "provider" field if the given value is not nil. +func (duo *DatabaseUpdateOne) SetNillableProvider(ep *enums.DatabaseProvider) *DatabaseUpdateOne { + if ep != nil { + duo.SetProvider(*ep) + } + return duo +} + +// SetGroup sets the "group" edge to the Group entity. +func (duo *DatabaseUpdateOne) SetGroup(g *Group) *DatabaseUpdateOne { + return duo.SetGroupID(g.ID) +} + +// Mutation returns the DatabaseMutation object of the builder. +func (duo *DatabaseUpdateOne) Mutation() *DatabaseMutation { + return duo.mutation +} + +// ClearGroup clears the "group" edge to the Group entity. +func (duo *DatabaseUpdateOne) ClearGroup() *DatabaseUpdateOne { + duo.mutation.ClearGroup() + return duo +} + +// Where appends a list predicates to the DatabaseUpdate builder. +func (duo *DatabaseUpdateOne) Where(ps ...predicate.Database) *DatabaseUpdateOne { + duo.mutation.Where(ps...) + return duo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (duo *DatabaseUpdateOne) Select(field string, fields ...string) *DatabaseUpdateOne { + duo.fields = append([]string{field}, fields...) + return duo +} + +// Save executes the query and returns the updated Database entity. +func (duo *DatabaseUpdateOne) Save(ctx context.Context) (*Database, error) { + if err := duo.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, duo.sqlSave, duo.mutation, duo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (duo *DatabaseUpdateOne) SaveX(ctx context.Context) *Database { + node, err := duo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (duo *DatabaseUpdateOne) Exec(ctx context.Context) error { + _, err := duo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (duo *DatabaseUpdateOne) ExecX(ctx context.Context) { + if err := duo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (duo *DatabaseUpdateOne) defaults() error { + if _, ok := duo.mutation.UpdatedAt(); !ok && !duo.mutation.UpdatedAtCleared() { + if database.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("generated: uninitialized database.UpdateDefaultUpdatedAt (forgotten import generated/runtime?)") + } + v := database.UpdateDefaultUpdatedAt() + duo.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
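(Editorial aside: DatabaseUpdateOne returns the updated entity, and the Select method above narrows which columns are scanned back into it. A sketch under the same assumptions; UpdateOneID is the client constructor ent normally generates alongside this builder.)

// moveGeo relocates a single database and reads back only its id and geo.
func moveGeo(ctx context.Context, client *generated.Client, id string) (*generated.Database, error) {
	return client.Database.UpdateOneID(id).
		SetGeo("us-east-1"). // hypothetical region value
		Select(database.FieldID, database.FieldGeo).
		Save(ctx)
}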
+func (duo *DatabaseUpdateOne) check() error { + if v, ok := duo.mutation.OrganizationID(); ok { + if err := database.OrganizationIDValidator(v); err != nil { + return &ValidationError{Name: "organization_id", err: fmt.Errorf(`generated: validator failed for field "Database.organization_id": %w`, err)} + } + } + if v, ok := duo.mutation.Name(); ok { + if err := database.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`generated: validator failed for field "Database.name": %w`, err)} + } + } + if v, ok := duo.mutation.Dsn(); ok { + if err := database.DsnValidator(v); err != nil { + return &ValidationError{Name: "dsn", err: fmt.Errorf(`generated: validator failed for field "Database.dsn": %w`, err)} + } + } + if v, ok := duo.mutation.Status(); ok { + if err := database.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`generated: validator failed for field "Database.status": %w`, err)} + } + } + if v, ok := duo.mutation.Provider(); ok { + if err := database.ProviderValidator(v); err != nil { + return &ValidationError{Name: "provider", err: fmt.Errorf(`generated: validator failed for field "Database.provider": %w`, err)} + } + } + if _, ok := duo.mutation.GroupID(); duo.mutation.GroupCleared() && !ok { + return errors.New(`generated: clearing a required unique edge "Database.group"`) + } + return nil +} + +func (duo *DatabaseUpdateOne) sqlSave(ctx context.Context) (_node *Database, err error) { + if err := duo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(database.Table, database.Columns, sqlgraph.NewFieldSpec(database.FieldID, field.TypeString)) + id, ok := duo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`generated: missing "Database.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := duo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, database.FieldID) + for _, f := range fields { + if !database.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("generated: invalid field %q for query", f)} + } + if f != database.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := duo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if duo.mutation.CreatedAtCleared() { + _spec.ClearField(database.FieldCreatedAt, field.TypeTime) + } + if value, ok := duo.mutation.UpdatedAt(); ok { + _spec.SetField(database.FieldUpdatedAt, field.TypeTime, value) + } + if duo.mutation.UpdatedAtCleared() { + _spec.ClearField(database.FieldUpdatedAt, field.TypeTime) + } + if duo.mutation.CreatedByCleared() { + _spec.ClearField(database.FieldCreatedBy, field.TypeString) + } + if value, ok := duo.mutation.UpdatedBy(); ok { + _spec.SetField(database.FieldUpdatedBy, field.TypeString, value) + } + if duo.mutation.UpdatedByCleared() { + _spec.ClearField(database.FieldUpdatedBy, field.TypeString) + } + if value, ok := duo.mutation.DeletedAt(); ok { + _spec.SetField(database.FieldDeletedAt, field.TypeTime, value) + } + if duo.mutation.DeletedAtCleared() { + _spec.ClearField(database.FieldDeletedAt, field.TypeTime) + } + if value, ok := duo.mutation.DeletedBy(); ok { + _spec.SetField(database.FieldDeletedBy, field.TypeString, value) + } + if duo.mutation.DeletedByCleared() { + _spec.ClearField(database.FieldDeletedBy, field.TypeString) + } + if value, 
ok := duo.mutation.OrganizationID(); ok { + _spec.SetField(database.FieldOrganizationID, field.TypeString, value) + } + if value, ok := duo.mutation.Name(); ok { + _spec.SetField(database.FieldName, field.TypeString, value) + } + if value, ok := duo.mutation.Geo(); ok { + _spec.SetField(database.FieldGeo, field.TypeString, value) + } + if duo.mutation.GeoCleared() { + _spec.ClearField(database.FieldGeo, field.TypeString) + } + if value, ok := duo.mutation.Dsn(); ok { + _spec.SetField(database.FieldDsn, field.TypeString, value) + } + if value, ok := duo.mutation.Token(); ok { + _spec.SetField(database.FieldToken, field.TypeString, value) + } + if duo.mutation.TokenCleared() { + _spec.ClearField(database.FieldToken, field.TypeString) + } + if value, ok := duo.mutation.Status(); ok { + _spec.SetField(database.FieldStatus, field.TypeEnum, value) + } + if value, ok := duo.mutation.Provider(); ok { + _spec.SetField(database.FieldProvider, field.TypeEnum, value) + } + if duo.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: database.GroupTable, + Columns: []string{database.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeString), + }, + } + edge.Schema = duo.schemaConfig.Database + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := duo.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: database.GroupTable, + Columns: []string{database.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeString), + }, + } + edge.Schema = duo.schemaConfig.Database + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _spec.Node.Schema = duo.schemaConfig.Database + ctx = internal.NewSchemaConfigContext(ctx, duo.schemaConfig) + _node = &Database{config: duo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, duo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{database.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + duo.mutation.done = true + return _node, nil +} diff --git a/internal/ent/generated/doc.go b/internal/ent/generated/doc.go new file mode 100644 index 0000000..9682b93 --- /dev/null +++ b/internal/ent/generated/doc.go @@ -0,0 +1,2 @@ +// Package generated is the ent generated package +package generated diff --git a/internal/ent/generated/edge_cleanup.go b/internal/ent/generated/edge_cleanup.go new file mode 100644 index 0000000..8665f96 --- /dev/null +++ b/internal/ent/generated/edge_cleanup.go @@ -0,0 +1,27 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package generated + +import ( + "context" + + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" +) + +func DatabaseEdgeCleanup(ctx context.Context, id string) error { + + return nil +} + +func GroupEdgeCleanup(ctx context.Context, id string) error { + + if exists, err := FromContext(ctx).Database.Query().Where((database.HasGroupWith(group.ID(id)))).Exist(ctx); err == nil && exists { + if databaseCount, err := FromContext(ctx).Database.Delete().Where(database.HasGroupWith(group.ID(id))).Exec(ctx); err != nil { + FromContext(ctx).Logger.Debugw("deleting database", "count", databaseCount, "err", err) + return err + } + } + + return nil +} diff --git a/internal/ent/generated/ent.go b/internal/ent/generated/ent.go new file mode 100644 index 0000000..126062e --- /dev/null +++ b/internal/ent/generated/ent.go @@ -0,0 +1,610 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "context" + "errors" + "fmt" + "reflect" + "sync" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" +) + +// ent aliases to avoid import conflicts in user's code. +type ( + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + QueryContext = ent.QueryContext + Querier = ent.Querier + QuerierFunc = ent.QuerierFunc + Interceptor = ent.Interceptor + InterceptFunc = ent.InterceptFunc + Traverser = ent.Traverser + TraverseFunc = ent.TraverseFunc + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc +) + +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + +// OrderFunc applies an ordering on the sql selector. +// Deprecated: Use Asc/Desc functions or the package builders instead. +type OrderFunc func(*sql.Selector) + +var ( + initCheck sync.Once + columnCheck sql.ColumnCheck +) + +// columnChecker checks if the column exists in the given table. +func checkColumn(table, column string) error { + initCheck.Do(func() { + columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + database.Table: database.ValidColumn, + group.Table: group.ValidColumn, + }) + }) + return columnCheck(table, column) +} + +// Asc applies the given fields in ASC order. +func Asc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("generated: %w", err)}) + } + s.OrderBy(sql.Asc(s.C(f))) + } + } +} + +// Desc applies the given fields in DESC order. 
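(Editorial aside: the edge-cleanup helpers above resolve their *Client through FromContext, so a caller has to attach one with NewContext first. A sketch under the same assumptions.)

// deleteGroupDatabases runs the generated cleanup for a group's databases.
func deleteGroupDatabases(ctx context.Context, client *generated.Client, groupID string) error {
	// Without this, FromContext(ctx) inside GroupEdgeCleanup returns nil.
	ctx = generated.NewContext(ctx, client)
	return generated.GroupEdgeCleanup(ctx, groupID)
}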
+func Desc(fields ...string) func(*sql.Selector) {
+	return func(s *sql.Selector) {
+		for _, f := range fields {
+			if err := checkColumn(s.TableName(), f); err != nil {
+				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("generated: %w", err)})
+			}
+			s.OrderBy(sql.Desc(s.C(f)))
+		}
+	}
+}
+
+// AggregateFunc applies an aggregation step on the group-by traversal/selector.
+type AggregateFunc func(*sql.Selector) string
+
+// As is a pseudo aggregation function for renaming other functions with custom names. For example:
+//
+//	GroupBy(field1, field2).
+//	Aggregate(generated.As(generated.Sum(field1), "sum_field1"), generated.As(generated.Sum(field2), "sum_field2")).
+//	Scan(ctx, &v)
+func As(fn AggregateFunc, end string) AggregateFunc {
+	return func(s *sql.Selector) string {
+		return sql.As(fn(s), end)
+	}
+}
+
+// Count applies the "count" aggregation function on each group.
+func Count() AggregateFunc {
+	return func(s *sql.Selector) string {
+		return sql.Count("*")
+	}
+}
+
+// Max applies the "max" aggregation function on the given field of each group.
+func Max(field string) AggregateFunc {
+	return func(s *sql.Selector) string {
+		if err := checkColumn(s.TableName(), field); err != nil {
+			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("generated: %w", err)})
+			return ""
+		}
+		return sql.Max(s.C(field))
+	}
+}
+
+// Mean applies the "mean" aggregation function on the given field of each group.
+func Mean(field string) AggregateFunc {
+	return func(s *sql.Selector) string {
+		if err := checkColumn(s.TableName(), field); err != nil {
+			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("generated: %w", err)})
+			return ""
+		}
+		return sql.Avg(s.C(field))
+	}
+}
+
+// Min applies the "min" aggregation function on the given field of each group.
+func Min(field string) AggregateFunc {
+	return func(s *sql.Selector) string {
+		if err := checkColumn(s.TableName(), field); err != nil {
+			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("generated: %w", err)})
+			return ""
+		}
+		return sql.Min(s.C(field))
+	}
+}
+
+// Sum applies the "sum" aggregation function on the given field of each group.
+func Sum(field string) AggregateFunc {
+	return func(s *sql.Selector) string {
+		if err := checkColumn(s.TableName(), field); err != nil {
+			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("generated: %w", err)})
+			return ""
+		}
+		return sql.Sum(s.C(field))
+	}
+}
+
+// ValidationError returns when validating a field or edge fails.
+type ValidationError struct {
+	Name string // Field or edge name.
+	err  error
+}
+
+// Error implements the error interface.
+func (e *ValidationError) Error() string {
+	return e.err.Error()
+}
+
+// Unwrap implements the errors.Wrapper interface.
+func (e *ValidationError) Unwrap() error {
+	return e.err
+}
+
+// IsValidationError returns a boolean indicating whether the error is a validation error.
+func IsValidationError(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *ValidationError
+	return errors.As(err, &e)
+}
+
+// NotFoundError returns when trying to fetch a specific entity and it was not found in the database.
+type NotFoundError struct {
+	label string
+}
+
+// Error implements the error interface.
+func (e *NotFoundError) Error() string {
+	return "generated: " + e.label + " not found"
+}
+
+// IsNotFound returns a boolean indicating whether the error is a not found error.
+func IsNotFound(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotFoundError
+	return errors.As(err, &e)
+}
+
+// MaskNotFound masks a not found error.
+func MaskNotFound(err error) error {
+	if IsNotFound(err) {
+		return nil
+	}
+	return err
+}
+
+// NotSingularError returns when trying to fetch a singular entity and more than one was found in the database.
+type NotSingularError struct {
+	label string
+}
+
+// Error implements the error interface.
+func (e *NotSingularError) Error() string {
+	return "generated: " + e.label + " not singular"
+}
+
+// IsNotSingular returns a boolean indicating whether the error is a not singular error.
+func IsNotSingular(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotSingularError
+	return errors.As(err, &e)
+}
+
+// NotLoadedError returns when trying to get a node that was not loaded by the query.
+type NotLoadedError struct {
+	edge string
+}
+
+// Error implements the error interface.
+func (e *NotLoadedError) Error() string {
+	return "generated: " + e.edge + " edge was not loaded"
+}
+
+// IsNotLoaded returns a boolean indicating whether the error is a not loaded error.
+func IsNotLoaded(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotLoadedError
+	return errors.As(err, &e)
+}
+
+// ConstraintError returns when trying to create/update one or more entities and
+// one or more of their constraints failed. For example, violation of edge or
+// field uniqueness.
+type ConstraintError struct {
+	msg  string
+	wrap error
+}
+
+// Error implements the error interface.
+func (e ConstraintError) Error() string {
+	return "generated: constraint failed: " + e.msg
+}
+
+// Unwrap implements the errors.Wrapper interface.
+func (e *ConstraintError) Unwrap() error {
+	return e.wrap
+}
+
+// IsConstraintError returns a boolean indicating whether the error is a constraint failure.
+func IsConstraintError(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *ConstraintError
+	return errors.As(err, &e)
+}
+
+// selector embedded by the different Select/GroupBy builders.
+type selector struct {
+	label string
+	flds  *[]string
+	fns   []AggregateFunc
+	scan  func(context.Context, any) error
+}
+
+// ScanX is like Scan, but panics if an error occurs.
+func (s *selector) ScanX(ctx context.Context, v any) {
+	if err := s.scan(ctx, v); err != nil {
+		panic(err)
+	}
+}
+
+// Strings returns list of strings from a selector. It is only allowed when selecting one field.
+func (s *selector) Strings(ctx context.Context) ([]string, error) {
+	if len(*s.flds) > 1 {
+		return nil, errors.New("generated: Strings is not achievable when selecting more than 1 field")
+	}
+	var v []string
+	if err := s.scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// StringsX is like Strings, but panics if an error occurs.
+func (s *selector) StringsX(ctx context.Context) []string {
+	v, err := s.Strings(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// String returns a single string from a selector. It is only allowed when selecting one field.
+func (s *selector) String(ctx context.Context) (_ string, err error) {
+	var v []string
+	if v, err = s.Strings(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{s.label}
+	default:
+		err = fmt.Errorf("generated: Strings returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// StringX is like String, but panics if an error occurs.
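(Editorial aside: the typed errors above are matched through errors.As by the Is* predicates, so callers can branch without inspecting error strings. A sketch under the same assumptions.)

// describeLookup classifies the outcome of a singular fetch.
func describeLookup(ctx context.Context, client *generated.Client) string {
	_, err := client.Database.Query().Only(ctx)
	switch {
	case err == nil:
		return "exactly one database"
	case generated.IsNotFound(err):
		// generated.MaskNotFound(err) would instead swallow this case.
		return "no databases"
	case generated.IsNotSingular(err):
		return "more than one database"
	default:
		return err.Error()
	}
}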
+func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("generated: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. It is only allowed when selecting one field. +func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("generated: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. +func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("generated: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. +func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("generated: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("generated: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (s *selector) BoolsX(ctx context.Context) []bool { + v, err := s.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
+func (s *selector) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = s.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("generated: Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (s *selector) BoolX(ctx context.Context) bool { + v, err := s.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +// withHooks invokes the builder operation with the given hooks, if any. +func withHooks[V Value, M any, PM interface { + *M + Mutation +}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) { + if len(hooks) == 0 { + return exec(ctx) + } + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutationT, ok := any(m).(PM) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + // Set the mutation to the builder. + *mutation = *mutationT + return exec(ctx) + }) + for i := len(hooks) - 1; i >= 0; i-- { + if hooks[i] == nil { + return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = hooks[i](mut) + } + v, err := mut.Mutate(ctx, mutation) + if err != nil { + return value, err + } + nv, ok := v.(V) + if !ok { + return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation) + } + return nv, nil +} + +// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist. +func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { + if ent.QueryFromContext(ctx) == nil { + qc.Op = op + ctx = ent.NewQueryContext(ctx, qc) + } + return ctx +} + +func querierAll[V Value, Q interface { + sqlAll(context.Context, ...queryHook) (V, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlAll(ctx) + }) +} + +func querierCount[Q interface { + sqlCount(context.Context) (int, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlCount(ctx) + }) +} + +func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + rv, err := qr.Query(ctx, q) + if err != nil { + return v, err + } + vt, ok := rv.(V) + if !ok { + return v, fmt.Errorf("unexpected type %T returned from %T. 
expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + +// queryHook describes an internal hook for the different sqlAll methods. +type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/internal/ent/generated/entql.go b/internal/ent/generated/entql.go new file mode 100644 index 0000000..fdb5a06 --- /dev/null +++ b/internal/ent/generated/entql.go @@ -0,0 +1,340 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/geodetic/internal/ent/generated/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/entql" + "entgo.io/ent/schema/field" +) + +// schemaGraph holds a representation of ent/schema at runtime. 
+var schemaGraph = func() *sqlgraph.Schema { + graph := &sqlgraph.Schema{Nodes: make([]*sqlgraph.Node, 2)} + graph.Nodes[0] = &sqlgraph.Node{ + NodeSpec: sqlgraph.NodeSpec{ + Table: database.Table, + Columns: database.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: database.FieldID, + }, + }, + Type: "Database", + Fields: map[string]*sqlgraph.FieldSpec{ + database.FieldCreatedAt: {Type: field.TypeTime, Column: database.FieldCreatedAt}, + database.FieldUpdatedAt: {Type: field.TypeTime, Column: database.FieldUpdatedAt}, + database.FieldCreatedBy: {Type: field.TypeString, Column: database.FieldCreatedBy}, + database.FieldUpdatedBy: {Type: field.TypeString, Column: database.FieldUpdatedBy}, + database.FieldDeletedAt: {Type: field.TypeTime, Column: database.FieldDeletedAt}, + database.FieldDeletedBy: {Type: field.TypeString, Column: database.FieldDeletedBy}, + database.FieldOrganizationID: {Type: field.TypeString, Column: database.FieldOrganizationID}, + database.FieldName: {Type: field.TypeString, Column: database.FieldName}, + database.FieldGeo: {Type: field.TypeString, Column: database.FieldGeo}, + database.FieldDsn: {Type: field.TypeString, Column: database.FieldDsn}, + database.FieldGroupID: {Type: field.TypeString, Column: database.FieldGroupID}, + database.FieldToken: {Type: field.TypeString, Column: database.FieldToken}, + database.FieldStatus: {Type: field.TypeEnum, Column: database.FieldStatus}, + database.FieldProvider: {Type: field.TypeEnum, Column: database.FieldProvider}, + }, + } + graph.Nodes[1] = &sqlgraph.Node{ + NodeSpec: sqlgraph.NodeSpec{ + Table: group.Table, + Columns: group.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: group.FieldID, + }, + }, + Type: "Group", + Fields: map[string]*sqlgraph.FieldSpec{ + group.FieldCreatedAt: {Type: field.TypeTime, Column: group.FieldCreatedAt}, + group.FieldUpdatedAt: {Type: field.TypeTime, Column: group.FieldUpdatedAt}, + group.FieldCreatedBy: {Type: field.TypeString, Column: group.FieldCreatedBy}, + group.FieldUpdatedBy: {Type: field.TypeString, Column: group.FieldUpdatedBy}, + group.FieldDeletedAt: {Type: field.TypeTime, Column: group.FieldDeletedAt}, + group.FieldDeletedBy: {Type: field.TypeString, Column: group.FieldDeletedBy}, + group.FieldName: {Type: field.TypeString, Column: group.FieldName}, + group.FieldDescription: {Type: field.TypeString, Column: group.FieldDescription}, + group.FieldPrimaryLocation: {Type: field.TypeString, Column: group.FieldPrimaryLocation}, + group.FieldLocations: {Type: field.TypeJSON, Column: group.FieldLocations}, + group.FieldToken: {Type: field.TypeString, Column: group.FieldToken}, + group.FieldRegion: {Type: field.TypeEnum, Column: group.FieldRegion}, + }, + } + graph.MustAddE( + "group", + &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: database.GroupTable, + Columns: []string{database.GroupColumn}, + Bidi: false, + }, + "Database", + "Group", + ) + graph.MustAddE( + "databases", + &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DatabasesTable, + Columns: []string{group.DatabasesColumn}, + Bidi: false, + }, + "Group", + "Database", + ) + return graph +}() + +// predicateAdder wraps the addPredicate method. +// All update, update-one and query builders implement this interface. +type predicateAdder interface { + addPredicate(func(s *sql.Selector)) +} + +// addPredicate implements the predicateAdder interface. 
+func (dq *DatabaseQuery) addPredicate(pred func(s *sql.Selector)) { + dq.predicates = append(dq.predicates, pred) +} + +// Filter returns a Filter implementation to apply filters on the DatabaseQuery builder. +func (dq *DatabaseQuery) Filter() *DatabaseFilter { + return &DatabaseFilter{config: dq.config, predicateAdder: dq} +} + +// addPredicate implements the predicateAdder interface. +func (m *DatabaseMutation) addPredicate(pred func(s *sql.Selector)) { + m.predicates = append(m.predicates, pred) +} + +// Filter returns an entql.Where implementation to apply filters on the DatabaseMutation builder. +func (m *DatabaseMutation) Filter() *DatabaseFilter { + return &DatabaseFilter{config: m.config, predicateAdder: m} +} + +// DatabaseFilter provides a generic filtering capability at runtime for DatabaseQuery. +type DatabaseFilter struct { + predicateAdder + config +} + +// Where applies the entql predicate on the query filter. +func (f *DatabaseFilter) Where(p entql.P) { + f.addPredicate(func(s *sql.Selector) { + if err := schemaGraph.EvalP(schemaGraph.Nodes[0].Type, p, s); err != nil { + s.AddError(err) + } + }) +} + +// WhereID applies the entql string predicate on the id field. +func (f *DatabaseFilter) WhereID(p entql.StringP) { + f.Where(p.Field(database.FieldID)) +} + +// WhereCreatedAt applies the entql time.Time predicate on the created_at field. +func (f *DatabaseFilter) WhereCreatedAt(p entql.TimeP) { + f.Where(p.Field(database.FieldCreatedAt)) +} + +// WhereUpdatedAt applies the entql time.Time predicate on the updated_at field. +func (f *DatabaseFilter) WhereUpdatedAt(p entql.TimeP) { + f.Where(p.Field(database.FieldUpdatedAt)) +} + +// WhereCreatedBy applies the entql string predicate on the created_by field. +func (f *DatabaseFilter) WhereCreatedBy(p entql.StringP) { + f.Where(p.Field(database.FieldCreatedBy)) +} + +// WhereUpdatedBy applies the entql string predicate on the updated_by field. +func (f *DatabaseFilter) WhereUpdatedBy(p entql.StringP) { + f.Where(p.Field(database.FieldUpdatedBy)) +} + +// WhereDeletedAt applies the entql time.Time predicate on the deleted_at field. +func (f *DatabaseFilter) WhereDeletedAt(p entql.TimeP) { + f.Where(p.Field(database.FieldDeletedAt)) +} + +// WhereDeletedBy applies the entql string predicate on the deleted_by field. +func (f *DatabaseFilter) WhereDeletedBy(p entql.StringP) { + f.Where(p.Field(database.FieldDeletedBy)) +} + +// WhereOrganizationID applies the entql string predicate on the organization_id field. +func (f *DatabaseFilter) WhereOrganizationID(p entql.StringP) { + f.Where(p.Field(database.FieldOrganizationID)) +} + +// WhereName applies the entql string predicate on the name field. +func (f *DatabaseFilter) WhereName(p entql.StringP) { + f.Where(p.Field(database.FieldName)) +} + +// WhereGeo applies the entql string predicate on the geo field. +func (f *DatabaseFilter) WhereGeo(p entql.StringP) { + f.Where(p.Field(database.FieldGeo)) +} + +// WhereDsn applies the entql string predicate on the dsn field. +func (f *DatabaseFilter) WhereDsn(p entql.StringP) { + f.Where(p.Field(database.FieldDsn)) +} + +// WhereGroupID applies the entql string predicate on the group_id field. +func (f *DatabaseFilter) WhereGroupID(p entql.StringP) { + f.Where(p.Field(database.FieldGroupID)) +} + +// WhereToken applies the entql string predicate on the token field. +func (f *DatabaseFilter) WhereToken(p entql.StringP) { + f.Where(p.Field(database.FieldToken)) +} + +// WhereStatus applies the entql string predicate on the status field. 
+func (f *DatabaseFilter) WhereStatus(p entql.StringP) {
+	f.Where(p.Field(database.FieldStatus))
+}
+
+// WhereProvider applies the entql string predicate on the provider field.
+func (f *DatabaseFilter) WhereProvider(p entql.StringP) {
+	f.Where(p.Field(database.FieldProvider))
+}
+
+// WhereHasGroup applies a predicate to check if query has an edge group.
+func (f *DatabaseFilter) WhereHasGroup() {
+	f.Where(entql.HasEdge("group"))
+}
+
+// WhereHasGroupWith applies a predicate to check if query has an edge group with the given conditions (other predicates).
+func (f *DatabaseFilter) WhereHasGroupWith(preds ...predicate.Group) {
+	f.Where(entql.HasEdgeWith("group", sqlgraph.WrapFunc(func(s *sql.Selector) {
+		for _, p := range preds {
+			p(s)
+		}
+	})))
+}
+
+// addPredicate implements the predicateAdder interface.
+func (gq *GroupQuery) addPredicate(pred func(s *sql.Selector)) {
+	gq.predicates = append(gq.predicates, pred)
+}
+
+// Filter returns a Filter implementation to apply filters on the GroupQuery builder.
+func (gq *GroupQuery) Filter() *GroupFilter {
+	return &GroupFilter{config: gq.config, predicateAdder: gq}
+}
+
+// addPredicate implements the predicateAdder interface.
+func (m *GroupMutation) addPredicate(pred func(s *sql.Selector)) {
+	m.predicates = append(m.predicates, pred)
+}
+
+// Filter returns an entql.Where implementation to apply filters on the GroupMutation builder.
+func (m *GroupMutation) Filter() *GroupFilter {
+	return &GroupFilter{config: m.config, predicateAdder: m}
+}
+
+// GroupFilter provides a generic filtering capability at runtime for GroupQuery.
+type GroupFilter struct {
+	predicateAdder
+	config
+}
+
+// Where applies the entql predicate on the query filter.
+func (f *GroupFilter) Where(p entql.P) {
+	f.addPredicate(func(s *sql.Selector) {
+		if err := schemaGraph.EvalP(schemaGraph.Nodes[1].Type, p, s); err != nil {
+			s.AddError(err)
+		}
+	})
+}
+
+// WhereID applies the entql string predicate on the id field.
+func (f *GroupFilter) WhereID(p entql.StringP) {
+	f.Where(p.Field(group.FieldID))
+}
+
+// WhereCreatedAt applies the entql time.Time predicate on the created_at field.
+func (f *GroupFilter) WhereCreatedAt(p entql.TimeP) {
+	f.Where(p.Field(group.FieldCreatedAt))
+}
+
+// WhereUpdatedAt applies the entql time.Time predicate on the updated_at field.
+func (f *GroupFilter) WhereUpdatedAt(p entql.TimeP) {
+	f.Where(p.Field(group.FieldUpdatedAt))
+}
+
+// WhereCreatedBy applies the entql string predicate on the created_by field.
+func (f *GroupFilter) WhereCreatedBy(p entql.StringP) {
+	f.Where(p.Field(group.FieldCreatedBy))
+}
+
+// WhereUpdatedBy applies the entql string predicate on the updated_by field.
+func (f *GroupFilter) WhereUpdatedBy(p entql.StringP) {
+	f.Where(p.Field(group.FieldUpdatedBy))
+}
+
+// WhereDeletedAt applies the entql time.Time predicate on the deleted_at field.
+func (f *GroupFilter) WhereDeletedAt(p entql.TimeP) {
+	f.Where(p.Field(group.FieldDeletedAt))
+}
+
+// WhereDeletedBy applies the entql string predicate on the deleted_by field.
+func (f *GroupFilter) WhereDeletedBy(p entql.StringP) {
+	f.Where(p.Field(group.FieldDeletedBy))
+}
+
+// WhereName applies the entql string predicate on the name field.
+func (f *GroupFilter) WhereName(p entql.StringP) {
+	f.Where(p.Field(group.FieldName))
+}
+
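Editor's aside, not part of the generated file: the Filter builders above accept ad-hoc entql predicates at runtime, evaluated against schemaGraph. A minimal sketch, assuming a *generated.Client named client and illustrative field values:

func runtimeFilter(ctx context.Context, client *generated.Client) ([]*generated.Database, error) {
	q := client.Database.Query()
	// Ad-hoc column predicate, evaluated through the runtime schema graph.
	q.Filter().WhereName(entql.StringContains("prod"))
	// Edge predicate: restrict to databases whose group matches ("platform" is assumed).
	q.Filter().WhereHasGroupWith(group.NameEQ("platform"))
	return q.All(ctx)
}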
+// WhereDescription applies the entql string predicate on the description field.
+func (f *GroupFilter) WhereDescription(p entql.StringP) {
+	f.Where(p.Field(group.FieldDescription))
+}
+
+// WherePrimaryLocation applies the entql string predicate on the primary_location field.
+func (f *GroupFilter) WherePrimaryLocation(p entql.StringP) {
+	f.Where(p.Field(group.FieldPrimaryLocation))
+}
+
+// WhereLocations applies the entql json.RawMessage predicate on the locations field.
+func (f *GroupFilter) WhereLocations(p entql.BytesP) {
+	f.Where(p.Field(group.FieldLocations))
+}
+
+// WhereToken applies the entql string predicate on the token field.
+func (f *GroupFilter) WhereToken(p entql.StringP) {
+	f.Where(p.Field(group.FieldToken))
+}
+
+// WhereRegion applies the entql string predicate on the region field.
+func (f *GroupFilter) WhereRegion(p entql.StringP) {
+	f.Where(p.Field(group.FieldRegion))
+}
+
+// WhereHasDatabases applies a predicate to check if query has an edge databases.
+func (f *GroupFilter) WhereHasDatabases() {
+	f.Where(entql.HasEdge("databases"))
+}
+
+// WhereHasDatabasesWith applies a predicate to check if query has an edge databases with the given conditions (other predicates).
+func (f *GroupFilter) WhereHasDatabasesWith(preds ...predicate.Database) {
+	f.Where(entql.HasEdgeWith("databases", sqlgraph.WrapFunc(func(s *sql.Selector) {
+		for _, p := range preds {
+			p(s)
+		}
+	})))
+}
diff --git a/internal/ent/generated/enttest/enttest.go b/internal/ent/generated/enttest/enttest.go
new file mode 100644
index 0000000..104efbe
--- /dev/null
+++ b/internal/ent/generated/enttest/enttest.go
@@ -0,0 +1,84 @@
+// Code generated by ent, DO NOT EDIT.
+
+package enttest
+
+import (
+	"context"
+
+	"github.com/datumforge/geodetic/internal/ent/generated"
+	// required by schema hooks.
+	_ "github.com/datumforge/geodetic/internal/ent/generated/runtime"
+
+	"entgo.io/ent/dialect/sql/schema"
+	"github.com/datumforge/geodetic/internal/ent/generated/migrate"
+)
+
+type (
+	// TestingT is the interface that is shared between
+	// testing.T and testing.B and used by enttest.
+	TestingT interface {
+		FailNow()
+		Error(...any)
+	}
+
+	// Option configures client creation.
+	Option func(*options)
+
+	options struct {
+		opts        []generated.Option
+		migrateOpts []schema.MigrateOption
+	}
+)
+
+// WithOptions forwards options to client creation.
+func WithOptions(opts ...generated.Option) Option {
+	return func(o *options) {
+		o.opts = append(o.opts, opts...)
+	}
+}
+
+// WithMigrateOptions forwards options to auto migration.
+func WithMigrateOptions(opts ...schema.MigrateOption) Option {
+	return func(o *options) {
+		o.migrateOpts = append(o.migrateOpts, opts...)
+	}
+}
+
+func newOptions(opts []Option) *options {
+	o := &options{}
+	for _, opt := range opts {
+		opt(o)
+	}
+	return o
+}
+
+// Open calls generated.Open and runs the auto migration.
func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *generated.Client {
+	o := newOptions(opts)
+	c, err := generated.Open(driverName, dataSourceName, o.opts...)
+	if err != nil {
+		t.Error(err)
+		t.FailNow()
+	}
+	migrateSchema(t, c, o)
+	return c
+}
+
+// NewClient calls generated.NewClient and runs the auto migration.
+func NewClient(t TestingT, opts ...Option) *generated.Client {
+	o := newOptions(opts)
+	c := generated.NewClient(o.opts...)
+ migrateSchema(t, c, o) + return c +} +func migrateSchema(t TestingT, c *generated.Client, o *options) { + tables, err := schema.CopyTables(migrate.Tables) + if err != nil { + t.Error(err) + t.FailNow() + } + if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil { + t.Error(err) + t.FailNow() + } +} diff --git a/internal/ent/generated/gql_collection.go b/internal/ent/generated/gql_collection.go new file mode 100644 index 0000000..e7dd9ed --- /dev/null +++ b/internal/ent/generated/gql_collection.go @@ -0,0 +1,371 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "github.com/99designs/gqlgen/graphql" + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" +) + +// CollectFields tells the query-builder to eagerly load connected nodes by resolver context. +func (d *DatabaseQuery) CollectFields(ctx context.Context, satisfies ...string) (*DatabaseQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return d, nil + } + if err := d.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return d, nil +} + +func (d *DatabaseQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(database.Columns)) + selectedFields = []string{database.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "group": + var ( + alias = field.Alias + path = append(path, alias) + query = (&GroupClient{config: d.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + d.withGroup = query + if _, ok := fieldSeen[database.FieldGroupID]; !ok { + selectedFields = append(selectedFields, database.FieldGroupID) + fieldSeen[database.FieldGroupID] = struct{}{} + } + case "createdAt": + if _, ok := fieldSeen[database.FieldCreatedAt]; !ok { + selectedFields = append(selectedFields, database.FieldCreatedAt) + fieldSeen[database.FieldCreatedAt] = struct{}{} + } + case "updatedAt": + if _, ok := fieldSeen[database.FieldUpdatedAt]; !ok { + selectedFields = append(selectedFields, database.FieldUpdatedAt) + fieldSeen[database.FieldUpdatedAt] = struct{}{} + } + case "createdBy": + if _, ok := fieldSeen[database.FieldCreatedBy]; !ok { + selectedFields = append(selectedFields, database.FieldCreatedBy) + fieldSeen[database.FieldCreatedBy] = struct{}{} + } + case "updatedBy": + if _, ok := fieldSeen[database.FieldUpdatedBy]; !ok { + selectedFields = append(selectedFields, database.FieldUpdatedBy) + fieldSeen[database.FieldUpdatedBy] = struct{}{} + } + case "deletedAt": + if _, ok := fieldSeen[database.FieldDeletedAt]; !ok { + selectedFields = append(selectedFields, database.FieldDeletedAt) + fieldSeen[database.FieldDeletedAt] = struct{}{} + } + case "deletedBy": + if _, ok := fieldSeen[database.FieldDeletedBy]; !ok { + selectedFields = append(selectedFields, database.FieldDeletedBy) + fieldSeen[database.FieldDeletedBy] = struct{}{} + } + case "organizationID": + if _, ok := fieldSeen[database.FieldOrganizationID]; !ok { + selectedFields = append(selectedFields, database.FieldOrganizationID) + fieldSeen[database.FieldOrganizationID] = 
struct{}{} + } + case "name": + if _, ok := fieldSeen[database.FieldName]; !ok { + selectedFields = append(selectedFields, database.FieldName) + fieldSeen[database.FieldName] = struct{}{} + } + case "geo": + if _, ok := fieldSeen[database.FieldGeo]; !ok { + selectedFields = append(selectedFields, database.FieldGeo) + fieldSeen[database.FieldGeo] = struct{}{} + } + case "dsn": + if _, ok := fieldSeen[database.FieldDsn]; !ok { + selectedFields = append(selectedFields, database.FieldDsn) + fieldSeen[database.FieldDsn] = struct{}{} + } + case "groupID": + if _, ok := fieldSeen[database.FieldGroupID]; !ok { + selectedFields = append(selectedFields, database.FieldGroupID) + fieldSeen[database.FieldGroupID] = struct{}{} + } + case "status": + if _, ok := fieldSeen[database.FieldStatus]; !ok { + selectedFields = append(selectedFields, database.FieldStatus) + fieldSeen[database.FieldStatus] = struct{}{} + } + case "provider": + if _, ok := fieldSeen[database.FieldProvider]; !ok { + selectedFields = append(selectedFields, database.FieldProvider) + fieldSeen[database.FieldProvider] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + d.Select(selectedFields...) + } + return nil +} + +type databasePaginateArgs struct { + first, last *int + after, before *Cursor + opts []DatabasePaginateOption +} + +func newDatabasePaginateArgs(rv map[string]any) *databasePaginateArgs { + args := &databasePaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + if v, ok := rv[whereField].(*DatabaseWhereInput); ok { + args.opts = append(args.opts, WithDatabaseFilter(v.Filter)) + } + return args +} + +// CollectFields tells the query-builder to eagerly load connected nodes by resolver context. +func (gr *GroupQuery) CollectFields(ctx context.Context, satisfies ...string) (*GroupQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return gr, nil + } + if err := gr.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return gr, nil +} + +func (gr *GroupQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(group.Columns)) + selectedFields = []string{group.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "databases": + var ( + alias = field.Alias + path = append(path, alias) + query = (&DatabaseClient{config: gr.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + gr.WithNamedDatabases(alias, func(wq *DatabaseQuery) { + *wq = *query + }) + case "createdAt": + if _, ok := fieldSeen[group.FieldCreatedAt]; !ok { + selectedFields = append(selectedFields, group.FieldCreatedAt) + fieldSeen[group.FieldCreatedAt] = struct{}{} + } + case "updatedAt": + if _, ok := fieldSeen[group.FieldUpdatedAt]; !ok { + selectedFields = append(selectedFields, group.FieldUpdatedAt) + fieldSeen[group.FieldUpdatedAt] = struct{}{} + } + case "createdBy": + if _, ok := fieldSeen[group.FieldCreatedBy]; !ok { + selectedFields = append(selectedFields, group.FieldCreatedBy) + fieldSeen[group.FieldCreatedBy] = struct{}{} + } + case "updatedBy": + if _, ok := fieldSeen[group.FieldUpdatedBy]; !ok { + selectedFields = append(selectedFields, group.FieldUpdatedBy) + fieldSeen[group.FieldUpdatedBy] = struct{}{} + } + case "deletedAt": + if _, ok := fieldSeen[group.FieldDeletedAt]; !ok { + selectedFields = append(selectedFields, group.FieldDeletedAt) + fieldSeen[group.FieldDeletedAt] = struct{}{} + } + case "deletedBy": + if _, ok := fieldSeen[group.FieldDeletedBy]; !ok { + selectedFields = append(selectedFields, group.FieldDeletedBy) + fieldSeen[group.FieldDeletedBy] = struct{}{} + } + case "name": + if _, ok := fieldSeen[group.FieldName]; !ok { + selectedFields = append(selectedFields, group.FieldName) + fieldSeen[group.FieldName] = struct{}{} + } + case "description": + if _, ok := fieldSeen[group.FieldDescription]; !ok { + selectedFields = append(selectedFields, group.FieldDescription) + fieldSeen[group.FieldDescription] = struct{}{} + } + case "primaryLocation": + if _, ok := fieldSeen[group.FieldPrimaryLocation]; !ok { + selectedFields = append(selectedFields, group.FieldPrimaryLocation) + fieldSeen[group.FieldPrimaryLocation] = struct{}{} + } + case "locations": + if _, ok := fieldSeen[group.FieldLocations]; !ok { + selectedFields = append(selectedFields, group.FieldLocations) + fieldSeen[group.FieldLocations] = struct{}{} + } + case "region": + if _, ok := fieldSeen[group.FieldRegion]; !ok { + selectedFields = append(selectedFields, group.FieldRegion) + fieldSeen[group.FieldRegion] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + gr.Select(selectedFields...) 
+ } + return nil +} + +type groupPaginateArgs struct { + first, last *int + after, before *Cursor + opts []GroupPaginateOption +} + +func newGroupPaginateArgs(rv map[string]any) *groupPaginateArgs { + args := &groupPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + if v, ok := rv[whereField].(*GroupWhereInput); ok { + args.opts = append(args.opts, WithGroupFilter(v.Filter)) + } + return args +} + +const ( + afterField = "after" + firstField = "first" + beforeField = "before" + lastField = "last" + orderByField = "orderBy" + directionField = "direction" + fieldField = "field" + whereField = "where" +) + +func fieldArgs(ctx context.Context, whereInput any, path ...string) map[string]any { + field := collectedField(ctx, path...) + if field == nil || field.Arguments == nil { + return nil + } + oc := graphql.GetOperationContext(ctx) + args := field.ArgumentMap(oc.Variables) + return unmarshalArgs(ctx, whereInput, args) +} + +// unmarshalArgs allows extracting the field arguments from their raw representation. +func unmarshalArgs(ctx context.Context, whereInput any, args map[string]any) map[string]any { + for _, k := range []string{firstField, lastField} { + v, ok := args[k] + if !ok { + continue + } + i, err := graphql.UnmarshalInt(v) + if err == nil { + args[k] = &i + } + } + for _, k := range []string{beforeField, afterField} { + v, ok := args[k] + if !ok { + continue + } + c := &Cursor{} + if c.UnmarshalGQL(v) == nil { + args[k] = c + } + } + if v, ok := args[whereField]; ok && whereInput != nil { + if err := graphql.UnmarshalInputFromContext(ctx, v, whereInput); err == nil { + args[whereField] = whereInput + } + } + + return args +} + +func limitRows(partitionBy string, limit int, orderBy ...sql.Querier) func(s *sql.Selector) { + return func(s *sql.Selector) { + d := sql.Dialect(s.Dialect()) + s.SetDistinct(false) + with := d.With("src_query"). + As(s.Clone()). + With("limited_query"). + As( + d.Select("*"). + AppendSelectExprAs( + sql.RowNumber().PartitionBy(partitionBy).OrderExpr(orderBy...), + "row_number", + ). + From(d.Table("src_query")), + ) + t := d.Table("limited_query").As(s.TableName()) + *s = *d.Select(s.UnqualifiedColumns()...). + From(t). + Where(sql.LTE(t.C("row_number"), limit)). + Prefix(with) + } +} + +// mayAddCondition appends another type condition to the satisfies list +// if condition is enabled (Node/Nodes) and it does not exist in the list. +func mayAddCondition(satisfies []string, typeCond string) []string { + if len(satisfies) == 0 { + return satisfies + } + for _, s := range satisfies { + if typeCond == s { + return satisfies + } + } + return append(satisfies, typeCond) +} diff --git a/internal/ent/generated/gql_edge.go b/internal/ent/generated/gql_edge.go new file mode 100644 index 0000000..c3b5dfd --- /dev/null +++ b/internal/ent/generated/gql_edge.go @@ -0,0 +1,29 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package generated + +import ( + "context" + + "github.com/99designs/gqlgen/graphql" +) + +func (d *Database) Group(ctx context.Context) (*Group, error) { + result, err := d.Edges.GroupOrErr() + if IsNotLoaded(err) { + result, err = d.QueryGroup().Only(ctx) + } + return result, err +} + +func (gr *Group) Databases(ctx context.Context) (result []*Database, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = gr.NamedDatabases(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = gr.Edges.DatabasesOrErr() + } + if IsNotLoaded(err) { + result, err = gr.QueryDatabases().All(ctx) + } + return result, err +} diff --git a/internal/ent/generated/gql_mutation_input.go b/internal/ent/generated/gql_mutation_input.go new file mode 100644 index 0000000..d313a54 --- /dev/null +++ b/internal/ent/generated/gql_mutation_input.go @@ -0,0 +1,281 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "time" + + "github.com/datumforge/geodetic/pkg/enums" +) + +// CreateDatabaseInput represents a mutation input for creating databases. +type CreateDatabaseInput struct { + CreatedAt *time.Time + UpdatedAt *time.Time + CreatedBy *string + UpdatedBy *string + OrganizationID string + Name string + Geo *string + Dsn string + Token *string + Status *enums.DatabaseStatus + Provider *enums.DatabaseProvider + GroupID string +} + +// Mutate applies the CreateDatabaseInput on the DatabaseMutation builder. +func (i *CreateDatabaseInput) Mutate(m *DatabaseMutation) { + if v := i.CreatedAt; v != nil { + m.SetCreatedAt(*v) + } + if v := i.UpdatedAt; v != nil { + m.SetUpdatedAt(*v) + } + if v := i.CreatedBy; v != nil { + m.SetCreatedBy(*v) + } + if v := i.UpdatedBy; v != nil { + m.SetUpdatedBy(*v) + } + m.SetOrganizationID(i.OrganizationID) + m.SetName(i.Name) + if v := i.Geo; v != nil { + m.SetGeo(*v) + } + m.SetDsn(i.Dsn) + if v := i.Token; v != nil { + m.SetToken(*v) + } + if v := i.Status; v != nil { + m.SetStatus(*v) + } + if v := i.Provider; v != nil { + m.SetProvider(*v) + } + m.SetGroupID(i.GroupID) +} + +// SetInput applies the change-set in the CreateDatabaseInput on the DatabaseCreate builder. +func (c *DatabaseCreate) SetInput(i CreateDatabaseInput) *DatabaseCreate { + i.Mutate(c.Mutation()) + return c +} + +// UpdateDatabaseInput represents a mutation input for updating databases. +type UpdateDatabaseInput struct { + ClearUpdatedAt bool + UpdatedAt *time.Time + ClearUpdatedBy bool + UpdatedBy *string + OrganizationID *string + Name *string + ClearGeo bool + Geo *string + Dsn *string + ClearToken bool + Token *string + Status *enums.DatabaseStatus + Provider *enums.DatabaseProvider + GroupID *string +} + +// Mutate applies the UpdateDatabaseInput on the DatabaseMutation builder. 
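A brief aside before the Mutate implementation (an illustrative sketch, not generated code; the client value, IDs, and DSN are assumptions): these input structs are what the GraphQL layer hands to the ent builders, and SetInput is the bridge between them. Nil pointer fields are simply left untouched.

func createAndUpdateDatabase(ctx context.Context, client *generated.Client) error {
	// Create a database from an input struct; these four fields are the required ones.
	db, err := client.Database.Create().SetInput(generated.CreateDatabaseInput{
		OrganizationID: "org_123",
		Name:           "orders",
		Dsn:            "libsql://orders.example.com", // hypothetical DSN
		GroupID:        "grp_456",
	}).Save(ctx)
	if err != nil {
		return err
	}
	// Update it through UpdateDatabaseInput; unset (nil) fields are not modified.
	geo := "eu-central"
	_, err = client.Database.UpdateOne(db).SetInput(generated.UpdateDatabaseInput{
		Geo: &geo,
	}).Save(ctx)
	return err
}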
+func (i *UpdateDatabaseInput) Mutate(m *DatabaseMutation) { + if i.ClearUpdatedAt { + m.ClearUpdatedAt() + } + if v := i.UpdatedAt; v != nil { + m.SetUpdatedAt(*v) + } + if i.ClearUpdatedBy { + m.ClearUpdatedBy() + } + if v := i.UpdatedBy; v != nil { + m.SetUpdatedBy(*v) + } + if v := i.OrganizationID; v != nil { + m.SetOrganizationID(*v) + } + if v := i.Name; v != nil { + m.SetName(*v) + } + if i.ClearGeo { + m.ClearGeo() + } + if v := i.Geo; v != nil { + m.SetGeo(*v) + } + if v := i.Dsn; v != nil { + m.SetDsn(*v) + } + if i.ClearToken { + m.ClearToken() + } + if v := i.Token; v != nil { + m.SetToken(*v) + } + if v := i.Status; v != nil { + m.SetStatus(*v) + } + if v := i.Provider; v != nil { + m.SetProvider(*v) + } + if v := i.GroupID; v != nil { + m.SetGroupID(*v) + } +} + +// SetInput applies the change-set in the UpdateDatabaseInput on the DatabaseUpdate builder. +func (c *DatabaseUpdate) SetInput(i UpdateDatabaseInput) *DatabaseUpdate { + i.Mutate(c.Mutation()) + return c +} + +// SetInput applies the change-set in the UpdateDatabaseInput on the DatabaseUpdateOne builder. +func (c *DatabaseUpdateOne) SetInput(i UpdateDatabaseInput) *DatabaseUpdateOne { + i.Mutate(c.Mutation()) + return c +} + +// CreateGroupInput represents a mutation input for creating groups. +type CreateGroupInput struct { + CreatedAt *time.Time + UpdatedAt *time.Time + CreatedBy *string + UpdatedBy *string + Name string + Description *string + PrimaryLocation string + Locations []string + Token *string + Region *enums.Region + DatabaseIDs []string +} + +// Mutate applies the CreateGroupInput on the GroupMutation builder. +func (i *CreateGroupInput) Mutate(m *GroupMutation) { + if v := i.CreatedAt; v != nil { + m.SetCreatedAt(*v) + } + if v := i.UpdatedAt; v != nil { + m.SetUpdatedAt(*v) + } + if v := i.CreatedBy; v != nil { + m.SetCreatedBy(*v) + } + if v := i.UpdatedBy; v != nil { + m.SetUpdatedBy(*v) + } + m.SetName(i.Name) + if v := i.Description; v != nil { + m.SetDescription(*v) + } + m.SetPrimaryLocation(i.PrimaryLocation) + if v := i.Locations; v != nil { + m.SetLocations(v) + } + if v := i.Token; v != nil { + m.SetToken(*v) + } + if v := i.Region; v != nil { + m.SetRegion(*v) + } + if v := i.DatabaseIDs; len(v) > 0 { + m.AddDatabaseIDs(v...) + } +} + +// SetInput applies the change-set in the CreateGroupInput on the GroupCreate builder. +func (c *GroupCreate) SetInput(i CreateGroupInput) *GroupCreate { + i.Mutate(c.Mutation()) + return c +} + +// UpdateGroupInput represents a mutation input for updating groups. +type UpdateGroupInput struct { + ClearUpdatedAt bool + UpdatedAt *time.Time + ClearUpdatedBy bool + UpdatedBy *string + Name *string + ClearDescription bool + Description *string + PrimaryLocation *string + ClearLocations bool + Locations []string + AppendLocations []string + ClearToken bool + Token *string + Region *enums.Region + ClearDatabases bool + AddDatabaseIDs []string + RemoveDatabaseIDs []string +} + +// Mutate applies the UpdateGroupInput on the GroupMutation builder. 
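Before the implementation, a sketch of how the Clear*/Append* fields are used in practice (illustrative only; the id and literal values are assumptions). AppendLocations adds entries to the existing JSON list rather than replacing it:

func appendGroupLocation(ctx context.Context, client *generated.Client, id string) (*generated.Group, error) {
	desc := "primary EU group" // illustrative value
	input := generated.UpdateGroupInput{
		Description:     &desc,
		AppendLocations: []string{"fra"}, // appended to the existing locations list
	}
	return client.Group.UpdateOneID(id).SetInput(input).Save(ctx)
}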
+func (i *UpdateGroupInput) Mutate(m *GroupMutation) {
+	if i.ClearUpdatedAt {
+		m.ClearUpdatedAt()
+	}
+	if v := i.UpdatedAt; v != nil {
+		m.SetUpdatedAt(*v)
+	}
+	if i.ClearUpdatedBy {
+		m.ClearUpdatedBy()
+	}
+	if v := i.UpdatedBy; v != nil {
+		m.SetUpdatedBy(*v)
+	}
+	if v := i.Name; v != nil {
+		m.SetName(*v)
+	}
+	if i.ClearDescription {
+		m.ClearDescription()
+	}
+	if v := i.Description; v != nil {
+		m.SetDescription(*v)
+	}
+	if v := i.PrimaryLocation; v != nil {
+		m.SetPrimaryLocation(*v)
+	}
+	if i.ClearLocations {
+		m.ClearLocations()
+	}
+	if v := i.Locations; v != nil {
+		m.SetLocations(v)
+	}
+	if i.AppendLocations != nil {
+		// append the AppendLocations values, not the replacement Locations slice
+		m.AppendLocations(i.AppendLocations)
+	}
+	if i.ClearToken {
+		m.ClearToken()
+	}
+	if v := i.Token; v != nil {
+		m.SetToken(*v)
+	}
+	if v := i.Region; v != nil {
+		m.SetRegion(*v)
+	}
+	if i.ClearDatabases {
+		m.ClearDatabases()
+	}
+	if v := i.AddDatabaseIDs; len(v) > 0 {
+		m.AddDatabaseIDs(v...)
+	}
+	if v := i.RemoveDatabaseIDs; len(v) > 0 {
+		m.RemoveDatabaseIDs(v...)
+	}
+}
+
+// SetInput applies the change-set in the UpdateGroupInput on the GroupUpdate builder.
+func (c *GroupUpdate) SetInput(i UpdateGroupInput) *GroupUpdate {
+	i.Mutate(c.Mutation())
+	return c
+}
+
+// SetInput applies the change-set in the UpdateGroupInput on the GroupUpdateOne builder.
+func (c *GroupUpdateOne) SetInput(i UpdateGroupInput) *GroupUpdateOne {
+	i.Mutate(c.Mutation())
+	return c
+}
diff --git a/internal/ent/generated/gql_node.go b/internal/ent/generated/gql_node.go
new file mode 100644
index 0000000..80aaf18
--- /dev/null
+++ b/internal/ent/generated/gql_node.go
@@ -0,0 +1,218 @@
+// Code generated by ent, DO NOT EDIT.
+
+package generated
+
+import (
+	"context"
+	"fmt"
+
+	"entgo.io/contrib/entgql"
+	"github.com/99designs/gqlgen/graphql"
+	"github.com/datumforge/geodetic/internal/ent/generated/database"
+	"github.com/datumforge/geodetic/internal/ent/generated/group"
+	"github.com/hashicorp/go-multierror"
+)
+
+// Noder wraps the basic Node method.
+type Noder interface {
+	IsNode()
+}
+
+// IsNode implements the Node interface check for GQLGen.
+func (n *Database) IsNode() {}
+
+// IsNode implements the Node interface check for GQLGen.
+func (n *Group) IsNode() {}
+
+var errNodeInvalidID = &NotFoundError{"node"}
+
+// NodeOption allows configuring the Noder execution using functional options.
+type NodeOption func(*nodeOptions)
+
+// WithNodeType sets the node Type resolver function (i.e. the table to query).
+// If it was not provided, the table will be derived from the universal-id
+// configuration as described in: https://entgo.io/docs/migrate/#universal-ids.
+func WithNodeType(f func(context.Context, string) (string, error)) NodeOption {
+	return func(o *nodeOptions) {
+		o.nodeType = f
+	}
+}
+
+// WithFixedNodeType sets the Type of the node to a fixed value.
+func WithFixedNodeType(t string) NodeOption {
+	return WithNodeType(func(context.Context, string) (string, error) {
+		return t, nil
+	})
+}
+
+type nodeOptions struct {
+	nodeType func(context.Context, string) (string, error)
+}
+
+func (c *Client) newNodeOpts(opts []NodeOption) *nodeOptions {
+	nopts := &nodeOptions{}
+	for _, opt := range opts {
+		opt(nopts)
+	}
+	if nopts.nodeType == nil {
+		nopts.nodeType = func(ctx context.Context, id string) (string, error) {
+			return "", fmt.Errorf("cannot resolve noder (%v) without its type", id)
+		}
+	}
+	return nopts
+}
+
+// Noder returns a Node by its id.
If the NodeType was not provided, it will +// be derived from the id value according to the universal-id configuration. +// +// c.Noder(ctx, id) +// c.Noder(ctx, id, ent.WithNodeType(typeResolver)) +func (c *Client) Noder(ctx context.Context, id string, opts ...NodeOption) (_ Noder, err error) { + defer func() { + if IsNotFound(err) { + err = multierror.Append(err, entgql.ErrNodeNotFound(id)) + } + }() + table, err := c.newNodeOpts(opts).nodeType(ctx, id) + if err != nil { + return nil, err + } + return c.noder(ctx, table, id) +} + +func (c *Client) noder(ctx context.Context, table string, id string) (Noder, error) { + switch table { + case database.Table: + query := c.Database.Query(). + Where(database.ID(id)) + query, err := query.CollectFields(ctx, "Database") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) + if err != nil { + return nil, err + } + return n, nil + case group.Table: + query := c.Group.Query(). + Where(group.ID(id)) + query, err := query.CollectFields(ctx, "Group") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) + if err != nil { + return nil, err + } + return n, nil + default: + return nil, fmt.Errorf("cannot resolve noder from table %q: %w", table, errNodeInvalidID) + } +} + +func (c *Client) Noders(ctx context.Context, ids []string, opts ...NodeOption) ([]Noder, error) { + switch len(ids) { + case 1: + noder, err := c.Noder(ctx, ids[0], opts...) + if err != nil { + return nil, err + } + return []Noder{noder}, nil + case 0: + return []Noder{}, nil + } + + noders := make([]Noder, len(ids)) + errors := make([]error, len(ids)) + tables := make(map[string][]string) + id2idx := make(map[string][]int, len(ids)) + nopts := c.newNodeOpts(opts) + for i, id := range ids { + table, err := nopts.nodeType(ctx, id) + if err != nil { + errors[i] = err + continue + } + tables[table] = append(tables[table], id) + id2idx[id] = append(id2idx[id], i) + } + + for table, ids := range tables { + nodes, err := c.noders(ctx, table, ids) + if err != nil { + for _, id := range ids { + for _, idx := range id2idx[id] { + errors[idx] = err + } + } + } else { + for i, id := range ids { + for _, idx := range id2idx[id] { + noders[idx] = nodes[i] + } + } + } + } + + for i, id := range ids { + if errors[i] == nil { + if noders[i] != nil { + continue + } + errors[i] = entgql.ErrNodeNotFound(id) + } else if IsNotFound(errors[i]) { + errors[i] = multierror.Append(errors[i], entgql.ErrNodeNotFound(id)) + } + ctx := graphql.WithPathContext(ctx, + graphql.NewPathWithIndex(i), + ) + graphql.AddError(ctx, errors[i]) + } + return noders, nil +} + +func (c *Client) noders(ctx context.Context, table string, ids []string) ([]Noder, error) { + noders := make([]Noder, len(ids)) + idmap := make(map[string][]*Noder, len(ids)) + for i, id := range ids { + idmap[id] = append(idmap[id], &noders[i]) + } + switch table { + case database.Table: + query := c.Database.Query(). + Where(database.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Database") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) + if err != nil { + return nil, err + } + for _, node := range nodes { + for _, noder := range idmap[node.ID] { + *noder = node + } + } + case group.Table: + query := c.Group.Query(). 
+ Where(group.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Group") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) + if err != nil { + return nil, err + } + for _, node := range nodes { + for _, noder := range idmap[node.ID] { + *noder = node + } + } + default: + return nil, fmt.Errorf("cannot resolve noders from table %q: %w", table, errNodeInvalidID) + } + return noders, nil +} diff --git a/internal/ent/generated/gql_pagination.go b/internal/ent/generated/gql_pagination.go new file mode 100644 index 0000000..ce24038 --- /dev/null +++ b/internal/ent/generated/gql_pagination.go @@ -0,0 +1,589 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "context" + "errors" + + "entgo.io/contrib/entgql" + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/99designs/gqlgen/graphql" + "github.com/99designs/gqlgen/graphql/errcode" + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/vektah/gqlparser/v2/gqlerror" +) + +// Common entgql types. +type ( + Cursor = entgql.Cursor[string] + PageInfo = entgql.PageInfo[string] + OrderDirection = entgql.OrderDirection +) + +func orderFunc(o OrderDirection, field string) func(*sql.Selector) { + if o == entgql.OrderDirectionDesc { + return Desc(field) + } + return Asc(field) +} + +const errInvalidPagination = "INVALID_PAGINATION" + +func validateFirstLast(first, last *int) (err *gqlerror.Error) { + switch { + case first != nil && last != nil: + err = &gqlerror.Error{ + Message: "Passing both `first` and `last` to paginate a connection is not supported.", + } + case first != nil && *first < 0: + err = &gqlerror.Error{ + Message: "`first` on a connection cannot be less than zero.", + } + errcode.Set(err, errInvalidPagination) + case last != nil && *last < 0: + err = &gqlerror.Error{ + Message: "`last` on a connection cannot be less than zero.", + } + errcode.Set(err, errInvalidPagination) + } + return err +} + +func collectedField(ctx context.Context, path ...string) *graphql.CollectedField { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return nil + } + field := fc.Field + oc := graphql.GetOperationContext(ctx) +walk: + for _, name := range path { + for _, f := range graphql.CollectFields(oc, field.Selections, nil) { + if f.Alias == name { + field = f + continue walk + } + } + return nil + } + return &field +} + +func hasCollectedField(ctx context.Context, path ...string) bool { + if graphql.GetFieldContext(ctx) == nil { + return true + } + return collectedField(ctx, path...) != nil +} + +const ( + edgesField = "edges" + nodeField = "node" + pageInfoField = "pageInfo" + totalCountField = "totalCount" +) + +func paginateLimit(first, last *int) int { + var limit int + if first != nil { + limit = *first + 1 + } else if last != nil { + limit = *last + 1 + } + return limit +} + +// DatabaseEdge is the edge representation of Database. +type DatabaseEdge struct { + Node *Database `json:"node"` + Cursor Cursor `json:"cursor"` +} + +// DatabaseConnection is the connection containing edges to Database. 
+type DatabaseConnection struct { + Edges []*DatabaseEdge `json:"edges"` + PageInfo PageInfo `json:"pageInfo"` + TotalCount int `json:"totalCount"` +} + +func (c *DatabaseConnection) build(nodes []*Database, pager *databasePager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Database + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Database { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Database { + return nodes[i] + } + } + c.Edges = make([]*DatabaseEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &DatabaseEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + +// DatabasePaginateOption enables pagination customization. +type DatabasePaginateOption func(*databasePager) error + +// WithDatabaseOrder configures pagination ordering. +func WithDatabaseOrder(order *DatabaseOrder) DatabasePaginateOption { + if order == nil { + order = DefaultDatabaseOrder + } + o := *order + return func(pager *databasePager) error { + if err := o.Direction.Validate(); err != nil { + return err + } + if o.Field == nil { + o.Field = DefaultDatabaseOrder.Field + } + pager.order = &o + return nil + } +} + +// WithDatabaseFilter configures pagination filter. +func WithDatabaseFilter(filter func(*DatabaseQuery) (*DatabaseQuery, error)) DatabasePaginateOption { + return func(pager *databasePager) error { + if filter == nil { + return errors.New("DatabaseQuery filter cannot be nil") + } + pager.filter = filter + return nil + } +} + +type databasePager struct { + reverse bool + order *DatabaseOrder + filter func(*DatabaseQuery) (*DatabaseQuery, error) +} + +func newDatabasePager(opts []DatabasePaginateOption, reverse bool) (*databasePager, error) { + pager := &databasePager{reverse: reverse} + for _, opt := range opts { + if err := opt(pager); err != nil { + return nil, err + } + } + if pager.order == nil { + pager.order = DefaultDatabaseOrder + } + return pager, nil +} + +func (p *databasePager) applyFilter(query *DatabaseQuery) (*DatabaseQuery, error) { + if p.filter != nil { + return p.filter(query) + } + return query, nil +} + +func (p *databasePager) toCursor(d *Database) Cursor { + return p.order.Field.toCursor(d) +} + +func (p *databasePager) applyCursors(query *DatabaseQuery, after, before *Cursor) (*DatabaseQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultDatabaseOrder.Field.column, p.order.Field.column, direction) { + query = query.Where(predicate) + } + return query, nil +} + +func (p *databasePager) applyOrder(query *DatabaseQuery) *DatabaseQuery { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) + if p.order.Field != DefaultDatabaseOrder.Field { + query = query.Order(DefaultDatabaseOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + 
query.ctx.AppendFieldOnce(p.order.Field.column) + } + return query +} + +func (p *databasePager) orderExpr(query *DatabaseQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultDatabaseOrder.Field { + b.Comma().Ident(DefaultDatabaseOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + +// Paginate executes the query and returns a relay based cursor connection to Database. +func (d *DatabaseQuery) Paginate( + ctx context.Context, after *Cursor, first *int, + before *Cursor, last *int, opts ...DatabasePaginateOption, +) (*DatabaseConnection, error) { + if err := validateFirstLast(first, last); err != nil { + return nil, err + } + pager, err := newDatabasePager(opts, last != nil) + if err != nil { + return nil, err + } + if d, err = pager.applyFilter(d); err != nil { + return nil, err + } + conn := &DatabaseConnection{Edges: []*DatabaseEdge{}} + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = d.Clone().Count(ctx); err != nil { + return nil, err + } + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 + } + } + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil + } + if d, err = pager.applyCursors(d, after, before); err != nil { + return nil, err + } + if limit := paginateLimit(first, last); limit != 0 { + d.Limit(limit) + } + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := d.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err + } + } + d = pager.applyOrder(d) + nodes, err := d.All(ctx) + if err != nil { + return nil, err + } + conn.build(nodes, pager, after, first, before, last) + return conn, nil +} + +// DatabaseOrderField defines the ordering field of Database. +type DatabaseOrderField struct { + // Value extracts the ordering value from the given Database. + Value func(*Database) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) database.OrderOption + toCursor func(*Database) Cursor +} + +// DatabaseOrder defines the ordering of Database. +type DatabaseOrder struct { + Direction OrderDirection `json:"direction"` + Field *DatabaseOrderField `json:"field"` +} + +// DefaultDatabaseOrder is the default ordering of Database. +var DefaultDatabaseOrder = &DatabaseOrder{ + Direction: entgql.OrderDirectionAsc, + Field: &DatabaseOrderField{ + Value: func(d *Database) (ent.Value, error) { + return d.ID, nil + }, + column: database.FieldID, + toTerm: database.ByID, + toCursor: func(d *Database) Cursor { + return Cursor{ID: d.ID} + }, + }, +} + +// ToEdge converts Database into DatabaseEdge. +func (d *Database) ToEdge(order *DatabaseOrder) *DatabaseEdge { + if order == nil { + order = DefaultDatabaseOrder + } + return &DatabaseEdge{ + Node: d, + Cursor: order.Field.toCursor(d), + } +} + +// GroupEdge is the edge representation of Group. 
+type GroupEdge struct { + Node *Group `json:"node"` + Cursor Cursor `json:"cursor"` +} + +// GroupConnection is the connection containing edges to Group. +type GroupConnection struct { + Edges []*GroupEdge `json:"edges"` + PageInfo PageInfo `json:"pageInfo"` + TotalCount int `json:"totalCount"` +} + +func (c *GroupConnection) build(nodes []*Group, pager *groupPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Group + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Group { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Group { + return nodes[i] + } + } + c.Edges = make([]*GroupEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &GroupEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + +// GroupPaginateOption enables pagination customization. +type GroupPaginateOption func(*groupPager) error + +// WithGroupOrder configures pagination ordering. +func WithGroupOrder(order *GroupOrder) GroupPaginateOption { + if order == nil { + order = DefaultGroupOrder + } + o := *order + return func(pager *groupPager) error { + if err := o.Direction.Validate(); err != nil { + return err + } + if o.Field == nil { + o.Field = DefaultGroupOrder.Field + } + pager.order = &o + return nil + } +} + +// WithGroupFilter configures pagination filter. 
+func WithGroupFilter(filter func(*GroupQuery) (*GroupQuery, error)) GroupPaginateOption { + return func(pager *groupPager) error { + if filter == nil { + return errors.New("GroupQuery filter cannot be nil") + } + pager.filter = filter + return nil + } +} + +type groupPager struct { + reverse bool + order *GroupOrder + filter func(*GroupQuery) (*GroupQuery, error) +} + +func newGroupPager(opts []GroupPaginateOption, reverse bool) (*groupPager, error) { + pager := &groupPager{reverse: reverse} + for _, opt := range opts { + if err := opt(pager); err != nil { + return nil, err + } + } + if pager.order == nil { + pager.order = DefaultGroupOrder + } + return pager, nil +} + +func (p *groupPager) applyFilter(query *GroupQuery) (*GroupQuery, error) { + if p.filter != nil { + return p.filter(query) + } + return query, nil +} + +func (p *groupPager) toCursor(gr *Group) Cursor { + return p.order.Field.toCursor(gr) +} + +func (p *groupPager) applyCursors(query *GroupQuery, after, before *Cursor) (*GroupQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultGroupOrder.Field.column, p.order.Field.column, direction) { + query = query.Where(predicate) + } + return query, nil +} + +func (p *groupPager) applyOrder(query *GroupQuery) *GroupQuery { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) + if p.order.Field != DefaultGroupOrder.Field { + query = query.Order(DefaultGroupOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return query +} + +func (p *groupPager) orderExpr(query *GroupQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultGroupOrder.Field { + b.Comma().Ident(DefaultGroupOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + +// Paginate executes the query and returns a relay based cursor connection to Group. 
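An illustrative sketch of the relay-style pagination this enables (the page size and client are assumptions; Paginate's signature is as generated below). The end cursor of one page feeds the `after` argument of the next:

func groupPages(ctx context.Context, client *generated.Client) error {
	n := 10
	// First page: ten groups, default ordering (ascending by id).
	conn, err := client.Group.Query().Paginate(ctx, nil, &n, nil, nil)
	if err != nil {
		return err
	}
	// Next page: resume after the previous page's end cursor.
	_, err = client.Group.Query().Paginate(ctx, conn.PageInfo.EndCursor, &n, nil, nil)
	return err
}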
+func (gr *GroupQuery) Paginate( + ctx context.Context, after *Cursor, first *int, + before *Cursor, last *int, opts ...GroupPaginateOption, +) (*GroupConnection, error) { + if err := validateFirstLast(first, last); err != nil { + return nil, err + } + pager, err := newGroupPager(opts, last != nil) + if err != nil { + return nil, err + } + if gr, err = pager.applyFilter(gr); err != nil { + return nil, err + } + conn := &GroupConnection{Edges: []*GroupEdge{}} + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = gr.Clone().Count(ctx); err != nil { + return nil, err + } + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 + } + } + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil + } + if gr, err = pager.applyCursors(gr, after, before); err != nil { + return nil, err + } + if limit := paginateLimit(first, last); limit != 0 { + gr.Limit(limit) + } + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := gr.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err + } + } + gr = pager.applyOrder(gr) + nodes, err := gr.All(ctx) + if err != nil { + return nil, err + } + conn.build(nodes, pager, after, first, before, last) + return conn, nil +} + +// GroupOrderField defines the ordering field of Group. +type GroupOrderField struct { + // Value extracts the ordering value from the given Group. + Value func(*Group) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) group.OrderOption + toCursor func(*Group) Cursor +} + +// GroupOrder defines the ordering of Group. +type GroupOrder struct { + Direction OrderDirection `json:"direction"` + Field *GroupOrderField `json:"field"` +} + +// DefaultGroupOrder is the default ordering of Group. +var DefaultGroupOrder = &GroupOrder{ + Direction: entgql.OrderDirectionAsc, + Field: &GroupOrderField{ + Value: func(gr *Group) (ent.Value, error) { + return gr.ID, nil + }, + column: group.FieldID, + toTerm: group.ByID, + toCursor: func(gr *Group) Cursor { + return Cursor{ID: gr.ID} + }, + }, +} + +// ToEdge converts Group into GroupEdge. +func (gr *Group) ToEdge(order *GroupOrder) *GroupEdge { + if order == nil { + order = DefaultGroupOrder + } + return &GroupEdge{ + Node: gr, + Cursor: order.Field.toCursor(gr), + } +} diff --git a/internal/ent/generated/gql_transaction.go b/internal/ent/generated/gql_transaction.go new file mode 100644 index 0000000..40087a4 --- /dev/null +++ b/internal/ent/generated/gql_transaction.go @@ -0,0 +1,30 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "context" + "database/sql/driver" + "errors" +) + +// OpenTx opens a transaction and returns a transactional +// context along with the created transaction. +func (c *Client) OpenTx(ctx context.Context) (context.Context, driver.Tx, error) { + tx, err := c.Tx(ctx) + if err != nil { + return nil, nil, err + } + ctx = NewTxContext(ctx, tx) + ctx = NewContext(ctx, tx.Client()) + return ctx, tx, nil +} + +// OpenTxFromContext open transactions from client stored in context. 
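As a sketch of how these transaction helpers are used (illustrative only; the group's values are assumptions and error handling is minimal). OpenTx stores the transactional client in the returned context, and FromContext retrieves it:

func createGroupInTx(ctx context.Context, client *generated.Client) error {
	txCtx, tx, err := client.OpenTx(ctx)
	if err != nil {
		return err
	}
	// Operations through the context-bound client run inside tx.
	if _, err := generated.FromContext(txCtx).Group.Create().
		SetName("emea").           // illustrative values; name and primary
		SetPrimaryLocation("fra"). // location appear to be the required fields
		Save(txCtx); err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}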
+func OpenTxFromContext(ctx context.Context) (context.Context, driver.Tx, error) { + client := FromContext(ctx) + if client == nil { + return nil, nil, errors.New("no client attached to context") + } + return client.OpenTx(ctx) +} diff --git a/internal/ent/generated/gql_where_input.go b/internal/ent/generated/gql_where_input.go new file mode 100644 index 0000000..0ffa074 --- /dev/null +++ b/internal/ent/generated/gql_where_input.go @@ -0,0 +1,1572 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "errors" + "fmt" + "time" + + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/geodetic/internal/ent/generated/predicate" + "github.com/datumforge/geodetic/pkg/enums" +) + +// DatabaseWhereInput represents a where input for filtering Database queries. +type DatabaseWhereInput struct { + Predicates []predicate.Database `json:"-"` + Not *DatabaseWhereInput `json:"not,omitempty"` + Or []*DatabaseWhereInput `json:"or,omitempty"` + And []*DatabaseWhereInput `json:"and,omitempty"` + + // "id" field predicates. + ID *string `json:"id,omitempty"` + IDNEQ *string `json:"idNEQ,omitempty"` + IDIn []string `json:"idIn,omitempty"` + IDNotIn []string `json:"idNotIn,omitempty"` + IDGT *string `json:"idGT,omitempty"` + IDGTE *string `json:"idGTE,omitempty"` + IDLT *string `json:"idLT,omitempty"` + IDLTE *string `json:"idLTE,omitempty"` + IDEqualFold *string `json:"idEqualFold,omitempty"` + IDContainsFold *string `json:"idContainsFold,omitempty"` + + // "created_at" field predicates. + CreatedAt *time.Time `json:"createdAt,omitempty"` + CreatedAtNEQ *time.Time `json:"createdAtNEQ,omitempty"` + CreatedAtIn []time.Time `json:"createdAtIn,omitempty"` + CreatedAtNotIn []time.Time `json:"createdAtNotIn,omitempty"` + CreatedAtGT *time.Time `json:"createdAtGT,omitempty"` + CreatedAtGTE *time.Time `json:"createdAtGTE,omitempty"` + CreatedAtLT *time.Time `json:"createdAtLT,omitempty"` + CreatedAtLTE *time.Time `json:"createdAtLTE,omitempty"` + CreatedAtIsNil bool `json:"createdAtIsNil,omitempty"` + CreatedAtNotNil bool `json:"createdAtNotNil,omitempty"` + + // "updated_at" field predicates. + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + UpdatedAtNEQ *time.Time `json:"updatedAtNEQ,omitempty"` + UpdatedAtIn []time.Time `json:"updatedAtIn,omitempty"` + UpdatedAtNotIn []time.Time `json:"updatedAtNotIn,omitempty"` + UpdatedAtGT *time.Time `json:"updatedAtGT,omitempty"` + UpdatedAtGTE *time.Time `json:"updatedAtGTE,omitempty"` + UpdatedAtLT *time.Time `json:"updatedAtLT,omitempty"` + UpdatedAtLTE *time.Time `json:"updatedAtLTE,omitempty"` + UpdatedAtIsNil bool `json:"updatedAtIsNil,omitempty"` + UpdatedAtNotNil bool `json:"updatedAtNotNil,omitempty"` + + // "created_by" field predicates. 
+ CreatedBy *string `json:"createdBy,omitempty"` + CreatedByNEQ *string `json:"createdByNEQ,omitempty"` + CreatedByIn []string `json:"createdByIn,omitempty"` + CreatedByNotIn []string `json:"createdByNotIn,omitempty"` + CreatedByGT *string `json:"createdByGT,omitempty"` + CreatedByGTE *string `json:"createdByGTE,omitempty"` + CreatedByLT *string `json:"createdByLT,omitempty"` + CreatedByLTE *string `json:"createdByLTE,omitempty"` + CreatedByContains *string `json:"createdByContains,omitempty"` + CreatedByHasPrefix *string `json:"createdByHasPrefix,omitempty"` + CreatedByHasSuffix *string `json:"createdByHasSuffix,omitempty"` + CreatedByIsNil bool `json:"createdByIsNil,omitempty"` + CreatedByNotNil bool `json:"createdByNotNil,omitempty"` + CreatedByEqualFold *string `json:"createdByEqualFold,omitempty"` + CreatedByContainsFold *string `json:"createdByContainsFold,omitempty"` + + // "updated_by" field predicates. + UpdatedBy *string `json:"updatedBy,omitempty"` + UpdatedByNEQ *string `json:"updatedByNEQ,omitempty"` + UpdatedByIn []string `json:"updatedByIn,omitempty"` + UpdatedByNotIn []string `json:"updatedByNotIn,omitempty"` + UpdatedByGT *string `json:"updatedByGT,omitempty"` + UpdatedByGTE *string `json:"updatedByGTE,omitempty"` + UpdatedByLT *string `json:"updatedByLT,omitempty"` + UpdatedByLTE *string `json:"updatedByLTE,omitempty"` + UpdatedByContains *string `json:"updatedByContains,omitempty"` + UpdatedByHasPrefix *string `json:"updatedByHasPrefix,omitempty"` + UpdatedByHasSuffix *string `json:"updatedByHasSuffix,omitempty"` + UpdatedByIsNil bool `json:"updatedByIsNil,omitempty"` + UpdatedByNotNil bool `json:"updatedByNotNil,omitempty"` + UpdatedByEqualFold *string `json:"updatedByEqualFold,omitempty"` + UpdatedByContainsFold *string `json:"updatedByContainsFold,omitempty"` + + // "deleted_at" field predicates. + DeletedAt *time.Time `json:"deletedAt,omitempty"` + DeletedAtNEQ *time.Time `json:"deletedAtNEQ,omitempty"` + DeletedAtIn []time.Time `json:"deletedAtIn,omitempty"` + DeletedAtNotIn []time.Time `json:"deletedAtNotIn,omitempty"` + DeletedAtGT *time.Time `json:"deletedAtGT,omitempty"` + DeletedAtGTE *time.Time `json:"deletedAtGTE,omitempty"` + DeletedAtLT *time.Time `json:"deletedAtLT,omitempty"` + DeletedAtLTE *time.Time `json:"deletedAtLTE,omitempty"` + DeletedAtIsNil bool `json:"deletedAtIsNil,omitempty"` + DeletedAtNotNil bool `json:"deletedAtNotNil,omitempty"` + + // "deleted_by" field predicates. + DeletedBy *string `json:"deletedBy,omitempty"` + DeletedByNEQ *string `json:"deletedByNEQ,omitempty"` + DeletedByIn []string `json:"deletedByIn,omitempty"` + DeletedByNotIn []string `json:"deletedByNotIn,omitempty"` + DeletedByGT *string `json:"deletedByGT,omitempty"` + DeletedByGTE *string `json:"deletedByGTE,omitempty"` + DeletedByLT *string `json:"deletedByLT,omitempty"` + DeletedByLTE *string `json:"deletedByLTE,omitempty"` + DeletedByContains *string `json:"deletedByContains,omitempty"` + DeletedByHasPrefix *string `json:"deletedByHasPrefix,omitempty"` + DeletedByHasSuffix *string `json:"deletedByHasSuffix,omitempty"` + DeletedByIsNil bool `json:"deletedByIsNil,omitempty"` + DeletedByNotNil bool `json:"deletedByNotNil,omitempty"` + DeletedByEqualFold *string `json:"deletedByEqualFold,omitempty"` + DeletedByContainsFold *string `json:"deletedByContainsFold,omitempty"` + + // "organization_id" field predicates. 
+ OrganizationID *string `json:"organizationID,omitempty"` + OrganizationIDNEQ *string `json:"organizationIDNEQ,omitempty"` + OrganizationIDIn []string `json:"organizationIDIn,omitempty"` + OrganizationIDNotIn []string `json:"organizationIDNotIn,omitempty"` + OrganizationIDGT *string `json:"organizationIDGT,omitempty"` + OrganizationIDGTE *string `json:"organizationIDGTE,omitempty"` + OrganizationIDLT *string `json:"organizationIDLT,omitempty"` + OrganizationIDLTE *string `json:"organizationIDLTE,omitempty"` + OrganizationIDContains *string `json:"organizationIDContains,omitempty"` + OrganizationIDHasPrefix *string `json:"organizationIDHasPrefix,omitempty"` + OrganizationIDHasSuffix *string `json:"organizationIDHasSuffix,omitempty"` + OrganizationIDEqualFold *string `json:"organizationIDEqualFold,omitempty"` + OrganizationIDContainsFold *string `json:"organizationIDContainsFold,omitempty"` + + // "name" field predicates. + Name *string `json:"name,omitempty"` + NameNEQ *string `json:"nameNEQ,omitempty"` + NameIn []string `json:"nameIn,omitempty"` + NameNotIn []string `json:"nameNotIn,omitempty"` + NameGT *string `json:"nameGT,omitempty"` + NameGTE *string `json:"nameGTE,omitempty"` + NameLT *string `json:"nameLT,omitempty"` + NameLTE *string `json:"nameLTE,omitempty"` + NameContains *string `json:"nameContains,omitempty"` + NameHasPrefix *string `json:"nameHasPrefix,omitempty"` + NameHasSuffix *string `json:"nameHasSuffix,omitempty"` + NameEqualFold *string `json:"nameEqualFold,omitempty"` + NameContainsFold *string `json:"nameContainsFold,omitempty"` + + // "geo" field predicates. + Geo *string `json:"geo,omitempty"` + GeoNEQ *string `json:"geoNEQ,omitempty"` + GeoIn []string `json:"geoIn,omitempty"` + GeoNotIn []string `json:"geoNotIn,omitempty"` + GeoGT *string `json:"geoGT,omitempty"` + GeoGTE *string `json:"geoGTE,omitempty"` + GeoLT *string `json:"geoLT,omitempty"` + GeoLTE *string `json:"geoLTE,omitempty"` + GeoContains *string `json:"geoContains,omitempty"` + GeoHasPrefix *string `json:"geoHasPrefix,omitempty"` + GeoHasSuffix *string `json:"geoHasSuffix,omitempty"` + GeoIsNil bool `json:"geoIsNil,omitempty"` + GeoNotNil bool `json:"geoNotNil,omitempty"` + GeoEqualFold *string `json:"geoEqualFold,omitempty"` + GeoContainsFold *string `json:"geoContainsFold,omitempty"` + + // "dsn" field predicates. + Dsn *string `json:"dsn,omitempty"` + DsnNEQ *string `json:"dsnNEQ,omitempty"` + DsnIn []string `json:"dsnIn,omitempty"` + DsnNotIn []string `json:"dsnNotIn,omitempty"` + DsnGT *string `json:"dsnGT,omitempty"` + DsnGTE *string `json:"dsnGTE,omitempty"` + DsnLT *string `json:"dsnLT,omitempty"` + DsnLTE *string `json:"dsnLTE,omitempty"` + DsnContains *string `json:"dsnContains,omitempty"` + DsnHasPrefix *string `json:"dsnHasPrefix,omitempty"` + DsnHasSuffix *string `json:"dsnHasSuffix,omitempty"` + DsnEqualFold *string `json:"dsnEqualFold,omitempty"` + DsnContainsFold *string `json:"dsnContainsFold,omitempty"` + + // "group_id" field predicates. 
+ GroupID *string `json:"groupID,omitempty"` + GroupIDNEQ *string `json:"groupIDNEQ,omitempty"` + GroupIDIn []string `json:"groupIDIn,omitempty"` + GroupIDNotIn []string `json:"groupIDNotIn,omitempty"` + GroupIDGT *string `json:"groupIDGT,omitempty"` + GroupIDGTE *string `json:"groupIDGTE,omitempty"` + GroupIDLT *string `json:"groupIDLT,omitempty"` + GroupIDLTE *string `json:"groupIDLTE,omitempty"` + GroupIDContains *string `json:"groupIDContains,omitempty"` + GroupIDHasPrefix *string `json:"groupIDHasPrefix,omitempty"` + GroupIDHasSuffix *string `json:"groupIDHasSuffix,omitempty"` + GroupIDEqualFold *string `json:"groupIDEqualFold,omitempty"` + GroupIDContainsFold *string `json:"groupIDContainsFold,omitempty"` + + // "token" field predicates. + Token *string `json:"token,omitempty"` + TokenNEQ *string `json:"tokenNEQ,omitempty"` + TokenIn []string `json:"tokenIn,omitempty"` + TokenNotIn []string `json:"tokenNotIn,omitempty"` + TokenGT *string `json:"tokenGT,omitempty"` + TokenGTE *string `json:"tokenGTE,omitempty"` + TokenLT *string `json:"tokenLT,omitempty"` + TokenLTE *string `json:"tokenLTE,omitempty"` + TokenContains *string `json:"tokenContains,omitempty"` + TokenHasPrefix *string `json:"tokenHasPrefix,omitempty"` + TokenHasSuffix *string `json:"tokenHasSuffix,omitempty"` + TokenIsNil bool `json:"tokenIsNil,omitempty"` + TokenNotNil bool `json:"tokenNotNil,omitempty"` + TokenEqualFold *string `json:"tokenEqualFold,omitempty"` + TokenContainsFold *string `json:"tokenContainsFold,omitempty"` + + // "status" field predicates. + Status *enums.DatabaseStatus `json:"status,omitempty"` + StatusNEQ *enums.DatabaseStatus `json:"statusNEQ,omitempty"` + StatusIn []enums.DatabaseStatus `json:"statusIn,omitempty"` + StatusNotIn []enums.DatabaseStatus `json:"statusNotIn,omitempty"` + + // "provider" field predicates. + Provider *enums.DatabaseProvider `json:"provider,omitempty"` + ProviderNEQ *enums.DatabaseProvider `json:"providerNEQ,omitempty"` + ProviderIn []enums.DatabaseProvider `json:"providerIn,omitempty"` + ProviderNotIn []enums.DatabaseProvider `json:"providerNotIn,omitempty"` + + // "group" edge predicates. + HasGroup *bool `json:"hasGroup,omitempty"` + HasGroupWith []*GroupWhereInput `json:"hasGroupWith,omitempty"` +} + +// AddPredicates adds custom predicates to the where input to be used during the filtering phase. +func (i *DatabaseWhereInput) AddPredicates(predicates ...predicate.Database) { + i.Predicates = append(i.Predicates, predicates...) +} + +// Filter applies the DatabaseWhereInput filter on the DatabaseQuery builder. +func (i *DatabaseWhereInput) Filter(q *DatabaseQuery) (*DatabaseQuery, error) { + if i == nil { + return q, nil + } + p, err := i.P() + if err != nil { + if err == ErrEmptyDatabaseWhereInput { + return q, nil + } + return nil, err + } + return q.Where(p), nil +} + +// ErrEmptyDatabaseWhereInput is returned in case the DatabaseWhereInput is empty. +var ErrEmptyDatabaseWhereInput = errors.New("generated: empty predicate DatabaseWhereInput") + +// P returns a predicate for filtering databases. +// An error is returned if the input is empty or invalid. 
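+//
+// A minimal usage sketch (editor's illustration, not part of the generated
+// output); it assumes the caller already holds a *DatabaseQuery named q and a
+// context.Context named ctx:
+//
+//	name := "prod"
+//	where := &DatabaseWhereInput{NameContains: &name}
+//	p, err := where.P()
+//	if err != nil {
+//		return nil, err // ErrEmptyDatabaseWhereInput when no predicates are set
+//	}
+//	return q.Where(p).All(ctx)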
+func (i *DatabaseWhereInput) P() (predicate.Database, error) { + var predicates []predicate.Database + if i.Not != nil { + p, err := i.Not.P() + if err != nil { + return nil, fmt.Errorf("%w: field 'not'", err) + } + predicates = append(predicates, database.Not(p)) + } + switch n := len(i.Or); { + case n == 1: + p, err := i.Or[0].P() + if err != nil { + return nil, fmt.Errorf("%w: field 'or'", err) + } + predicates = append(predicates, p) + case n > 1: + or := make([]predicate.Database, 0, n) + for _, w := range i.Or { + p, err := w.P() + if err != nil { + return nil, fmt.Errorf("%w: field 'or'", err) + } + or = append(or, p) + } + predicates = append(predicates, database.Or(or...)) + } + switch n := len(i.And); { + case n == 1: + p, err := i.And[0].P() + if err != nil { + return nil, fmt.Errorf("%w: field 'and'", err) + } + predicates = append(predicates, p) + case n > 1: + and := make([]predicate.Database, 0, n) + for _, w := range i.And { + p, err := w.P() + if err != nil { + return nil, fmt.Errorf("%w: field 'and'", err) + } + and = append(and, p) + } + predicates = append(predicates, database.And(and...)) + } + predicates = append(predicates, i.Predicates...) + if i.ID != nil { + predicates = append(predicates, database.IDEQ(*i.ID)) + } + if i.IDNEQ != nil { + predicates = append(predicates, database.IDNEQ(*i.IDNEQ)) + } + if len(i.IDIn) > 0 { + predicates = append(predicates, database.IDIn(i.IDIn...)) + } + if len(i.IDNotIn) > 0 { + predicates = append(predicates, database.IDNotIn(i.IDNotIn...)) + } + if i.IDGT != nil { + predicates = append(predicates, database.IDGT(*i.IDGT)) + } + if i.IDGTE != nil { + predicates = append(predicates, database.IDGTE(*i.IDGTE)) + } + if i.IDLT != nil { + predicates = append(predicates, database.IDLT(*i.IDLT)) + } + if i.IDLTE != nil { + predicates = append(predicates, database.IDLTE(*i.IDLTE)) + } + if i.IDEqualFold != nil { + predicates = append(predicates, database.IDEqualFold(*i.IDEqualFold)) + } + if i.IDContainsFold != nil { + predicates = append(predicates, database.IDContainsFold(*i.IDContainsFold)) + } + if i.CreatedAt != nil { + predicates = append(predicates, database.CreatedAtEQ(*i.CreatedAt)) + } + if i.CreatedAtNEQ != nil { + predicates = append(predicates, database.CreatedAtNEQ(*i.CreatedAtNEQ)) + } + if len(i.CreatedAtIn) > 0 { + predicates = append(predicates, database.CreatedAtIn(i.CreatedAtIn...)) + } + if len(i.CreatedAtNotIn) > 0 { + predicates = append(predicates, database.CreatedAtNotIn(i.CreatedAtNotIn...)) + } + if i.CreatedAtGT != nil { + predicates = append(predicates, database.CreatedAtGT(*i.CreatedAtGT)) + } + if i.CreatedAtGTE != nil { + predicates = append(predicates, database.CreatedAtGTE(*i.CreatedAtGTE)) + } + if i.CreatedAtLT != nil { + predicates = append(predicates, database.CreatedAtLT(*i.CreatedAtLT)) + } + if i.CreatedAtLTE != nil { + predicates = append(predicates, database.CreatedAtLTE(*i.CreatedAtLTE)) + } + if i.CreatedAtIsNil { + predicates = append(predicates, database.CreatedAtIsNil()) + } + if i.CreatedAtNotNil { + predicates = append(predicates, database.CreatedAtNotNil()) + } + if i.UpdatedAt != nil { + predicates = append(predicates, database.UpdatedAtEQ(*i.UpdatedAt)) + } + if i.UpdatedAtNEQ != nil { + predicates = append(predicates, database.UpdatedAtNEQ(*i.UpdatedAtNEQ)) + } + if len(i.UpdatedAtIn) > 0 { + predicates = append(predicates, database.UpdatedAtIn(i.UpdatedAtIn...)) + } + if len(i.UpdatedAtNotIn) > 0 { + predicates = append(predicates, database.UpdatedAtNotIn(i.UpdatedAtNotIn...)) + } + 
if i.UpdatedAtGT != nil { + predicates = append(predicates, database.UpdatedAtGT(*i.UpdatedAtGT)) + } + if i.UpdatedAtGTE != nil { + predicates = append(predicates, database.UpdatedAtGTE(*i.UpdatedAtGTE)) + } + if i.UpdatedAtLT != nil { + predicates = append(predicates, database.UpdatedAtLT(*i.UpdatedAtLT)) + } + if i.UpdatedAtLTE != nil { + predicates = append(predicates, database.UpdatedAtLTE(*i.UpdatedAtLTE)) + } + if i.UpdatedAtIsNil { + predicates = append(predicates, database.UpdatedAtIsNil()) + } + if i.UpdatedAtNotNil { + predicates = append(predicates, database.UpdatedAtNotNil()) + } + if i.CreatedBy != nil { + predicates = append(predicates, database.CreatedByEQ(*i.CreatedBy)) + } + if i.CreatedByNEQ != nil { + predicates = append(predicates, database.CreatedByNEQ(*i.CreatedByNEQ)) + } + if len(i.CreatedByIn) > 0 { + predicates = append(predicates, database.CreatedByIn(i.CreatedByIn...)) + } + if len(i.CreatedByNotIn) > 0 { + predicates = append(predicates, database.CreatedByNotIn(i.CreatedByNotIn...)) + } + if i.CreatedByGT != nil { + predicates = append(predicates, database.CreatedByGT(*i.CreatedByGT)) + } + if i.CreatedByGTE != nil { + predicates = append(predicates, database.CreatedByGTE(*i.CreatedByGTE)) + } + if i.CreatedByLT != nil { + predicates = append(predicates, database.CreatedByLT(*i.CreatedByLT)) + } + if i.CreatedByLTE != nil { + predicates = append(predicates, database.CreatedByLTE(*i.CreatedByLTE)) + } + if i.CreatedByContains != nil { + predicates = append(predicates, database.CreatedByContains(*i.CreatedByContains)) + } + if i.CreatedByHasPrefix != nil { + predicates = append(predicates, database.CreatedByHasPrefix(*i.CreatedByHasPrefix)) + } + if i.CreatedByHasSuffix != nil { + predicates = append(predicates, database.CreatedByHasSuffix(*i.CreatedByHasSuffix)) + } + if i.CreatedByIsNil { + predicates = append(predicates, database.CreatedByIsNil()) + } + if i.CreatedByNotNil { + predicates = append(predicates, database.CreatedByNotNil()) + } + if i.CreatedByEqualFold != nil { + predicates = append(predicates, database.CreatedByEqualFold(*i.CreatedByEqualFold)) + } + if i.CreatedByContainsFold != nil { + predicates = append(predicates, database.CreatedByContainsFold(*i.CreatedByContainsFold)) + } + if i.UpdatedBy != nil { + predicates = append(predicates, database.UpdatedByEQ(*i.UpdatedBy)) + } + if i.UpdatedByNEQ != nil { + predicates = append(predicates, database.UpdatedByNEQ(*i.UpdatedByNEQ)) + } + if len(i.UpdatedByIn) > 0 { + predicates = append(predicates, database.UpdatedByIn(i.UpdatedByIn...)) + } + if len(i.UpdatedByNotIn) > 0 { + predicates = append(predicates, database.UpdatedByNotIn(i.UpdatedByNotIn...)) + } + if i.UpdatedByGT != nil { + predicates = append(predicates, database.UpdatedByGT(*i.UpdatedByGT)) + } + if i.UpdatedByGTE != nil { + predicates = append(predicates, database.UpdatedByGTE(*i.UpdatedByGTE)) + } + if i.UpdatedByLT != nil { + predicates = append(predicates, database.UpdatedByLT(*i.UpdatedByLT)) + } + if i.UpdatedByLTE != nil { + predicates = append(predicates, database.UpdatedByLTE(*i.UpdatedByLTE)) + } + if i.UpdatedByContains != nil { + predicates = append(predicates, database.UpdatedByContains(*i.UpdatedByContains)) + } + if i.UpdatedByHasPrefix != nil { + predicates = append(predicates, database.UpdatedByHasPrefix(*i.UpdatedByHasPrefix)) + } + if i.UpdatedByHasSuffix != nil { + predicates = append(predicates, database.UpdatedByHasSuffix(*i.UpdatedByHasSuffix)) + } + if i.UpdatedByIsNil { + predicates = append(predicates, 
database.UpdatedByIsNil()) + } + if i.UpdatedByNotNil { + predicates = append(predicates, database.UpdatedByNotNil()) + } + if i.UpdatedByEqualFold != nil { + predicates = append(predicates, database.UpdatedByEqualFold(*i.UpdatedByEqualFold)) + } + if i.UpdatedByContainsFold != nil { + predicates = append(predicates, database.UpdatedByContainsFold(*i.UpdatedByContainsFold)) + } + if i.DeletedAt != nil { + predicates = append(predicates, database.DeletedAtEQ(*i.DeletedAt)) + } + if i.DeletedAtNEQ != nil { + predicates = append(predicates, database.DeletedAtNEQ(*i.DeletedAtNEQ)) + } + if len(i.DeletedAtIn) > 0 { + predicates = append(predicates, database.DeletedAtIn(i.DeletedAtIn...)) + } + if len(i.DeletedAtNotIn) > 0 { + predicates = append(predicates, database.DeletedAtNotIn(i.DeletedAtNotIn...)) + } + if i.DeletedAtGT != nil { + predicates = append(predicates, database.DeletedAtGT(*i.DeletedAtGT)) + } + if i.DeletedAtGTE != nil { + predicates = append(predicates, database.DeletedAtGTE(*i.DeletedAtGTE)) + } + if i.DeletedAtLT != nil { + predicates = append(predicates, database.DeletedAtLT(*i.DeletedAtLT)) + } + if i.DeletedAtLTE != nil { + predicates = append(predicates, database.DeletedAtLTE(*i.DeletedAtLTE)) + } + if i.DeletedAtIsNil { + predicates = append(predicates, database.DeletedAtIsNil()) + } + if i.DeletedAtNotNil { + predicates = append(predicates, database.DeletedAtNotNil()) + } + if i.DeletedBy != nil { + predicates = append(predicates, database.DeletedByEQ(*i.DeletedBy)) + } + if i.DeletedByNEQ != nil { + predicates = append(predicates, database.DeletedByNEQ(*i.DeletedByNEQ)) + } + if len(i.DeletedByIn) > 0 { + predicates = append(predicates, database.DeletedByIn(i.DeletedByIn...)) + } + if len(i.DeletedByNotIn) > 0 { + predicates = append(predicates, database.DeletedByNotIn(i.DeletedByNotIn...)) + } + if i.DeletedByGT != nil { + predicates = append(predicates, database.DeletedByGT(*i.DeletedByGT)) + } + if i.DeletedByGTE != nil { + predicates = append(predicates, database.DeletedByGTE(*i.DeletedByGTE)) + } + if i.DeletedByLT != nil { + predicates = append(predicates, database.DeletedByLT(*i.DeletedByLT)) + } + if i.DeletedByLTE != nil { + predicates = append(predicates, database.DeletedByLTE(*i.DeletedByLTE)) + } + if i.DeletedByContains != nil { + predicates = append(predicates, database.DeletedByContains(*i.DeletedByContains)) + } + if i.DeletedByHasPrefix != nil { + predicates = append(predicates, database.DeletedByHasPrefix(*i.DeletedByHasPrefix)) + } + if i.DeletedByHasSuffix != nil { + predicates = append(predicates, database.DeletedByHasSuffix(*i.DeletedByHasSuffix)) + } + if i.DeletedByIsNil { + predicates = append(predicates, database.DeletedByIsNil()) + } + if i.DeletedByNotNil { + predicates = append(predicates, database.DeletedByNotNil()) + } + if i.DeletedByEqualFold != nil { + predicates = append(predicates, database.DeletedByEqualFold(*i.DeletedByEqualFold)) + } + if i.DeletedByContainsFold != nil { + predicates = append(predicates, database.DeletedByContainsFold(*i.DeletedByContainsFold)) + } + if i.OrganizationID != nil { + predicates = append(predicates, database.OrganizationIDEQ(*i.OrganizationID)) + } + if i.OrganizationIDNEQ != nil { + predicates = append(predicates, database.OrganizationIDNEQ(*i.OrganizationIDNEQ)) + } + if len(i.OrganizationIDIn) > 0 { + predicates = append(predicates, database.OrganizationIDIn(i.OrganizationIDIn...)) + } + if len(i.OrganizationIDNotIn) > 0 { + predicates = append(predicates, 
database.OrganizationIDNotIn(i.OrganizationIDNotIn...)) + } + if i.OrganizationIDGT != nil { + predicates = append(predicates, database.OrganizationIDGT(*i.OrganizationIDGT)) + } + if i.OrganizationIDGTE != nil { + predicates = append(predicates, database.OrganizationIDGTE(*i.OrganizationIDGTE)) + } + if i.OrganizationIDLT != nil { + predicates = append(predicates, database.OrganizationIDLT(*i.OrganizationIDLT)) + } + if i.OrganizationIDLTE != nil { + predicates = append(predicates, database.OrganizationIDLTE(*i.OrganizationIDLTE)) + } + if i.OrganizationIDContains != nil { + predicates = append(predicates, database.OrganizationIDContains(*i.OrganizationIDContains)) + } + if i.OrganizationIDHasPrefix != nil { + predicates = append(predicates, database.OrganizationIDHasPrefix(*i.OrganizationIDHasPrefix)) + } + if i.OrganizationIDHasSuffix != nil { + predicates = append(predicates, database.OrganizationIDHasSuffix(*i.OrganizationIDHasSuffix)) + } + if i.OrganizationIDEqualFold != nil { + predicates = append(predicates, database.OrganizationIDEqualFold(*i.OrganizationIDEqualFold)) + } + if i.OrganizationIDContainsFold != nil { + predicates = append(predicates, database.OrganizationIDContainsFold(*i.OrganizationIDContainsFold)) + } + if i.Name != nil { + predicates = append(predicates, database.NameEQ(*i.Name)) + } + if i.NameNEQ != nil { + predicates = append(predicates, database.NameNEQ(*i.NameNEQ)) + } + if len(i.NameIn) > 0 { + predicates = append(predicates, database.NameIn(i.NameIn...)) + } + if len(i.NameNotIn) > 0 { + predicates = append(predicates, database.NameNotIn(i.NameNotIn...)) + } + if i.NameGT != nil { + predicates = append(predicates, database.NameGT(*i.NameGT)) + } + if i.NameGTE != nil { + predicates = append(predicates, database.NameGTE(*i.NameGTE)) + } + if i.NameLT != nil { + predicates = append(predicates, database.NameLT(*i.NameLT)) + } + if i.NameLTE != nil { + predicates = append(predicates, database.NameLTE(*i.NameLTE)) + } + if i.NameContains != nil { + predicates = append(predicates, database.NameContains(*i.NameContains)) + } + if i.NameHasPrefix != nil { + predicates = append(predicates, database.NameHasPrefix(*i.NameHasPrefix)) + } + if i.NameHasSuffix != nil { + predicates = append(predicates, database.NameHasSuffix(*i.NameHasSuffix)) + } + if i.NameEqualFold != nil { + predicates = append(predicates, database.NameEqualFold(*i.NameEqualFold)) + } + if i.NameContainsFold != nil { + predicates = append(predicates, database.NameContainsFold(*i.NameContainsFold)) + } + if i.Geo != nil { + predicates = append(predicates, database.GeoEQ(*i.Geo)) + } + if i.GeoNEQ != nil { + predicates = append(predicates, database.GeoNEQ(*i.GeoNEQ)) + } + if len(i.GeoIn) > 0 { + predicates = append(predicates, database.GeoIn(i.GeoIn...)) + } + if len(i.GeoNotIn) > 0 { + predicates = append(predicates, database.GeoNotIn(i.GeoNotIn...)) + } + if i.GeoGT != nil { + predicates = append(predicates, database.GeoGT(*i.GeoGT)) + } + if i.GeoGTE != nil { + predicates = append(predicates, database.GeoGTE(*i.GeoGTE)) + } + if i.GeoLT != nil { + predicates = append(predicates, database.GeoLT(*i.GeoLT)) + } + if i.GeoLTE != nil { + predicates = append(predicates, database.GeoLTE(*i.GeoLTE)) + } + if i.GeoContains != nil { + predicates = append(predicates, database.GeoContains(*i.GeoContains)) + } + if i.GeoHasPrefix != nil { + predicates = append(predicates, database.GeoHasPrefix(*i.GeoHasPrefix)) + } + if i.GeoHasSuffix != nil { + predicates = append(predicates, 
database.GeoHasSuffix(*i.GeoHasSuffix)) + } + if i.GeoIsNil { + predicates = append(predicates, database.GeoIsNil()) + } + if i.GeoNotNil { + predicates = append(predicates, database.GeoNotNil()) + } + if i.GeoEqualFold != nil { + predicates = append(predicates, database.GeoEqualFold(*i.GeoEqualFold)) + } + if i.GeoContainsFold != nil { + predicates = append(predicates, database.GeoContainsFold(*i.GeoContainsFold)) + } + if i.Dsn != nil { + predicates = append(predicates, database.DsnEQ(*i.Dsn)) + } + if i.DsnNEQ != nil { + predicates = append(predicates, database.DsnNEQ(*i.DsnNEQ)) + } + if len(i.DsnIn) > 0 { + predicates = append(predicates, database.DsnIn(i.DsnIn...)) + } + if len(i.DsnNotIn) > 0 { + predicates = append(predicates, database.DsnNotIn(i.DsnNotIn...)) + } + if i.DsnGT != nil { + predicates = append(predicates, database.DsnGT(*i.DsnGT)) + } + if i.DsnGTE != nil { + predicates = append(predicates, database.DsnGTE(*i.DsnGTE)) + } + if i.DsnLT != nil { + predicates = append(predicates, database.DsnLT(*i.DsnLT)) + } + if i.DsnLTE != nil { + predicates = append(predicates, database.DsnLTE(*i.DsnLTE)) + } + if i.DsnContains != nil { + predicates = append(predicates, database.DsnContains(*i.DsnContains)) + } + if i.DsnHasPrefix != nil { + predicates = append(predicates, database.DsnHasPrefix(*i.DsnHasPrefix)) + } + if i.DsnHasSuffix != nil { + predicates = append(predicates, database.DsnHasSuffix(*i.DsnHasSuffix)) + } + if i.DsnEqualFold != nil { + predicates = append(predicates, database.DsnEqualFold(*i.DsnEqualFold)) + } + if i.DsnContainsFold != nil { + predicates = append(predicates, database.DsnContainsFold(*i.DsnContainsFold)) + } + if i.GroupID != nil { + predicates = append(predicates, database.GroupIDEQ(*i.GroupID)) + } + if i.GroupIDNEQ != nil { + predicates = append(predicates, database.GroupIDNEQ(*i.GroupIDNEQ)) + } + if len(i.GroupIDIn) > 0 { + predicates = append(predicates, database.GroupIDIn(i.GroupIDIn...)) + } + if len(i.GroupIDNotIn) > 0 { + predicates = append(predicates, database.GroupIDNotIn(i.GroupIDNotIn...)) + } + if i.GroupIDGT != nil { + predicates = append(predicates, database.GroupIDGT(*i.GroupIDGT)) + } + if i.GroupIDGTE != nil { + predicates = append(predicates, database.GroupIDGTE(*i.GroupIDGTE)) + } + if i.GroupIDLT != nil { + predicates = append(predicates, database.GroupIDLT(*i.GroupIDLT)) + } + if i.GroupIDLTE != nil { + predicates = append(predicates, database.GroupIDLTE(*i.GroupIDLTE)) + } + if i.GroupIDContains != nil { + predicates = append(predicates, database.GroupIDContains(*i.GroupIDContains)) + } + if i.GroupIDHasPrefix != nil { + predicates = append(predicates, database.GroupIDHasPrefix(*i.GroupIDHasPrefix)) + } + if i.GroupIDHasSuffix != nil { + predicates = append(predicates, database.GroupIDHasSuffix(*i.GroupIDHasSuffix)) + } + if i.GroupIDEqualFold != nil { + predicates = append(predicates, database.GroupIDEqualFold(*i.GroupIDEqualFold)) + } + if i.GroupIDContainsFold != nil { + predicates = append(predicates, database.GroupIDContainsFold(*i.GroupIDContainsFold)) + } + if i.Token != nil { + predicates = append(predicates, database.TokenEQ(*i.Token)) + } + if i.TokenNEQ != nil { + predicates = append(predicates, database.TokenNEQ(*i.TokenNEQ)) + } + if len(i.TokenIn) > 0 { + predicates = append(predicates, database.TokenIn(i.TokenIn...)) + } + if len(i.TokenNotIn) > 0 { + predicates = append(predicates, database.TokenNotIn(i.TokenNotIn...)) + } + if i.TokenGT != nil { + predicates = append(predicates, database.TokenGT(*i.TokenGT)) + 
} + if i.TokenGTE != nil { + predicates = append(predicates, database.TokenGTE(*i.TokenGTE)) + } + if i.TokenLT != nil { + predicates = append(predicates, database.TokenLT(*i.TokenLT)) + } + if i.TokenLTE != nil { + predicates = append(predicates, database.TokenLTE(*i.TokenLTE)) + } + if i.TokenContains != nil { + predicates = append(predicates, database.TokenContains(*i.TokenContains)) + } + if i.TokenHasPrefix != nil { + predicates = append(predicates, database.TokenHasPrefix(*i.TokenHasPrefix)) + } + if i.TokenHasSuffix != nil { + predicates = append(predicates, database.TokenHasSuffix(*i.TokenHasSuffix)) + } + if i.TokenIsNil { + predicates = append(predicates, database.TokenIsNil()) + } + if i.TokenNotNil { + predicates = append(predicates, database.TokenNotNil()) + } + if i.TokenEqualFold != nil { + predicates = append(predicates, database.TokenEqualFold(*i.TokenEqualFold)) + } + if i.TokenContainsFold != nil { + predicates = append(predicates, database.TokenContainsFold(*i.TokenContainsFold)) + } + if i.Status != nil { + predicates = append(predicates, database.StatusEQ(*i.Status)) + } + if i.StatusNEQ != nil { + predicates = append(predicates, database.StatusNEQ(*i.StatusNEQ)) + } + if len(i.StatusIn) > 0 { + predicates = append(predicates, database.StatusIn(i.StatusIn...)) + } + if len(i.StatusNotIn) > 0 { + predicates = append(predicates, database.StatusNotIn(i.StatusNotIn...)) + } + if i.Provider != nil { + predicates = append(predicates, database.ProviderEQ(*i.Provider)) + } + if i.ProviderNEQ != nil { + predicates = append(predicates, database.ProviderNEQ(*i.ProviderNEQ)) + } + if len(i.ProviderIn) > 0 { + predicates = append(predicates, database.ProviderIn(i.ProviderIn...)) + } + if len(i.ProviderNotIn) > 0 { + predicates = append(predicates, database.ProviderNotIn(i.ProviderNotIn...)) + } + + if i.HasGroup != nil { + p := database.HasGroup() + if !*i.HasGroup { + p = database.Not(p) + } + predicates = append(predicates, p) + } + if len(i.HasGroupWith) > 0 { + with := make([]predicate.Group, 0, len(i.HasGroupWith)) + for _, w := range i.HasGroupWith { + p, err := w.P() + if err != nil { + return nil, fmt.Errorf("%w: field 'HasGroupWith'", err) + } + with = append(with, p) + } + predicates = append(predicates, database.HasGroupWith(with...)) + } + switch len(predicates) { + case 0: + return nil, ErrEmptyDatabaseWhereInput + case 1: + return predicates[0], nil + default: + return database.And(predicates...), nil + } +} + +// GroupWhereInput represents a where input for filtering Group queries. +type GroupWhereInput struct { + Predicates []predicate.Group `json:"-"` + Not *GroupWhereInput `json:"not,omitempty"` + Or []*GroupWhereInput `json:"or,omitempty"` + And []*GroupWhereInput `json:"and,omitempty"` + + // "id" field predicates. + ID *string `json:"id,omitempty"` + IDNEQ *string `json:"idNEQ,omitempty"` + IDIn []string `json:"idIn,omitempty"` + IDNotIn []string `json:"idNotIn,omitempty"` + IDGT *string `json:"idGT,omitempty"` + IDGTE *string `json:"idGTE,omitempty"` + IDLT *string `json:"idLT,omitempty"` + IDLTE *string `json:"idLTE,omitempty"` + IDEqualFold *string `json:"idEqualFold,omitempty"` + IDContainsFold *string `json:"idContainsFold,omitempty"` + + // "created_at" field predicates. 
+ CreatedAt *time.Time `json:"createdAt,omitempty"` + CreatedAtNEQ *time.Time `json:"createdAtNEQ,omitempty"` + CreatedAtIn []time.Time `json:"createdAtIn,omitempty"` + CreatedAtNotIn []time.Time `json:"createdAtNotIn,omitempty"` + CreatedAtGT *time.Time `json:"createdAtGT,omitempty"` + CreatedAtGTE *time.Time `json:"createdAtGTE,omitempty"` + CreatedAtLT *time.Time `json:"createdAtLT,omitempty"` + CreatedAtLTE *time.Time `json:"createdAtLTE,omitempty"` + CreatedAtIsNil bool `json:"createdAtIsNil,omitempty"` + CreatedAtNotNil bool `json:"createdAtNotNil,omitempty"` + + // "updated_at" field predicates. + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + UpdatedAtNEQ *time.Time `json:"updatedAtNEQ,omitempty"` + UpdatedAtIn []time.Time `json:"updatedAtIn,omitempty"` + UpdatedAtNotIn []time.Time `json:"updatedAtNotIn,omitempty"` + UpdatedAtGT *time.Time `json:"updatedAtGT,omitempty"` + UpdatedAtGTE *time.Time `json:"updatedAtGTE,omitempty"` + UpdatedAtLT *time.Time `json:"updatedAtLT,omitempty"` + UpdatedAtLTE *time.Time `json:"updatedAtLTE,omitempty"` + UpdatedAtIsNil bool `json:"updatedAtIsNil,omitempty"` + UpdatedAtNotNil bool `json:"updatedAtNotNil,omitempty"` + + // "created_by" field predicates. + CreatedBy *string `json:"createdBy,omitempty"` + CreatedByNEQ *string `json:"createdByNEQ,omitempty"` + CreatedByIn []string `json:"createdByIn,omitempty"` + CreatedByNotIn []string `json:"createdByNotIn,omitempty"` + CreatedByGT *string `json:"createdByGT,omitempty"` + CreatedByGTE *string `json:"createdByGTE,omitempty"` + CreatedByLT *string `json:"createdByLT,omitempty"` + CreatedByLTE *string `json:"createdByLTE,omitempty"` + CreatedByContains *string `json:"createdByContains,omitempty"` + CreatedByHasPrefix *string `json:"createdByHasPrefix,omitempty"` + CreatedByHasSuffix *string `json:"createdByHasSuffix,omitempty"` + CreatedByIsNil bool `json:"createdByIsNil,omitempty"` + CreatedByNotNil bool `json:"createdByNotNil,omitempty"` + CreatedByEqualFold *string `json:"createdByEqualFold,omitempty"` + CreatedByContainsFold *string `json:"createdByContainsFold,omitempty"` + + // "updated_by" field predicates. + UpdatedBy *string `json:"updatedBy,omitempty"` + UpdatedByNEQ *string `json:"updatedByNEQ,omitempty"` + UpdatedByIn []string `json:"updatedByIn,omitempty"` + UpdatedByNotIn []string `json:"updatedByNotIn,omitempty"` + UpdatedByGT *string `json:"updatedByGT,omitempty"` + UpdatedByGTE *string `json:"updatedByGTE,omitempty"` + UpdatedByLT *string `json:"updatedByLT,omitempty"` + UpdatedByLTE *string `json:"updatedByLTE,omitempty"` + UpdatedByContains *string `json:"updatedByContains,omitempty"` + UpdatedByHasPrefix *string `json:"updatedByHasPrefix,omitempty"` + UpdatedByHasSuffix *string `json:"updatedByHasSuffix,omitempty"` + UpdatedByIsNil bool `json:"updatedByIsNil,omitempty"` + UpdatedByNotNil bool `json:"updatedByNotNil,omitempty"` + UpdatedByEqualFold *string `json:"updatedByEqualFold,omitempty"` + UpdatedByContainsFold *string `json:"updatedByContainsFold,omitempty"` + + // "deleted_at" field predicates. 
+ DeletedAt *time.Time `json:"deletedAt,omitempty"` + DeletedAtNEQ *time.Time `json:"deletedAtNEQ,omitempty"` + DeletedAtIn []time.Time `json:"deletedAtIn,omitempty"` + DeletedAtNotIn []time.Time `json:"deletedAtNotIn,omitempty"` + DeletedAtGT *time.Time `json:"deletedAtGT,omitempty"` + DeletedAtGTE *time.Time `json:"deletedAtGTE,omitempty"` + DeletedAtLT *time.Time `json:"deletedAtLT,omitempty"` + DeletedAtLTE *time.Time `json:"deletedAtLTE,omitempty"` + DeletedAtIsNil bool `json:"deletedAtIsNil,omitempty"` + DeletedAtNotNil bool `json:"deletedAtNotNil,omitempty"` + + // "deleted_by" field predicates. + DeletedBy *string `json:"deletedBy,omitempty"` + DeletedByNEQ *string `json:"deletedByNEQ,omitempty"` + DeletedByIn []string `json:"deletedByIn,omitempty"` + DeletedByNotIn []string `json:"deletedByNotIn,omitempty"` + DeletedByGT *string `json:"deletedByGT,omitempty"` + DeletedByGTE *string `json:"deletedByGTE,omitempty"` + DeletedByLT *string `json:"deletedByLT,omitempty"` + DeletedByLTE *string `json:"deletedByLTE,omitempty"` + DeletedByContains *string `json:"deletedByContains,omitempty"` + DeletedByHasPrefix *string `json:"deletedByHasPrefix,omitempty"` + DeletedByHasSuffix *string `json:"deletedByHasSuffix,omitempty"` + DeletedByIsNil bool `json:"deletedByIsNil,omitempty"` + DeletedByNotNil bool `json:"deletedByNotNil,omitempty"` + DeletedByEqualFold *string `json:"deletedByEqualFold,omitempty"` + DeletedByContainsFold *string `json:"deletedByContainsFold,omitempty"` + + // "name" field predicates. + Name *string `json:"name,omitempty"` + NameNEQ *string `json:"nameNEQ,omitempty"` + NameIn []string `json:"nameIn,omitempty"` + NameNotIn []string `json:"nameNotIn,omitempty"` + NameGT *string `json:"nameGT,omitempty"` + NameGTE *string `json:"nameGTE,omitempty"` + NameLT *string `json:"nameLT,omitempty"` + NameLTE *string `json:"nameLTE,omitempty"` + NameContains *string `json:"nameContains,omitempty"` + NameHasPrefix *string `json:"nameHasPrefix,omitempty"` + NameHasSuffix *string `json:"nameHasSuffix,omitempty"` + NameEqualFold *string `json:"nameEqualFold,omitempty"` + NameContainsFold *string `json:"nameContainsFold,omitempty"` + + // "description" field predicates. + Description *string `json:"description,omitempty"` + DescriptionNEQ *string `json:"descriptionNEQ,omitempty"` + DescriptionIn []string `json:"descriptionIn,omitempty"` + DescriptionNotIn []string `json:"descriptionNotIn,omitempty"` + DescriptionGT *string `json:"descriptionGT,omitempty"` + DescriptionGTE *string `json:"descriptionGTE,omitempty"` + DescriptionLT *string `json:"descriptionLT,omitempty"` + DescriptionLTE *string `json:"descriptionLTE,omitempty"` + DescriptionContains *string `json:"descriptionContains,omitempty"` + DescriptionHasPrefix *string `json:"descriptionHasPrefix,omitempty"` + DescriptionHasSuffix *string `json:"descriptionHasSuffix,omitempty"` + DescriptionIsNil bool `json:"descriptionIsNil,omitempty"` + DescriptionNotNil bool `json:"descriptionNotNil,omitempty"` + DescriptionEqualFold *string `json:"descriptionEqualFold,omitempty"` + DescriptionContainsFold *string `json:"descriptionContainsFold,omitempty"` + + // "primary_location" field predicates. 
+ PrimaryLocation *string `json:"primaryLocation,omitempty"` + PrimaryLocationNEQ *string `json:"primaryLocationNEQ,omitempty"` + PrimaryLocationIn []string `json:"primaryLocationIn,omitempty"` + PrimaryLocationNotIn []string `json:"primaryLocationNotIn,omitempty"` + PrimaryLocationGT *string `json:"primaryLocationGT,omitempty"` + PrimaryLocationGTE *string `json:"primaryLocationGTE,omitempty"` + PrimaryLocationLT *string `json:"primaryLocationLT,omitempty"` + PrimaryLocationLTE *string `json:"primaryLocationLTE,omitempty"` + PrimaryLocationContains *string `json:"primaryLocationContains,omitempty"` + PrimaryLocationHasPrefix *string `json:"primaryLocationHasPrefix,omitempty"` + PrimaryLocationHasSuffix *string `json:"primaryLocationHasSuffix,omitempty"` + PrimaryLocationEqualFold *string `json:"primaryLocationEqualFold,omitempty"` + PrimaryLocationContainsFold *string `json:"primaryLocationContainsFold,omitempty"` + + // "token" field predicates. + Token *string `json:"token,omitempty"` + TokenNEQ *string `json:"tokenNEQ,omitempty"` + TokenIn []string `json:"tokenIn,omitempty"` + TokenNotIn []string `json:"tokenNotIn,omitempty"` + TokenGT *string `json:"tokenGT,omitempty"` + TokenGTE *string `json:"tokenGTE,omitempty"` + TokenLT *string `json:"tokenLT,omitempty"` + TokenLTE *string `json:"tokenLTE,omitempty"` + TokenContains *string `json:"tokenContains,omitempty"` + TokenHasPrefix *string `json:"tokenHasPrefix,omitempty"` + TokenHasSuffix *string `json:"tokenHasSuffix,omitempty"` + TokenIsNil bool `json:"tokenIsNil,omitempty"` + TokenNotNil bool `json:"tokenNotNil,omitempty"` + TokenEqualFold *string `json:"tokenEqualFold,omitempty"` + TokenContainsFold *string `json:"tokenContainsFold,omitempty"` + + // "region" field predicates. + Region *enums.Region `json:"region,omitempty"` + RegionNEQ *enums.Region `json:"regionNEQ,omitempty"` + RegionIn []enums.Region `json:"regionIn,omitempty"` + RegionNotIn []enums.Region `json:"regionNotIn,omitempty"` + + // "databases" edge predicates. + HasDatabases *bool `json:"hasDatabases,omitempty"` + HasDatabasesWith []*DatabaseWhereInput `json:"hasDatabasesWith,omitempty"` +} + +// AddPredicates adds custom predicates to the where input to be used during the filtering phase. +func (i *GroupWhereInput) AddPredicates(predicates ...predicate.Group) { + i.Predicates = append(i.Predicates, predicates...) +} + +// Filter applies the GroupWhereInput filter on the GroupQuery builder. +func (i *GroupWhereInput) Filter(q *GroupQuery) (*GroupQuery, error) { + if i == nil { + return q, nil + } + p, err := i.P() + if err != nil { + if err == ErrEmptyGroupWhereInput { + return q, nil + } + return nil, err + } + return q.Where(p), nil +} + +// ErrEmptyGroupWhereInput is returned in case the GroupWhereInput is empty. +var ErrEmptyGroupWhereInput = errors.New("generated: empty predicate GroupWhereInput") + +// P returns a predicate for filtering groups. +// An error is returned if the input is empty or invalid. 
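+//
+// Every set field contributes one predicate and all of them are combined with
+// AND; use the Not/Or/And fields for other boolean shapes. A hedged sketch
+// (editor's illustration; field names are taken from the struct above):
+//
+//	prefix := "prod-"
+//	where := &GroupWhereInput{
+//		NameHasPrefix:    &prefix,
+//		HasDatabasesWith: []*DatabaseWhereInput{{GeoNotNil: true}},
+//	}
+//	p, err := where.P() // matches groups named "prod-*" that have a database with a geo set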
+func (i *GroupWhereInput) P() (predicate.Group, error) { + var predicates []predicate.Group + if i.Not != nil { + p, err := i.Not.P() + if err != nil { + return nil, fmt.Errorf("%w: field 'not'", err) + } + predicates = append(predicates, group.Not(p)) + } + switch n := len(i.Or); { + case n == 1: + p, err := i.Or[0].P() + if err != nil { + return nil, fmt.Errorf("%w: field 'or'", err) + } + predicates = append(predicates, p) + case n > 1: + or := make([]predicate.Group, 0, n) + for _, w := range i.Or { + p, err := w.P() + if err != nil { + return nil, fmt.Errorf("%w: field 'or'", err) + } + or = append(or, p) + } + predicates = append(predicates, group.Or(or...)) + } + switch n := len(i.And); { + case n == 1: + p, err := i.And[0].P() + if err != nil { + return nil, fmt.Errorf("%w: field 'and'", err) + } + predicates = append(predicates, p) + case n > 1: + and := make([]predicate.Group, 0, n) + for _, w := range i.And { + p, err := w.P() + if err != nil { + return nil, fmt.Errorf("%w: field 'and'", err) + } + and = append(and, p) + } + predicates = append(predicates, group.And(and...)) + } + predicates = append(predicates, i.Predicates...) + if i.ID != nil { + predicates = append(predicates, group.IDEQ(*i.ID)) + } + if i.IDNEQ != nil { + predicates = append(predicates, group.IDNEQ(*i.IDNEQ)) + } + if len(i.IDIn) > 0 { + predicates = append(predicates, group.IDIn(i.IDIn...)) + } + if len(i.IDNotIn) > 0 { + predicates = append(predicates, group.IDNotIn(i.IDNotIn...)) + } + if i.IDGT != nil { + predicates = append(predicates, group.IDGT(*i.IDGT)) + } + if i.IDGTE != nil { + predicates = append(predicates, group.IDGTE(*i.IDGTE)) + } + if i.IDLT != nil { + predicates = append(predicates, group.IDLT(*i.IDLT)) + } + if i.IDLTE != nil { + predicates = append(predicates, group.IDLTE(*i.IDLTE)) + } + if i.IDEqualFold != nil { + predicates = append(predicates, group.IDEqualFold(*i.IDEqualFold)) + } + if i.IDContainsFold != nil { + predicates = append(predicates, group.IDContainsFold(*i.IDContainsFold)) + } + if i.CreatedAt != nil { + predicates = append(predicates, group.CreatedAtEQ(*i.CreatedAt)) + } + if i.CreatedAtNEQ != nil { + predicates = append(predicates, group.CreatedAtNEQ(*i.CreatedAtNEQ)) + } + if len(i.CreatedAtIn) > 0 { + predicates = append(predicates, group.CreatedAtIn(i.CreatedAtIn...)) + } + if len(i.CreatedAtNotIn) > 0 { + predicates = append(predicates, group.CreatedAtNotIn(i.CreatedAtNotIn...)) + } + if i.CreatedAtGT != nil { + predicates = append(predicates, group.CreatedAtGT(*i.CreatedAtGT)) + } + if i.CreatedAtGTE != nil { + predicates = append(predicates, group.CreatedAtGTE(*i.CreatedAtGTE)) + } + if i.CreatedAtLT != nil { + predicates = append(predicates, group.CreatedAtLT(*i.CreatedAtLT)) + } + if i.CreatedAtLTE != nil { + predicates = append(predicates, group.CreatedAtLTE(*i.CreatedAtLTE)) + } + if i.CreatedAtIsNil { + predicates = append(predicates, group.CreatedAtIsNil()) + } + if i.CreatedAtNotNil { + predicates = append(predicates, group.CreatedAtNotNil()) + } + if i.UpdatedAt != nil { + predicates = append(predicates, group.UpdatedAtEQ(*i.UpdatedAt)) + } + if i.UpdatedAtNEQ != nil { + predicates = append(predicates, group.UpdatedAtNEQ(*i.UpdatedAtNEQ)) + } + if len(i.UpdatedAtIn) > 0 { + predicates = append(predicates, group.UpdatedAtIn(i.UpdatedAtIn...)) + } + if len(i.UpdatedAtNotIn) > 0 { + predicates = append(predicates, group.UpdatedAtNotIn(i.UpdatedAtNotIn...)) + } + if i.UpdatedAtGT != nil { + predicates = append(predicates, group.UpdatedAtGT(*i.UpdatedAtGT)) + 
} + if i.UpdatedAtGTE != nil { + predicates = append(predicates, group.UpdatedAtGTE(*i.UpdatedAtGTE)) + } + if i.UpdatedAtLT != nil { + predicates = append(predicates, group.UpdatedAtLT(*i.UpdatedAtLT)) + } + if i.UpdatedAtLTE != nil { + predicates = append(predicates, group.UpdatedAtLTE(*i.UpdatedAtLTE)) + } + if i.UpdatedAtIsNil { + predicates = append(predicates, group.UpdatedAtIsNil()) + } + if i.UpdatedAtNotNil { + predicates = append(predicates, group.UpdatedAtNotNil()) + } + if i.CreatedBy != nil { + predicates = append(predicates, group.CreatedByEQ(*i.CreatedBy)) + } + if i.CreatedByNEQ != nil { + predicates = append(predicates, group.CreatedByNEQ(*i.CreatedByNEQ)) + } + if len(i.CreatedByIn) > 0 { + predicates = append(predicates, group.CreatedByIn(i.CreatedByIn...)) + } + if len(i.CreatedByNotIn) > 0 { + predicates = append(predicates, group.CreatedByNotIn(i.CreatedByNotIn...)) + } + if i.CreatedByGT != nil { + predicates = append(predicates, group.CreatedByGT(*i.CreatedByGT)) + } + if i.CreatedByGTE != nil { + predicates = append(predicates, group.CreatedByGTE(*i.CreatedByGTE)) + } + if i.CreatedByLT != nil { + predicates = append(predicates, group.CreatedByLT(*i.CreatedByLT)) + } + if i.CreatedByLTE != nil { + predicates = append(predicates, group.CreatedByLTE(*i.CreatedByLTE)) + } + if i.CreatedByContains != nil { + predicates = append(predicates, group.CreatedByContains(*i.CreatedByContains)) + } + if i.CreatedByHasPrefix != nil { + predicates = append(predicates, group.CreatedByHasPrefix(*i.CreatedByHasPrefix)) + } + if i.CreatedByHasSuffix != nil { + predicates = append(predicates, group.CreatedByHasSuffix(*i.CreatedByHasSuffix)) + } + if i.CreatedByIsNil { + predicates = append(predicates, group.CreatedByIsNil()) + } + if i.CreatedByNotNil { + predicates = append(predicates, group.CreatedByNotNil()) + } + if i.CreatedByEqualFold != nil { + predicates = append(predicates, group.CreatedByEqualFold(*i.CreatedByEqualFold)) + } + if i.CreatedByContainsFold != nil { + predicates = append(predicates, group.CreatedByContainsFold(*i.CreatedByContainsFold)) + } + if i.UpdatedBy != nil { + predicates = append(predicates, group.UpdatedByEQ(*i.UpdatedBy)) + } + if i.UpdatedByNEQ != nil { + predicates = append(predicates, group.UpdatedByNEQ(*i.UpdatedByNEQ)) + } + if len(i.UpdatedByIn) > 0 { + predicates = append(predicates, group.UpdatedByIn(i.UpdatedByIn...)) + } + if len(i.UpdatedByNotIn) > 0 { + predicates = append(predicates, group.UpdatedByNotIn(i.UpdatedByNotIn...)) + } + if i.UpdatedByGT != nil { + predicates = append(predicates, group.UpdatedByGT(*i.UpdatedByGT)) + } + if i.UpdatedByGTE != nil { + predicates = append(predicates, group.UpdatedByGTE(*i.UpdatedByGTE)) + } + if i.UpdatedByLT != nil { + predicates = append(predicates, group.UpdatedByLT(*i.UpdatedByLT)) + } + if i.UpdatedByLTE != nil { + predicates = append(predicates, group.UpdatedByLTE(*i.UpdatedByLTE)) + } + if i.UpdatedByContains != nil { + predicates = append(predicates, group.UpdatedByContains(*i.UpdatedByContains)) + } + if i.UpdatedByHasPrefix != nil { + predicates = append(predicates, group.UpdatedByHasPrefix(*i.UpdatedByHasPrefix)) + } + if i.UpdatedByHasSuffix != nil { + predicates = append(predicates, group.UpdatedByHasSuffix(*i.UpdatedByHasSuffix)) + } + if i.UpdatedByIsNil { + predicates = append(predicates, group.UpdatedByIsNil()) + } + if i.UpdatedByNotNil { + predicates = append(predicates, group.UpdatedByNotNil()) + } + if i.UpdatedByEqualFold != nil { + predicates = append(predicates, 
group.UpdatedByEqualFold(*i.UpdatedByEqualFold)) + } + if i.UpdatedByContainsFold != nil { + predicates = append(predicates, group.UpdatedByContainsFold(*i.UpdatedByContainsFold)) + } + if i.DeletedAt != nil { + predicates = append(predicates, group.DeletedAtEQ(*i.DeletedAt)) + } + if i.DeletedAtNEQ != nil { + predicates = append(predicates, group.DeletedAtNEQ(*i.DeletedAtNEQ)) + } + if len(i.DeletedAtIn) > 0 { + predicates = append(predicates, group.DeletedAtIn(i.DeletedAtIn...)) + } + if len(i.DeletedAtNotIn) > 0 { + predicates = append(predicates, group.DeletedAtNotIn(i.DeletedAtNotIn...)) + } + if i.DeletedAtGT != nil { + predicates = append(predicates, group.DeletedAtGT(*i.DeletedAtGT)) + } + if i.DeletedAtGTE != nil { + predicates = append(predicates, group.DeletedAtGTE(*i.DeletedAtGTE)) + } + if i.DeletedAtLT != nil { + predicates = append(predicates, group.DeletedAtLT(*i.DeletedAtLT)) + } + if i.DeletedAtLTE != nil { + predicates = append(predicates, group.DeletedAtLTE(*i.DeletedAtLTE)) + } + if i.DeletedAtIsNil { + predicates = append(predicates, group.DeletedAtIsNil()) + } + if i.DeletedAtNotNil { + predicates = append(predicates, group.DeletedAtNotNil()) + } + if i.DeletedBy != nil { + predicates = append(predicates, group.DeletedByEQ(*i.DeletedBy)) + } + if i.DeletedByNEQ != nil { + predicates = append(predicates, group.DeletedByNEQ(*i.DeletedByNEQ)) + } + if len(i.DeletedByIn) > 0 { + predicates = append(predicates, group.DeletedByIn(i.DeletedByIn...)) + } + if len(i.DeletedByNotIn) > 0 { + predicates = append(predicates, group.DeletedByNotIn(i.DeletedByNotIn...)) + } + if i.DeletedByGT != nil { + predicates = append(predicates, group.DeletedByGT(*i.DeletedByGT)) + } + if i.DeletedByGTE != nil { + predicates = append(predicates, group.DeletedByGTE(*i.DeletedByGTE)) + } + if i.DeletedByLT != nil { + predicates = append(predicates, group.DeletedByLT(*i.DeletedByLT)) + } + if i.DeletedByLTE != nil { + predicates = append(predicates, group.DeletedByLTE(*i.DeletedByLTE)) + } + if i.DeletedByContains != nil { + predicates = append(predicates, group.DeletedByContains(*i.DeletedByContains)) + } + if i.DeletedByHasPrefix != nil { + predicates = append(predicates, group.DeletedByHasPrefix(*i.DeletedByHasPrefix)) + } + if i.DeletedByHasSuffix != nil { + predicates = append(predicates, group.DeletedByHasSuffix(*i.DeletedByHasSuffix)) + } + if i.DeletedByIsNil { + predicates = append(predicates, group.DeletedByIsNil()) + } + if i.DeletedByNotNil { + predicates = append(predicates, group.DeletedByNotNil()) + } + if i.DeletedByEqualFold != nil { + predicates = append(predicates, group.DeletedByEqualFold(*i.DeletedByEqualFold)) + } + if i.DeletedByContainsFold != nil { + predicates = append(predicates, group.DeletedByContainsFold(*i.DeletedByContainsFold)) + } + if i.Name != nil { + predicates = append(predicates, group.NameEQ(*i.Name)) + } + if i.NameNEQ != nil { + predicates = append(predicates, group.NameNEQ(*i.NameNEQ)) + } + if len(i.NameIn) > 0 { + predicates = append(predicates, group.NameIn(i.NameIn...)) + } + if len(i.NameNotIn) > 0 { + predicates = append(predicates, group.NameNotIn(i.NameNotIn...)) + } + if i.NameGT != nil { + predicates = append(predicates, group.NameGT(*i.NameGT)) + } + if i.NameGTE != nil { + predicates = append(predicates, group.NameGTE(*i.NameGTE)) + } + if i.NameLT != nil { + predicates = append(predicates, group.NameLT(*i.NameLT)) + } + if i.NameLTE != nil { + predicates = append(predicates, group.NameLTE(*i.NameLTE)) + } + if i.NameContains != nil { + 
predicates = append(predicates, group.NameContains(*i.NameContains)) + } + if i.NameHasPrefix != nil { + predicates = append(predicates, group.NameHasPrefix(*i.NameHasPrefix)) + } + if i.NameHasSuffix != nil { + predicates = append(predicates, group.NameHasSuffix(*i.NameHasSuffix)) + } + if i.NameEqualFold != nil { + predicates = append(predicates, group.NameEqualFold(*i.NameEqualFold)) + } + if i.NameContainsFold != nil { + predicates = append(predicates, group.NameContainsFold(*i.NameContainsFold)) + } + if i.Description != nil { + predicates = append(predicates, group.DescriptionEQ(*i.Description)) + } + if i.DescriptionNEQ != nil { + predicates = append(predicates, group.DescriptionNEQ(*i.DescriptionNEQ)) + } + if len(i.DescriptionIn) > 0 { + predicates = append(predicates, group.DescriptionIn(i.DescriptionIn...)) + } + if len(i.DescriptionNotIn) > 0 { + predicates = append(predicates, group.DescriptionNotIn(i.DescriptionNotIn...)) + } + if i.DescriptionGT != nil { + predicates = append(predicates, group.DescriptionGT(*i.DescriptionGT)) + } + if i.DescriptionGTE != nil { + predicates = append(predicates, group.DescriptionGTE(*i.DescriptionGTE)) + } + if i.DescriptionLT != nil { + predicates = append(predicates, group.DescriptionLT(*i.DescriptionLT)) + } + if i.DescriptionLTE != nil { + predicates = append(predicates, group.DescriptionLTE(*i.DescriptionLTE)) + } + if i.DescriptionContains != nil { + predicates = append(predicates, group.DescriptionContains(*i.DescriptionContains)) + } + if i.DescriptionHasPrefix != nil { + predicates = append(predicates, group.DescriptionHasPrefix(*i.DescriptionHasPrefix)) + } + if i.DescriptionHasSuffix != nil { + predicates = append(predicates, group.DescriptionHasSuffix(*i.DescriptionHasSuffix)) + } + if i.DescriptionIsNil { + predicates = append(predicates, group.DescriptionIsNil()) + } + if i.DescriptionNotNil { + predicates = append(predicates, group.DescriptionNotNil()) + } + if i.DescriptionEqualFold != nil { + predicates = append(predicates, group.DescriptionEqualFold(*i.DescriptionEqualFold)) + } + if i.DescriptionContainsFold != nil { + predicates = append(predicates, group.DescriptionContainsFold(*i.DescriptionContainsFold)) + } + if i.PrimaryLocation != nil { + predicates = append(predicates, group.PrimaryLocationEQ(*i.PrimaryLocation)) + } + if i.PrimaryLocationNEQ != nil { + predicates = append(predicates, group.PrimaryLocationNEQ(*i.PrimaryLocationNEQ)) + } + if len(i.PrimaryLocationIn) > 0 { + predicates = append(predicates, group.PrimaryLocationIn(i.PrimaryLocationIn...)) + } + if len(i.PrimaryLocationNotIn) > 0 { + predicates = append(predicates, group.PrimaryLocationNotIn(i.PrimaryLocationNotIn...)) + } + if i.PrimaryLocationGT != nil { + predicates = append(predicates, group.PrimaryLocationGT(*i.PrimaryLocationGT)) + } + if i.PrimaryLocationGTE != nil { + predicates = append(predicates, group.PrimaryLocationGTE(*i.PrimaryLocationGTE)) + } + if i.PrimaryLocationLT != nil { + predicates = append(predicates, group.PrimaryLocationLT(*i.PrimaryLocationLT)) + } + if i.PrimaryLocationLTE != nil { + predicates = append(predicates, group.PrimaryLocationLTE(*i.PrimaryLocationLTE)) + } + if i.PrimaryLocationContains != nil { + predicates = append(predicates, group.PrimaryLocationContains(*i.PrimaryLocationContains)) + } + if i.PrimaryLocationHasPrefix != nil { + predicates = append(predicates, group.PrimaryLocationHasPrefix(*i.PrimaryLocationHasPrefix)) + } + if i.PrimaryLocationHasSuffix != nil { + predicates = append(predicates, 
group.PrimaryLocationHasSuffix(*i.PrimaryLocationHasSuffix)) + } + if i.PrimaryLocationEqualFold != nil { + predicates = append(predicates, group.PrimaryLocationEqualFold(*i.PrimaryLocationEqualFold)) + } + if i.PrimaryLocationContainsFold != nil { + predicates = append(predicates, group.PrimaryLocationContainsFold(*i.PrimaryLocationContainsFold)) + } + if i.Token != nil { + predicates = append(predicates, group.TokenEQ(*i.Token)) + } + if i.TokenNEQ != nil { + predicates = append(predicates, group.TokenNEQ(*i.TokenNEQ)) + } + if len(i.TokenIn) > 0 { + predicates = append(predicates, group.TokenIn(i.TokenIn...)) + } + if len(i.TokenNotIn) > 0 { + predicates = append(predicates, group.TokenNotIn(i.TokenNotIn...)) + } + if i.TokenGT != nil { + predicates = append(predicates, group.TokenGT(*i.TokenGT)) + } + if i.TokenGTE != nil { + predicates = append(predicates, group.TokenGTE(*i.TokenGTE)) + } + if i.TokenLT != nil { + predicates = append(predicates, group.TokenLT(*i.TokenLT)) + } + if i.TokenLTE != nil { + predicates = append(predicates, group.TokenLTE(*i.TokenLTE)) + } + if i.TokenContains != nil { + predicates = append(predicates, group.TokenContains(*i.TokenContains)) + } + if i.TokenHasPrefix != nil { + predicates = append(predicates, group.TokenHasPrefix(*i.TokenHasPrefix)) + } + if i.TokenHasSuffix != nil { + predicates = append(predicates, group.TokenHasSuffix(*i.TokenHasSuffix)) + } + if i.TokenIsNil { + predicates = append(predicates, group.TokenIsNil()) + } + if i.TokenNotNil { + predicates = append(predicates, group.TokenNotNil()) + } + if i.TokenEqualFold != nil { + predicates = append(predicates, group.TokenEqualFold(*i.TokenEqualFold)) + } + if i.TokenContainsFold != nil { + predicates = append(predicates, group.TokenContainsFold(*i.TokenContainsFold)) + } + if i.Region != nil { + predicates = append(predicates, group.RegionEQ(*i.Region)) + } + if i.RegionNEQ != nil { + predicates = append(predicates, group.RegionNEQ(*i.RegionNEQ)) + } + if len(i.RegionIn) > 0 { + predicates = append(predicates, group.RegionIn(i.RegionIn...)) + } + if len(i.RegionNotIn) > 0 { + predicates = append(predicates, group.RegionNotIn(i.RegionNotIn...)) + } + + if i.HasDatabases != nil { + p := group.HasDatabases() + if !*i.HasDatabases { + p = group.Not(p) + } + predicates = append(predicates, p) + } + if len(i.HasDatabasesWith) > 0 { + with := make([]predicate.Database, 0, len(i.HasDatabasesWith)) + for _, w := range i.HasDatabasesWith { + p, err := w.P() + if err != nil { + return nil, fmt.Errorf("%w: field 'HasDatabasesWith'", err) + } + with = append(with, p) + } + predicates = append(predicates, group.HasDatabasesWith(with...)) + } + switch len(predicates) { + case 0: + return nil, ErrEmptyGroupWhereInput + case 1: + return predicates[0], nil + default: + return group.And(predicates...), nil + } +} diff --git a/internal/ent/generated/group.go b/internal/ent/generated/group.go new file mode 100644 index 0000000..2342bc3 --- /dev/null +++ b/internal/ent/generated/group.go @@ -0,0 +1,284 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/geodetic/pkg/enums" +) + +// Group is the model entity for the Group schema. +type Group struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. 
+	CreatedAt time.Time `json:"created_at,omitempty"`
+	// UpdatedAt holds the value of the "updated_at" field.
+	UpdatedAt time.Time `json:"updated_at,omitempty"`
+	// CreatedBy holds the value of the "created_by" field.
+	CreatedBy string `json:"created_by,omitempty"`
+	// UpdatedBy holds the value of the "updated_by" field.
+	UpdatedBy string `json:"updated_by,omitempty"`
+	// DeletedAt holds the value of the "deleted_at" field.
+	DeletedAt time.Time `json:"deleted_at,omitempty"`
+	// DeletedBy holds the value of the "deleted_by" field.
+	DeletedBy string `json:"deleted_by,omitempty"`
+	// the name of the group in Turso
+	Name string `json:"name,omitempty"`
+	// the description of the group
+	Description string `json:"description,omitempty"`
+	// the primary location of the group
+	PrimaryLocation string `json:"primary_location,omitempty"`
+	// the replica locations of the group
+	Locations []string `json:"locations,omitempty"`
+	// the auth token used to connect to the group
+	Token string `json:"-"`
+	// the region of the group
+	Region enums.Region `json:"region,omitempty"`
+	// Edges holds the relations/edges for other nodes in the graph.
+	// The values are being populated by the GroupQuery when eager-loading is set.
+	Edges GroupEdges `json:"edges"`
+	selectValues sql.SelectValues
+}
+
+// GroupEdges holds the relations/edges for other nodes in the graph.
+type GroupEdges struct {
+	// Databases holds the value of the databases edge.
+	Databases []*Database `json:"databases,omitempty"`
+	// loadedTypes holds the information for reporting if a
+	// type was loaded (or requested) in eager-loading or not.
+	loadedTypes [1]bool
+	// totalCount holds the count of the edges above.
+	totalCount [1]map[string]int
+
+	namedDatabases map[string][]*Database
+}
+
+// DatabasesOrErr returns the Databases value or an error if the edge
+// was not loaded in eager-loading.
+func (e GroupEdges) DatabasesOrErr() ([]*Database, error) {
+	if e.loadedTypes[0] {
+		return e.Databases, nil
+	}
+	return nil, &NotLoadedError{edge: "databases"}
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*Group) scanValues(columns []string) ([]any, error) {
+	values := make([]any, len(columns))
+	for i := range columns {
+		switch columns[i] {
+		case group.FieldLocations:
+			values[i] = new([]byte)
+		case group.FieldID, group.FieldCreatedBy, group.FieldUpdatedBy, group.FieldDeletedBy, group.FieldName, group.FieldDescription, group.FieldPrimaryLocation, group.FieldToken, group.FieldRegion:
+			values[i] = new(sql.NullString)
+		case group.FieldCreatedAt, group.FieldUpdatedAt, group.FieldDeletedAt:
+			values[i] = new(sql.NullTime)
+		default:
+			values[i] = new(sql.UnknownType)
+		}
+	}
+	return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the Group fields.
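+//
+// Editor's note: scanValues above allocates the typed holders (sql.NullString,
+// sql.NullTime, and *[]byte for the JSON-encoded "locations" column) that the
+// row scanner fills; assignValues then copies each valid holder into the
+// matching Group field, unmarshaling "locations" from JSON.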
+func (gr *Group) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case group.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + gr.ID = value.String + } + case group.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + gr.CreatedAt = value.Time + } + case group.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + gr.UpdatedAt = value.Time + } + case group.FieldCreatedBy: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field created_by", values[i]) + } else if value.Valid { + gr.CreatedBy = value.String + } + case group.FieldUpdatedBy: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field updated_by", values[i]) + } else if value.Valid { + gr.UpdatedBy = value.String + } + case group.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + gr.DeletedAt = value.Time + } + case group.FieldDeletedBy: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field deleted_by", values[i]) + } else if value.Valid { + gr.DeletedBy = value.String + } + case group.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + gr.Name = value.String + } + case group.FieldDescription: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field description", values[i]) + } else if value.Valid { + gr.Description = value.String + } + case group.FieldPrimaryLocation: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field primary_location", values[i]) + } else if value.Valid { + gr.PrimaryLocation = value.String + } + case group.FieldLocations: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field locations", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &gr.Locations); err != nil { + return fmt.Errorf("unmarshal field locations: %w", err) + } + } + case group.FieldToken: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field token", values[i]) + } else if value.Valid { + gr.Token = value.String + } + case group.FieldRegion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field region", values[i]) + } else if value.Valid { + gr.Region = enums.Region(value.String) + } + default: + gr.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Group. +// This includes values selected through modifiers, order, etc. +func (gr *Group) Value(name string) (ent.Value, error) { + return gr.selectValues.Get(name) +} + +// QueryDatabases queries the "databases" edge of the Group entity. 
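+//
+// A minimal usage sketch (gr is a loaded *Group; ctx and the generated
+// DatabaseQuery methods such as All are assumed, not part of this file):
+//
+//	dbs, err := gr.QueryDatabases().All(ctx)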
+func (gr *Group) QueryDatabases() *DatabaseQuery { + return NewGroupClient(gr.config).QueryDatabases(gr) +} + +// Update returns a builder for updating this Group. +// Note that you need to call Group.Unwrap() before calling this method if this Group +// was returned from a transaction, and the transaction was committed or rolled back. +func (gr *Group) Update() *GroupUpdateOne { + return NewGroupClient(gr.config).UpdateOne(gr) +} + +// Unwrap unwraps the Group entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (gr *Group) Unwrap() *Group { + _tx, ok := gr.config.driver.(*txDriver) + if !ok { + panic("generated: Group is not a transactional entity") + } + gr.config.driver = _tx.drv + return gr +} + +// String implements the fmt.Stringer. +func (gr *Group) String() string { + var builder strings.Builder + builder.WriteString("Group(") + builder.WriteString(fmt.Sprintf("id=%v, ", gr.ID)) + builder.WriteString("created_at=") + builder.WriteString(gr.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(gr.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("created_by=") + builder.WriteString(gr.CreatedBy) + builder.WriteString(", ") + builder.WriteString("updated_by=") + builder.WriteString(gr.UpdatedBy) + builder.WriteString(", ") + builder.WriteString("deleted_at=") + builder.WriteString(gr.DeletedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("deleted_by=") + builder.WriteString(gr.DeletedBy) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(gr.Name) + builder.WriteString(", ") + builder.WriteString("description=") + builder.WriteString(gr.Description) + builder.WriteString(", ") + builder.WriteString("primary_location=") + builder.WriteString(gr.PrimaryLocation) + builder.WriteString(", ") + builder.WriteString("locations=") + builder.WriteString(fmt.Sprintf("%v", gr.Locations)) + builder.WriteString(", ") + builder.WriteString("token=") + builder.WriteString(", ") + builder.WriteString("region=") + builder.WriteString(fmt.Sprintf("%v", gr.Region)) + builder.WriteByte(')') + return builder.String() +} + +// NamedDatabases returns the Databases named value or an error if the edge was not +// loaded in eager-loading with this name. +func (gr *Group) NamedDatabases(name string) ([]*Database, error) { + if gr.Edges.namedDatabases == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := gr.Edges.namedDatabases[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (gr *Group) appendNamedDatabases(name string, edges ...*Database) { + if gr.Edges.namedDatabases == nil { + gr.Edges.namedDatabases = make(map[string][]*Database) + } + if len(edges) == 0 { + gr.Edges.namedDatabases[name] = []*Database{} + } else { + gr.Edges.namedDatabases[name] = append(gr.Edges.namedDatabases[name], edges...) + } +} + +// Groups is a parsable slice of Group. +type Groups []*Group diff --git a/internal/ent/generated/group/group.go b/internal/ent/generated/group/group.go new file mode 100644 index 0000000..e5a62ac --- /dev/null +++ b/internal/ent/generated/group/group.go @@ -0,0 +1,207 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package group + +import ( + "fmt" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/99designs/gqlgen/graphql" + "github.com/datumforge/geodetic/pkg/enums" +) + +const ( + // Label holds the string label denoting the group type in the database. + Label = "group" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldCreatedBy holds the string denoting the created_by field in the database. + FieldCreatedBy = "created_by" + // FieldUpdatedBy holds the string denoting the updated_by field in the database. + FieldUpdatedBy = "updated_by" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldDeletedBy holds the string denoting the deleted_by field in the database. + FieldDeletedBy = "deleted_by" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldDescription holds the string denoting the description field in the database. + FieldDescription = "description" + // FieldPrimaryLocation holds the string denoting the primary_location field in the database. + FieldPrimaryLocation = "primary_location" + // FieldLocations holds the string denoting the locations field in the database. + FieldLocations = "locations" + // FieldToken holds the string denoting the token field in the database. + FieldToken = "token" + // FieldRegion holds the string denoting the region field in the database. + FieldRegion = "region" + // EdgeDatabases holds the string denoting the databases edge name in mutations. + EdgeDatabases = "databases" + // Table holds the table name of the group in the database. + Table = "groups" + // DatabasesTable is the table that holds the databases relation/edge. + DatabasesTable = "databases" + // DatabasesInverseTable is the table name for the Database entity. + // It exists in this package in order to avoid circular dependency with the "database" package. + DatabasesInverseTable = "databases" + // DatabasesColumn is the table column denoting the databases relation/edge. + DatabasesColumn = "group_id" +) + +// Columns holds all SQL columns for group fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldCreatedBy, + FieldUpdatedBy, + FieldDeletedAt, + FieldDeletedBy, + FieldName, + FieldDescription, + FieldPrimaryLocation, + FieldLocations, + FieldToken, + FieldRegion, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/datumforge/geodetic/internal/ent/generated/runtime" +var ( + Hooks [4]ent.Hook + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. 
+ UpdateDefaultUpdatedAt func() time.Time + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // PrimaryLocationValidator is a validator for the "primary_location" field. It is called by the builders before save. + PrimaryLocationValidator func(string) error + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() string +) + +const DefaultRegion enums.Region = "AMER" + +// RegionValidator is a validator for the "region" field enum values. It is called by the builders before save. +func RegionValidator(r enums.Region) error { + switch r.String() { + case "AMER", "EMEA", "APAC": + return nil + default: + return fmt.Errorf("group: invalid enum value for region field: %q", r) + } +} + +// OrderOption defines the ordering options for the Group queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByCreatedBy orders the results by the created_by field. +func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedBy, opts...).ToFunc() +} + +// ByUpdatedBy orders the results by the updated_by field. +func ByUpdatedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedBy, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByDeletedBy orders the results by the deleted_by field. +func ByDeletedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedBy, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByPrimaryLocation orders the results by the primary_location field. +func ByPrimaryLocation(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPrimaryLocation, opts...).ToFunc() +} + +// ByToken orders the results by the token field. +func ByToken(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldToken, opts...).ToFunc() +} + +// ByRegion orders the results by the region field. +func ByRegion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRegion, opts...).ToFunc() +} + +// ByDatabasesCount orders the results by databases count. +func ByDatabasesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newDatabasesStep(), opts...) + } +} + +// ByDatabases orders the results by databases terms. 
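+//
+// These OrderOption helpers plug into GroupQuery.Order; for example, a
+// sketch that sorts groups by how many databases they own (client and
+// ctx are placeholders; sql.OrderDesc is the standard ent order option):
+//
+//	groups, err := client.Group.Query().
+//		Order(group.ByDatabasesCount(sql.OrderDesc())).
+//		All(ctx)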
+func ByDatabases(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDatabasesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newDatabasesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DatabasesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DatabasesTable, DatabasesColumn), + ) +} + +var ( + // enums.Region must implement graphql.Marshaler. + _ graphql.Marshaler = (*enums.Region)(nil) + // enums.Region must implement graphql.Unmarshaler. + _ graphql.Unmarshaler = (*enums.Region)(nil) +) diff --git a/internal/ent/generated/group/where.go b/internal/ent/generated/group/where.go new file mode 100644 index 0000000..195d0b9 --- /dev/null +++ b/internal/ent/generated/group/where.go @@ -0,0 +1,858 @@ +// Code generated by ent, DO NOT EDIT. + +package group + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/datumforge/geodetic/internal/ent/generated/predicate" + "github.com/datumforge/geodetic/pkg/enums" + + "github.com/datumforge/geodetic/internal/ent/generated/internal" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ. 
+func CreatedBy(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldCreatedBy, v)) +} + +// UpdatedBy applies equality check predicate on the "updated_by" field. It's identical to UpdatedByEQ. +func UpdatedBy(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldUpdatedBy, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedBy applies equality check predicate on the "deleted_by" field. It's identical to DeletedByEQ. +func DeletedBy(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDeletedBy, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldName, v)) +} + +// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. +func Description(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDescription, v)) +} + +// PrimaryLocation applies equality check predicate on the "primary_location" field. It's identical to PrimaryLocationEQ. +func PrimaryLocation(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldPrimaryLocation, v)) +} + +// Token applies equality check predicate on the "token" field. It's identical to TokenEQ. +func Token(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldToken, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldCreatedAt, v)) +} + +// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. +func CreatedAtIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldCreatedAt)) +} + +// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. +func CreatedAtNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldCreatedAt)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. 
+func UpdatedAtEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. +func UpdatedAtIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldUpdatedAt)) +} + +// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. +func UpdatedAtNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldUpdatedAt)) +} + +// CreatedByEQ applies the EQ predicate on the "created_by" field. +func CreatedByEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldCreatedBy, v)) +} + +// CreatedByNEQ applies the NEQ predicate on the "created_by" field. +func CreatedByNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldCreatedBy, v)) +} + +// CreatedByIn applies the In predicate on the "created_by" field. +func CreatedByIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldCreatedBy, vs...)) +} + +// CreatedByNotIn applies the NotIn predicate on the "created_by" field. +func CreatedByNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldCreatedBy, vs...)) +} + +// CreatedByGT applies the GT predicate on the "created_by" field. +func CreatedByGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldCreatedBy, v)) +} + +// CreatedByGTE applies the GTE predicate on the "created_by" field. +func CreatedByGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldCreatedBy, v)) +} + +// CreatedByLT applies the LT predicate on the "created_by" field. +func CreatedByLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldCreatedBy, v)) +} + +// CreatedByLTE applies the LTE predicate on the "created_by" field. +func CreatedByLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldCreatedBy, v)) +} + +// CreatedByContains applies the Contains predicate on the "created_by" field. +func CreatedByContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldCreatedBy, v)) +} + +// CreatedByHasPrefix applies the HasPrefix predicate on the "created_by" field. 
+func CreatedByHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldCreatedBy, v)) +} + +// CreatedByHasSuffix applies the HasSuffix predicate on the "created_by" field. +func CreatedByHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldCreatedBy, v)) +} + +// CreatedByIsNil applies the IsNil predicate on the "created_by" field. +func CreatedByIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldCreatedBy)) +} + +// CreatedByNotNil applies the NotNil predicate on the "created_by" field. +func CreatedByNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldCreatedBy)) +} + +// CreatedByEqualFold applies the EqualFold predicate on the "created_by" field. +func CreatedByEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldCreatedBy, v)) +} + +// CreatedByContainsFold applies the ContainsFold predicate on the "created_by" field. +func CreatedByContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldCreatedBy, v)) +} + +// UpdatedByEQ applies the EQ predicate on the "updated_by" field. +func UpdatedByEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldUpdatedBy, v)) +} + +// UpdatedByNEQ applies the NEQ predicate on the "updated_by" field. +func UpdatedByNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldUpdatedBy, v)) +} + +// UpdatedByIn applies the In predicate on the "updated_by" field. +func UpdatedByIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldUpdatedBy, vs...)) +} + +// UpdatedByNotIn applies the NotIn predicate on the "updated_by" field. +func UpdatedByNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldUpdatedBy, vs...)) +} + +// UpdatedByGT applies the GT predicate on the "updated_by" field. +func UpdatedByGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldUpdatedBy, v)) +} + +// UpdatedByGTE applies the GTE predicate on the "updated_by" field. +func UpdatedByGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldUpdatedBy, v)) +} + +// UpdatedByLT applies the LT predicate on the "updated_by" field. +func UpdatedByLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldUpdatedBy, v)) +} + +// UpdatedByLTE applies the LTE predicate on the "updated_by" field. +func UpdatedByLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldUpdatedBy, v)) +} + +// UpdatedByContains applies the Contains predicate on the "updated_by" field. +func UpdatedByContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldUpdatedBy, v)) +} + +// UpdatedByHasPrefix applies the HasPrefix predicate on the "updated_by" field. +func UpdatedByHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldUpdatedBy, v)) +} + +// UpdatedByHasSuffix applies the HasSuffix predicate on the "updated_by" field. +func UpdatedByHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldUpdatedBy, v)) +} + +// UpdatedByIsNil applies the IsNil predicate on the "updated_by" field. +func UpdatedByIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldUpdatedBy)) +} + +// UpdatedByNotNil applies the NotNil predicate on the "updated_by" field. 
+func UpdatedByNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldUpdatedBy)) +} + +// UpdatedByEqualFold applies the EqualFold predicate on the "updated_by" field. +func UpdatedByEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldUpdatedBy, v)) +} + +// UpdatedByContainsFold applies the ContainsFold predicate on the "updated_by" field. +func UpdatedByContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldUpdatedBy, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldDeletedAt)) +} + +// DeletedByEQ applies the EQ predicate on the "deleted_by" field. +func DeletedByEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDeletedBy, v)) +} + +// DeletedByNEQ applies the NEQ predicate on the "deleted_by" field. +func DeletedByNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldDeletedBy, v)) +} + +// DeletedByIn applies the In predicate on the "deleted_by" field. +func DeletedByIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldDeletedBy, vs...)) +} + +// DeletedByNotIn applies the NotIn predicate on the "deleted_by" field. +func DeletedByNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldDeletedBy, vs...)) +} + +// DeletedByGT applies the GT predicate on the "deleted_by" field. +func DeletedByGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldDeletedBy, v)) +} + +// DeletedByGTE applies the GTE predicate on the "deleted_by" field. +func DeletedByGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldDeletedBy, v)) +} + +// DeletedByLT applies the LT predicate on the "deleted_by" field. 
+func DeletedByLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldDeletedBy, v)) +} + +// DeletedByLTE applies the LTE predicate on the "deleted_by" field. +func DeletedByLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldDeletedBy, v)) +} + +// DeletedByContains applies the Contains predicate on the "deleted_by" field. +func DeletedByContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldDeletedBy, v)) +} + +// DeletedByHasPrefix applies the HasPrefix predicate on the "deleted_by" field. +func DeletedByHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldDeletedBy, v)) +} + +// DeletedByHasSuffix applies the HasSuffix predicate on the "deleted_by" field. +func DeletedByHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldDeletedBy, v)) +} + +// DeletedByIsNil applies the IsNil predicate on the "deleted_by" field. +func DeletedByIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldDeletedBy)) +} + +// DeletedByNotNil applies the NotNil predicate on the "deleted_by" field. +func DeletedByNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldDeletedBy)) +} + +// DeletedByEqualFold applies the EqualFold predicate on the "deleted_by" field. +func DeletedByEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldDeletedBy, v)) +} + +// DeletedByContainsFold applies the ContainsFold predicate on the "deleted_by" field. +func DeletedByContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldDeletedBy, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. 
+func NameHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldName, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. +func DescriptionEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. +func DescriptionNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. +func DescriptionIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. +func DescriptionNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. +func DescriptionGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. +func DescriptionLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. +func DescriptionHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. +func DescriptionHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionIsNil applies the IsNil predicate on the "description" field. +func DescriptionIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldDescription)) +} + +// DescriptionNotNil applies the NotNil predicate on the "description" field. +func DescriptionNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldDescription)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldDescription, v)) +} + +// PrimaryLocationEQ applies the EQ predicate on the "primary_location" field. 
+func PrimaryLocationEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldPrimaryLocation, v)) +} + +// PrimaryLocationNEQ applies the NEQ predicate on the "primary_location" field. +func PrimaryLocationNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldPrimaryLocation, v)) +} + +// PrimaryLocationIn applies the In predicate on the "primary_location" field. +func PrimaryLocationIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldPrimaryLocation, vs...)) +} + +// PrimaryLocationNotIn applies the NotIn predicate on the "primary_location" field. +func PrimaryLocationNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldPrimaryLocation, vs...)) +} + +// PrimaryLocationGT applies the GT predicate on the "primary_location" field. +func PrimaryLocationGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldPrimaryLocation, v)) +} + +// PrimaryLocationGTE applies the GTE predicate on the "primary_location" field. +func PrimaryLocationGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldPrimaryLocation, v)) +} + +// PrimaryLocationLT applies the LT predicate on the "primary_location" field. +func PrimaryLocationLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldPrimaryLocation, v)) +} + +// PrimaryLocationLTE applies the LTE predicate on the "primary_location" field. +func PrimaryLocationLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldPrimaryLocation, v)) +} + +// PrimaryLocationContains applies the Contains predicate on the "primary_location" field. +func PrimaryLocationContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldPrimaryLocation, v)) +} + +// PrimaryLocationHasPrefix applies the HasPrefix predicate on the "primary_location" field. +func PrimaryLocationHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldPrimaryLocation, v)) +} + +// PrimaryLocationHasSuffix applies the HasSuffix predicate on the "primary_location" field. +func PrimaryLocationHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldPrimaryLocation, v)) +} + +// PrimaryLocationEqualFold applies the EqualFold predicate on the "primary_location" field. +func PrimaryLocationEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldPrimaryLocation, v)) +} + +// PrimaryLocationContainsFold applies the ContainsFold predicate on the "primary_location" field. +func PrimaryLocationContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldPrimaryLocation, v)) +} + +// LocationsIsNil applies the IsNil predicate on the "locations" field. +func LocationsIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldLocations)) +} + +// LocationsNotNil applies the NotNil predicate on the "locations" field. +func LocationsNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldLocations)) +} + +// TokenEQ applies the EQ predicate on the "token" field. +func TokenEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldToken, v)) +} + +// TokenNEQ applies the NEQ predicate on the "token" field. +func TokenNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldToken, v)) +} + +// TokenIn applies the In predicate on the "token" field. 
+func TokenIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldToken, vs...)) +} + +// TokenNotIn applies the NotIn predicate on the "token" field. +func TokenNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldToken, vs...)) +} + +// TokenGT applies the GT predicate on the "token" field. +func TokenGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldToken, v)) +} + +// TokenGTE applies the GTE predicate on the "token" field. +func TokenGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldToken, v)) +} + +// TokenLT applies the LT predicate on the "token" field. +func TokenLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldToken, v)) +} + +// TokenLTE applies the LTE predicate on the "token" field. +func TokenLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldToken, v)) +} + +// TokenContains applies the Contains predicate on the "token" field. +func TokenContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldToken, v)) +} + +// TokenHasPrefix applies the HasPrefix predicate on the "token" field. +func TokenHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldToken, v)) +} + +// TokenHasSuffix applies the HasSuffix predicate on the "token" field. +func TokenHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldToken, v)) +} + +// TokenIsNil applies the IsNil predicate on the "token" field. +func TokenIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldToken)) +} + +// TokenNotNil applies the NotNil predicate on the "token" field. +func TokenNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldToken)) +} + +// TokenEqualFold applies the EqualFold predicate on the "token" field. +func TokenEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldToken, v)) +} + +// TokenContainsFold applies the ContainsFold predicate on the "token" field. +func TokenContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldToken, v)) +} + +// RegionEQ applies the EQ predicate on the "region" field. +func RegionEQ(v enums.Region) predicate.Group { + vc := v + return predicate.Group(sql.FieldEQ(FieldRegion, vc)) +} + +// RegionNEQ applies the NEQ predicate on the "region" field. +func RegionNEQ(v enums.Region) predicate.Group { + vc := v + return predicate.Group(sql.FieldNEQ(FieldRegion, vc)) +} + +// RegionIn applies the In predicate on the "region" field. +func RegionIn(vs ...enums.Region) predicate.Group { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Group(sql.FieldIn(FieldRegion, v...)) +} + +// RegionNotIn applies the NotIn predicate on the "region" field. +func RegionNotIn(vs ...enums.Region) predicate.Group { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Group(sql.FieldNotIn(FieldRegion, v...)) +} + +// HasDatabases applies the HasEdge predicate on the "databases" edge. 
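+//
+// Edge predicates filter groups by their related Database rows; a sketch
+// (client and ctx are placeholders, and database.NameContains is the
+// corresponding generated helper, assumed here):
+//
+//	groups, err := client.Group.Query().
+//		Where(group.HasDatabasesWith(database.NameContains("prod"))).
+//		All(ctx)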
+func HasDatabases() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DatabasesTable, DatabasesColumn), + ) + schemaConfig := internal.SchemaConfigFromContext(s.Context()) + step.To.Schema = schemaConfig.Database + step.Edge.Schema = schemaConfig.Database + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDatabasesWith applies the HasEdge predicate on the "databases" edge with a given conditions (other predicates). +func HasDatabasesWith(preds ...predicate.Database) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newDatabasesStep() + schemaConfig := internal.SchemaConfigFromContext(s.Context()) + step.To.Schema = schemaConfig.Database + step.Edge.Schema = schemaConfig.Database + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Group) predicate.Group { + return predicate.Group(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Group) predicate.Group { + return predicate.Group(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Group) predicate.Group { + return predicate.Group(sql.NotPredicates(p)) +} diff --git a/internal/ent/generated/group_create.go b/internal/ent/generated/group_create.go new file mode 100644 index 0000000..85b8c84 --- /dev/null +++ b/internal/ent/generated/group_create.go @@ -0,0 +1,475 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/geodetic/pkg/enums" +) + +// GroupCreate is the builder for creating a Group entity. +type GroupCreate struct { + config + mutation *GroupMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (gc *GroupCreate) SetCreatedAt(t time.Time) *GroupCreate { + gc.mutation.SetCreatedAt(t) + return gc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (gc *GroupCreate) SetNillableCreatedAt(t *time.Time) *GroupCreate { + if t != nil { + gc.SetCreatedAt(*t) + } + return gc +} + +// SetUpdatedAt sets the "updated_at" field. +func (gc *GroupCreate) SetUpdatedAt(t time.Time) *GroupCreate { + gc.mutation.SetUpdatedAt(t) + return gc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (gc *GroupCreate) SetNillableUpdatedAt(t *time.Time) *GroupCreate { + if t != nil { + gc.SetUpdatedAt(*t) + } + return gc +} + +// SetCreatedBy sets the "created_by" field. +func (gc *GroupCreate) SetCreatedBy(s string) *GroupCreate { + gc.mutation.SetCreatedBy(s) + return gc +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (gc *GroupCreate) SetNillableCreatedBy(s *string) *GroupCreate { + if s != nil { + gc.SetCreatedBy(*s) + } + return gc +} + +// SetUpdatedBy sets the "updated_by" field. +func (gc *GroupCreate) SetUpdatedBy(s string) *GroupCreate { + gc.mutation.SetUpdatedBy(s) + return gc +} + +// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil. 
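+//
+// Nillable setters let optional input fields be applied without nil
+// checks at the call site; a sketch (updatedBy is a placeholder *string,
+// e.g. from a GraphQL input):
+//
+//	gc.SetNillableUpdatedBy(updatedBy) // no-op when updatedBy is nil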
+func (gc *GroupCreate) SetNillableUpdatedBy(s *string) *GroupCreate { + if s != nil { + gc.SetUpdatedBy(*s) + } + return gc +} + +// SetDeletedAt sets the "deleted_at" field. +func (gc *GroupCreate) SetDeletedAt(t time.Time) *GroupCreate { + gc.mutation.SetDeletedAt(t) + return gc +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (gc *GroupCreate) SetNillableDeletedAt(t *time.Time) *GroupCreate { + if t != nil { + gc.SetDeletedAt(*t) + } + return gc +} + +// SetDeletedBy sets the "deleted_by" field. +func (gc *GroupCreate) SetDeletedBy(s string) *GroupCreate { + gc.mutation.SetDeletedBy(s) + return gc +} + +// SetNillableDeletedBy sets the "deleted_by" field if the given value is not nil. +func (gc *GroupCreate) SetNillableDeletedBy(s *string) *GroupCreate { + if s != nil { + gc.SetDeletedBy(*s) + } + return gc +} + +// SetName sets the "name" field. +func (gc *GroupCreate) SetName(s string) *GroupCreate { + gc.mutation.SetName(s) + return gc +} + +// SetDescription sets the "description" field. +func (gc *GroupCreate) SetDescription(s string) *GroupCreate { + gc.mutation.SetDescription(s) + return gc +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (gc *GroupCreate) SetNillableDescription(s *string) *GroupCreate { + if s != nil { + gc.SetDescription(*s) + } + return gc +} + +// SetPrimaryLocation sets the "primary_location" field. +func (gc *GroupCreate) SetPrimaryLocation(s string) *GroupCreate { + gc.mutation.SetPrimaryLocation(s) + return gc +} + +// SetLocations sets the "locations" field. +func (gc *GroupCreate) SetLocations(s []string) *GroupCreate { + gc.mutation.SetLocations(s) + return gc +} + +// SetToken sets the "token" field. +func (gc *GroupCreate) SetToken(s string) *GroupCreate { + gc.mutation.SetToken(s) + return gc +} + +// SetNillableToken sets the "token" field if the given value is not nil. +func (gc *GroupCreate) SetNillableToken(s *string) *GroupCreate { + if s != nil { + gc.SetToken(*s) + } + return gc +} + +// SetRegion sets the "region" field. +func (gc *GroupCreate) SetRegion(e enums.Region) *GroupCreate { + gc.mutation.SetRegion(e) + return gc +} + +// SetNillableRegion sets the "region" field if the given value is not nil. +func (gc *GroupCreate) SetNillableRegion(e *enums.Region) *GroupCreate { + if e != nil { + gc.SetRegion(*e) + } + return gc +} + +// SetID sets the "id" field. +func (gc *GroupCreate) SetID(s string) *GroupCreate { + gc.mutation.SetID(s) + return gc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (gc *GroupCreate) SetNillableID(s *string) *GroupCreate { + if s != nil { + gc.SetID(*s) + } + return gc +} + +// AddDatabaseIDs adds the "databases" edge to the Database entity by IDs. +func (gc *GroupCreate) AddDatabaseIDs(ids ...string) *GroupCreate { + gc.mutation.AddDatabaseIDs(ids...) + return gc +} + +// AddDatabases adds the "databases" edges to the Database entity. +func (gc *GroupCreate) AddDatabases(d ...*Database) *GroupCreate { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return gc.AddDatabaseIDs(ids...) +} + +// Mutation returns the GroupMutation object of the builder. +func (gc *GroupCreate) Mutation() *GroupMutation { + return gc.mutation +} + +// Save creates the Group in the database. 
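+//
+// A minimal create sketch (client and ctx are placeholders, and "sjc"
+// stands in for a location code; name and primary_location are required
+// by check below, while region falls back to DefaultRegion):
+//
+//	g, err := client.Group.Create().
+//		SetName("my-group").
+//		SetPrimaryLocation("sjc").
+//		Save(ctx)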
+func (gc *GroupCreate) Save(ctx context.Context) (*Group, error) { + if err := gc.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, gc.sqlSave, gc.mutation, gc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (gc *GroupCreate) SaveX(ctx context.Context) *Group { + v, err := gc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (gc *GroupCreate) Exec(ctx context.Context) error { + _, err := gc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (gc *GroupCreate) ExecX(ctx context.Context) { + if err := gc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (gc *GroupCreate) defaults() error { + if _, ok := gc.mutation.CreatedAt(); !ok { + if group.DefaultCreatedAt == nil { + return fmt.Errorf("generated: uninitialized group.DefaultCreatedAt (forgotten import generated/runtime?)") + } + v := group.DefaultCreatedAt() + gc.mutation.SetCreatedAt(v) + } + if _, ok := gc.mutation.UpdatedAt(); !ok { + if group.DefaultUpdatedAt == nil { + return fmt.Errorf("generated: uninitialized group.DefaultUpdatedAt (forgotten import generated/runtime?)") + } + v := group.DefaultUpdatedAt() + gc.mutation.SetUpdatedAt(v) + } + if _, ok := gc.mutation.Region(); !ok { + v := group.DefaultRegion + gc.mutation.SetRegion(v) + } + if _, ok := gc.mutation.ID(); !ok { + if group.DefaultID == nil { + return fmt.Errorf("generated: uninitialized group.DefaultID (forgotten import generated/runtime?)") + } + v := group.DefaultID() + gc.mutation.SetID(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (gc *GroupCreate) check() error { + if _, ok := gc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`generated: missing required field "Group.name"`)} + } + if v, ok := gc.mutation.Name(); ok { + if err := group.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`generated: validator failed for field "Group.name": %w`, err)} + } + } + if _, ok := gc.mutation.PrimaryLocation(); !ok { + return &ValidationError{Name: "primary_location", err: errors.New(`generated: missing required field "Group.primary_location"`)} + } + if v, ok := gc.mutation.PrimaryLocation(); ok { + if err := group.PrimaryLocationValidator(v); err != nil { + return &ValidationError{Name: "primary_location", err: fmt.Errorf(`generated: validator failed for field "Group.primary_location": %w`, err)} + } + } + if _, ok := gc.mutation.Region(); !ok { + return &ValidationError{Name: "region", err: errors.New(`generated: missing required field "Group.region"`)} + } + if v, ok := gc.mutation.Region(); ok { + if err := group.RegionValidator(v); err != nil { + return &ValidationError{Name: "region", err: fmt.Errorf(`generated: validator failed for field "Group.region": %w`, err)} + } + } + return nil +} + +func (gc *GroupCreate) sqlSave(ctx context.Context) (*Group, error) { + if err := gc.check(); err != nil { + return nil, err + } + _node, _spec := gc.createSpec() + if err := sqlgraph.CreateNode(ctx, gc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected Group.ID type: %T", _spec.ID.Value) + } + } + gc.mutation.id = &_node.ID + 
gc.mutation.done = true + return _node, nil +} + +func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { + var ( + _node = &Group{config: gc.config} + _spec = sqlgraph.NewCreateSpec(group.Table, sqlgraph.NewFieldSpec(group.FieldID, field.TypeString)) + ) + _spec.Schema = gc.schemaConfig.Group + if id, ok := gc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := gc.mutation.CreatedAt(); ok { + _spec.SetField(group.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := gc.mutation.UpdatedAt(); ok { + _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := gc.mutation.CreatedBy(); ok { + _spec.SetField(group.FieldCreatedBy, field.TypeString, value) + _node.CreatedBy = value + } + if value, ok := gc.mutation.UpdatedBy(); ok { + _spec.SetField(group.FieldUpdatedBy, field.TypeString, value) + _node.UpdatedBy = value + } + if value, ok := gc.mutation.DeletedAt(); ok { + _spec.SetField(group.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = value + } + if value, ok := gc.mutation.DeletedBy(); ok { + _spec.SetField(group.FieldDeletedBy, field.TypeString, value) + _node.DeletedBy = value + } + if value, ok := gc.mutation.Name(); ok { + _spec.SetField(group.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := gc.mutation.Description(); ok { + _spec.SetField(group.FieldDescription, field.TypeString, value) + _node.Description = value + } + if value, ok := gc.mutation.PrimaryLocation(); ok { + _spec.SetField(group.FieldPrimaryLocation, field.TypeString, value) + _node.PrimaryLocation = value + } + if value, ok := gc.mutation.Locations(); ok { + _spec.SetField(group.FieldLocations, field.TypeJSON, value) + _node.Locations = value + } + if value, ok := gc.mutation.Token(); ok { + _spec.SetField(group.FieldToken, field.TypeString, value) + _node.Token = value + } + if value, ok := gc.mutation.Region(); ok { + _spec.SetField(group.FieldRegion, field.TypeEnum, value) + _node.Region = value + } + if nodes := gc.mutation.DatabasesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DatabasesTable, + Columns: []string{group.DatabasesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(database.FieldID, field.TypeString), + }, + } + edge.Schema = gc.schemaConfig.Database + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// GroupCreateBulk is the builder for creating many Group entities in bulk. +type GroupCreateBulk struct { + config + err error + builders []*GroupCreate +} + +// Save creates the Group entities in the database. 
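+//
+// A bulk-create sketch (client and ctx are placeholders; CreateBulk is
+// the usual generated client helper, assumed here):
+//
+//	groups, err := client.Group.CreateBulk(
+//		client.Group.Create().SetName("a").SetPrimaryLocation("sjc"),
+//		client.Group.Create().SetName("b").SetPrimaryLocation("ord"),
+//	).Save(ctx)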
+func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) { + if gcb.err != nil { + return nil, gcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(gcb.builders)) + nodes := make([]*Group, len(gcb.builders)) + mutators := make([]Mutator, len(gcb.builders)) + for i := range gcb.builders { + func(i int, root context.Context) { + builder := gcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*GroupMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, gcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, gcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, gcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (gcb *GroupCreateBulk) SaveX(ctx context.Context) []*Group { + v, err := gcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (gcb *GroupCreateBulk) Exec(ctx context.Context) error { + _, err := gcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (gcb *GroupCreateBulk) ExecX(ctx context.Context) { + if err := gcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/internal/ent/generated/group_delete.go b/internal/ent/generated/group_delete.go new file mode 100644 index 0000000..3bed0e3 --- /dev/null +++ b/internal/ent/generated/group_delete.go @@ -0,0 +1,92 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/datumforge/geodetic/internal/ent/generated/predicate" + + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/geodetic/internal/ent/generated/internal" +) + +// GroupDelete is the builder for deleting a Group entity. +type GroupDelete struct { + config + hooks []Hook + mutation *GroupMutation +} + +// Where appends a list predicates to the GroupDelete builder. +func (gd *GroupDelete) Where(ps ...predicate.Group) *GroupDelete { + gd.mutation.Where(ps...) + return gd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (gd *GroupDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, gd.sqlExec, gd.mutation, gd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
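// For example (a sketch, not generated output; client, ctx, and the name value are assumptions):
//
//	n := client.Group.Delete().
//		Where(group.NameEQ("staging")).
//		ExecX(ctx)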
+func (gd *GroupDelete) ExecX(ctx context.Context) int { + n, err := gd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (gd *GroupDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(group.Table, sqlgraph.NewFieldSpec(group.FieldID, field.TypeString)) + _spec.Node.Schema = gd.schemaConfig.Group + ctx = internal.NewSchemaConfigContext(ctx, gd.schemaConfig) + if ps := gd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, gd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + gd.mutation.done = true + return affected, err +} + +// GroupDeleteOne is the builder for deleting a single Group entity. +type GroupDeleteOne struct { + gd *GroupDelete +} + +// Where appends a list predicates to the GroupDelete builder. +func (gdo *GroupDeleteOne) Where(ps ...predicate.Group) *GroupDeleteOne { + gdo.gd.mutation.Where(ps...) + return gdo +} + +// Exec executes the deletion query. +func (gdo *GroupDeleteOne) Exec(ctx context.Context) error { + n, err := gdo.gd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{group.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (gdo *GroupDeleteOne) ExecX(ctx context.Context) { + if err := gdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/internal/ent/generated/group_query.go b/internal/ent/generated/group_query.go new file mode 100644 index 0000000..31b212d --- /dev/null +++ b/internal/ent/generated/group_query.go @@ -0,0 +1,652 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/geodetic/internal/ent/generated/predicate" + + "github.com/datumforge/geodetic/internal/ent/generated/internal" +) + +// GroupQuery is the builder for querying Group entities. +type GroupQuery struct { + config + ctx *QueryContext + order []group.OrderOption + inters []Interceptor + predicates []predicate.Group + withDatabases *DatabaseQuery + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Group) error + withNamedDatabases map[string]*DatabaseQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the GroupQuery builder. +func (gq *GroupQuery) Where(ps ...predicate.Group) *GroupQuery { + gq.predicates = append(gq.predicates, ps...) + return gq +} + +// Limit the number of records to be returned by this query. +func (gq *GroupQuery) Limit(limit int) *GroupQuery { + gq.ctx.Limit = &limit + return gq +} + +// Offset to start from. +func (gq *GroupQuery) Offset(offset int) *GroupQuery { + gq.ctx.Offset = &offset + return gq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (gq *GroupQuery) Unique(unique bool) *GroupQuery { + gq.ctx.Unique = &unique + return gq +} + +// Order specifies how the records should be ordered. 
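// An illustrative ordered query (a sketch, not generated output; it assumes the
// generated group.ByName ordering helper, plus client and ctx):
//
//	groups, err := client.Group.Query().
//		Order(group.ByName()).
//		All(ctx)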
+func (gq *GroupQuery) Order(o ...group.OrderOption) *GroupQuery { + gq.order = append(gq.order, o...) + return gq +} + +// QueryDatabases chains the current query on the "databases" edge. +func (gq *GroupQuery) QueryDatabases() *DatabaseQuery { + query := (&DatabaseClient{config: gq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := gq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := gq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(database.Table, database.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.DatabasesTable, group.DatabasesColumn), + ) + schemaConfig := gq.schemaConfig + step.To.Schema = schemaConfig.Database + step.Edge.Schema = schemaConfig.Database + fromU = sqlgraph.SetNeighbors(gq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Group entity from the query. +// Returns a *NotFoundError when no Group was found. +func (gq *GroupQuery) First(ctx context.Context) (*Group, error) { + nodes, err := gq.Limit(1).All(setContextOp(ctx, gq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{group.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (gq *GroupQuery) FirstX(ctx context.Context) *Group { + node, err := gq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Group ID from the query. +// Returns a *NotFoundError when no Group ID was found. +func (gq *GroupQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = gq.Limit(1).IDs(setContextOp(ctx, gq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{group.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (gq *GroupQuery) FirstIDX(ctx context.Context) string { + id, err := gq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Group entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Group entity is found. +// Returns a *NotFoundError when no Group entities are found. +func (gq *GroupQuery) Only(ctx context.Context) (*Group, error) { + nodes, err := gq.Limit(2).All(setContextOp(ctx, gq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{group.Label} + default: + return nil, &NotSingularError{group.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (gq *GroupQuery) OnlyX(ctx context.Context) *Group { + node, err := gq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Group ID in the query. +// Returns a *NotSingularError when more than one Group ID is found. +// Returns a *NotFoundError when no entities are found. 
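// For example (a sketch, not generated output; client, ctx, and the name value are assumptions):
//
//	id, err := client.Group.Query().
//		Where(group.NameEQ("prod")).
//		OnlyID(ctx)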
+func (gq *GroupQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = gq.Limit(2).IDs(setContextOp(ctx, gq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{group.Label} + default: + err = &NotSingularError{group.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (gq *GroupQuery) OnlyIDX(ctx context.Context) string { + id, err := gq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Groups. +func (gq *GroupQuery) All(ctx context.Context) ([]*Group, error) { + ctx = setContextOp(ctx, gq.ctx, "All") + if err := gq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Group, *GroupQuery]() + return withInterceptors[[]*Group](ctx, gq, qr, gq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (gq *GroupQuery) AllX(ctx context.Context) []*Group { + nodes, err := gq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Group IDs. +func (gq *GroupQuery) IDs(ctx context.Context) (ids []string, err error) { + if gq.ctx.Unique == nil && gq.path != nil { + gq.Unique(true) + } + ctx = setContextOp(ctx, gq.ctx, "IDs") + if err = gq.Select(group.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (gq *GroupQuery) IDsX(ctx context.Context) []string { + ids, err := gq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (gq *GroupQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, gq.ctx, "Count") + if err := gq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, gq, querierCount[*GroupQuery](), gq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (gq *GroupQuery) CountX(ctx context.Context) int { + count, err := gq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (gq *GroupQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, gq.ctx, "Exist") + switch _, err := gq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("generated: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (gq *GroupQuery) ExistX(ctx context.Context) bool { + exist, err := gq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the GroupQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (gq *GroupQuery) Clone() *GroupQuery { + if gq == nil { + return nil + } + return &GroupQuery{ + config: gq.config, + ctx: gq.ctx.Clone(), + order: append([]group.OrderOption{}, gq.order...), + inters: append([]Interceptor{}, gq.inters...), + predicates: append([]predicate.Group{}, gq.predicates...), + withDatabases: gq.withDatabases.Clone(), + // clone intermediate query. + sql: gq.sql.Clone(), + path: gq.path, + } +} + +// WithDatabases tells the query-builder to eager-load the nodes that are connected to +// the "databases" edge. The optional arguments are used to configure the query builder of the edge. 
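// A hypothetical eager-loading call (a sketch, not generated output; client and ctx assumed):
//
//	groups, err := client.Group.Query().
//		WithDatabases(func(q *generated.DatabaseQuery) {
//			q.Limit(5) // shape the edge query as needed
//		}).
//		All(ctx)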
+func (gq *GroupQuery) WithDatabases(opts ...func(*DatabaseQuery)) *GroupQuery { + query := (&DatabaseClient{config: gq.config}).Query() + for _, opt := range opts { + opt(query) + } + gq.withDatabases = query + return gq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Group.Query(). +// GroupBy(group.FieldCreatedAt). +// Aggregate(generated.Count()). +// Scan(ctx, &v) +func (gq *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy { + gq.ctx.Fields = append([]string{field}, fields...) + grbuild := &GroupGroupBy{build: gq} + grbuild.flds = &gq.ctx.Fields + grbuild.label = group.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Group.Query(). +// Select(group.FieldCreatedAt). +// Scan(ctx, &v) +func (gq *GroupQuery) Select(fields ...string) *GroupSelect { + gq.ctx.Fields = append(gq.ctx.Fields, fields...) + sbuild := &GroupSelect{GroupQuery: gq} + sbuild.label = group.Label + sbuild.flds, sbuild.scan = &gq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a GroupSelect configured with the given aggregations. +func (gq *GroupQuery) Aggregate(fns ...AggregateFunc) *GroupSelect { + return gq.Select().Aggregate(fns...) +} + +func (gq *GroupQuery) prepareQuery(ctx context.Context) error { + for _, inter := range gq.inters { + if inter == nil { + return fmt.Errorf("generated: uninitialized interceptor (forgotten import generated/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, gq); err != nil { + return err + } + } + } + for _, f := range gq.ctx.Fields { + if !group.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("generated: invalid field %q for query", f)} + } + } + if gq.path != nil { + prev, err := gq.path(ctx) + if err != nil { + return err + } + gq.sql = prev + } + return nil +} + +func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, error) { + var ( + nodes = []*Group{} + _spec = gq.querySpec() + loadedTypes = [1]bool{ + gq.withDatabases != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Group).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Group{config: gq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + _spec.Node.Schema = gq.schemaConfig.Group + ctx = internal.NewSchemaConfigContext(ctx, gq.schemaConfig) + if len(gq.modifiers) > 0 { + _spec.Modifiers = gq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, gq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := gq.withDatabases; query != nil { + if err := gq.loadDatabases(ctx, query, nodes, + func(n *Group) { n.Edges.Databases = []*Database{} }, + func(n *Group, e *Database) { n.Edges.Databases = append(n.Edges.Databases, e) }); err != nil { + return nil, err + } + } + for name, query := range gq.withNamedDatabases { + if err := 
gq.loadDatabases(ctx, query, nodes, + func(n *Group) { n.appendNamedDatabases(name) }, + func(n *Group, e *Database) { n.appendNamedDatabases(name, e) }); err != nil { + return nil, err + } + } + for i := range gq.loadTotal { + if err := gq.loadTotal[i](ctx, nodes); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (gq *GroupQuery) loadDatabases(ctx context.Context, query *DatabaseQuery, nodes []*Group, init func(*Group), assign func(*Group, *Database)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[string]*Group) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(database.FieldGroupID) + } + query.Where(predicate.Database(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(group.DatabasesColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.GroupID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (gq *GroupQuery) sqlCount(ctx context.Context) (int, error) { + _spec := gq.querySpec() + _spec.Node.Schema = gq.schemaConfig.Group + ctx = internal.NewSchemaConfigContext(ctx, gq.schemaConfig) + if len(gq.modifiers) > 0 { + _spec.Modifiers = gq.modifiers + } + _spec.Node.Columns = gq.ctx.Fields + if len(gq.ctx.Fields) > 0 { + _spec.Unique = gq.ctx.Unique != nil && *gq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, gq.driver, _spec) +} + +func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeString)) + _spec.From = gq.sql + if unique := gq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if gq.path != nil { + _spec.Unique = true + } + if fields := gq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, group.FieldID) + for i := range fields { + if fields[i] != group.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := gq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := gq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := gq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := gq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(gq.driver.Dialect()) + t1 := builder.Table(group.Table) + columns := gq.ctx.Fields + if len(columns) == 0 { + columns = group.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if gq.sql != nil { + selector = gq.sql + selector.Select(selector.Columns(columns...)...) + } + if gq.ctx.Unique != nil && *gq.ctx.Unique { + selector.Distinct() + } + t1.Schema(gq.schemaConfig.Group) + ctx = internal.NewSchemaConfigContext(ctx, gq.schemaConfig) + selector.WithContext(ctx) + for _, p := range gq.predicates { + p(selector) + } + for _, p := range gq.order { + p(selector) + } + if offset := gq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. 
We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := gq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// WithNamedDatabases tells the query-builder to eager-load the nodes that are connected to the "databases" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (gq *GroupQuery) WithNamedDatabases(name string, opts ...func(*DatabaseQuery)) *GroupQuery { + query := (&DatabaseClient{config: gq.config}).Query() + for _, opt := range opts { + opt(query) + } + if gq.withNamedDatabases == nil { + gq.withNamedDatabases = make(map[string]*DatabaseQuery) + } + gq.withNamedDatabases[name] = query + return gq +} + +// GroupGroupBy is the group-by builder for Group entities. +type GroupGroupBy struct { + selector + build *GroupQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ggb *GroupGroupBy) Aggregate(fns ...AggregateFunc) *GroupGroupBy { + ggb.fns = append(ggb.fns, fns...) + return ggb +} + +// Scan applies the selector query and scans the result into the given value. +func (ggb *GroupGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ggb.build.ctx, "GroupBy") + if err := ggb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*GroupQuery, *GroupGroupBy](ctx, ggb.build, ggb, ggb.build.inters, v) +} + +func (ggb *GroupGroupBy) sqlScan(ctx context.Context, root *GroupQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ggb.fns)) + for _, fn := range ggb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ggb.flds)+len(ggb.fns)) + for _, f := range *ggb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*ggb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ggb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// GroupSelect is the builder for selecting fields of Group entities. +type GroupSelect struct { + *GroupQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (gs *GroupSelect) Aggregate(fns ...AggregateFunc) *GroupSelect { + gs.fns = append(gs.fns, fns...) + return gs +} + +// Scan applies the selector query and scans the result into the given value. +func (gs *GroupSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, gs.ctx, "Select") + if err := gs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*GroupQuery, *GroupSelect](ctx, gs.GroupQuery, gs, gs.inters, v) +} + +func (gs *GroupSelect) sqlScan(ctx context.Context, root *GroupQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(gs.fns)) + for _, fn := range gs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*gs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } + rows := &sql.Rows{} + query, args := selector.Query() + if err := gs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/internal/ent/generated/group_update.go b/internal/ent/generated/group_update.go new file mode 100644 index 0000000..f760cb8 --- /dev/null +++ b/internal/ent/generated/group_update.go @@ -0,0 +1,897 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/geodetic/internal/ent/generated/predicate" + "github.com/datumforge/geodetic/pkg/enums" + + "github.com/datumforge/geodetic/internal/ent/generated/internal" +) + +// GroupUpdate is the builder for updating Group entities. +type GroupUpdate struct { + config + hooks []Hook + mutation *GroupMutation +} + +// Where appends a list predicates to the GroupUpdate builder. +func (gu *GroupUpdate) Where(ps ...predicate.Group) *GroupUpdate { + gu.mutation.Where(ps...) + return gu +} + +// SetUpdatedAt sets the "updated_at" field. +func (gu *GroupUpdate) SetUpdatedAt(t time.Time) *GroupUpdate { + gu.mutation.SetUpdatedAt(t) + return gu +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (gu *GroupUpdate) ClearUpdatedAt() *GroupUpdate { + gu.mutation.ClearUpdatedAt() + return gu +} + +// SetUpdatedBy sets the "updated_by" field. +func (gu *GroupUpdate) SetUpdatedBy(s string) *GroupUpdate { + gu.mutation.SetUpdatedBy(s) + return gu +} + +// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil. +func (gu *GroupUpdate) SetNillableUpdatedBy(s *string) *GroupUpdate { + if s != nil { + gu.SetUpdatedBy(*s) + } + return gu +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (gu *GroupUpdate) ClearUpdatedBy() *GroupUpdate { + gu.mutation.ClearUpdatedBy() + return gu +} + +// SetDeletedAt sets the "deleted_at" field. +func (gu *GroupUpdate) SetDeletedAt(t time.Time) *GroupUpdate { + gu.mutation.SetDeletedAt(t) + return gu +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (gu *GroupUpdate) SetNillableDeletedAt(t *time.Time) *GroupUpdate { + if t != nil { + gu.SetDeletedAt(*t) + } + return gu +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (gu *GroupUpdate) ClearDeletedAt() *GroupUpdate { + gu.mutation.ClearDeletedAt() + return gu +} + +// SetDeletedBy sets the "deleted_by" field. +func (gu *GroupUpdate) SetDeletedBy(s string) *GroupUpdate { + gu.mutation.SetDeletedBy(s) + return gu +} + +// SetNillableDeletedBy sets the "deleted_by" field if the given value is not nil. +func (gu *GroupUpdate) SetNillableDeletedBy(s *string) *GroupUpdate { + if s != nil { + gu.SetDeletedBy(*s) + } + return gu +} + +// ClearDeletedBy clears the value of the "deleted_by" field. +func (gu *GroupUpdate) ClearDeletedBy() *GroupUpdate { + gu.mutation.ClearDeletedBy() + return gu +} + +// SetName sets the "name" field. +func (gu *GroupUpdate) SetName(s string) *GroupUpdate { + gu.mutation.SetName(s) + return gu +} + +// SetNillableName sets the "name" field if the given value is not nil. 
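// A sketch of how nillable setters pair with optional API inputs (not generated
// output; req is a hypothetical request struct holding a *string Name):
//
//	n, err := client.Group.Update().
//		Where(group.IDEQ(id)).
//		SetNillableName(req.Name). // no-op when req.Name is nil
//		Save(ctx)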
+func (gu *GroupUpdate) SetNillableName(s *string) *GroupUpdate { + if s != nil { + gu.SetName(*s) + } + return gu +} + +// SetDescription sets the "description" field. +func (gu *GroupUpdate) SetDescription(s string) *GroupUpdate { + gu.mutation.SetDescription(s) + return gu +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (gu *GroupUpdate) SetNillableDescription(s *string) *GroupUpdate { + if s != nil { + gu.SetDescription(*s) + } + return gu +} + +// ClearDescription clears the value of the "description" field. +func (gu *GroupUpdate) ClearDescription() *GroupUpdate { + gu.mutation.ClearDescription() + return gu +} + +// SetPrimaryLocation sets the "primary_location" field. +func (gu *GroupUpdate) SetPrimaryLocation(s string) *GroupUpdate { + gu.mutation.SetPrimaryLocation(s) + return gu +} + +// SetNillablePrimaryLocation sets the "primary_location" field if the given value is not nil. +func (gu *GroupUpdate) SetNillablePrimaryLocation(s *string) *GroupUpdate { + if s != nil { + gu.SetPrimaryLocation(*s) + } + return gu +} + +// SetLocations sets the "locations" field. +func (gu *GroupUpdate) SetLocations(s []string) *GroupUpdate { + gu.mutation.SetLocations(s) + return gu +} + +// AppendLocations appends s to the "locations" field. +func (gu *GroupUpdate) AppendLocations(s []string) *GroupUpdate { + gu.mutation.AppendLocations(s) + return gu +} + +// ClearLocations clears the value of the "locations" field. +func (gu *GroupUpdate) ClearLocations() *GroupUpdate { + gu.mutation.ClearLocations() + return gu +} + +// SetToken sets the "token" field. +func (gu *GroupUpdate) SetToken(s string) *GroupUpdate { + gu.mutation.SetToken(s) + return gu +} + +// SetNillableToken sets the "token" field if the given value is not nil. +func (gu *GroupUpdate) SetNillableToken(s *string) *GroupUpdate { + if s != nil { + gu.SetToken(*s) + } + return gu +} + +// ClearToken clears the value of the "token" field. +func (gu *GroupUpdate) ClearToken() *GroupUpdate { + gu.mutation.ClearToken() + return gu +} + +// SetRegion sets the "region" field. +func (gu *GroupUpdate) SetRegion(e enums.Region) *GroupUpdate { + gu.mutation.SetRegion(e) + return gu +} + +// SetNillableRegion sets the "region" field if the given value is not nil. +func (gu *GroupUpdate) SetNillableRegion(e *enums.Region) *GroupUpdate { + if e != nil { + gu.SetRegion(*e) + } + return gu +} + +// AddDatabaseIDs adds the "databases" edge to the Database entity by IDs. +func (gu *GroupUpdate) AddDatabaseIDs(ids ...string) *GroupUpdate { + gu.mutation.AddDatabaseIDs(ids...) + return gu +} + +// AddDatabases adds the "databases" edges to the Database entity. +func (gu *GroupUpdate) AddDatabases(d ...*Database) *GroupUpdate { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return gu.AddDatabaseIDs(ids...) +} + +// Mutation returns the GroupMutation object of the builder. +func (gu *GroupUpdate) Mutation() *GroupMutation { + return gu.mutation +} + +// ClearDatabases clears all "databases" edges to the Database entity. +func (gu *GroupUpdate) ClearDatabases() *GroupUpdate { + gu.mutation.ClearDatabases() + return gu +} + +// RemoveDatabaseIDs removes the "databases" edge to Database entities by IDs. +func (gu *GroupUpdate) RemoveDatabaseIDs(ids ...string) *GroupUpdate { + gu.mutation.RemoveDatabaseIDs(ids...) + return gu +} + +// RemoveDatabases removes "databases" edges to Database entities. 
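// For example (a sketch, not generated output; db1 and db2 are previously loaded
// *generated.Database values, and id, client, and ctx are assumed):
//
//	err := client.Group.Update().
//		Where(group.IDEQ(id)).
//		RemoveDatabases(db1, db2).
//		Exec(ctx)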
+func (gu *GroupUpdate) RemoveDatabases(d ...*Database) *GroupUpdate { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return gu.RemoveDatabaseIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (gu *GroupUpdate) Save(ctx context.Context) (int, error) { + if err := gu.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, gu.sqlSave, gu.mutation, gu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (gu *GroupUpdate) SaveX(ctx context.Context) int { + affected, err := gu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (gu *GroupUpdate) Exec(ctx context.Context) error { + _, err := gu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (gu *GroupUpdate) ExecX(ctx context.Context) { + if err := gu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (gu *GroupUpdate) defaults() error { + if _, ok := gu.mutation.UpdatedAt(); !ok && !gu.mutation.UpdatedAtCleared() { + if group.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("generated: uninitialized group.UpdateDefaultUpdatedAt (forgotten import generated/runtime?)") + } + v := group.UpdateDefaultUpdatedAt() + gu.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (gu *GroupUpdate) check() error { + if v, ok := gu.mutation.Name(); ok { + if err := group.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`generated: validator failed for field "Group.name": %w`, err)} + } + } + if v, ok := gu.mutation.PrimaryLocation(); ok { + if err := group.PrimaryLocationValidator(v); err != nil { + return &ValidationError{Name: "primary_location", err: fmt.Errorf(`generated: validator failed for field "Group.primary_location": %w`, err)} + } + } + if v, ok := gu.mutation.Region(); ok { + if err := group.RegionValidator(v); err != nil { + return &ValidationError{Name: "region", err: fmt.Errorf(`generated: validator failed for field "Group.region": %w`, err)} + } + } + return nil +} + +func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := gu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeString)) + if ps := gu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if gu.mutation.CreatedAtCleared() { + _spec.ClearField(group.FieldCreatedAt, field.TypeTime) + } + if value, ok := gu.mutation.UpdatedAt(); ok { + _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value) + } + if gu.mutation.UpdatedAtCleared() { + _spec.ClearField(group.FieldUpdatedAt, field.TypeTime) + } + if gu.mutation.CreatedByCleared() { + _spec.ClearField(group.FieldCreatedBy, field.TypeString) + } + if value, ok := gu.mutation.UpdatedBy(); ok { + _spec.SetField(group.FieldUpdatedBy, field.TypeString, value) + } + if gu.mutation.UpdatedByCleared() { + _spec.ClearField(group.FieldUpdatedBy, field.TypeString) + } + if value, ok := gu.mutation.DeletedAt(); ok { + _spec.SetField(group.FieldDeletedAt, field.TypeTime, value) + } + if gu.mutation.DeletedAtCleared() { + _spec.ClearField(group.FieldDeletedAt, field.TypeTime) + } + if value, ok := gu.mutation.DeletedBy(); ok { + 
_spec.SetField(group.FieldDeletedBy, field.TypeString, value) + } + if gu.mutation.DeletedByCleared() { + _spec.ClearField(group.FieldDeletedBy, field.TypeString) + } + if value, ok := gu.mutation.Name(); ok { + _spec.SetField(group.FieldName, field.TypeString, value) + } + if value, ok := gu.mutation.Description(); ok { + _spec.SetField(group.FieldDescription, field.TypeString, value) + } + if gu.mutation.DescriptionCleared() { + _spec.ClearField(group.FieldDescription, field.TypeString) + } + if value, ok := gu.mutation.PrimaryLocation(); ok { + _spec.SetField(group.FieldPrimaryLocation, field.TypeString, value) + } + if value, ok := gu.mutation.Locations(); ok { + _spec.SetField(group.FieldLocations, field.TypeJSON, value) + } + if value, ok := gu.mutation.AppendedLocations(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, group.FieldLocations, value) + }) + } + if gu.mutation.LocationsCleared() { + _spec.ClearField(group.FieldLocations, field.TypeJSON) + } + if value, ok := gu.mutation.Token(); ok { + _spec.SetField(group.FieldToken, field.TypeString, value) + } + if gu.mutation.TokenCleared() { + _spec.ClearField(group.FieldToken, field.TypeString) + } + if value, ok := gu.mutation.Region(); ok { + _spec.SetField(group.FieldRegion, field.TypeEnum, value) + } + if gu.mutation.DatabasesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DatabasesTable, + Columns: []string{group.DatabasesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(database.FieldID, field.TypeString), + }, + } + edge.Schema = gu.schemaConfig.Database + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := gu.mutation.RemovedDatabasesIDs(); len(nodes) > 0 && !gu.mutation.DatabasesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DatabasesTable, + Columns: []string{group.DatabasesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(database.FieldID, field.TypeString), + }, + } + edge.Schema = gu.schemaConfig.Database + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := gu.mutation.DatabasesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DatabasesTable, + Columns: []string{group.DatabasesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(database.FieldID, field.TypeString), + }, + } + edge.Schema = gu.schemaConfig.Database + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _spec.Node.Schema = gu.schemaConfig.Group + ctx = internal.NewSchemaConfigContext(ctx, gu.schemaConfig) + if n, err = sqlgraph.UpdateNodes(ctx, gu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{group.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + gu.mutation.done = true + return n, nil +} + +// GroupUpdateOne is the builder for updating a single Group entity. +type GroupUpdateOne struct { + config + fields []string + hooks []Hook + mutation *GroupMutation +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (guo *GroupUpdateOne) SetUpdatedAt(t time.Time) *GroupUpdateOne { + guo.mutation.SetUpdatedAt(t) + return guo +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (guo *GroupUpdateOne) ClearUpdatedAt() *GroupUpdateOne { + guo.mutation.ClearUpdatedAt() + return guo +} + +// SetUpdatedBy sets the "updated_by" field. +func (guo *GroupUpdateOne) SetUpdatedBy(s string) *GroupUpdateOne { + guo.mutation.SetUpdatedBy(s) + return guo +} + +// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil. +func (guo *GroupUpdateOne) SetNillableUpdatedBy(s *string) *GroupUpdateOne { + if s != nil { + guo.SetUpdatedBy(*s) + } + return guo +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (guo *GroupUpdateOne) ClearUpdatedBy() *GroupUpdateOne { + guo.mutation.ClearUpdatedBy() + return guo +} + +// SetDeletedAt sets the "deleted_at" field. +func (guo *GroupUpdateOne) SetDeletedAt(t time.Time) *GroupUpdateOne { + guo.mutation.SetDeletedAt(t) + return guo +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (guo *GroupUpdateOne) SetNillableDeletedAt(t *time.Time) *GroupUpdateOne { + if t != nil { + guo.SetDeletedAt(*t) + } + return guo +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (guo *GroupUpdateOne) ClearDeletedAt() *GroupUpdateOne { + guo.mutation.ClearDeletedAt() + return guo +} + +// SetDeletedBy sets the "deleted_by" field. +func (guo *GroupUpdateOne) SetDeletedBy(s string) *GroupUpdateOne { + guo.mutation.SetDeletedBy(s) + return guo +} + +// SetNillableDeletedBy sets the "deleted_by" field if the given value is not nil. +func (guo *GroupUpdateOne) SetNillableDeletedBy(s *string) *GroupUpdateOne { + if s != nil { + guo.SetDeletedBy(*s) + } + return guo +} + +// ClearDeletedBy clears the value of the "deleted_by" field. +func (guo *GroupUpdateOne) ClearDeletedBy() *GroupUpdateOne { + guo.mutation.ClearDeletedBy() + return guo +} + +// SetName sets the "name" field. +func (guo *GroupUpdateOne) SetName(s string) *GroupUpdateOne { + guo.mutation.SetName(s) + return guo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (guo *GroupUpdateOne) SetNillableName(s *string) *GroupUpdateOne { + if s != nil { + guo.SetName(*s) + } + return guo +} + +// SetDescription sets the "description" field. +func (guo *GroupUpdateOne) SetDescription(s string) *GroupUpdateOne { + guo.mutation.SetDescription(s) + return guo +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (guo *GroupUpdateOne) SetNillableDescription(s *string) *GroupUpdateOne { + if s != nil { + guo.SetDescription(*s) + } + return guo +} + +// ClearDescription clears the value of the "description" field. +func (guo *GroupUpdateOne) ClearDescription() *GroupUpdateOne { + guo.mutation.ClearDescription() + return guo +} + +// SetPrimaryLocation sets the "primary_location" field. +func (guo *GroupUpdateOne) SetPrimaryLocation(s string) *GroupUpdateOne { + guo.mutation.SetPrimaryLocation(s) + return guo +} + +// SetNillablePrimaryLocation sets the "primary_location" field if the given value is not nil. +func (guo *GroupUpdateOne) SetNillablePrimaryLocation(s *string) *GroupUpdateOne { + if s != nil { + guo.SetPrimaryLocation(*s) + } + return guo +} + +// SetLocations sets the "locations" field. 
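// A sketch (not generated output): the locations JSON column can be replaced with
// SetLocations or extended in place with AppendLocations; id and the location
// codes below are hypothetical:
//
//	grp, err := client.Group.UpdateOneID(id).
//		AppendLocations([]string{"fra", "ams"}).
//		Save(ctx)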
+func (guo *GroupUpdateOne) SetLocations(s []string) *GroupUpdateOne { + guo.mutation.SetLocations(s) + return guo +} + +// AppendLocations appends s to the "locations" field. +func (guo *GroupUpdateOne) AppendLocations(s []string) *GroupUpdateOne { + guo.mutation.AppendLocations(s) + return guo +} + +// ClearLocations clears the value of the "locations" field. +func (guo *GroupUpdateOne) ClearLocations() *GroupUpdateOne { + guo.mutation.ClearLocations() + return guo +} + +// SetToken sets the "token" field. +func (guo *GroupUpdateOne) SetToken(s string) *GroupUpdateOne { + guo.mutation.SetToken(s) + return guo +} + +// SetNillableToken sets the "token" field if the given value is not nil. +func (guo *GroupUpdateOne) SetNillableToken(s *string) *GroupUpdateOne { + if s != nil { + guo.SetToken(*s) + } + return guo +} + +// ClearToken clears the value of the "token" field. +func (guo *GroupUpdateOne) ClearToken() *GroupUpdateOne { + guo.mutation.ClearToken() + return guo +} + +// SetRegion sets the "region" field. +func (guo *GroupUpdateOne) SetRegion(e enums.Region) *GroupUpdateOne { + guo.mutation.SetRegion(e) + return guo +} + +// SetNillableRegion sets the "region" field if the given value is not nil. +func (guo *GroupUpdateOne) SetNillableRegion(e *enums.Region) *GroupUpdateOne { + if e != nil { + guo.SetRegion(*e) + } + return guo +} + +// AddDatabaseIDs adds the "databases" edge to the Database entity by IDs. +func (guo *GroupUpdateOne) AddDatabaseIDs(ids ...string) *GroupUpdateOne { + guo.mutation.AddDatabaseIDs(ids...) + return guo +} + +// AddDatabases adds the "databases" edges to the Database entity. +func (guo *GroupUpdateOne) AddDatabases(d ...*Database) *GroupUpdateOne { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return guo.AddDatabaseIDs(ids...) +} + +// Mutation returns the GroupMutation object of the builder. +func (guo *GroupUpdateOne) Mutation() *GroupMutation { + return guo.mutation +} + +// ClearDatabases clears all "databases" edges to the Database entity. +func (guo *GroupUpdateOne) ClearDatabases() *GroupUpdateOne { + guo.mutation.ClearDatabases() + return guo +} + +// RemoveDatabaseIDs removes the "databases" edge to Database entities by IDs. +func (guo *GroupUpdateOne) RemoveDatabaseIDs(ids ...string) *GroupUpdateOne { + guo.mutation.RemoveDatabaseIDs(ids...) + return guo +} + +// RemoveDatabases removes "databases" edges to Database entities. +func (guo *GroupUpdateOne) RemoveDatabases(d ...*Database) *GroupUpdateOne { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return guo.RemoveDatabaseIDs(ids...) +} + +// Where appends a list predicates to the GroupUpdate builder. +func (guo *GroupUpdateOne) Where(ps ...predicate.Group) *GroupUpdateOne { + guo.mutation.Where(ps...) + return guo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (guo *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOne { + guo.fields = append([]string{field}, fields...) + return guo +} + +// Save executes the query and returns the updated Group entity. +func (guo *GroupUpdateOne) Save(ctx context.Context) (*Group, error) { + if err := guo.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, guo.sqlSave, guo.mutation, guo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
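// A sketch (not generated output): Select (defined above) narrows the columns
// scanned back into the returned entity, and SaveX panics instead of returning
// the error; id and the description value are hypothetical:
//
//	grp := client.Group.UpdateOneID(id).
//		SetDescription("primary control-plane group").
//		Select(group.FieldName, group.FieldDescription).
//		SaveX(ctx)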
+func (guo *GroupUpdateOne) SaveX(ctx context.Context) *Group { + node, err := guo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (guo *GroupUpdateOne) Exec(ctx context.Context) error { + _, err := guo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (guo *GroupUpdateOne) ExecX(ctx context.Context) { + if err := guo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (guo *GroupUpdateOne) defaults() error { + if _, ok := guo.mutation.UpdatedAt(); !ok && !guo.mutation.UpdatedAtCleared() { + if group.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("generated: uninitialized group.UpdateDefaultUpdatedAt (forgotten import generated/runtime?)") + } + v := group.UpdateDefaultUpdatedAt() + guo.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (guo *GroupUpdateOne) check() error { + if v, ok := guo.mutation.Name(); ok { + if err := group.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`generated: validator failed for field "Group.name": %w`, err)} + } + } + if v, ok := guo.mutation.PrimaryLocation(); ok { + if err := group.PrimaryLocationValidator(v); err != nil { + return &ValidationError{Name: "primary_location", err: fmt.Errorf(`generated: validator failed for field "Group.primary_location": %w`, err)} + } + } + if v, ok := guo.mutation.Region(); ok { + if err := group.RegionValidator(v); err != nil { + return &ValidationError{Name: "region", err: fmt.Errorf(`generated: validator failed for field "Group.region": %w`, err)} + } + } + return nil +} + +func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error) { + if err := guo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeString)) + id, ok := guo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`generated: missing "Group.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := guo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, group.FieldID) + for _, f := range fields { + if !group.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("generated: invalid field %q for query", f)} + } + if f != group.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := guo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if guo.mutation.CreatedAtCleared() { + _spec.ClearField(group.FieldCreatedAt, field.TypeTime) + } + if value, ok := guo.mutation.UpdatedAt(); ok { + _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value) + } + if guo.mutation.UpdatedAtCleared() { + _spec.ClearField(group.FieldUpdatedAt, field.TypeTime) + } + if guo.mutation.CreatedByCleared() { + _spec.ClearField(group.FieldCreatedBy, field.TypeString) + } + if value, ok := guo.mutation.UpdatedBy(); ok { + _spec.SetField(group.FieldUpdatedBy, field.TypeString, value) + } + if guo.mutation.UpdatedByCleared() { + _spec.ClearField(group.FieldUpdatedBy, field.TypeString) + } + if value, ok := guo.mutation.DeletedAt(); ok { + _spec.SetField(group.FieldDeletedAt, field.TypeTime, value) + } + if guo.mutation.DeletedAtCleared() 
{ + _spec.ClearField(group.FieldDeletedAt, field.TypeTime) + } + if value, ok := guo.mutation.DeletedBy(); ok { + _spec.SetField(group.FieldDeletedBy, field.TypeString, value) + } + if guo.mutation.DeletedByCleared() { + _spec.ClearField(group.FieldDeletedBy, field.TypeString) + } + if value, ok := guo.mutation.Name(); ok { + _spec.SetField(group.FieldName, field.TypeString, value) + } + if value, ok := guo.mutation.Description(); ok { + _spec.SetField(group.FieldDescription, field.TypeString, value) + } + if guo.mutation.DescriptionCleared() { + _spec.ClearField(group.FieldDescription, field.TypeString) + } + if value, ok := guo.mutation.PrimaryLocation(); ok { + _spec.SetField(group.FieldPrimaryLocation, field.TypeString, value) + } + if value, ok := guo.mutation.Locations(); ok { + _spec.SetField(group.FieldLocations, field.TypeJSON, value) + } + if value, ok := guo.mutation.AppendedLocations(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, group.FieldLocations, value) + }) + } + if guo.mutation.LocationsCleared() { + _spec.ClearField(group.FieldLocations, field.TypeJSON) + } + if value, ok := guo.mutation.Token(); ok { + _spec.SetField(group.FieldToken, field.TypeString, value) + } + if guo.mutation.TokenCleared() { + _spec.ClearField(group.FieldToken, field.TypeString) + } + if value, ok := guo.mutation.Region(); ok { + _spec.SetField(group.FieldRegion, field.TypeEnum, value) + } + if guo.mutation.DatabasesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DatabasesTable, + Columns: []string{group.DatabasesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(database.FieldID, field.TypeString), + }, + } + edge.Schema = guo.schemaConfig.Database + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := guo.mutation.RemovedDatabasesIDs(); len(nodes) > 0 && !guo.mutation.DatabasesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DatabasesTable, + Columns: []string{group.DatabasesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(database.FieldID, field.TypeString), + }, + } + edge.Schema = guo.schemaConfig.Database + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := guo.mutation.DatabasesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DatabasesTable, + Columns: []string{group.DatabasesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(database.FieldID, field.TypeString), + }, + } + edge.Schema = guo.schemaConfig.Database + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _spec.Node.Schema = guo.schemaConfig.Group + ctx = internal.NewSchemaConfigContext(ctx, guo.schemaConfig) + _node = &Group{config: guo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, guo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{group.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + guo.mutation.done = true + return _node, nil +} diff --git a/internal/ent/generated/hook/hook.go b/internal/ent/generated/hook/hook.go new file 
mode 100644 index 0000000..f236dd9 --- /dev/null +++ b/internal/ent/generated/hook/hook.go @@ -0,0 +1,211 @@ +// Code generated by ent, DO NOT EDIT. + +package hook + +import ( + "context" + "fmt" + + "github.com/datumforge/geodetic/internal/ent/generated" +) + +// The DatabaseFunc type is an adapter to allow the use of ordinary +// function as Database mutator. +type DatabaseFunc func(context.Context, *generated.DatabaseMutation) (generated.Value, error) + +// Mutate calls f(ctx, m). +func (f DatabaseFunc) Mutate(ctx context.Context, m generated.Mutation) (generated.Value, error) { + if mv, ok := m.(*generated.DatabaseMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *generated.DatabaseMutation", m) +} + +// The GroupFunc type is an adapter to allow the use of ordinary +// function as Group mutator. +type GroupFunc func(context.Context, *generated.GroupMutation) (generated.Value, error) + +// Mutate calls f(ctx, m). +func (f GroupFunc) Mutate(ctx context.Context, m generated.Mutation) (generated.Value, error) { + if mv, ok := m.(*generated.GroupMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *generated.GroupMutation", m) +} + +// Condition is a hook condition function. +type Condition func(context.Context, generated.Mutation) bool + +// And groups conditions with the AND operator. +func And(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m generated.Mutation) bool { + if !first(ctx, m) || !second(ctx, m) { + return false + } + for _, cond := range rest { + if !cond(ctx, m) { + return false + } + } + return true + } +} + +// Or groups conditions with the OR operator. +func Or(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m generated.Mutation) bool { + if first(ctx, m) || second(ctx, m) { + return true + } + for _, cond := range rest { + if cond(ctx, m) { + return true + } + } + return false + } +} + +// Not negates a given condition. +func Not(cond Condition) Condition { + return func(ctx context.Context, m generated.Mutation) bool { + return !cond(ctx, m) + } +} + +// HasOp is a condition testing mutation operation. +func HasOp(op generated.Op) Condition { + return func(_ context.Context, m generated.Mutation) bool { + return m.Op().Is(op) + } +} + +// HasAddedFields is a condition validating `.AddedField` on fields. +func HasAddedFields(field string, fields ...string) Condition { + return func(_ context.Context, m generated.Mutation) bool { + if _, exists := m.AddedField(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.AddedField(field); !exists { + return false + } + } + return true + } +} + +// HasClearedFields is a condition validating `.FieldCleared` on fields. +func HasClearedFields(field string, fields ...string) Condition { + return func(_ context.Context, m generated.Mutation) bool { + if exists := m.FieldCleared(field); !exists { + return false + } + for _, field := range fields { + if exists := m.FieldCleared(field); !exists { + return false + } + } + return true + } +} + +// HasFields is a condition validating `.Field` on fields. 
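// For instance (an illustrative sketch; AuditHook stands in for any generated.Hook
// you might define):
//
//	hook.If(AuditHook, And(HasOp(generated.OpUpdateOne), HasFields("name")))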
+func HasFields(field string, fields ...string) Condition { + return func(_ context.Context, m generated.Mutation) bool { + if _, exists := m.Field(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.Field(field); !exists { + return false + } + } + return true + } +} + +// If executes the given hook under condition. +// +// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) +func If(hk generated.Hook, cond Condition) generated.Hook { + return func(next generated.Mutator) generated.Mutator { + return generated.MutateFunc(func(ctx context.Context, m generated.Mutation) (generated.Value, error) { + if cond(ctx, m) { + return hk(next).Mutate(ctx, m) + } + return next.Mutate(ctx, m) + }) + } +} + +// On executes the given hook only for the given operation. +// +// hook.On(Log, generated.Delete|generated.Create) +func On(hk generated.Hook, op generated.Op) generated.Hook { + return If(hk, HasOp(op)) +} + +// Unless skips the given hook only for the given operation. +// +// hook.Unless(Log, generated.Update|generated.UpdateOne) +func Unless(hk generated.Hook, op generated.Op) generated.Hook { + return If(hk, Not(HasOp(op))) +} + +// FixedError is a hook returning a fixed error. +func FixedError(err error) generated.Hook { + return func(generated.Mutator) generated.Mutator { + return generated.MutateFunc(func(context.Context, generated.Mutation) (generated.Value, error) { + return nil, err + }) + } +} + +// Reject returns a hook that rejects all operations that match op. +// +// func (T) Hooks() []generated.Hook { +// return []generated.Hook{ +// Reject(generated.Delete|generated.Update), +// } +// } +func Reject(op generated.Op) generated.Hook { + hk := FixedError(fmt.Errorf("%s operation is not allowed", op)) + return On(hk, op) +} + +// Chain acts as a list of hooks and is effectively immutable. +// Once created, it will always hold the same set of hooks in the same order. +type Chain struct { + hooks []generated.Hook +} + +// NewChain creates a new chain of hooks. +func NewChain(hooks ...generated.Hook) Chain { + return Chain{append([]generated.Hook(nil), hooks...)} +} + +// Hook chains the list of hooks and returns the final hook. +func (c Chain) Hook() generated.Hook { + return func(mutator generated.Mutator) generated.Mutator { + for i := len(c.hooks) - 1; i >= 0; i-- { + mutator = c.hooks[i](mutator) + } + return mutator + } +} + +// Append extends a chain, adding the specified hook +// as the last ones in the mutation flow. +func (c Chain) Append(hooks ...generated.Hook) Chain { + newHooks := make([]generated.Hook, 0, len(c.hooks)+len(hooks)) + newHooks = append(newHooks, c.hooks...) + newHooks = append(newHooks, hooks...) + return Chain{newHooks} +} + +// Extend extends a chain, adding the specified chain +// as the last ones in the mutation flow. +func (c Chain) Extend(chain Chain) Chain { + return c.Append(chain.hooks...) +} diff --git a/internal/ent/generated/intercept/intercept.go b/internal/ent/generated/intercept/intercept.go new file mode 100644 index 0000000..93f8df6 --- /dev/null +++ b/internal/ent/generated/intercept/intercept.go @@ -0,0 +1,179 @@ +// Code generated by ent, DO NOT EDIT. 
diff --git a/internal/ent/generated/intercept/intercept.go b/internal/ent/generated/intercept/intercept.go
new file mode 100644
index 0000000..93f8df6
--- /dev/null
+++ b/internal/ent/generated/intercept/intercept.go
@@ -0,0 +1,179 @@
+// Code generated by ent, DO NOT EDIT.
+
+package intercept
+
+import (
+	"context"
+	"fmt"
+
+	"entgo.io/ent/dialect/sql"
+	"github.com/datumforge/geodetic/internal/ent/generated"
+	"github.com/datumforge/geodetic/internal/ent/generated/database"
+	"github.com/datumforge/geodetic/internal/ent/generated/group"
+	"github.com/datumforge/geodetic/internal/ent/generated/predicate"
+)
+
+// The Query interface represents an operation that queries a graph.
+// By using this interface, users can write generic code that manipulates
+// query builders of different types.
+type Query interface {
+	// Type returns the string representation of the query type.
+	Type() string
+	// Limit the number of records to be returned by this query.
+	Limit(int)
+	// Offset to start from.
+	Offset(int)
+	// Unique configures the query builder to filter duplicate records.
+	Unique(bool)
+	// Order specifies how the records should be ordered.
+	Order(...func(*sql.Selector))
+	// WhereP appends storage-level predicates to the query builder. Using this method, users
+	// can use type-assertion to append predicates that do not depend on any generated package.
+	WhereP(...func(*sql.Selector))
+}
+
+// The Func type is an adapter that allows ordinary functions to be used as interceptors.
+// Unlike traversal functions, interceptors are skipped during graph traversals. Note that the
+// implementation of Func is different from the one defined in entgo.io/ent.InterceptFunc.
+type Func func(context.Context, Query) error
+
+// Intercept calls f(ctx, q) and then applies the next Querier.
+func (f Func) Intercept(next generated.Querier) generated.Querier {
+	return generated.QuerierFunc(func(ctx context.Context, q generated.Query) (generated.Value, error) {
+		query, err := NewQuery(q)
+		if err != nil {
+			return nil, err
+		}
+		if err := f(ctx, query); err != nil {
+			return nil, err
+		}
+		return next.Query(ctx, q)
+	})
+}
+
+// The TraverseFunc type is an adapter to allow the use of an ordinary function as a Traverser.
+// If f is a function with the appropriate signature, TraverseFunc(f) is a Traverser that calls f.
+type TraverseFunc func(context.Context, Query) error
+
+// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
+func (f TraverseFunc) Intercept(next generated.Querier) generated.Querier {
+	return next
+}
+
+// Traverse calls f(ctx, q).
+func (f TraverseFunc) Traverse(ctx context.Context, q generated.Query) error {
+	query, err := NewQuery(q)
+	if err != nil {
+		return err
+	}
+	return f(ctx, query)
+}
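Before the per-type adapters that follow, note that Func is the generic entry point: one closure can observe every query in the graph through the Query interface above. A sketch of registering such an interceptor (assuming the generated Client exposes ent's usual Intercept registration; the 1000-row cap is an invented policy):

package dbtools // hypothetical helper package

import (
	"context"

	"github.com/datumforge/geodetic/internal/ent/generated"
	"github.com/datumforge/geodetic/internal/ent/generated/intercept"
)

// capResults bounds every query in the graph to at most 1000 rows.
func capResults(client *generated.Client) {
	client.Intercept(intercept.Func(func(ctx context.Context, q intercept.Query) error {
		q.Limit(1000)
		return nil
	}))
}

TraverseFunc's Intercept is deliberately a pass-through, so traversers act only through Traverse during graph walks.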
+// The DatabaseFunc type is an adapter to allow the use of an ordinary function as a Querier.
+type DatabaseFunc func(context.Context, *generated.DatabaseQuery) (generated.Value, error)
+
+// Query calls f(ctx, q).
+func (f DatabaseFunc) Query(ctx context.Context, q generated.Query) (generated.Value, error) {
+	if q, ok := q.(*generated.DatabaseQuery); ok {
+		return f(ctx, q)
+	}
+	return nil, fmt.Errorf("unexpected query type %T. expect *generated.DatabaseQuery", q)
+}
+
+// The TraverseDatabase type is an adapter to allow the use of an ordinary function as a Traverser.
+type TraverseDatabase func(context.Context, *generated.DatabaseQuery) error
+
+// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
+func (f TraverseDatabase) Intercept(next generated.Querier) generated.Querier {
+	return next
+}
+
+// Traverse calls f(ctx, q).
+func (f TraverseDatabase) Traverse(ctx context.Context, q generated.Query) error {
+	if q, ok := q.(*generated.DatabaseQuery); ok {
+		return f(ctx, q)
+	}
+	return fmt.Errorf("unexpected query type %T. expect *generated.DatabaseQuery", q)
+}
+
+// The GroupFunc type is an adapter to allow the use of an ordinary function as a Querier.
+type GroupFunc func(context.Context, *generated.GroupQuery) (generated.Value, error)
+
+// Query calls f(ctx, q).
+func (f GroupFunc) Query(ctx context.Context, q generated.Query) (generated.Value, error) {
+	if q, ok := q.(*generated.GroupQuery); ok {
+		return f(ctx, q)
+	}
+	return nil, fmt.Errorf("unexpected query type %T. expect *generated.GroupQuery", q)
+}
+
+// The TraverseGroup type is an adapter to allow the use of an ordinary function as a Traverser.
+type TraverseGroup func(context.Context, *generated.GroupQuery) error
+
+// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
+func (f TraverseGroup) Intercept(next generated.Querier) generated.Querier {
+	return next
+}
+
+// Traverse calls f(ctx, q).
+func (f TraverseGroup) Traverse(ctx context.Context, q generated.Query) error {
+	if q, ok := q.(*generated.GroupQuery); ok {
+		return f(ctx, q)
+	}
+	return fmt.Errorf("unexpected query type %T. expect *generated.GroupQuery", q)
+}
+
+// NewQuery returns the generic Query interface for the given typed query.
+func NewQuery(q generated.Query) (Query, error) {
+	switch q := q.(type) {
+	case *generated.DatabaseQuery:
+		return &query[*generated.DatabaseQuery, predicate.Database, database.OrderOption]{typ: generated.TypeDatabase, tq: q}, nil
+	case *generated.GroupQuery:
+		return &query[*generated.GroupQuery, predicate.Group, group.OrderOption]{typ: generated.TypeGroup, tq: q}, nil
+	default:
+		return nil, fmt.Errorf("unknown query type %T", q)
+	}
+}
+
+type query[T any, P ~func(*sql.Selector), R ~func(*sql.Selector)] struct {
+	typ string
+	tq  interface {
+		Limit(int) T
+		Offset(int) T
+		Unique(bool) T
+		Order(...R) T
+		Where(...P) T
+	}
+}
+
+func (q query[T, P, R]) Type() string {
+	return q.typ
+}
+
+func (q query[T, P, R]) Limit(limit int) {
+	q.tq.Limit(limit)
+}
+
+func (q query[T, P, R]) Offset(offset int) {
+	q.tq.Offset(offset)
+}
+
+func (q query[T, P, R]) Unique(unique bool) {
+	q.tq.Unique(unique)
+}
+
+func (q query[T, P, R]) Order(orders ...func(*sql.Selector)) {
+	rs := make([]R, len(orders))
+	for i := range orders {
+		rs[i] = orders[i]
+	}
+	q.tq.Order(rs...)
+}
+
+func (q query[T, P, R]) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]P, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	q.tq.Where(p...)
+}
diff --git a/internal/ent/generated/internal/schema.go b/internal/ent/generated/internal/schema.go
new file mode 100644
index 0000000..e6d4a1e
--- /dev/null
+++ b/internal/ent/generated/internal/schema.go
@@ -0,0 +1,9 @@
+// Code generated by ent, DO NOT EDIT.
+
+//go:build tools
+// +build tools
+
+// Package internal holds a loadable version of the latest schema.
+package internal + +const Schema = "{\"Schema\":\"github.com/datumforge/geodetic/internal/ent/schema\",\"Package\":\"github.com/datumforge/geodetic/internal/ent/generated\",\"Schemas\":[{\"name\":\"Database\",\"config\":{\"Table\":\"\"},\"edges\":[{\"name\":\"group\",\"type\":\"Group\",\"field\":\"group_id\",\"ref_name\":\"databases\",\"unique\":true,\"inverse\":true,\"required\":true}],\"fields\":[{\"name\":\"created_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":0},\"annotations\":{\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"updated_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"default\":true,\"default_kind\":19,\"update_default\":true,\"position\":{\"Index\":1,\"MixedIn\":true,\"MixinIndex\":0},\"annotations\":{\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"created_by\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"immutable\":true,\"position\":{\"Index\":2,\"MixedIn\":true,\"MixinIndex\":0},\"annotations\":{\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"updated_by\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"position\":{\"Index\":3,\"MixedIn\":true,\"MixinIndex\":0},\"annotations\":{\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"deleted_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"position\":{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":1},\"annotations\":{\"EntGQL\":{\"Skip\":48},\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"deleted_by\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"position\":{\"Index\":1,\"MixedIn\":true,\"MixinIndex\":1},\"annotations\":{\"EntGQL\":{\"Skip\":48},\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\
"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"id\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":2},\"annotations\":{\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"organization_id\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":0,\"MixedIn\":false,\"MixinIndex\":0},\"comment\":\"the ID of the organization\"},{\"name\":\"name\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":1,\"MixedIn\":false,\"MixinIndex\":0},\"comment\":\"the name to the database\"},{\"name\":\"geo\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"position\":{\"Index\":2,\"MixedIn\":false,\"MixinIndex\":0},\"comment\":\"the geo location of the database\"},{\"name\":\"dsn\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":3,\"MixedIn\":false,\"MixinIndex\":0},\"comment\":\"the DSN to the database\"},{\"name\":\"group_id\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"position\":{\"Index\":4,\"MixedIn\":false,\"MixinIndex\":0},\"comment\":\"the ID of the group\"},{\"name\":\"token\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"position\":{\"Index\":5,\"MixedIn\":false,\"MixinIndex\":0},\"sensitive\":true,\"comment\":\"the auth token used to connect to the database\"},{\"name\":\"status\",\"type\":{\"Type\":6,\"Ident\":\"enums.DatabaseStatus\",\"PkgPath\":\"github.com/datumforge/geodetic/pkg/enums\",\"PkgName\":\"enums\",\"Nillable\":false,\"RType\":{\"Name\":\"DatabaseStatus\",\"Ident\":\"enums.DatabaseStatus\",\"Kind\":24,\"PkgPath\":\"github.com/datumforge/geodetic/pkg/enums\",\"Methods\":{\"MarshalGQL\":{\"In\":[{\"Name\":\"Writer\",\"Ident\":\"io.Writer\",\"Kind\":20,\"PkgPath\":\"io\",\"Methods\":null}],\"Out\":[]},\"String\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalGQL\":{\"In\":[{\"Name\":\"\",\"Ident\":\"interface {}\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Values\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]string\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}]}}}},\"enums\":[{\"N\":\"ACTIVE\",\"V\":\"ACTIVE\"},{\"N\":\"CREATING\",\"V\":\"CREATING\"},{\"N\":\"DELETING\",\"V\":\"DELETING\"},{\"N\":\"DELETED\",\"V\":\"DELETED\"}],\"default\":true,\"default_value\":\"CREATING\",\"default_kind\":24,\"position\":{\"Index\":6,\"MixedIn\":false,\"MixinIndex\":0},\"comment\":\"status of the 
database\"},{\"name\":\"provider\",\"type\":{\"Type\":6,\"Ident\":\"enums.DatabaseProvider\",\"PkgPath\":\"github.com/datumforge/geodetic/pkg/enums\",\"PkgName\":\"enums\",\"Nillable\":false,\"RType\":{\"Name\":\"DatabaseProvider\",\"Ident\":\"enums.DatabaseProvider\",\"Kind\":24,\"PkgPath\":\"github.com/datumforge/geodetic/pkg/enums\",\"Methods\":{\"MarshalGQL\":{\"In\":[{\"Name\":\"Writer\",\"Ident\":\"io.Writer\",\"Kind\":20,\"PkgPath\":\"io\",\"Methods\":null}],\"Out\":[]},\"String\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalGQL\":{\"In\":[{\"Name\":\"\",\"Ident\":\"interface {}\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Values\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]string\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}]}}}},\"enums\":[{\"N\":\"LOCAL\",\"V\":\"LOCAL\"},{\"N\":\"TURSO\",\"V\":\"TURSO\"}],\"default\":true,\"default_value\":\"LOCAL\",\"default_kind\":24,\"position\":{\"Index\":7,\"MixedIn\":false,\"MixinIndex\":0},\"comment\":\"provider of the database\"}],\"indexes\":[{\"unique\":true,\"fields\":[\"organization_id\"],\"annotations\":{\"EntSQLIndexes\":{\"Desc\":false,\"DescColumns\":null,\"IncludeColumns\":null,\"OpClass\":\"\",\"OpClassColumns\":null,\"Prefix\":0,\"PrefixColumns\":null,\"Type\":\"\",\"Types\":null,\"Where\":\"deleted_at is NULL\"}}},{\"unique\":true,\"fields\":[\"name\"],\"annotations\":{\"EntSQLIndexes\":{\"Desc\":false,\"DescColumns\":null,\"IncludeColumns\":null,\"OpClass\":\"\",\"OpClassColumns\":null,\"Prefix\":0,\"PrefixColumns\":null,\"Type\":\"\",\"Types\":null,\"Where\":\"deleted_at is NULL\"}}}],\"hooks\":[{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":0},{\"Index\":0,\"MixedIn\":false,\"MixinIndex\":0},{\"Index\":1,\"MixedIn\":false,\"MixinIndex\":0}],\"annotations\":{\"EntGQL\":{\"MutationInputs\":[{\"IsCreate\":true},{}],\"QueryField\":{},\"RelayConnection\":true}}},{\"name\":\"Group\",\"config\":{\"Table\":\"\"},\"edges\":[{\"name\":\"databases\",\"type\":\"Database\",\"annotations\":{\"DATUM_CASCADE\":{\"Field\":\"Group\"}}}],\"fields\":[{\"name\":\"created_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":0},\"annotations\":{\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"updated_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"default\":true,\"default_kind\":19,\"update_default\":true,\"position\":{\"Index\":1,\"MixedIn\":true,\"MixinIndex\":0},\"annotations\":{\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"created_by\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"
immutable\":true,\"position\":{\"Index\":2,\"MixedIn\":true,\"MixinIndex\":0},\"annotations\":{\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"updated_by\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"position\":{\"Index\":3,\"MixedIn\":true,\"MixinIndex\":0},\"annotations\":{\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"id\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":1},\"annotations\":{\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"deleted_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"position\":{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":2},\"annotations\":{\"EntGQL\":{\"Skip\":48},\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"deleted_by\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"position\":{\"Index\":1,\"MixedIn\":true,\"MixinIndex\":2},\"annotations\":{\"EntGQL\":{\"Skip\":48},\"EntOAS\":{\"Create\":{\"Groups\":null,\"Policy\":0},\"Delete\":{\"Groups\":null,\"Policy\":0},\"Example\":null,\"Groups\":null,\"List\":{\"Groups\":null,\"Policy\":0},\"Read\":{\"Groups\":null,\"Policy\":0},\"ReadOnly\":true,\"Schema\":null,\"Skip\":false,\"Update\":{\"Groups\":null,\"Policy\":0}}}},{\"name\":\"name\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":0,\"MixedIn\":false,\"MixinIndex\":0},\"comment\":\"the name of the group in turso\"},{\"name\":\"description\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"position\":{\"Index\":1,\"MixedIn\":false,\"MixinIndex\":0},\"comment\":\"the description of the group\"},{\"name\":\"primary_location\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":2,\"MixedIn\":false,\"MixinIndex\":0},\"comment\":\"the primary of the 
group\"},{\"name\":\"locations\",\"type\":{\"Type\":3,\"Ident\":\"[]string\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":true,\"RType\":{\"Name\":\"\",\"Ident\":\"[]string\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":{}}},\"optional\":true,\"position\":{\"Index\":3,\"MixedIn\":false,\"MixinIndex\":0},\"comment\":\"the replica locations of the group\"},{\"name\":\"token\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"optional\":true,\"position\":{\"Index\":4,\"MixedIn\":false,\"MixinIndex\":0},\"sensitive\":true,\"comment\":\"the auth token used to connect to the group\"},{\"name\":\"region\",\"type\":{\"Type\":6,\"Ident\":\"enums.Region\",\"PkgPath\":\"github.com/datumforge/geodetic/pkg/enums\",\"PkgName\":\"enums\",\"Nillable\":false,\"RType\":{\"Name\":\"Region\",\"Ident\":\"enums.Region\",\"Kind\":24,\"PkgPath\":\"github.com/datumforge/geodetic/pkg/enums\",\"Methods\":{\"MarshalGQL\":{\"In\":[{\"Name\":\"Writer\",\"Ident\":\"io.Writer\",\"Kind\":20,\"PkgPath\":\"io\",\"Methods\":null}],\"Out\":[]},\"String\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalGQL\":{\"In\":[{\"Name\":\"\",\"Ident\":\"interface {}\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Values\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]string\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}]}}}},\"enums\":[{\"N\":\"AMER\",\"V\":\"AMER\"},{\"N\":\"EMEA\",\"V\":\"EMEA\"},{\"N\":\"APAC\",\"V\":\"APAC\"}],\"default\":true,\"default_value\":\"AMER\",\"default_kind\":24,\"position\":{\"Index\":5,\"MixedIn\":false,\"MixinIndex\":0},\"comment\":\"region the group\"}],\"indexes\":[{\"unique\":true,\"fields\":[\"name\"],\"annotations\":{\"EntSQLIndexes\":{\"Desc\":false,\"DescColumns\":null,\"IncludeColumns\":null,\"OpClass\":\"\",\"OpClassColumns\":null,\"Prefix\":0,\"PrefixColumns\":null,\"Type\":\"\",\"Types\":null,\"Where\":\"deleted_at is NULL\"}}}],\"hooks\":[{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":0},{\"Index\":0,\"MixedIn\":false,\"MixinIndex\":0},{\"Index\":1,\"MixedIn\":false,\"MixinIndex\":0},{\"Index\":2,\"MixedIn\":false,\"MixinIndex\":0}],\"annotations\":{\"EntGQL\":{\"MutationInputs\":[{\"IsCreate\":true},{}],\"QueryField\":{},\"RelayConnection\":true}}}],\"Features\":[\"sql/versioned-migration\",\"privacy\",\"schema/snapshot\",\"entql\",\"namedges\",\"sql/schemaconfig\",\"intercept\",\"namedges\"]}" diff --git a/internal/ent/generated/internal/schemaconfig.go b/internal/ent/generated/internal/schemaconfig.go new file mode 100644 index 0000000..6f23138 --- /dev/null +++ b/internal/ent/generated/internal/schemaconfig.go @@ -0,0 +1,25 @@ +// Code generated by ent, DO NOT EDIT. + +package internal + +import "context" + +// SchemaConfig represents alternative schema names for all tables +// that can be passed at runtime. +type SchemaConfig struct { + Database string // Database table. + Group string // Group table. +} + +type schemaCtxKey struct{} + +// SchemaConfigFromContext returns a SchemaConfig stored inside a context, or empty if there isn't one. +func SchemaConfigFromContext(ctx context.Context) SchemaConfig { + config, _ := ctx.Value(schemaCtxKey{}).(SchemaConfig) + return config +} + +// NewSchemaConfigContext returns a new context with the given SchemaConfig attached. 
+func NewSchemaConfigContext(parent context.Context, config SchemaConfig) context.Context { + return context.WithValue(parent, schemaCtxKey{}, config) +} diff --git a/internal/ent/generated/migrate/migrate.go b/internal/ent/generated/migrate/migrate.go new file mode 100644 index 0000000..d8d3bcb --- /dev/null +++ b/internal/ent/generated/migrate/migrate.go @@ -0,0 +1,96 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "context" + "fmt" + "io" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql/schema" +) + +var ( + // WithGlobalUniqueID sets the universal ids options to the migration. + // If this option is enabled, ent migration will allocate a 1<<32 range + // for the ids of each entity (table). + // Note that this option cannot be applied on tables that already exist. + WithGlobalUniqueID = schema.WithGlobalUniqueID + // WithDropColumn sets the drop column option to the migration. + // If this option is enabled, ent migration will drop old columns + // that were used for both fields and edges. This defaults to false. + WithDropColumn = schema.WithDropColumn + // WithDropIndex sets the drop index option to the migration. + // If this option is enabled, ent migration will drop old indexes + // that were defined in the schema. This defaults to false. + // Note that unique constraints are defined using `UNIQUE INDEX`, + // and therefore, it's recommended to enable this option to get more + // flexibility in the schema changes. + WithDropIndex = schema.WithDropIndex + // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. + WithForeignKeys = schema.WithForeignKeys +) + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv dialect.Driver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + return Create(ctx, s, Tables, opts...) +} + +// Create creates all table resources using the given schema driver. +func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Create(ctx, tables...) +} + +// Diff compares the state read from a database connection or migration directory with +// the state defined by the Ent schema. Changes will be written to new migration files. +func Diff(ctx context.Context, url string, opts ...schema.MigrateOption) error { + return NamedDiff(ctx, url, "changes", opts...) +} + +// NamedDiff compares the state read from a database connection or migration directory with +// the state defined by the Ent schema. Changes will be written to new named migration files. +func NamedDiff(ctx context.Context, url, name string, opts ...schema.MigrateOption) error { + return schema.Diff(ctx, url, name, Tables, opts...) +} + +// Diff creates a migration file containing the statements to resolve the diff +// between the Ent schema and the connected database. +func (s *Schema) Diff(ctx context.Context, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Diff(ctx, Tables...) 
+} + +// NamedDiff creates a named migration file containing the statements to resolve the diff +// between the Ent schema and the connected database. +func (s *Schema) NamedDiff(ctx context.Context, name string, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.NamedDiff(ctx, name, Tables...) +} + +// WriteTo writes the schema changes to w instead of running them against the database. +// +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// log.Fatal(err) +// } +func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { + return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) +} diff --git a/internal/ent/generated/migrate/schema.go b/internal/ent/generated/migrate/schema.go new file mode 100644 index 0000000..58207fc --- /dev/null +++ b/internal/ent/generated/migrate/schema.go @@ -0,0 +1,103 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/dialect/sql/schema" + "entgo.io/ent/schema/field" +) + +var ( + // DatabasesColumns holds the columns for the "databases" table. + DatabasesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString}, + {Name: "created_at", Type: field.TypeTime, Nullable: true}, + {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_by", Type: field.TypeString, Nullable: true}, + {Name: "updated_by", Type: field.TypeString, Nullable: true}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true}, + {Name: "deleted_by", Type: field.TypeString, Nullable: true}, + {Name: "organization_id", Type: field.TypeString}, + {Name: "name", Type: field.TypeString}, + {Name: "geo", Type: field.TypeString, Nullable: true}, + {Name: "dsn", Type: field.TypeString}, + {Name: "token", Type: field.TypeString, Nullable: true}, + {Name: "status", Type: field.TypeEnum, Enums: []string{"ACTIVE", "CREATING", "DELETING", "DELETED"}, Default: "CREATING"}, + {Name: "provider", Type: field.TypeEnum, Enums: []string{"LOCAL", "TURSO"}, Default: "LOCAL"}, + {Name: "group_id", Type: field.TypeString}, + } + // DatabasesTable holds the schema information for the "databases" table. + DatabasesTable = &schema.Table{ + Name: "databases", + Columns: DatabasesColumns, + PrimaryKey: []*schema.Column{DatabasesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "databases_groups_databases", + Columns: []*schema.Column{DatabasesColumns[14]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "database_organization_id", + Unique: true, + Columns: []*schema.Column{DatabasesColumns[7]}, + Annotation: &entsql.IndexAnnotation{ + Where: "deleted_at is NULL", + }, + }, + { + Name: "database_name", + Unique: true, + Columns: []*schema.Column{DatabasesColumns[8]}, + Annotation: &entsql.IndexAnnotation{ + Where: "deleted_at is NULL", + }, + }, + }, + } + // GroupsColumns holds the columns for the "groups" table. 
+ GroupsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString}, + {Name: "created_at", Type: field.TypeTime, Nullable: true}, + {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_by", Type: field.TypeString, Nullable: true}, + {Name: "updated_by", Type: field.TypeString, Nullable: true}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true}, + {Name: "deleted_by", Type: field.TypeString, Nullable: true}, + {Name: "name", Type: field.TypeString}, + {Name: "description", Type: field.TypeString, Nullable: true}, + {Name: "primary_location", Type: field.TypeString}, + {Name: "locations", Type: field.TypeJSON, Nullable: true}, + {Name: "token", Type: field.TypeString, Nullable: true}, + {Name: "region", Type: field.TypeEnum, Enums: []string{"AMER", "EMEA", "APAC"}, Default: "AMER"}, + } + // GroupsTable holds the schema information for the "groups" table. + GroupsTable = &schema.Table{ + Name: "groups", + Columns: GroupsColumns, + PrimaryKey: []*schema.Column{GroupsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "group_name", + Unique: true, + Columns: []*schema.Column{GroupsColumns[7]}, + Annotation: &entsql.IndexAnnotation{ + Where: "deleted_at is NULL", + }, + }, + }, + } + // Tables holds all the tables in the schema. + Tables = []*schema.Table{ + DatabasesTable, + GroupsTable, + } +) + +func init() { + DatabasesTable.ForeignKeys[0].RefTable = GroupsTable +} diff --git a/internal/ent/generated/mutation.go b/internal/ent/generated/mutation.go new file mode 100644 index 0000000..336c1ef --- /dev/null +++ b/internal/ent/generated/mutation.go @@ -0,0 +1,2484 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/geodetic/internal/ent/generated/predicate" + "github.com/datumforge/geodetic/pkg/enums" +) + +const ( + // Operation types. + OpCreate = ent.OpCreate + OpDelete = ent.OpDelete + OpDeleteOne = ent.OpDeleteOne + OpUpdate = ent.OpUpdate + OpUpdateOne = ent.OpUpdateOne + + // Node types. + TypeDatabase = "Database" + TypeGroup = "Group" +) + +// DatabaseMutation represents an operation that mutates the Database nodes in the graph. +type DatabaseMutation struct { + config + op Op + typ string + id *string + created_at *time.Time + updated_at *time.Time + created_by *string + updated_by *string + deleted_at *time.Time + deleted_by *string + organization_id *string + name *string + geo *string + dsn *string + token *string + status *enums.DatabaseStatus + provider *enums.DatabaseProvider + clearedFields map[string]struct{} + group *string + clearedgroup bool + done bool + oldValue func(context.Context) (*Database, error) + predicates []predicate.Database +} + +var _ ent.Mutation = (*DatabaseMutation)(nil) + +// databaseOption allows management of the mutation configuration using functional options. +type databaseOption func(*DatabaseMutation) + +// newDatabaseMutation creates new mutation for the Database entity. +func newDatabaseMutation(c config, op Op, opts ...databaseOption) *DatabaseMutation { + m := &DatabaseMutation{ + config: c, + op: op, + typ: TypeDatabase, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withDatabaseID sets the ID field of the mutation. 
+func withDatabaseID(id string) databaseOption {
+	return func(m *DatabaseMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *Database
+		)
+		m.oldValue = func(ctx context.Context) (*Database, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().Database.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withDatabase sets the old Database of the mutation.
+func withDatabase(node *Database) databaseOption {
+	return func(m *DatabaseMutation) {
+		m.oldValue = func(context.Context) (*Database, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m DatabaseMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m DatabaseMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("generated: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of Database entities.
+func (m *DatabaseMutation) SetID(id string) {
+	m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *DatabaseMutation) ID() (id string, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *DatabaseMutation) IDs(ctx context.Context) ([]string, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []string{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().Database.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *DatabaseMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *DatabaseMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the Database entity.
+// If the Database object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DatabaseMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (m *DatabaseMutation) ClearCreatedAt() { + m.created_at = nil + m.clearedFields[database.FieldCreatedAt] = struct{}{} +} + +// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. +func (m *DatabaseMutation) CreatedAtCleared() bool { + _, ok := m.clearedFields[database.FieldCreatedAt] + return ok +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *DatabaseMutation) ResetCreatedAt() { + m.created_at = nil + delete(m.clearedFields, database.FieldCreatedAt) +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *DatabaseMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *DatabaseMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DatabaseMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (m *DatabaseMutation) ClearUpdatedAt() { + m.updated_at = nil + m.clearedFields[database.FieldUpdatedAt] = struct{}{} +} + +// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. +func (m *DatabaseMutation) UpdatedAtCleared() bool { + _, ok := m.clearedFields[database.FieldUpdatedAt] + return ok +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *DatabaseMutation) ResetUpdatedAt() { + m.updated_at = nil + delete(m.clearedFields, database.FieldUpdatedAt) +} + +// SetCreatedBy sets the "created_by" field. +func (m *DatabaseMutation) SetCreatedBy(s string) { + m.created_by = &s +} + +// CreatedBy returns the value of the "created_by" field in the mutation. +func (m *DatabaseMutation) CreatedBy() (r string, exists bool) { + v := m.created_by + if v == nil { + return + } + return *v, true +} + +// OldCreatedBy returns the old "created_by" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *DatabaseMutation) OldCreatedBy(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedBy: %w", err) + } + return oldValue.CreatedBy, nil +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (m *DatabaseMutation) ClearCreatedBy() { + m.created_by = nil + m.clearedFields[database.FieldCreatedBy] = struct{}{} +} + +// CreatedByCleared returns if the "created_by" field was cleared in this mutation. +func (m *DatabaseMutation) CreatedByCleared() bool { + _, ok := m.clearedFields[database.FieldCreatedBy] + return ok +} + +// ResetCreatedBy resets all changes to the "created_by" field. +func (m *DatabaseMutation) ResetCreatedBy() { + m.created_by = nil + delete(m.clearedFields, database.FieldCreatedBy) +} + +// SetUpdatedBy sets the "updated_by" field. +func (m *DatabaseMutation) SetUpdatedBy(s string) { + m.updated_by = &s +} + +// UpdatedBy returns the value of the "updated_by" field in the mutation. +func (m *DatabaseMutation) UpdatedBy() (r string, exists bool) { + v := m.updated_by + if v == nil { + return + } + return *v, true +} + +// OldUpdatedBy returns the old "updated_by" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DatabaseMutation) OldUpdatedBy(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedBy: %w", err) + } + return oldValue.UpdatedBy, nil +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (m *DatabaseMutation) ClearUpdatedBy() { + m.updated_by = nil + m.clearedFields[database.FieldUpdatedBy] = struct{}{} +} + +// UpdatedByCleared returns if the "updated_by" field was cleared in this mutation. +func (m *DatabaseMutation) UpdatedByCleared() bool { + _, ok := m.clearedFields[database.FieldUpdatedBy] + return ok +} + +// ResetUpdatedBy resets all changes to the "updated_by" field. +func (m *DatabaseMutation) ResetUpdatedBy() { + m.updated_by = nil + delete(m.clearedFields, database.FieldUpdatedBy) +} + +// SetDeletedAt sets the "deleted_at" field. +func (m *DatabaseMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *DatabaseMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
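The Set/Old/Clear accessors in this stretch are normally driven from hooks rather than called by hand; the Old* variants exist so a hook can diff old values against new ones. A sketch of a hook that stamps updated_by on every Database mutation (the actor function is a stand-in for whatever auth context the application carries, and is an assumption of this example):

package schema // hypothetical

import (
	"context"

	"github.com/datumforge/geodetic/internal/ent/generated"
	"github.com/datumforge/geodetic/internal/ent/generated/hook"
)

// StampUpdatedBy copies the acting user onto every Database mutation.
func StampUpdatedBy(actor func(context.Context) (string, bool)) generated.Hook {
	return func(next generated.Mutator) generated.Mutator {
		return hook.DatabaseFunc(func(ctx context.Context, m *generated.DatabaseMutation) (generated.Value, error) {
			if id, ok := actor(ctx); ok {
				m.SetUpdatedBy(id)
			}
			return next.Mutate(ctx, m)
		})
	}
}

Such a hook would typically be registered through the generated client's Use method or from a schema's Hooks list.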
+func (m *DatabaseMutation) OldDeletedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *DatabaseMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[database.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *DatabaseMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[database.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *DatabaseMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, database.FieldDeletedAt) +} + +// SetDeletedBy sets the "deleted_by" field. +func (m *DatabaseMutation) SetDeletedBy(s string) { + m.deleted_by = &s +} + +// DeletedBy returns the value of the "deleted_by" field in the mutation. +func (m *DatabaseMutation) DeletedBy() (r string, exists bool) { + v := m.deleted_by + if v == nil { + return + } + return *v, true +} + +// OldDeletedBy returns the old "deleted_by" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DatabaseMutation) OldDeletedBy(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedBy: %w", err) + } + return oldValue.DeletedBy, nil +} + +// ClearDeletedBy clears the value of the "deleted_by" field. +func (m *DatabaseMutation) ClearDeletedBy() { + m.deleted_by = nil + m.clearedFields[database.FieldDeletedBy] = struct{}{} +} + +// DeletedByCleared returns if the "deleted_by" field was cleared in this mutation. +func (m *DatabaseMutation) DeletedByCleared() bool { + _, ok := m.clearedFields[database.FieldDeletedBy] + return ok +} + +// ResetDeletedBy resets all changes to the "deleted_by" field. +func (m *DatabaseMutation) ResetDeletedBy() { + m.deleted_by = nil + delete(m.clearedFields, database.FieldDeletedBy) +} + +// SetOrganizationID sets the "organization_id" field. +func (m *DatabaseMutation) SetOrganizationID(s string) { + m.organization_id = &s +} + +// OrganizationID returns the value of the "organization_id" field in the mutation. +func (m *DatabaseMutation) OrganizationID() (r string, exists bool) { + v := m.organization_id + if v == nil { + return + } + return *v, true +} + +// OldOrganizationID returns the old "organization_id" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *DatabaseMutation) OldOrganizationID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOrganizationID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOrganizationID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOrganizationID: %w", err) + } + return oldValue.OrganizationID, nil +} + +// ResetOrganizationID resets all changes to the "organization_id" field. +func (m *DatabaseMutation) ResetOrganizationID() { + m.organization_id = nil +} + +// SetName sets the "name" field. +func (m *DatabaseMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *DatabaseMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DatabaseMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *DatabaseMutation) ResetName() { + m.name = nil +} + +// SetGeo sets the "geo" field. +func (m *DatabaseMutation) SetGeo(s string) { + m.geo = &s +} + +// Geo returns the value of the "geo" field in the mutation. +func (m *DatabaseMutation) Geo() (r string, exists bool) { + v := m.geo + if v == nil { + return + } + return *v, true +} + +// OldGeo returns the old "geo" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DatabaseMutation) OldGeo(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGeo is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGeo requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGeo: %w", err) + } + return oldValue.Geo, nil +} + +// ClearGeo clears the value of the "geo" field. +func (m *DatabaseMutation) ClearGeo() { + m.geo = nil + m.clearedFields[database.FieldGeo] = struct{}{} +} + +// GeoCleared returns if the "geo" field was cleared in this mutation. +func (m *DatabaseMutation) GeoCleared() bool { + _, ok := m.clearedFields[database.FieldGeo] + return ok +} + +// ResetGeo resets all changes to the "geo" field. +func (m *DatabaseMutation) ResetGeo() { + m.geo = nil + delete(m.clearedFields, database.FieldGeo) +} + +// SetDsn sets the "dsn" field. +func (m *DatabaseMutation) SetDsn(s string) { + m.dsn = &s +} + +// Dsn returns the value of the "dsn" field in the mutation. 
+func (m *DatabaseMutation) Dsn() (r string, exists bool) { + v := m.dsn + if v == nil { + return + } + return *v, true +} + +// OldDsn returns the old "dsn" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DatabaseMutation) OldDsn(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDsn is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDsn requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDsn: %w", err) + } + return oldValue.Dsn, nil +} + +// ResetDsn resets all changes to the "dsn" field. +func (m *DatabaseMutation) ResetDsn() { + m.dsn = nil +} + +// SetGroupID sets the "group_id" field. +func (m *DatabaseMutation) SetGroupID(s string) { + m.group = &s +} + +// GroupID returns the value of the "group_id" field in the mutation. +func (m *DatabaseMutation) GroupID() (r string, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// OldGroupID returns the old "group_id" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DatabaseMutation) OldGroupID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGroupID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGroupID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGroupID: %w", err) + } + return oldValue.GroupID, nil +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *DatabaseMutation) ResetGroupID() { + m.group = nil +} + +// SetToken sets the "token" field. +func (m *DatabaseMutation) SetToken(s string) { + m.token = &s +} + +// Token returns the value of the "token" field in the mutation. +func (m *DatabaseMutation) Token() (r string, exists bool) { + v := m.token + if v == nil { + return + } + return *v, true +} + +// OldToken returns the old "token" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DatabaseMutation) OldToken(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldToken is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldToken requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldToken: %w", err) + } + return oldValue.Token, nil +} + +// ClearToken clears the value of the "token" field. +func (m *DatabaseMutation) ClearToken() { + m.token = nil + m.clearedFields[database.FieldToken] = struct{}{} +} + +// TokenCleared returns if the "token" field was cleared in this mutation. 
+func (m *DatabaseMutation) TokenCleared() bool { + _, ok := m.clearedFields[database.FieldToken] + return ok +} + +// ResetToken resets all changes to the "token" field. +func (m *DatabaseMutation) ResetToken() { + m.token = nil + delete(m.clearedFields, database.FieldToken) +} + +// SetStatus sets the "status" field. +func (m *DatabaseMutation) SetStatus(es enums.DatabaseStatus) { + m.status = &es +} + +// Status returns the value of the "status" field in the mutation. +func (m *DatabaseMutation) Status() (r enums.DatabaseStatus, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DatabaseMutation) OldStatus(ctx context.Context) (v enums.DatabaseStatus, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *DatabaseMutation) ResetStatus() { + m.status = nil +} + +// SetProvider sets the "provider" field. +func (m *DatabaseMutation) SetProvider(ep enums.DatabaseProvider) { + m.provider = &ep +} + +// Provider returns the value of the "provider" field in the mutation. +func (m *DatabaseMutation) Provider() (r enums.DatabaseProvider, exists bool) { + v := m.provider + if v == nil { + return + } + return *v, true +} + +// OldProvider returns the old "provider" field's value of the Database entity. +// If the Database object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DatabaseMutation) OldProvider(ctx context.Context) (v enums.DatabaseProvider, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldProvider is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldProvider requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldProvider: %w", err) + } + return oldValue.Provider, nil +} + +// ResetProvider resets all changes to the "provider" field. +func (m *DatabaseMutation) ResetProvider() { + m.provider = nil +} + +// ClearGroup clears the "group" edge to the Group entity. +func (m *DatabaseMutation) ClearGroup() { + m.clearedgroup = true + m.clearedFields[database.FieldGroupID] = struct{}{} +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *DatabaseMutation) GroupCleared() bool { + return m.clearedgroup +} + +// GroupIDs returns the "group" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. +func (m *DatabaseMutation) GroupIDs() (ids []string) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. 
+func (m *DatabaseMutation) ResetGroup() {
+	m.group = nil
+	m.clearedgroup = false
+}
+
+// Where appends a list of predicates to the DatabaseMutation builder.
+func (m *DatabaseMutation) Where(ps ...predicate.Database) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the DatabaseMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *DatabaseMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.Database, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *DatabaseMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *DatabaseMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (Database).
+func (m *DatabaseMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *DatabaseMutation) Fields() []string {
+	fields := make([]string, 0, 14)
+	if m.created_at != nil {
+		fields = append(fields, database.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, database.FieldUpdatedAt)
+	}
+	if m.created_by != nil {
+		fields = append(fields, database.FieldCreatedBy)
+	}
+	if m.updated_by != nil {
+		fields = append(fields, database.FieldUpdatedBy)
+	}
+	if m.deleted_at != nil {
+		fields = append(fields, database.FieldDeletedAt)
+	}
+	if m.deleted_by != nil {
+		fields = append(fields, database.FieldDeletedBy)
+	}
+	if m.organization_id != nil {
+		fields = append(fields, database.FieldOrganizationID)
+	}
+	if m.name != nil {
+		fields = append(fields, database.FieldName)
+	}
+	if m.geo != nil {
+		fields = append(fields, database.FieldGeo)
+	}
+	if m.dsn != nil {
+		fields = append(fields, database.FieldDsn)
+	}
+	if m.group != nil {
+		fields = append(fields, database.FieldGroupID)
+	}
+	if m.token != nil {
+		fields = append(fields, database.FieldToken)
+	}
+	if m.status != nil {
+		fields = append(fields, database.FieldStatus)
+	}
+	if m.provider != nil {
+		fields = append(fields, database.FieldProvider)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *DatabaseMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case database.FieldCreatedAt:
+		return m.CreatedAt()
+	case database.FieldUpdatedAt:
+		return m.UpdatedAt()
+	case database.FieldCreatedBy:
+		return m.CreatedBy()
+	case database.FieldUpdatedBy:
+		return m.UpdatedBy()
+	case database.FieldDeletedAt:
+		return m.DeletedAt()
+	case database.FieldDeletedBy:
+		return m.DeletedBy()
+	case database.FieldOrganizationID:
+		return m.OrganizationID()
+	case database.FieldName:
+		return m.Name()
+	case database.FieldGeo:
+		return m.Geo()
+	case database.FieldDsn:
+		return m.Dsn()
+	case database.FieldGroupID:
+		return m.GroupID()
+	case database.FieldToken:
+		return m.Token()
+	case database.FieldStatus:
+		return m.Status()
+	case database.FieldProvider:
+		return m.Provider()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
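WhereP is the storage-level escape hatch the comment above describes: raw *sql.Selector predicates get wrapped into typed predicate.Database values. A short sketch under the same assumed imports as the earlier example, plus entgo.io/ent/dialect/sql imported as sql; the helper name is illustrative, and the literal column name mirrors FieldOrganizationID in the generated database package.

// scopeToOrg narrows any DatabaseMutation to a single organization without
// depending on the generated predicate package.
func scopeToOrg(m *generated.DatabaseMutation, orgID string) {
	m.WhereP(sql.FieldEQ("organization_id", orgID))
}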
+func (m *DatabaseMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case database.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case database.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case database.FieldCreatedBy: + return m.OldCreatedBy(ctx) + case database.FieldUpdatedBy: + return m.OldUpdatedBy(ctx) + case database.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case database.FieldDeletedBy: + return m.OldDeletedBy(ctx) + case database.FieldOrganizationID: + return m.OldOrganizationID(ctx) + case database.FieldName: + return m.OldName(ctx) + case database.FieldGeo: + return m.OldGeo(ctx) + case database.FieldDsn: + return m.OldDsn(ctx) + case database.FieldGroupID: + return m.OldGroupID(ctx) + case database.FieldToken: + return m.OldToken(ctx) + case database.FieldStatus: + return m.OldStatus(ctx) + case database.FieldProvider: + return m.OldProvider(ctx) + } + return nil, fmt.Errorf("unknown Database field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DatabaseMutation) SetField(name string, value ent.Value) error { + switch name { + case database.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case database.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case database.FieldCreatedBy: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedBy(v) + return nil + case database.FieldUpdatedBy: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedBy(v) + return nil + case database.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case database.FieldDeletedBy: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedBy(v) + return nil + case database.FieldOrganizationID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOrganizationID(v) + return nil + case database.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case database.FieldGeo: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGeo(v) + return nil + case database.FieldDsn: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDsn(v) + return nil + case database.FieldGroupID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroupID(v) + return nil + case database.FieldToken: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetToken(v) + return nil + case database.FieldStatus: + v, ok := value.(enums.DatabaseStatus) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case database.FieldProvider: + v, ok := 
value.(enums.DatabaseProvider) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetProvider(v) + return nil + } + return fmt.Errorf("unknown Database field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *DatabaseMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *DatabaseMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DatabaseMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Database numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *DatabaseMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(database.FieldCreatedAt) { + fields = append(fields, database.FieldCreatedAt) + } + if m.FieldCleared(database.FieldUpdatedAt) { + fields = append(fields, database.FieldUpdatedAt) + } + if m.FieldCleared(database.FieldCreatedBy) { + fields = append(fields, database.FieldCreatedBy) + } + if m.FieldCleared(database.FieldUpdatedBy) { + fields = append(fields, database.FieldUpdatedBy) + } + if m.FieldCleared(database.FieldDeletedAt) { + fields = append(fields, database.FieldDeletedAt) + } + if m.FieldCleared(database.FieldDeletedBy) { + fields = append(fields, database.FieldDeletedBy) + } + if m.FieldCleared(database.FieldGeo) { + fields = append(fields, database.FieldGeo) + } + if m.FieldCleared(database.FieldToken) { + fields = append(fields, database.FieldToken) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *DatabaseMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *DatabaseMutation) ClearField(name string) error { + switch name { + case database.FieldCreatedAt: + m.ClearCreatedAt() + return nil + case database.FieldUpdatedAt: + m.ClearUpdatedAt() + return nil + case database.FieldCreatedBy: + m.ClearCreatedBy() + return nil + case database.FieldUpdatedBy: + m.ClearUpdatedBy() + return nil + case database.FieldDeletedAt: + m.ClearDeletedAt() + return nil + case database.FieldDeletedBy: + m.ClearDeletedBy() + return nil + case database.FieldGeo: + m.ClearGeo() + return nil + case database.FieldToken: + m.ClearToken() + return nil + } + return fmt.Errorf("unknown Database nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
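SetField, ClearField, and (below) ResetField expose the same typed setters through a name-keyed interface, which is what lets framework code manipulate a mutation without compile-time knowledge of the entity. An illustrative sketch, reusing the assumed generated alias and its database subpackage:

// scrubDatabase rewrites the name and clears the nullable token by field name.
func scrubDatabase(m *generated.DatabaseMutation) error {
	if err := m.SetField(database.FieldName, "redacted"); err != nil {
		return err // a non-string value would fail with "unexpected type"
	}
	// "token" appears in the ClearedFields/ClearField switches above,
	// so clearing it is allowed.
	return m.ClearField(database.FieldToken)
}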
+func (m *DatabaseMutation) ResetField(name string) error { + switch name { + case database.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case database.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case database.FieldCreatedBy: + m.ResetCreatedBy() + return nil + case database.FieldUpdatedBy: + m.ResetUpdatedBy() + return nil + case database.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case database.FieldDeletedBy: + m.ResetDeletedBy() + return nil + case database.FieldOrganizationID: + m.ResetOrganizationID() + return nil + case database.FieldName: + m.ResetName() + return nil + case database.FieldGeo: + m.ResetGeo() + return nil + case database.FieldDsn: + m.ResetDsn() + return nil + case database.FieldGroupID: + m.ResetGroupID() + return nil + case database.FieldToken: + m.ResetToken() + return nil + case database.FieldStatus: + m.ResetStatus() + return nil + case database.FieldProvider: + m.ResetProvider() + return nil + } + return fmt.Errorf("unknown Database field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *DatabaseMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.group != nil { + edges = append(edges, database.EdgeGroup) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *DatabaseMutation) AddedIDs(name string) []ent.Value { + switch name { + case database.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *DatabaseMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *DatabaseMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *DatabaseMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedgroup { + edges = append(edges, database.EdgeGroup) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *DatabaseMutation) EdgeCleared(name string) bool { + switch name { + case database.EdgeGroup: + return m.clearedgroup + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *DatabaseMutation) ClearEdge(name string) error { + switch name { + case database.EdgeGroup: + m.ClearGroup() + return nil + } + return fmt.Errorf("unknown Database unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *DatabaseMutation) ResetEdge(name string) error { + switch name { + case database.EdgeGroup: + m.ResetGroup() + return nil + } + return fmt.Errorf("unknown Database edge %s", name) +} + +// GroupMutation represents an operation that mutates the Group nodes in the graph. 
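The edge methods that close out DatabaseMutation (AddedEdges, ClearedEdges, ClearEdge, ResetEdge) are part of the generic ent.Mutation interface, so they can be observed without knowing the concrete type. A hedged sketch of a type-agnostic mutator, same assumed imports:

// LogClearedEdges reports every cleared edge for any entity type.
func LogClearedEdges(next ent.Mutator) ent.Mutator {
	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
		for _, e := range m.ClearedEdges() {
			log.Printf("%s mutation clears edge %q", m.Type(), e)
		}
		return next.Mutate(ctx, m)
	})
}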
+type GroupMutation struct {
+	config
+	op               Op
+	typ              string
+	id               *string
+	created_at       *time.Time
+	updated_at       *time.Time
+	created_by       *string
+	updated_by       *string
+	deleted_at       *time.Time
+	deleted_by       *string
+	name             *string
+	description      *string
+	primary_location *string
+	locations        *[]string
+	appendlocations  []string
+	token            *string
+	region           *enums.Region
+	clearedFields    map[string]struct{}
+	databases        map[string]struct{}
+	removeddatabases map[string]struct{}
+	cleareddatabases bool
+	done             bool
+	oldValue         func(context.Context) (*Group, error)
+	predicates       []predicate.Group
+}
+
+var _ ent.Mutation = (*GroupMutation)(nil)
+
+// groupOption allows management of the mutation configuration using functional options.
+type groupOption func(*GroupMutation)
+
+// newGroupMutation creates a new mutation for the Group entity.
+func newGroupMutation(c config, op Op, opts ...groupOption) *GroupMutation {
+	m := &GroupMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeGroup,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withGroupID sets the ID field of the mutation.
+func withGroupID(id string) groupOption {
+	return func(m *GroupMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *Group
+		)
+		m.oldValue = func(ctx context.Context) (*Group, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().Group.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withGroup sets the old Group of the mutation.
+func withGroup(node *Group) groupOption {
+	return func(m *GroupMutation) {
+		m.oldValue = func(context.Context) (*Group, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m GroupMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m GroupMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("generated: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of Group entities.
+func (m *GroupMutation) SetID(id string) {
+	m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *GroupMutation) ID() (id string, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *GroupMutation) IDs(ctx context.Context) ([]string, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []string{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Group.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *GroupMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *GroupMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (m *GroupMutation) ClearCreatedAt() { + m.created_at = nil + m.clearedFields[group.FieldCreatedAt] = struct{}{} +} + +// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. +func (m *GroupMutation) CreatedAtCleared() bool { + _, ok := m.clearedFields[group.FieldCreatedAt] + return ok +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *GroupMutation) ResetCreatedAt() { + m.created_at = nil + delete(m.clearedFields, group.FieldCreatedAt) +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *GroupMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *GroupMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (m *GroupMutation) ClearUpdatedAt() { + m.updated_at = nil + m.clearedFields[group.FieldUpdatedAt] = struct{}{} +} + +// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. 
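IDs is the predicate-aware counterpart to ID: for bulk Update/Delete operations it resolves the affected rows by running the mutation's predicates, while UpdateOne/DeleteOne short-circuit to the single known ID. Illustrative usage, with the same assumed imports as the earlier sketches:

// affectedGroups returns the IDs a pending bulk group mutation will touch.
func affectedGroups(ctx context.Context, gm *generated.GroupMutation) ([]string, error) {
	return gm.IDs(ctx) // falls back to a predicate query for OpUpdate/OpDelete
}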
+func (m *GroupMutation) UpdatedAtCleared() bool { + _, ok := m.clearedFields[group.FieldUpdatedAt] + return ok +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *GroupMutation) ResetUpdatedAt() { + m.updated_at = nil + delete(m.clearedFields, group.FieldUpdatedAt) +} + +// SetCreatedBy sets the "created_by" field. +func (m *GroupMutation) SetCreatedBy(s string) { + m.created_by = &s +} + +// CreatedBy returns the value of the "created_by" field in the mutation. +func (m *GroupMutation) CreatedBy() (r string, exists bool) { + v := m.created_by + if v == nil { + return + } + return *v, true +} + +// OldCreatedBy returns the old "created_by" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldCreatedBy(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedBy: %w", err) + } + return oldValue.CreatedBy, nil +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (m *GroupMutation) ClearCreatedBy() { + m.created_by = nil + m.clearedFields[group.FieldCreatedBy] = struct{}{} +} + +// CreatedByCleared returns if the "created_by" field was cleared in this mutation. +func (m *GroupMutation) CreatedByCleared() bool { + _, ok := m.clearedFields[group.FieldCreatedBy] + return ok +} + +// ResetCreatedBy resets all changes to the "created_by" field. +func (m *GroupMutation) ResetCreatedBy() { + m.created_by = nil + delete(m.clearedFields, group.FieldCreatedBy) +} + +// SetUpdatedBy sets the "updated_by" field. +func (m *GroupMutation) SetUpdatedBy(s string) { + m.updated_by = &s +} + +// UpdatedBy returns the value of the "updated_by" field in the mutation. +func (m *GroupMutation) UpdatedBy() (r string, exists bool) { + v := m.updated_by + if v == nil { + return + } + return *v, true +} + +// OldUpdatedBy returns the old "updated_by" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldUpdatedBy(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedBy: %w", err) + } + return oldValue.UpdatedBy, nil +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (m *GroupMutation) ClearUpdatedBy() { + m.updated_by = nil + m.clearedFields[group.FieldUpdatedBy] = struct{}{} +} + +// UpdatedByCleared returns if the "updated_by" field was cleared in this mutation. +func (m *GroupMutation) UpdatedByCleared() bool { + _, ok := m.clearedFields[group.FieldUpdatedBy] + return ok +} + +// ResetUpdatedBy resets all changes to the "updated_by" field. 
+func (m *GroupMutation) ResetUpdatedBy() { + m.updated_by = nil + delete(m.clearedFields, group.FieldUpdatedBy) +} + +// SetDeletedAt sets the "deleted_at" field. +func (m *GroupMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *GroupMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldDeletedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *GroupMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[group.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *GroupMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[group.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *GroupMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, group.FieldDeletedAt) +} + +// SetDeletedBy sets the "deleted_by" field. +func (m *GroupMutation) SetDeletedBy(s string) { + m.deleted_by = &s +} + +// DeletedBy returns the value of the "deleted_by" field in the mutation. +func (m *GroupMutation) DeletedBy() (r string, exists bool) { + v := m.deleted_by + if v == nil { + return + } + return *v, true +} + +// OldDeletedBy returns the old "deleted_by" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldDeletedBy(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedBy: %w", err) + } + return oldValue.DeletedBy, nil +} + +// ClearDeletedBy clears the value of the "deleted_by" field. +func (m *GroupMutation) ClearDeletedBy() { + m.deleted_by = nil + m.clearedFields[group.FieldDeletedBy] = struct{}{} +} + +// DeletedByCleared returns if the "deleted_by" field was cleared in this mutation. +func (m *GroupMutation) DeletedByCleared() bool { + _, ok := m.clearedFields[group.FieldDeletedBy] + return ok +} + +// ResetDeletedBy resets all changes to the "deleted_by" field. +func (m *GroupMutation) ResetDeletedBy() { + m.deleted_by = nil + delete(m.clearedFields, group.FieldDeletedBy) +} + +// SetName sets the "name" field. 
+func (m *GroupMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *GroupMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *GroupMutation) ResetName() { + m.name = nil +} + +// SetDescription sets the "description" field. +func (m *GroupMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. +func (m *GroupMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldDescription(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ClearDescription clears the value of the "description" field. +func (m *GroupMutation) ClearDescription() { + m.description = nil + m.clearedFields[group.FieldDescription] = struct{}{} +} + +// DescriptionCleared returns if the "description" field was cleared in this mutation. +func (m *GroupMutation) DescriptionCleared() bool { + _, ok := m.clearedFields[group.FieldDescription] + return ok +} + +// ResetDescription resets all changes to the "description" field. +func (m *GroupMutation) ResetDescription() { + m.description = nil + delete(m.clearedFields, group.FieldDescription) +} + +// SetPrimaryLocation sets the "primary_location" field. +func (m *GroupMutation) SetPrimaryLocation(s string) { + m.primary_location = &s +} + +// PrimaryLocation returns the value of the "primary_location" field in the mutation. +func (m *GroupMutation) PrimaryLocation() (r string, exists bool) { + v := m.primary_location + if v == nil { + return + } + return *v, true +} + +// OldPrimaryLocation returns the old "primary_location" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *GroupMutation) OldPrimaryLocation(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPrimaryLocation is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPrimaryLocation requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPrimaryLocation: %w", err) + } + return oldValue.PrimaryLocation, nil +} + +// ResetPrimaryLocation resets all changes to the "primary_location" field. +func (m *GroupMutation) ResetPrimaryLocation() { + m.primary_location = nil +} + +// SetLocations sets the "locations" field. +func (m *GroupMutation) SetLocations(s []string) { + m.locations = &s + m.appendlocations = nil +} + +// Locations returns the value of the "locations" field in the mutation. +func (m *GroupMutation) Locations() (r []string, exists bool) { + v := m.locations + if v == nil { + return + } + return *v, true +} + +// OldLocations returns the old "locations" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldLocations(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLocations is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLocations requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLocations: %w", err) + } + return oldValue.Locations, nil +} + +// AppendLocations adds s to the "locations" field. +func (m *GroupMutation) AppendLocations(s []string) { + m.appendlocations = append(m.appendlocations, s...) +} + +// AppendedLocations returns the list of values that were appended to the "locations" field in this mutation. +func (m *GroupMutation) AppendedLocations() ([]string, bool) { + if len(m.appendlocations) == 0 { + return nil, false + } + return m.appendlocations, true +} + +// ClearLocations clears the value of the "locations" field. +func (m *GroupMutation) ClearLocations() { + m.locations = nil + m.appendlocations = nil + m.clearedFields[group.FieldLocations] = struct{}{} +} + +// LocationsCleared returns if the "locations" field was cleared in this mutation. +func (m *GroupMutation) LocationsCleared() bool { + _, ok := m.clearedFields[group.FieldLocations] + return ok +} + +// ResetLocations resets all changes to the "locations" field. +func (m *GroupMutation) ResetLocations() { + m.locations = nil + m.appendlocations = nil + delete(m.clearedFields, group.FieldLocations) +} + +// SetToken sets the "token" field. +func (m *GroupMutation) SetToken(s string) { + m.token = &s +} + +// Token returns the value of the "token" field in the mutation. +func (m *GroupMutation) Token() (r string, exists bool) { + v := m.token + if v == nil { + return + } + return *v, true +} + +// OldToken returns the old "token" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
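Because locations is a JSON list column, the mutation tracks replacement and concatenation separately: SetLocations discards any pending appends, while AppendLocations accumulates values to be appended on save. A small sketch, assumed imports as above:

// addLocations extends a group's locations rather than overwriting them.
func addLocations(gm *generated.GroupMutation, locs ...string) {
	gm.AppendLocations(locs)
	if pending, ok := gm.AppendedLocations(); ok {
		log.Printf("will append locations: %v", pending)
	}
}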
+func (m *GroupMutation) OldToken(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldToken is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldToken requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldToken: %w", err)
+	}
+	return oldValue.Token, nil
+}
+
+// ClearToken clears the value of the "token" field.
+func (m *GroupMutation) ClearToken() {
+	m.token = nil
+	m.clearedFields[group.FieldToken] = struct{}{}
+}
+
+// TokenCleared returns if the "token" field was cleared in this mutation.
+func (m *GroupMutation) TokenCleared() bool {
+	_, ok := m.clearedFields[group.FieldToken]
+	return ok
+}
+
+// ResetToken resets all changes to the "token" field.
+func (m *GroupMutation) ResetToken() {
+	m.token = nil
+	delete(m.clearedFields, group.FieldToken)
+}
+
+// SetRegion sets the "region" field.
+func (m *GroupMutation) SetRegion(e enums.Region) {
+	m.region = &e
+}
+
+// Region returns the value of the "region" field in the mutation.
+func (m *GroupMutation) Region() (r enums.Region, exists bool) {
+	v := m.region
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldRegion returns the old "region" field's value of the Group entity.
+// If the Group object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *GroupMutation) OldRegion(ctx context.Context) (v enums.Region, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldRegion is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldRegion requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldRegion: %w", err)
+	}
+	return oldValue.Region, nil
+}
+
+// ResetRegion resets all changes to the "region" field.
+func (m *GroupMutation) ResetRegion() {
+	m.region = nil
+}
+
+// AddDatabaseIDs adds the "databases" edge to the Database entity by ids.
+func (m *GroupMutation) AddDatabaseIDs(ids ...string) {
+	if m.databases == nil {
+		m.databases = make(map[string]struct{})
+	}
+	for i := range ids {
+		m.databases[ids[i]] = struct{}{}
+	}
+}
+
+// ClearDatabases clears the "databases" edge to the Database entity.
+func (m *GroupMutation) ClearDatabases() {
+	m.cleareddatabases = true
+}
+
+// DatabasesCleared reports if the "databases" edge to the Database entity was cleared.
+func (m *GroupMutation) DatabasesCleared() bool {
+	return m.cleareddatabases
+}
+
+// RemoveDatabaseIDs removes the "databases" edge to the Database entity by IDs.
+func (m *GroupMutation) RemoveDatabaseIDs(ids ...string) {
+	if m.removeddatabases == nil {
+		m.removeddatabases = make(map[string]struct{})
+	}
+	for i := range ids {
+		delete(m.databases, ids[i])
+		m.removeddatabases[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedDatabasesIDs returns the removed IDs of the "databases" edge to the Database entity.
+func (m *GroupMutation) RemovedDatabasesIDs() (ids []string) {
+	for id := range m.removeddatabases {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// DatabasesIDs returns the "databases" edge IDs in the mutation.
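AddDatabaseIDs and RemoveDatabaseIDs keep separate ID sets, and removing an ID also drops any pending add for it, so a single mutation can attach and detach rows safely. Illustrative sketch, assumed imports as above:

// swapDatabase retargets the "databases" edge within a single GroupMutation.
func swapDatabase(gm *generated.GroupMutation, oldID, newID string) {
	gm.RemoveDatabaseIDs(oldID) // surfaces in RemovedDatabasesIDs()
	gm.AddDatabaseIDs(newID)    // surfaces in DatabasesIDs()
}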
+func (m *GroupMutation) DatabasesIDs() (ids []string) {
+	for id := range m.databases {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetDatabases resets all changes to the "databases" edge.
+func (m *GroupMutation) ResetDatabases() {
+	m.databases = nil
+	m.cleareddatabases = false
+	m.removeddatabases = nil
+}
+
+// Where appends a list of predicates to the GroupMutation builder.
+func (m *GroupMutation) Where(ps ...predicate.Group) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the GroupMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *GroupMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.Group, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *GroupMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *GroupMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (Group).
+func (m *GroupMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *GroupMutation) Fields() []string {
+	fields := make([]string, 0, 12)
+	if m.created_at != nil {
+		fields = append(fields, group.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, group.FieldUpdatedAt)
+	}
+	if m.created_by != nil {
+		fields = append(fields, group.FieldCreatedBy)
+	}
+	if m.updated_by != nil {
+		fields = append(fields, group.FieldUpdatedBy)
+	}
+	if m.deleted_at != nil {
+		fields = append(fields, group.FieldDeletedAt)
+	}
+	if m.deleted_by != nil {
+		fields = append(fields, group.FieldDeletedBy)
+	}
+	if m.name != nil {
+		fields = append(fields, group.FieldName)
+	}
+	if m.description != nil {
+		fields = append(fields, group.FieldDescription)
+	}
+	if m.primary_location != nil {
+		fields = append(fields, group.FieldPrimaryLocation)
+	}
+	if m.locations != nil {
+		fields = append(fields, group.FieldLocations)
+	}
+	if m.token != nil {
+		fields = append(fields, group.FieldToken)
+	}
+	if m.region != nil {
+		fields = append(fields, group.FieldRegion)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *GroupMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case group.FieldCreatedAt:
+		return m.CreatedAt()
+	case group.FieldUpdatedAt:
+		return m.UpdatedAt()
+	case group.FieldCreatedBy:
+		return m.CreatedBy()
+	case group.FieldUpdatedBy:
+		return m.UpdatedBy()
+	case group.FieldDeletedAt:
+		return m.DeletedAt()
+	case group.FieldDeletedBy:
+		return m.DeletedBy()
+	case group.FieldName:
+		return m.Name()
+	case group.FieldDescription:
+		return m.Description()
+	case group.FieldPrimaryLocation:
+		return m.PrimaryLocation()
+	case group.FieldLocations:
+		return m.Locations()
+	case group.FieldToken:
+		return m.Token()
+	case group.FieldRegion:
+		return m.Region()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *GroupMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case group.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case group.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case group.FieldCreatedBy: + return m.OldCreatedBy(ctx) + case group.FieldUpdatedBy: + return m.OldUpdatedBy(ctx) + case group.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case group.FieldDeletedBy: + return m.OldDeletedBy(ctx) + case group.FieldName: + return m.OldName(ctx) + case group.FieldDescription: + return m.OldDescription(ctx) + case group.FieldPrimaryLocation: + return m.OldPrimaryLocation(ctx) + case group.FieldLocations: + return m.OldLocations(ctx) + case group.FieldToken: + return m.OldToken(ctx) + case group.FieldRegion: + return m.OldRegion(ctx) + } + return nil, fmt.Errorf("unknown Group field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *GroupMutation) SetField(name string, value ent.Value) error { + switch name { + case group.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case group.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case group.FieldCreatedBy: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedBy(v) + return nil + case group.FieldUpdatedBy: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedBy(v) + return nil + case group.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case group.FieldDeletedBy: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedBy(v) + return nil + case group.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case group.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case group.FieldPrimaryLocation: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPrimaryLocation(v) + return nil + case group.FieldLocations: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLocations(v) + return nil + case group.FieldToken: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetToken(v) + return nil + case group.FieldRegion: + v, ok := value.(enums.Region) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRegion(v) + return nil + } + return fmt.Errorf("unknown Group field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *GroupMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. 
The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *GroupMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *GroupMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Group numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *GroupMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(group.FieldCreatedAt) { + fields = append(fields, group.FieldCreatedAt) + } + if m.FieldCleared(group.FieldUpdatedAt) { + fields = append(fields, group.FieldUpdatedAt) + } + if m.FieldCleared(group.FieldCreatedBy) { + fields = append(fields, group.FieldCreatedBy) + } + if m.FieldCleared(group.FieldUpdatedBy) { + fields = append(fields, group.FieldUpdatedBy) + } + if m.FieldCleared(group.FieldDeletedAt) { + fields = append(fields, group.FieldDeletedAt) + } + if m.FieldCleared(group.FieldDeletedBy) { + fields = append(fields, group.FieldDeletedBy) + } + if m.FieldCleared(group.FieldDescription) { + fields = append(fields, group.FieldDescription) + } + if m.FieldCleared(group.FieldLocations) { + fields = append(fields, group.FieldLocations) + } + if m.FieldCleared(group.FieldToken) { + fields = append(fields, group.FieldToken) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *GroupMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *GroupMutation) ClearField(name string) error { + switch name { + case group.FieldCreatedAt: + m.ClearCreatedAt() + return nil + case group.FieldUpdatedAt: + m.ClearUpdatedAt() + return nil + case group.FieldCreatedBy: + m.ClearCreatedBy() + return nil + case group.FieldUpdatedBy: + m.ClearUpdatedBy() + return nil + case group.FieldDeletedAt: + m.ClearDeletedAt() + return nil + case group.FieldDeletedBy: + m.ClearDeletedBy() + return nil + case group.FieldDescription: + m.ClearDescription() + return nil + case group.FieldLocations: + m.ClearLocations() + return nil + case group.FieldToken: + m.ClearToken() + return nil + } + return fmt.Errorf("unknown Group nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *GroupMutation) ResetField(name string) error { + switch name { + case group.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case group.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case group.FieldCreatedBy: + m.ResetCreatedBy() + return nil + case group.FieldUpdatedBy: + m.ResetUpdatedBy() + return nil + case group.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case group.FieldDeletedBy: + m.ResetDeletedBy() + return nil + case group.FieldName: + m.ResetName() + return nil + case group.FieldDescription: + m.ResetDescription() + return nil + case group.FieldPrimaryLocation: + m.ResetPrimaryLocation() + return nil + case group.FieldLocations: + m.ResetLocations() + return nil + case group.FieldToken: + m.ResetToken() + return nil + case group.FieldRegion: + m.ResetRegion() + return nil + } + return fmt.Errorf("unknown Group field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *GroupMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.databases != nil { + edges = append(edges, group.EdgeDatabases) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *GroupMutation) AddedIDs(name string) []ent.Value { + switch name { + case group.EdgeDatabases: + ids := make([]ent.Value, 0, len(m.databases)) + for id := range m.databases { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *GroupMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removeddatabases != nil { + edges = append(edges, group.EdgeDatabases) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *GroupMutation) RemovedIDs(name string) []ent.Value { + switch name { + case group.EdgeDatabases: + ids := make([]ent.Value, 0, len(m.removeddatabases)) + for id := range m.removeddatabases { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *GroupMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.cleareddatabases { + edges = append(edges, group.EdgeDatabases) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *GroupMutation) EdgeCleared(name string) bool { + switch name { + case group.EdgeDatabases: + return m.cleareddatabases + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *GroupMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown Group unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
+func (m *GroupMutation) ResetEdge(name string) error { + switch name { + case group.EdgeDatabases: + m.ResetDatabases() + return nil + } + return fmt.Errorf("unknown Group edge %s", name) +} diff --git a/internal/ent/generated/openapi.json b/internal/ent/generated/openapi.json new file mode 100644 index 0000000..93de96c --- /dev/null +++ b/internal/ent/generated/openapi.json @@ -0,0 +1,1038 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "Datum OpenAPI 3.1.0 Specifications", + "description": "Programmatic interfaces for interacting with Datum Services", + "termsOfService": "https://datum.net/tos", + "contact": { + "name": "Datum Support", + "url": "https://datum.net/support", + "email": "support@datum.net" + }, + "license": { + "name": "Apache 2.0", + "url": "https://www.apache.org/licenses/LICENSE-2.0" + }, + "version": "1.0.1" + }, + "servers": [ + { + "url": "https://api.datum.net/v1", + "description": "Datum Production API Endpoint" + }, + { + "url": "http://localhost:17608/v1", + "description": "http localhost endpoint for testing purposes" + } + ], + "paths": { + "/databases": { + "get": { + "tags": [ + "Database" + ], + "summary": "List Databases", + "description": "List Databases.", + "operationId": "listDatabase", + "parameters": [ + { + "name": "page", + "in": "query", + "description": "what page to render", + "schema": { + "type": "integer", + "minimum": 1 + } + }, + { + "name": "itemsPerPage", + "in": "query", + "description": "item count to render per page", + "schema": { + "type": "integer", + "maximum": 255, + "minimum": 1 + } + } + ], + "responses": { + "200": { + "description": "result Database list", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Database" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + }, + "post": { + "tags": [ + "Database" + ], + "summary": "Create a new Database", + "description": "Creates a new Database and persists it to storage.", + "operationId": "createDatabase", + "requestBody": { + "description": "Database to create", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "organization_id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "geo": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "group_id": { + "type": "string" + }, + "token": { + "type": "string" + }, + "status": { + "type": "string", + "enum": [ + "ACTIVE", + "CREATING", + "DELETING", + "DELETED" + ], + "default": "CREATING" + }, + "provider": { + "type": "string", + "enum": [ + "LOCAL", + "TURSO" + ], + "default": "LOCAL" + }, + "group": { + "type": "string" + } + }, + "required": [ + "organization_id", + "name", + "dsn", + "group_id", + "status", + "provider", + "group" + ] + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Database created", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Database" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + } + }, + "/databases/{id}": { + "get": { + "tags": [ + "Database" + ], + "summary": "Find a Database by ID", + "description": "Finds the Database with the requested ID and returns it.", + 
"operationId": "readDatabase", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Database", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Database with requested ID was found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Database" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + }, + "delete": { + "tags": [ + "Database" + ], + "summary": "Deletes a Database by ID", + "description": "Deletes the Database with the requested ID.", + "operationId": "deleteDatabase", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Database", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "Database with requested ID was deleted" + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + }, + "patch": { + "tags": [ + "Database" + ], + "summary": "Updates a Database", + "description": "Updates a Database and persists changes to storage.", + "operationId": "updateDatabase", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Database", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "description": "Database properties to update", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "organization_id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "geo": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "group_id": { + "type": "string" + }, + "token": { + "type": "string" + }, + "status": { + "type": "string", + "enum": [ + "ACTIVE", + "CREATING", + "DELETING", + "DELETED" + ], + "default": "CREATING" + }, + "provider": { + "type": "string", + "enum": [ + "LOCAL", + "TURSO" + ], + "default": "LOCAL" + }, + "group": { + "type": "string" + } + } + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Database updated", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Database" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + } + }, + "/databases/{id}/group": { + "get": { + "tags": [ + "Database" + ], + "summary": "Find the attached Group", + "description": "Find the attached Group of the Database with the given ID", + "operationId": "readDatabaseGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Database", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Group attached to Database with requested ID was found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + } + 
}, + "/groups": { + "get": { + "tags": [ + "Group" + ], + "summary": "List Groups", + "description": "List Groups.", + "operationId": "listGroup", + "parameters": [ + { + "name": "page", + "in": "query", + "description": "what page to render", + "schema": { + "type": "integer", + "minimum": 1 + } + }, + { + "name": "itemsPerPage", + "in": "query", + "description": "item count to render per page", + "schema": { + "type": "integer", + "maximum": 255, + "minimum": 1 + } + } + ], + "responses": { + "200": { + "description": "result Group list", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Group" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + }, + "post": { + "tags": [ + "Group" + ], + "summary": "Create a new Group", + "description": "Creates a new Group and persists it to storage.", + "operationId": "createGroup", + "requestBody": { + "description": "Group to create", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "primary_location": { + "type": "string" + }, + "locations": { + "type": "array", + "items": { + "type": "string" + } + }, + "token": { + "type": "string" + }, + "region": { + "type": "string", + "enum": [ + "AMER", + "EMEA", + "APAC" + ], + "default": "AMER" + }, + "databases": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "name", + "primary_location", + "region" + ] + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Group created", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + } + }, + "/groups/{id}": { + "get": { + "tags": [ + "Group" + ], + "summary": "Find a Group by ID", + "description": "Finds the Group with the requested ID and returns it.", + "operationId": "readGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Group", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Group with requested ID was found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + }, + "delete": { + "tags": [ + "Group" + ], + "summary": "Deletes a Group by ID", + "description": "Deletes the Group with the requested ID.", + "operationId": "deleteGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Group", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "Group with requested ID was deleted" + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + }, + "patch": { + "tags": 
[ + "Group" + ], + "summary": "Updates a Group", + "description": "Updates a Group and persists changes to storage.", + "operationId": "updateGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Group", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "description": "Group properties to update", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "primary_location": { + "type": "string" + }, + "locations": { + "type": "array", + "items": { + "type": "string" + } + }, + "token": { + "type": "string" + }, + "region": { + "type": "string", + "enum": [ + "AMER", + "EMEA", + "APAC" + ], + "default": "AMER" + }, + "databases": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Group updated", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + } + }, + "/groups/{id}/databases": { + "get": { + "tags": [ + "Group" + ], + "summary": "List attached Databases", + "description": "List attached Databases.", + "operationId": "listGroupDatabases", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Group", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "page", + "in": "query", + "description": "what page to render", + "schema": { + "type": "integer" + } + }, + { + "name": "itemsPerPage", + "in": "query", + "description": "item count to render per page", + "schema": { + "type": "integer" + } + } + ], + "responses": { + "200": { + "description": "result Groups list", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Database" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + } + } + }, + "components": { + "schemas": { + "Database": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + }, + "created_by": { + "type": "string" + }, + "updated_by": { + "type": "string" + }, + "deleted_at": { + "type": "string", + "format": "date-time" + }, + "deleted_by": { + "type": "string" + }, + "organization_id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "geo": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "group_id": { + "type": "string" + }, + "token": { + "type": "string" + }, + "status": { + "type": "string", + "enum": [ + "ACTIVE", + "CREATING", + "DELETING", + "DELETED" + ], + "default": "CREATING" + }, + "provider": { + "type": "string", + "enum": [ + "LOCAL", + "TURSO" + ], + "default": "LOCAL" + }, + "group": { + "$ref": "#/components/schemas/Group" + } + }, + "required": [ + "id", + "organization_id", + "name", + "dsn", + "group_id", + "status", + "provider", + "group" + ] + }, + "Group": { + "type": "object", + "properties": { + "id": { + "type": 
"string" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + }, + "created_by": { + "type": "string" + }, + "updated_by": { + "type": "string" + }, + "deleted_at": { + "type": "string", + "format": "date-time" + }, + "deleted_by": { + "type": "string" + }, + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "primary_location": { + "type": "string" + }, + "locations": { + "type": "array", + "items": { + "type": "string" + } + }, + "token": { + "type": "string" + }, + "region": { + "type": "string", + "enum": [ + "AMER", + "EMEA", + "APAC" + ], + "default": "AMER" + }, + "databases": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Database" + } + } + }, + "required": [ + "id", + "name", + "primary_location", + "region" + ] + } + }, + "responses": { + "400": { + "description": "invalid input, data invalid", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer" + }, + "status": { + "type": "string" + }, + "errors": {} + }, + "required": [ + "code", + "status" + ] + } + } + } + }, + "403": { + "description": "insufficient permissions", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer" + }, + "status": { + "type": "string" + }, + "errors": {} + }, + "required": [ + "code", + "status" + ] + } + } + } + }, + "404": { + "description": "resource not found", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer" + }, + "status": { + "type": "string" + }, + "errors": {} + }, + "required": [ + "code", + "status" + ] + } + } + } + }, + "409": { + "description": "conflicting resources", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer" + }, + "status": { + "type": "string" + }, + "errors": {} + }, + "required": [ + "code", + "status" + ] + } + } + } + }, + "500": { + "description": "unexpected error", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer" + }, + "status": { + "type": "string" + }, + "errors": {} + }, + "required": [ + "code", + "status" + ] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/internal/ent/generated/predicate/predicate.go b/internal/ent/generated/predicate/predicate.go new file mode 100644 index 0000000..e456610 --- /dev/null +++ b/internal/ent/generated/predicate/predicate.go @@ -0,0 +1,13 @@ +// Code generated by ent, DO NOT EDIT. + +package predicate + +import ( + "entgo.io/ent/dialect/sql" +) + +// Database is the predicate function for database builders. +type Database func(*sql.Selector) + +// Group is the predicate function for group builders. +type Group func(*sql.Selector) diff --git a/internal/ent/generated/privacy/privacy.go b/internal/ent/generated/privacy/privacy.go new file mode 100644 index 0000000..40cecf6 --- /dev/null +++ b/internal/ent/generated/privacy/privacy.go @@ -0,0 +1,215 @@ +// Code generated by ent, DO NOT EDIT. + +package privacy + +import ( + "context" + + "github.com/datumforge/geodetic/internal/ent/generated" + + "entgo.io/ent/entql" + "entgo.io/ent/privacy" +) + +var ( + // Allow may be returned by rules to indicate that the policy + // evaluation should terminate with allow decision. 
+ Allow = privacy.Allow + + // Deny may be returned by rules to indicate that the policy + // evaluation should terminate with deny decision. + Deny = privacy.Deny + + // Skip may be returned by rules to indicate that the policy + // evaluation should continue to the next rule. + Skip = privacy.Skip +) + +// Allowf returns a formatted wrapped Allow decision. +func Allowf(format string, a ...any) error { + return privacy.Allowf(format, a...) +} + +// Denyf returns a formatted wrapped Deny decision. +func Denyf(format string, a ...any) error { + return privacy.Denyf(format, a...) +} + +// Skipf returns a formatted wrapped Skip decision. +func Skipf(format string, a ...any) error { + return privacy.Skipf(format, a...) +} + +// DecisionContext creates a new context from the given parent context with +// a policy decision attach to it. +func DecisionContext(parent context.Context, decision error) context.Context { + return privacy.DecisionContext(parent, decision) +} + +// DecisionFromContext retrieves the policy decision from the context. +func DecisionFromContext(ctx context.Context) (error, bool) { + return privacy.DecisionFromContext(ctx) +} + +type ( + // Policy groups query and mutation policies. + Policy = privacy.Policy + + // QueryRule defines the interface deciding whether a + // query is allowed and optionally modify it. + QueryRule = privacy.QueryRule + // QueryPolicy combines multiple query rules into a single policy. + QueryPolicy = privacy.QueryPolicy + + // MutationRule defines the interface which decides whether a + // mutation is allowed and optionally modifies it. + MutationRule = privacy.MutationRule + // MutationPolicy combines multiple mutation rules into a single policy. + MutationPolicy = privacy.MutationPolicy + // MutationRuleFunc type is an adapter which allows the use of + // ordinary functions as mutation rules. + MutationRuleFunc = privacy.MutationRuleFunc + + // QueryMutationRule is an interface which groups query and mutation rules. + QueryMutationRule = privacy.QueryMutationRule +) + +// QueryRuleFunc type is an adapter to allow the use of +// ordinary functions as query rules. +type QueryRuleFunc func(context.Context, generated.Query) error + +// Eval returns f(ctx, q). +func (f QueryRuleFunc) EvalQuery(ctx context.Context, q generated.Query) error { + return f(ctx, q) +} + +// AlwaysAllowRule returns a rule that returns an allow decision. +func AlwaysAllowRule() QueryMutationRule { + return privacy.AlwaysAllowRule() +} + +// AlwaysDenyRule returns a rule that returns a deny decision. +func AlwaysDenyRule() QueryMutationRule { + return privacy.AlwaysDenyRule() +} + +// ContextQueryMutationRule creates a query/mutation rule from a context eval func. +func ContextQueryMutationRule(eval func(context.Context) error) QueryMutationRule { + return privacy.ContextQueryMutationRule(eval) +} + +// OnMutationOperation evaluates the given rule only on a given mutation operation. +func OnMutationOperation(rule MutationRule, op generated.Op) MutationRule { + return privacy.OnMutationOperation(rule, op) +} + +// DenyMutationOperationRule returns a rule denying specified mutation operation. +func DenyMutationOperationRule(op generated.Op) MutationRule { + rule := MutationRuleFunc(func(_ context.Context, m generated.Mutation) error { + return Denyf("generated/privacy: operation %s is not allowed", m.Op()) + }) + return OnMutationOperation(rule, op) +} + +// The DatabaseQueryRuleFunc type is an adapter to allow the use of ordinary +// functions as a query rule. 
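+//
+// As an illustration only (a sketch, not part of this change), a rule that
+// denies database queries for unauthenticated callers might look like:
+//
+//	rule := privacy.DatabaseQueryRuleFunc(func(ctx context.Context, q *generated.DatabaseQuery) error {
+//		if _, err := auth.GetUserIDFromContext(ctx); err != nil {
+//			return privacy.Denyf("unauthenticated database queries are not allowed")
+//		}
+//		return privacy.Skip // defer the decision to the next rule in the policy
+//	})
+//
+// auth.GetUserIDFromContext is assumed here from the datum auth package used
+// elsewhere in this patch.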
+type DatabaseQueryRuleFunc func(context.Context, *generated.DatabaseQuery) error + +// EvalQuery return f(ctx, q). +func (f DatabaseQueryRuleFunc) EvalQuery(ctx context.Context, q generated.Query) error { + if q, ok := q.(*generated.DatabaseQuery); ok { + return f(ctx, q) + } + return Denyf("generated/privacy: unexpected query type %T, expect *generated.DatabaseQuery", q) +} + +// The DatabaseMutationRuleFunc type is an adapter to allow the use of ordinary +// functions as a mutation rule. +type DatabaseMutationRuleFunc func(context.Context, *generated.DatabaseMutation) error + +// EvalMutation calls f(ctx, m). +func (f DatabaseMutationRuleFunc) EvalMutation(ctx context.Context, m generated.Mutation) error { + if m, ok := m.(*generated.DatabaseMutation); ok { + return f(ctx, m) + } + return Denyf("generated/privacy: unexpected mutation type %T, expect *generated.DatabaseMutation", m) +} + +// The GroupQueryRuleFunc type is an adapter to allow the use of ordinary +// functions as a query rule. +type GroupQueryRuleFunc func(context.Context, *generated.GroupQuery) error + +// EvalQuery return f(ctx, q). +func (f GroupQueryRuleFunc) EvalQuery(ctx context.Context, q generated.Query) error { + if q, ok := q.(*generated.GroupQuery); ok { + return f(ctx, q) + } + return Denyf("generated/privacy: unexpected query type %T, expect *generated.GroupQuery", q) +} + +// The GroupMutationRuleFunc type is an adapter to allow the use of ordinary +// functions as a mutation rule. +type GroupMutationRuleFunc func(context.Context, *generated.GroupMutation) error + +// EvalMutation calls f(ctx, m). +func (f GroupMutationRuleFunc) EvalMutation(ctx context.Context, m generated.Mutation) error { + if m, ok := m.(*generated.GroupMutation); ok { + return f(ctx, m) + } + return Denyf("generated/privacy: unexpected mutation type %T, expect *generated.GroupMutation", m) +} + +type ( + // Filter is the interface that wraps the Where function + // for filtering nodes in queries and mutations. + Filter interface { + // Where applies a filter on the executed query/mutation. + Where(entql.P) + } + + // The FilterFunc type is an adapter that allows the use of ordinary + // functions as filters for query and mutation types. + FilterFunc func(context.Context, Filter) error +) + +// EvalQuery calls f(ctx, q) if the query implements the Filter interface, otherwise it is denied. +func (f FilterFunc) EvalQuery(ctx context.Context, q generated.Query) error { + fr, err := queryFilter(q) + if err != nil { + return err + } + return f(ctx, fr) +} + +// EvalMutation calls f(ctx, q) if the mutation implements the Filter interface, otherwise it is denied. 
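+//
+// As a sketch only: a tenant-scoping filter (orgIDFromContext is a
+// hypothetical helper, not defined in this patch) could be written as:
+//
+//	privacy.FilterFunc(func(ctx context.Context, f privacy.Filter) error {
+//		f.Where(entql.FieldEQ("organization_id", orgIDFromContext(ctx)))
+//		return privacy.Skip
+//	})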
+func (f FilterFunc) EvalMutation(ctx context.Context, m generated.Mutation) error { + fr, err := mutationFilter(m) + if err != nil { + return err + } + return f(ctx, fr) +} + +var _ QueryMutationRule = FilterFunc(nil) + +func queryFilter(q generated.Query) (Filter, error) { + switch q := q.(type) { + case *generated.DatabaseQuery: + return q.Filter(), nil + case *generated.GroupQuery: + return q.Filter(), nil + default: + return nil, Denyf("generated/privacy: unexpected query type %T for query filter", q) + } +} + +func mutationFilter(m generated.Mutation) (Filter, error) { + switch m := m.(type) { + case *generated.DatabaseMutation: + return m.Filter(), nil + case *generated.GroupMutation: + return m.Filter(), nil + default: + return nil, Denyf("generated/privacy: unexpected mutation type %T for mutation filter", m) + } +} diff --git a/internal/ent/generated/runtime.go b/internal/ent/generated/runtime.go new file mode 100644 index 0000000..d2c2760 --- /dev/null +++ b/internal/ent/generated/runtime.go @@ -0,0 +1,5 @@ +// Code generated by ent, DO NOT EDIT. + +package generated + +// The schema-stitching logic is generated in github.com/datumforge/geodetic/internal/ent/generated/runtime/runtime.go diff --git a/internal/ent/generated/runtime/runtime.go b/internal/ent/generated/runtime/runtime.go new file mode 100644 index 0000000..b1b3e51 --- /dev/null +++ b/internal/ent/generated/runtime/runtime.go @@ -0,0 +1,95 @@ +// Code generated by ent, DO NOT EDIT. + +package runtime + +import ( + "time" + + "github.com/datumforge/geodetic/internal/ent/generated/database" + "github.com/datumforge/geodetic/internal/ent/generated/group" + "github.com/datumforge/geodetic/internal/ent/schema" +) + +// The init function reads all schema descriptors with runtime code +// (default values, validators, hooks and policies) and stitches it +// to their package variables. +func init() { + databaseMixin := schema.Database{}.Mixin() + databaseMixinHooks0 := databaseMixin[0].Hooks() + databaseHooks := schema.Database{}.Hooks() + database.Hooks[0] = databaseMixinHooks0[0] + database.Hooks[1] = databaseHooks[0] + database.Hooks[2] = databaseHooks[1] + databaseMixinFields0 := databaseMixin[0].Fields() + _ = databaseMixinFields0 + databaseMixinFields2 := databaseMixin[2].Fields() + _ = databaseMixinFields2 + databaseFields := schema.Database{}.Fields() + _ = databaseFields + // databaseDescCreatedAt is the schema descriptor for created_at field. + databaseDescCreatedAt := databaseMixinFields0[0].Descriptor() + // database.DefaultCreatedAt holds the default value on creation for the created_at field. + database.DefaultCreatedAt = databaseDescCreatedAt.Default.(func() time.Time) + // databaseDescUpdatedAt is the schema descriptor for updated_at field. + databaseDescUpdatedAt := databaseMixinFields0[1].Descriptor() + // database.DefaultUpdatedAt holds the default value on creation for the updated_at field. + database.DefaultUpdatedAt = databaseDescUpdatedAt.Default.(func() time.Time) + // database.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + database.UpdateDefaultUpdatedAt = databaseDescUpdatedAt.UpdateDefault.(func() time.Time) + // databaseDescOrganizationID is the schema descriptor for organization_id field. + databaseDescOrganizationID := databaseFields[0].Descriptor() + // database.OrganizationIDValidator is a validator for the "organization_id" field. It is called by the builders before save. 
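+	// (this validator is the NotEmpty() constraint declared on the schema's organization_id field)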
+ database.OrganizationIDValidator = databaseDescOrganizationID.Validators[0].(func(string) error) + // databaseDescName is the schema descriptor for name field. + databaseDescName := databaseFields[1].Descriptor() + // database.NameValidator is a validator for the "name" field. It is called by the builders before save. + database.NameValidator = databaseDescName.Validators[0].(func(string) error) + // databaseDescDsn is the schema descriptor for dsn field. + databaseDescDsn := databaseFields[3].Descriptor() + // database.DsnValidator is a validator for the "dsn" field. It is called by the builders before save. + database.DsnValidator = databaseDescDsn.Validators[0].(func(string) error) + // databaseDescID is the schema descriptor for id field. + databaseDescID := databaseMixinFields2[0].Descriptor() + // database.DefaultID holds the default value on creation for the id field. + database.DefaultID = databaseDescID.Default.(func() string) + groupMixin := schema.Group{}.Mixin() + groupMixinHooks0 := groupMixin[0].Hooks() + groupHooks := schema.Group{}.Hooks() + group.Hooks[0] = groupMixinHooks0[0] + group.Hooks[1] = groupHooks[0] + group.Hooks[2] = groupHooks[1] + group.Hooks[3] = groupHooks[2] + groupMixinFields0 := groupMixin[0].Fields() + _ = groupMixinFields0 + groupMixinFields1 := groupMixin[1].Fields() + _ = groupMixinFields1 + groupFields := schema.Group{}.Fields() + _ = groupFields + // groupDescCreatedAt is the schema descriptor for created_at field. + groupDescCreatedAt := groupMixinFields0[0].Descriptor() + // group.DefaultCreatedAt holds the default value on creation for the created_at field. + group.DefaultCreatedAt = groupDescCreatedAt.Default.(func() time.Time) + // groupDescUpdatedAt is the schema descriptor for updated_at field. + groupDescUpdatedAt := groupMixinFields0[1].Descriptor() + // group.DefaultUpdatedAt holds the default value on creation for the updated_at field. + group.DefaultUpdatedAt = groupDescUpdatedAt.Default.(func() time.Time) + // group.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + group.UpdateDefaultUpdatedAt = groupDescUpdatedAt.UpdateDefault.(func() time.Time) + // groupDescName is the schema descriptor for name field. + groupDescName := groupFields[0].Descriptor() + // group.NameValidator is a validator for the "name" field. It is called by the builders before save. + group.NameValidator = groupDescName.Validators[0].(func(string) error) + // groupDescPrimaryLocation is the schema descriptor for primary_location field. + groupDescPrimaryLocation := groupFields[2].Descriptor() + // group.PrimaryLocationValidator is a validator for the "primary_location" field. It is called by the builders before save. + group.PrimaryLocationValidator = groupDescPrimaryLocation.Validators[0].(func(string) error) + // groupDescID is the schema descriptor for id field. + groupDescID := groupMixinFields1[0].Descriptor() + // group.DefaultID holds the default value on creation for the id field. + group.DefaultID = groupDescID.Default.(func() string) +} + +const ( + Version = "v0.13.1" // Version of ent codegen. + Sum = "h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=" // Sum of ent codegen. +) diff --git a/internal/ent/generated/tx.go b/internal/ent/generated/tx.go new file mode 100644 index 0000000..e3780f5 --- /dev/null +++ b/internal/ent/generated/tx.go @@ -0,0 +1,213 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package generated + +import ( + "context" + "sync" + + "entgo.io/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // Database is the client for interacting with the Database builders. + Database *DatabaseClient + // Group is the client for interacting with the Group builders. + Group *GroupClient + + // lazily loaded. + client *Client + clientOnce sync.Once + // ctx lives for the life of the transaction. It is + // the same context used by the underlying connection. + ctx context.Context +} + +type ( + // Committer is the interface that wraps the Commit method. + Committer interface { + Commit(context.Context, *Tx) error + } + + // The CommitFunc type is an adapter to allow the use of ordinary + // function as a Committer. If f is a function with the appropriate + // signature, CommitFunc(f) is a Committer that calls f. + CommitFunc func(context.Context, *Tx) error + + // CommitHook defines the "commit middleware". A function that gets a Committer + // and returns a Committer. For example: + // + // hook := func(next ent.Committer) ent.Committer { + // return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Commit(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + CommitHook func(Committer) Committer +) + +// Commit calls f(ctx, m). +func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + txDriver := tx.config.driver.(*txDriver) + var fn Committer = CommitFunc(func(context.Context, *Tx) error { + return txDriver.tx.Commit() + }) + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Commit(tx.ctx, tx) +} + +// OnCommit adds a hook to call on commit. +func (tx *Tx) OnCommit(f CommitHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() +} + +type ( + // Rollbacker is the interface that wraps the Rollback method. + Rollbacker interface { + Rollback(context.Context, *Tx) error + } + + // The RollbackFunc type is an adapter to allow the use of ordinary + // function as a Rollbacker. If f is a function with the appropriate + // signature, RollbackFunc(f) is a Rollbacker that calls f. + RollbackFunc func(context.Context, *Tx) error + + // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker + // and returns a Rollbacker. For example: + // + // hook := func(next ent.Rollbacker) ent.Rollbacker { + // return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Rollback(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + RollbackHook func(Rollbacker) Rollbacker +) + +// Rollback calls f(ctx, m). +func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Rollback rollbacks the transaction. +func (tx *Tx) Rollback() error { + txDriver := tx.config.driver.(*txDriver) + var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { + return txDriver.tx.Rollback() + }) + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) 
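+	// the registered hooks were copied under the lock above so they can be
+	// applied below without holding the mutex while user code runs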
+ txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Rollback(tx.ctx, tx) +} + +// OnRollback adds a hook to call on rollback. +func (tx *Tx) OnRollback(f RollbackHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() +} + +// Client returns a Client that binds to current transaction. +func (tx *Tx) Client() *Client { + tx.clientOnce.Do(func() { + tx.client = &Client{config: tx.config} + tx.client.init() + }) + return tx.client +} + +func (tx *Tx) init() { + tx.Database = NewDatabaseClient(tx.config) + tx.Group = NewGroupClient(tx.config) +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: Database.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that txDriver is not goroutine safe. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // tx is the underlying transaction. + tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. 
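+// Together with Exec above, Query routes every statement issued by the
+// builders through the single underlying dialect.Tx.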
+func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error {
+	return tx.tx.Query(ctx, query, args, v)
+}
+
+var _ dialect.Driver = (*txDriver)(nil)
diff --git a/internal/ent/hooks/database.go b/internal/ent/hooks/database.go
new file mode 100644
index 0000000..c34176c
--- /dev/null
+++ b/internal/ent/hooks/database.go
@@ -0,0 +1,140 @@
+package hooks
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"entgo.io/ent"
+
+	"github.com/99designs/gqlgen/graphql"
+	"github.com/datumforge/datum/pkg/rout"
+	"github.com/datumforge/go-turso"
+
+	"github.com/datumforge/geodetic/internal/ent/generated"
+	"github.com/datumforge/geodetic/internal/ent/generated/group"
+	"github.com/datumforge/geodetic/internal/ent/generated/hook"
+	"github.com/datumforge/geodetic/pkg/enums"
+)
+
+// HookCreateDatabase sets the name of the database and, if the provider is turso, creates the database in turso
+func HookCreateDatabase() ent.Hook {
+	return hook.On(func(next ent.Mutator) ent.Mutator {
+		return hook.DatabaseFunc(func(ctx context.Context, mutation *generated.DatabaseMutation) (generated.Value, error) {
+			// get organization and provider from the request
+			orgID, _ := mutation.OrganizationID()
+			provider, _ := mutation.Provider()
+
+			// create a name for the database
+			name := strings.ToLower(fmt.Sprintf("org-%s", orgID))
+			mutation.SetName(name)
+
+			// if the provider is turso, create a database
+			if provider == enums.Turso {
+				// get the group to assign the database to
+				groupName, err := getGroupName(ctx, mutation)
+				if err != nil {
+					return nil, err
+				}
+
+				// create a turso db
+				body := turso.CreateDatabaseRequest{
+					Group: groupName,
+					Name:  name,
+				}
+
+				// create the database in turso
+				db, err := mutation.Turso.Database.CreateDatabase(ctx, body)
+				if err != nil {
+					return nil, err
+				}
+
+				mutation.Logger.Infow("created turso db", "db", db.Database.DatabaseID, "hostname", db.Database.Hostname)
+
+				mutation.SetDsn(db.Database.Hostname)
+			} else {
+				// for local databases, point the dsn at a file named after the database
+				mutation.SetDsn(fmt.Sprintf("file:%s.db", name))
+			}
+
+			// set the status of the database to active
+			mutation.SetStatus(enums.Active)
+
+			// write things that we need to the database
+			return next.Mutate(ctx, mutation)
+		})
+	}, ent.OpCreate)
+}
+
+// HookDatabaseDelete deletes the database in turso
+func HookDatabaseDelete() ent.Hook {
+	return hook.On(func(next ent.Mutator) ent.Mutator {
+		return hook.DatabaseFunc(func(ctx context.Context, mutation *generated.DatabaseMutation) (generated.Value, error) {
+			if ok := graphql.HasOperationContext(ctx); ok {
+				// TODO: this only works for a delete database and not on a cascade delete
+				gtx := graphql.GetOperationContext(ctx)
+				// comma-ok assertion so a missing or non-string variable does not panic
+				name, _ := gtx.Variables["name"].(string)
+
+				if name == "" {
+					mutation.Logger.Errorw("unable to delete database, no name provided")
+
+					return nil, rout.InvalidField("name")
+				}
+
+				db, err := mutation.Turso.Database.DeleteDatabase(ctx, name)
+				if err != nil {
+					return nil, err
+				}
+
+				mutation.Logger.Infow("deleted turso database", "database", db.Database)
+			}
+
+			// write things that we need to the database
+			return next.Mutate(ctx, mutation)
+		})
+	}, ent.OpDelete|ent.OpDeleteOne)
+}
+
+// getGroupName gets the group name associated with the geo or group id
+func getGroupName(ctx context.Context, mutation *generated.DatabaseMutation) (string, error) {
+	groupID, ok := mutation.GroupID()
+
+	// if the group id is set, get the group by the group id
+	if ok && groupID != "" {
+		g, err := mutation.Client().Group.Get(ctx, groupID)
+		if err != nil {
+			mutation.Logger.Errorw("unable to get group, invalid group ID", "error", err)
+
+			return "", err
+		}
+
+		return g.Name, nil
+	}
+
+	// else get the group by the geo
+	geo, ok := mutation.Geo()
+
+	if !ok || geo == "" {
+		mutation.Logger.Errorw("unable to get geo or group id, cannot create database")
+
+		return "", rout.InvalidField("geo")
+	}
+
+	g, err := mutation.Client().Group.Query().Where(group.RegionEQ(enums.Region(geo))).Only(ctx)
+	if err != nil {
+		mutation.Logger.Errorw("unable to get associated group", "error", err)
+
+		return "", err
+	}
+
+	if g == nil {
+		mutation.Logger.Errorw("unable to get associated group", "geo", geo)
+
+		return "", rout.InvalidField("geo")
+	}
+
+	// set the group id on the mutation
+	mutation.SetGroupID(g.ID)
+
+	return g.Name, nil
+}
diff --git a/internal/ent/hooks/doc.go b/internal/ent/hooks/doc.go
new file mode 100644
index 0000000..3ff6db7
--- /dev/null
+++ b/internal/ent/hooks/doc.go
@@ -0,0 +1,2 @@
+// Package hooks contains ent hooks that run as middleware on graphql mutations
+package hooks
diff --git a/internal/ent/hooks/group.go b/internal/ent/hooks/group.go
new file mode 100644
index 0000000..580cd82
--- /dev/null
+++ b/internal/ent/hooks/group.go
@@ -0,0 +1,112 @@
+package hooks
+
+import (
+	"context"
+
+	"entgo.io/ent"
+
+	"github.com/99designs/gqlgen/graphql"
+	"github.com/datumforge/datum/pkg/rout"
+	"github.com/datumforge/go-turso"
+
+	"github.com/datumforge/geodetic/internal/ent/generated"
+	"github.com/datumforge/geodetic/internal/ent/generated/hook"
+)
+
+// HookGroupCreate creates the group in turso before it is persisted locally
+func HookGroupCreate() ent.Hook {
+	return hook.On(func(next ent.Mutator) ent.Mutator {
+		return hook.GroupFunc(func(ctx context.Context, mutation *generated.GroupMutation) (generated.Value, error) {
+			name, _ := mutation.Name()
+			loc, _ := mutation.PrimaryLocation()
+
+			// create a turso group
+			body := turso.CreateGroupRequest{
+				Name:     name,
+				Location: loc,
+			}
+
+			group, err := mutation.Turso.Group.CreateGroup(ctx, body)
+			if err != nil {
+				return nil, err
+			}
+
+			mutation.Logger.Infow("created turso group", "group", group.Group.Name, "locations", group.Group.Locations)
+
+			// write things that we need to the database
+			return next.Mutate(ctx, mutation)
+		})
+	}, ent.OpCreate)
+}
+
+// HookGroupUpdate adds any new locations to the turso group before persisting the changes locally
+func HookGroupUpdate() ent.Hook {
+	return hook.On(func(next ent.Mutator) ent.Mutator {
+		return hook.GroupFunc(func(ctx context.Context, mutation *generated.GroupMutation) (generated.Value, error) {
+			name, _ := mutation.Name()
+			locs, _ := mutation.Locations()
+
+			// first get the group from Turso
+			group, err := mutation.Turso.Group.GetGroup(ctx, name)
+			if err != nil {
+				return nil, err
+			}
+
+			// add any locations that are not yet attached to the group
+			for _, loc := range locs {
+				if !exists(loc, group.Group.Locations) {
+					// add location to the group
+					req := turso.GroupLocationRequest{
+						GroupName: name,
+						Location:  loc,
+					}
+
+					if _, err := mutation.Turso.Group.AddLocation(ctx, req); err != nil {
+						mutation.Logger.Errorw("failed to add location to group", "group", name, "location", loc, "error", err)
+
+						return nil, err
+					}
+				}
+			}
+
+			// write things that we need to the database
+			return next.Mutate(ctx, mutation)
+		})
+	}, ent.OpUpdate|ent.OpUpdateOne)
+}
+
+// HookGroupDelete deletes the turso group when the local group is deleted
+func HookGroupDelete() ent.Hook {
+	return hook.On(func(next ent.Mutator) ent.Mutator {
+		return hook.GroupFunc(func(ctx context.Context, mutation *generated.GroupMutation) (generated.Value, error) {
+			if ok := graphql.HasOperationContext(ctx); ok {
+				gtx := graphql.GetOperationContext(ctx)
+				// comma-ok assertion so a missing or non-string variable does not panic
+				name, _ := gtx.Variables["name"].(string)
+
+				if name == "" {
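+					// the graphql operation did not supply a name variable, so the
+					// turso group cannot be resolved; reject the mutation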
+ mutation.Logger.Errorw("unable to delete group, no name provided") + + return nil, rout.InvalidField("name") + } + + group, err := mutation.Turso.Group.DeleteGroup(ctx, name) + if err != nil { + return nil, err + } + + mutation.Logger.Infow("deleted turso group", "group", group.Group) + } + + // write things that we need to the database + return next.Mutate(ctx, mutation) + }) + }, ent.OpDelete|ent.OpDeleteOne) +} + +// exists checks if a location exists in a list of locations +func exists(loc string, locs []string) bool { + for _, l := range locs { + if l == loc { + return true + } + } + + return false +} diff --git a/internal/ent/mixin/doc.go b/internal/ent/mixin/doc.go new file mode 100644 index 0000000..d9a2257 --- /dev/null +++ b/internal/ent/mixin/doc.go @@ -0,0 +1,2 @@ +// Package mixin contains the mixin package +package mixin diff --git a/internal/ent/mixin/errors.go b/internal/ent/mixin/errors.go new file mode 100644 index 0000000..b149fab --- /dev/null +++ b/internal/ent/mixin/errors.go @@ -0,0 +1,23 @@ +package mixin + +import ( + "fmt" + + "entgo.io/ent" +) + +// UnexpectedMutationTypeError is returned when an unexpected mutation type is parsed +type UnexpectedMutationTypeError struct { + MutationType ent.Mutation +} + +// Error returns the UnexpectedAuditError in string format +func (e *UnexpectedMutationTypeError) Error() string { + return fmt.Sprintf("unexpected mutation type: %T", e.MutationType) +} + +func newUnexpectedMutationTypeError(arg ent.Mutation) *UnexpectedMutationTypeError { + return &UnexpectedMutationTypeError{ + MutationType: arg, + } +} diff --git a/internal/ent/mixin/softdelete_mixin.go b/internal/ent/mixin/softdelete_mixin.go new file mode 100644 index 0000000..62436d1 --- /dev/null +++ b/internal/ent/mixin/softdelete_mixin.go @@ -0,0 +1,113 @@ +package mixin + +import ( + "context" + "time" + + "entgo.io/contrib/entgql" + "entgo.io/contrib/entoas" + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" + + "github.com/datumforge/datum/pkg/auth" + "github.com/datumforge/entx" + + "github.com/datumforge/geodetic/internal/ent/generated" + // "github.com/datumforge/geodetic/internal/ent/generated/hook" + // "github.com/datumforge/geodetic/internal/ent/generated/intercept" +) + +// SoftDeleteMixin implements the soft delete pattern for schemas. +type SoftDeleteMixin struct { + mixin.Schema +} + +// Fields of the SoftDeleteMixin. +func (SoftDeleteMixin) Fields() []ent.Field { + return []ent.Field{ + field.Time("deleted_at"). + Optional(). + Annotations( + entgql.Skip(entgql.SkipMutationCreateInput, entgql.SkipMutationUpdateInput), + entoas.Annotation{ReadOnly: true}, + ), + field.String("deleted_by"). + Optional(). + Annotations( + entgql.Skip(entgql.SkipMutationCreateInput, entgql.SkipMutationUpdateInput), + entoas.Annotation{ReadOnly: true}, + ), + } +} + +// // Interceptors of the SoftDeleteMixin. +// func (d SoftDeleteMixin) Interceptors() []ent.Interceptor { +// return []ent.Interceptor{ +// intercept.TraverseFunc(func(ctx context.Context, q intercept.Query) error { +// // Skip soft-delete, means include soft-deleted entities. 
+// 			if skip, _ := ctx.Value(entx.SoftDeleteSkipKey{}).(bool); skip {
+// 				return nil
+// 			}
+// 			d.P(q)
+// 			return nil
+// 		}),
+// 	}
+// }
+
+// SoftDeleteHook soft deletes records by changing the delete mutation to an update that sets
+// the deleted_at and deleted_by fields, unless the softDeleteSkipKey is set on the context
+func (d SoftDeleteMixin) SoftDeleteHook(next ent.Mutator) ent.Mutator {
+	type SoftDelete interface {
+		SetOp(ent.Op)
+		Client() *generated.Client
+		SetDeletedAt(time.Time)
+		SetDeletedBy(string)
+		WhereP(...func(*sql.Selector))
+	}
+
+	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+		if skip, _ := ctx.Value(entx.SoftDeleteSkipKey{}).(bool); skip {
+			return next.Mutate(ctx, m)
+		}
+
+		actor, err := auth.GetUserIDFromContext(ctx)
+		if err != nil {
+			actor = "unknown"
+		}
+
+		sd, ok := m.(SoftDelete)
+		if !ok {
+			return nil, newUnexpectedMutationTypeError(m)
+		}
+
+		d.P(sd)
+		sd.SetOp(ent.OpUpdate)
+
+		// set that the transaction is a soft-delete
+		ctx = entx.IsSoftDelete(ctx)
+
+		sd.SetDeletedAt(time.Now())
+		sd.SetDeletedBy(actor)
+
+		return sd.Client().Mutate(ctx, m)
+	})
+}
+
+// // Hooks of the SoftDeleteMixin.
+// func (d SoftDeleteMixin) Hooks() []ent.Hook {
+// 	return []ent.Hook{
+// 		hook.On(
+// 			d.SoftDeleteHook,
+// 			ent.OpDeleteOne|ent.OpDelete,
+// 		),
+// 	}
+// }
+
+// P adds a storage-level predicate to the queries and mutations.
+func (d SoftDeleteMixin) P(w interface{ WhereP(...func(*sql.Selector)) }) {
+	w.WhereP(
+		sql.FieldIsNull(d.Fields()[0].Descriptor().Name),
+	)
+}
diff --git a/internal/ent/schema/database.go b/internal/ent/schema/database.go
new file mode 100644
index 0000000..61cf9b8
--- /dev/null
+++ b/internal/ent/schema/database.go
@@ -0,0 +1,102 @@
+package schema
+
+import (
+	"entgo.io/contrib/entgql"
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/schema"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+
+	emixin "github.com/datumforge/entx/mixin"
+
+	"github.com/datumforge/geodetic/internal/ent/hooks"
+	"github.com/datumforge/geodetic/internal/ent/mixin"
+	"github.com/datumforge/geodetic/pkg/enums"
+)
+
+// Database holds the schema definition for the Database entity
+type Database struct {
+	ent.Schema
+}
+
+// Fields of the Database
+func (Database) Fields() []ent.Field {
+	return []ent.Field{
+		field.String("organization_id").
+			Comment("the ID of the organization").
+			NotEmpty(),
+		field.String("name").
+			Comment("the name of the database").
+			NotEmpty(),
+		field.String("geo").
+			Comment("the geo location of the database").
+			Optional(),
+		field.String("dsn").
+			Comment("the DSN used to connect to the database").
+			NotEmpty(),
+		field.String("group_id").
+			Comment("the ID of the group"),
+		field.String("token").
+			Sensitive().
+			Comment("the auth token used to connect to the database").
+			Optional(), // optional because the token is created after the database is created
+		field.Enum("status").
+			GoType(enums.DatabaseStatus("")).
+			Comment("status of the database").
+			Default(string(enums.Creating)),
+		field.Enum("provider").
+			GoType(enums.DatabaseProvider("")).
+			Comment("provider of the database").
+			Default(string(enums.Local)),
+	}
+}
+
+// Indexes of the Database
+func (Database) Indexes() []ent.Index {
+	return []ent.Index{
+		// organization_id should be unique; because the name is derived from it, this also keeps names unique
+		index.Fields("organization_id").
+			Unique().Annotations(entsql.IndexWhere("deleted_at is NULL")),
+		index.Fields("name").
+			Unique().Annotations(entsql.IndexWhere("deleted_at is NULL")),
+	}
+}
+
+// Mixin of the Database
+func (Database) Mixin() []ent.Mixin {
+	return []ent.Mixin{
+		emixin.AuditMixin{},
+		mixin.SoftDeleteMixin{},
+		emixin.IDMixin{},
+	}
+}
+
+// Edges of the Database
+func (Database) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.From("group", Group.Type).
+			Field("group_id").
+			Required().
+			Unique().
+			Ref("databases"),
+	}
+}
+
+// Annotations of the Database
+func (Database) Annotations() []schema.Annotation {
+	return []schema.Annotation{
+		entgql.QueryField(),
+		entgql.RelayConnection(),
+		entgql.Mutations(entgql.MutationCreate(), entgql.MutationUpdate()),
+	}
+}
+
+// Hooks of the Database
+func (Database) Hooks() []ent.Hook {
+	return []ent.Hook{
+		hooks.HookCreateDatabase(),
+		hooks.HookDatabaseDelete(),
+	}
+}
diff --git a/internal/ent/schema/doc.go b/internal/ent/schema/doc.go
new file mode 100644
index 0000000..81f4469
--- /dev/null
+++ b/internal/ent/schema/doc.go
@@ -0,0 +1,2 @@
+// Package schema contains the ent schema
+package schema
diff --git a/internal/ent/schema/group.go b/internal/ent/schema/group.go
new file mode 100644
index 0000000..410a1bf
--- /dev/null
+++ b/internal/ent/schema/group.go
@@ -0,0 +1,92 @@
+package schema
+
+import (
+	"entgo.io/contrib/entgql"
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/schema"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+
+	"github.com/datumforge/entx"
+	emixin "github.com/datumforge/entx/mixin"
+
+	"github.com/datumforge/geodetic/internal/ent/hooks"
+	"github.com/datumforge/geodetic/internal/ent/mixin"
+	"github.com/datumforge/geodetic/pkg/enums"
+)
+
+// Group holds the schema definition for the Group entity
+type Group struct {
+	ent.Schema
+}
+
+// Fields of the Group
+func (Group) Fields() []ent.Field {
+	return []ent.Field{
+		field.String("name").
+			Comment("the name of the group in turso").
+			NotEmpty(),
+		field.String("description").
+			Comment("the description of the group").
+			Optional(),
+		field.String("primary_location").
+			Comment("the primary location of the group").
+			NotEmpty(),
+		field.Strings("locations").
+			Comment("the replica locations of the group").
+			Optional(),
+		field.String("token").
+			Sensitive().
+			Comment("the auth token used to connect to the group").
+			Optional(), // optional because the token is created after the group is created
+		field.Enum("region").
+			GoType(enums.Region("")).
+			Comment("the region of the group").
+			Default(string(enums.Amer)),
+	}
+}
+
+// Mixin of the Group
+func (Group) Mixin() []ent.Mixin {
+	return []ent.Mixin{
+		emixin.AuditMixin{},
+		emixin.IDMixin{},
+		mixin.SoftDeleteMixin{},
+	}
+}
+
+// Hooks of the Group
+func (Group) Hooks() []ent.Hook {
+	return []ent.Hook{
+		hooks.HookGroupCreate(),
+		hooks.HookGroupUpdate(),
+		hooks.HookGroupDelete(),
+	}
+}
+
+// Edges of the Group
+func (Group) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.To("databases", Database.Type).
+			Annotations(entx.CascadeAnnotationField("Group")),
+	}
+}
+
+// Indexes of the Group
+func (Group) Indexes() []ent.Index {
+	return []ent.Index{
+		// names should be unique, but ignore deleted names
+		index.Fields("name").
+			Unique().Annotations(entsql.IndexWhere("deleted_at is NULL")),
+	}
+}
+
+// Annotations of the Group
+func (Group) Annotations() []schema.Annotation {
+	return []schema.Annotation{
+		entgql.QueryField(),
+		entgql.RelayConnection(),
+		entgql.Mutations(entgql.MutationCreate(), entgql.MutationUpdate()),
+	}
+}
diff --git a/internal/ent/templates/.gitkeep b/internal/ent/templates/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/internal/ent/templates/edge_cleanup.tmpl b/internal/ent/templates/edge_cleanup.tmpl
new file mode 100644
index 0000000..e8189ca
--- /dev/null
+++ b/internal/ent/templates/edge_cleanup.tmpl
@@ -0,0 +1,43 @@
+{{/* The line below tells Intellij/GoLand to enable the autocompletion based on the *gen.Graph type. */}}
+{{/* gotype: entgo.io/ent/entc/gen.Graph */}}
+
+{{ define "edge_cleanup" }}
+
+{{/* Add the base header for the generated file */}}
+{{ $pkg := base $.Config.Package }}
+{{ template "header" $ }}
+
+    {{/* For each schema */}}
+    {{- range $node := $.Nodes }}
+    {{/* create an EdgeCleanup function accepting an ID */}}
+    func {{ $node.Name }}EdgeCleanup(ctx context.Context, id string) error {
+        {{/* For each edge */}}
+        {{- range $edge := $node.Edges }}
+        {{/* if the edge has our custom annotation applied */}}
+        {{- if $annotation := $edge.Annotations.DATUM_CASCADE }}
+        {{/* use the client to delete records where the edge schema has a field (provided by the annotation) containing the ID provided by the func */}}
+        if exists, err := FromContext(ctx).{{ $edge.Type.Name }}.Query().Where(({{ $edge.Type.Name | lower }}.Has{{ $annotation.Field }}With({{ $node.Name | lower }}.ID(id)))).Exist(ctx); err == nil && exists {
+            if {{ $edge.Type.Name | lower }}Count, err := FromContext(ctx).{{ $edge.Type.Name }}.Delete().Where({{ $edge.Type.Name | lower }}.Has{{ $annotation.Field }}With({{ $node.Name | lower }}.ID(id))).Exec(ctx); err != nil {
+                FromContext(ctx).Logger.Debugw("deleting {{ $edge.Type.Name | lower }}", "count", {{ $edge.Type.Name | lower }}Count, "err", err)
+                return err
+            }
+        }
+        {{ end }}
+        {{- end }}
+        {{- if $annotation := $node.Annotations.DATUM_CASCADE_THROUGH }}
+        {{- range $schema := $annotation.Schemas }}
+        {{- $field := $schema.Through }}
+        {{/* use the client to delete records where the edge has a field and a through schema (provided by the annotation) containing the ID provided by the func */}}
+        if exists, err := FromContext(ctx).{{ $field }}.Query().Where(({{ $field | lower }}.Has{{ $schema.Field }}With({{ $node.Name | lower }}.ID(id)))).Exist(ctx); err == nil && exists {
+            if {{ $field | lower }}Count, err := FromContext(ctx).{{ $field }}.Delete().Where({{ $field | lower }}.Has{{ $schema.Field }}With({{ $node.Name | lower }}.ID(id))).Exec(ctx); err != nil {
+                FromContext(ctx).Logger.Debugw("deleting {{ $field | lower }}", "count", {{ $field | lower }}Count, "err", err)
+                return err
+            }
+        }
+        {{ end }}
+        {{ end }}
+        return nil
+    }
+    {{ end }}
+{{ end }}
+
diff --git a/internal/entdb/client.go b/internal/entdb/client.go
new file mode 100644
index 0000000..c65658b
--- /dev/null
+++ b/internal/entdb/client.go
@@ -0,0 +1,216 @@
+package entdb
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"os"
+	"time"
+
+	"ariga.io/entcache"
+	entsql "entgo.io/ent/dialect/sql"
+	"github.com/datumforge/entx"
+	"github.com/pressly/goose/v3"
+	"go.uber.org/zap"
+
+	"github.com/datumforge/datum/pkg/testutils"
+
+	migratedb "github.com/datumforge/geodetic/db"
+	ent "github.com/datumforge/geodetic/internal/ent/generated"
+)
+
+type client struct {
+	// config is the entdb configuration
+	config *entx.Config
+	// pc is the primary ent client
+	pc *ent.Client
+	// sc is the secondary ent client
+	sc *ent.Client
+	// logger holds the zap logger
+	logger *zap.SugaredLogger
+}
+
+// NewMultiDriverDBClient returns an ent client with a primary write database and, if configured, a secondary one
+func NewMultiDriverDBClient(ctx context.Context, c entx.Config, l *zap.SugaredLogger, opts []ent.Option) (*ent.Client, *entx.EntClientConfig, error) {
+	client := &client{
+		config: &c,
+		logger: l,
+	}
+
+	dbOpts := []entx.DBOption{
+		entx.WithLogger(l),
+	}
+
+	if c.MultiWrite {
+		dbOpts = append(dbOpts, entx.WithSecondaryDB())
+	}
+
+	entConfig := entx.NewDBConfig(c, dbOpts...)
+
+	// Decorates the sql.Driver with entcache.Driver on the primaryDB
+	drvPrimary := entcache.NewDriver(
+		entConfig.GetPrimaryDB(),
+		entcache.TTL(c.CacheTTL), // set the TTL on the cache
+	)
+
+	client.pc = client.createEntDBClient(entConfig.GetPrimaryDB())
+
+	if c.RunMigrations {
+		if err := client.runMigrations(ctx); err != nil {
+			client.logger.Errorw("failed running migrations", "error", err)
+
+			return nil, nil, err
+		}
+	}
+
+	var cOpts []ent.Option
+
+	if c.MultiWrite {
+		// Decorates the sql.Driver with entcache.Driver on the secondaryDB
+		drvSecondary := entcache.NewDriver(
+			entConfig.GetSecondaryDB(),
+			entcache.TTL(c.CacheTTL), // set the TTL on the cache
+		)
+
+		client.sc = client.createEntDBClient(entConfig.GetSecondaryDB())
+
+		if c.RunMigrations {
+			if err := client.runMigrations(ctx); err != nil {
+				client.logger.Errorw("failed running migrations", "error", err)
+
+				return nil, nil, err
+			}
+		}
+
+		// Create Multiwrite driver
+		cOpts = []ent.Option{ent.Driver(&entx.MultiWriteDriver{Wp: drvPrimary, Ws: drvSecondary})}
+	} else {
+		cOpts = []ent.Option{ent.Driver(drvPrimary)}
+	}
+
+	cOpts = append(cOpts, opts...)
+
+	if c.Debug {
+		cOpts = append(cOpts,
+			ent.Log(client.logger.Named("ent").Debugln),
+			ent.Debug(),
+			ent.Driver(drvPrimary),
+		)
+	}
+
+	ec := ent.NewClient(cOpts...)
+
+	// add authz hooks
+	ec.WithAuthz()
+
+	return ec, entConfig, nil
+}
+
+// createEntDBClient creates a new ent client with configured options
+func (c *client) createEntDBClient(db *entsql.Driver) *ent.Client {
+	cOpts := []ent.Option{ent.Driver(db)}
+
+	if c.config.Debug {
+		cOpts = append(cOpts,
+			ent.Log(c.logger.Named("ent").Debugln),
+			ent.Debug(),
+		)
+	}
+
+	return ent.NewClient(cOpts...)
+}
+
+func NewTestContainer(ctx context.Context) *testutils.TC {
+	// Grab the DB environment variable or use the default
+	testDBURI := os.Getenv("TEST_DB_URL")
+
+	return testutils.GetTestURI(ctx, testDBURI)
+}
+
+// NewTestClient creates an entdb client that can be used for TEST purposes ONLY
+func NewTestClient(ctx context.Context, ctr *testutils.TC, entOpts []ent.Option) (*ent.Client, error) {
+	// setup logger
+	logger := zap.NewNop().Sugar()
+
+	dbconf := entx.Config{
+		Debug:           true,
+		DriverName:      ctr.Dialect,
+		PrimaryDBSource: ctr.URI,
+		CacheTTL:        -1 * time.Second, // do not cache results in tests
+	}
+
+	entOpts = append(entOpts, ent.Logger(*logger))
+
+	db, _, err := NewMultiDriverDBClient(ctx, dbconf, logger, entOpts)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := db.Schema.Create(ctx); err != nil {
+		return nil, err
+	}
+
+	return db, nil
+}
+
+// runMigrations runs the migrations based on the configured migration provider on startup
+func (c *client) runMigrations(ctx context.Context) error {
+	switch c.config.MigrationProvider {
+	case "goose":
+		return c.runGooseMigrations()
+	default: // atlas
+		return c.runAtlasMigrations(ctx)
+	}
+}
+
+// runGooseMigrations runs the goose migrations
+func (c *client) runGooseMigrations() error {
+	driver, err := entx.CheckEntDialect(c.config.DriverName)
+	if err != nil {
+		return err
+	}
+
+	drv, err := sql.Open(c.config.DriverName, c.config.PrimaryDBSource)
+	if err != nil {
+		return err
+	}
+	defer drv.Close()
+
+	if _, err := drv.Exec("PRAGMA foreign_keys = off;", nil); err != nil {
+		drv.Close()
+
+		return fmt.Errorf("failed to disable foreign keys: %w", err)
+	}
+
+	goose.SetBaseFS(migratedb.GooseMigrations)
+
+	if err := goose.SetDialect(driver); err != nil {
+		return err
+	}
+
+	if err := goose.Up(drv, "migrations-goose"); err != nil {
+		return err
+	}
+
+	if _, err := drv.Exec("PRAGMA foreign_keys = on;", nil); err != nil {
+		drv.Close()
+
+		return fmt.Errorf("failed to enable foreign keys: %w", err)
+	}
+
+	return nil
+}
+
+// runAtlasMigrations runs the atlas auto-migrations;
+// it does not use the generated versioned migration files from ent
+func (c *client) runAtlasMigrations(ctx context.Context) error {
+	// Run the automatic migration tool to create all schema resources.
+	// entcache.Driver will skip the caching layer when running the schema migration
+	if err := c.pc.Schema.Create(entcache.Skip(ctx)); err != nil {
+		c.logger.Errorw("failed creating schema resources", "error", err)
+
+		return err
+	}
+
+	return nil
+}
diff --git a/internal/entdb/doc.go b/internal/entdb/doc.go
new file mode 100644
index 0000000..f1eb0f1
--- /dev/null
+++ b/internal/entdb/doc.go
@@ -0,0 +1,2 @@
+// Package entdb extends the ent db library and satisfies matt's needs for consistency
+package entdb
diff --git a/internal/graphapi/.gitkeep b/internal/graphapi/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/internal/graphapi/database.resolvers.go b/internal/graphapi/database.resolvers.go
new file mode 100644
index 0000000..4873e95
--- /dev/null
+++ b/internal/graphapi/database.resolvers.go
@@ -0,0 +1,80 @@
+package graphapi
+
+// This file will be automatically regenerated based on the schema, any resolver implementations
+// will be copied through when generating and any unknown code will be moved to the end.
+// Code generated by github.com/99designs/gqlgen
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/datumforge/datum/pkg/rout"
+	"github.com/datumforge/geodetic/internal/ent/generated"
+	"github.com/datumforge/geodetic/internal/ent/generated/database"
+)
+
+// CreateDatabase is the resolver for the createDatabase field.
+func (r *mutationResolver) CreateDatabase(ctx context.Context, input generated.CreateDatabaseInput) (*DatabaseCreatePayload, error) {
+	db, err := withTransactionalMutation(ctx).Database.Create().SetInput(input).Save(ctx)
+	if err != nil {
+		if generated.IsConstraintError(err) {
+			constraintError := err.(*generated.ConstraintError)
+
+			r.logger.Debugw("constraint error", "error", constraintError.Error())
+
+			return nil, constraintError
+		}
+
+		if generated.IsValidationError(err) {
+			ve := err.(*generated.ValidationError)
+
+			return nil, rout.InvalidField(ve.Name)
+		}
+
+		r.logger.Errorw("failed to create database", "error", err)
+
+		return nil, err
+	}
+
+	return &DatabaseCreatePayload{Database: db}, nil
+}
+
+// UpdateDatabase is the resolver for the updateDatabase field.
+func (r *mutationResolver) UpdateDatabase(ctx context.Context, name string, input generated.UpdateDatabaseInput) (*DatabaseUpdatePayload, error) {
+	panic(fmt.Errorf("not implemented: UpdateDatabase - updateDatabase"))
+}
+
+// DeleteDatabase is the resolver for the deleteDatabase field.
+func (r *mutationResolver) DeleteDatabase(ctx context.Context, name string) (*DatabaseDeletePayload, error) {
+	db, err := withTransactionalMutation(ctx).Database.Query().Where(database.NameEQ(name)).Only(ctx)
+	if err != nil {
+		r.logger.Errorw("failed to get database", "error", err)
+
+		return nil, err
+	}
+
+	if err := withTransactionalMutation(ctx).Database.DeleteOneID(db.ID).Exec(ctx); err != nil {
+		r.logger.Errorw("failed to delete database", "error", err)
+
+		return nil, err
+	}
+
+	return &DatabaseDeletePayload{DeletedID: db.ID}, nil
+}
+
+// Database is the resolver for the database field.
+func (r *queryResolver) Database(ctx context.Context, name string) (*generated.Database, error) {
+	db, err := withTransactionalMutation(ctx).Database.Query().Where(database.NameEQ(name)).Only(ctx)
+	if err != nil {
+		r.logger.Errorw("failed to get database", "error", err)
+
+		return nil, err
+	}
+
+	return db, nil
+}
+
+// Mutation returns MutationResolver implementation.
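+//
+// In a typical gqlgen setup (a sketch only; the actual server wiring is not
+// part of this file) the resolver root is handed to the generated executable
+// schema:
+//
+//	srv := handler.NewDefaultServer(NewExecutableSchema(Config{Resolvers: resolver}))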
+func (r *Resolver) Mutation() MutationResolver { return &mutationResolver{r} } + +type mutationResolver struct{ *Resolver } diff --git a/internal/graphapi/database_test.go b/internal/graphapi/database_test.go new file mode 100644 index 0000000..341d209 --- /dev/null +++ b/internal/graphapi/database_test.go @@ -0,0 +1,210 @@ +package graphapi_test + +import ( + "context" + "strings" + "testing" + + "github.com/samber/lo" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ent "github.com/datumforge/geodetic/internal/ent/generated" + "github.com/datumforge/geodetic/pkg/enums" + "github.com/datumforge/geodetic/pkg/geodeticclient" +) + +func (suite *GraphTestSuite) TestQueryDatabase() { + t := suite.T() + + db := (&DatabaseBuilder{client: suite.client}).MustNew(context.Background(), t) + + testCases := []struct { + name string + query string + expected *ent.Database + errorMsg string + }{ + { + name: "happy path database", + query: db.Name, + expected: db, + }, + { + name: "database not found", + query: "notfound", + expected: nil, + errorMsg: "database not found", + }, + } + + for _, tc := range testCases { + t.Run("Get "+tc.name, func(t *testing.T) { + resp, err := suite.client.geodetic.GetDatabase(context.Background(), tc.query) + + if tc.errorMsg != "" { + require.Error(t, err) + assert.ErrorContains(t, err, tc.errorMsg) + assert.Nil(t, resp) + + return + } + + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Database) + }) + } + + (&DatabaseCleanup{client: suite.client, DatabaseID: db.ID}).MustDelete(context.Background(), t) + (&GroupCleanup{client: suite.client, GroupID: db.GroupID}).MustDelete(context.Background(), t) +} + +func (suite *GraphTestSuite) TestListDatabases() { + t := suite.T() + + db1 := (&DatabaseBuilder{client: suite.client}).MustNew(context.Background(), t) + db2 := (&DatabaseBuilder{client: suite.client}).MustNew(context.Background(), t) + + t.Run("List Databases", func(t *testing.T) { + resp, err := suite.client.geodetic.GetAllDatabases(context.Background()) + + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Databases) + require.Len(t, resp.Databases.Edges, 2) + }) + + (&DatabaseCleanup{client: suite.client, DatabaseID: db1.ID}).MustDelete(context.Background(), t) + (&DatabaseCleanup{client: suite.client, DatabaseID: db2.ID}).MustDelete(context.Background(), t) + (&GroupCleanup{client: suite.client, GroupID: db1.GroupID}).MustDelete(context.Background(), t) + (&GroupCleanup{client: suite.client, GroupID: db2.GroupID}).MustDelete(context.Background(), t) +} + +func (suite *GraphTestSuite) TestCreateDatabase() { + t := suite.T() + + group := (&GroupBuilder{client: suite.client}).MustNew(context.Background(), t) + + testCases := []struct { + name string + orgID string + groupID string + region enums.Region + provider *enums.DatabaseProvider + errorMsg string + }{ + { + name: "happy path, turso database", + orgID: "01HSCAGDJ1XZ12Y06FESH4VEC1", + groupID: group.ID, + provider: &enums.Turso, + }, + { + name: "happy path, turso database with region", + orgID: "01HSCAGDJ1XZ12Y06FESH4VEC1", + region: enums.Amer, + provider: &enums.Turso, + }, + { + name: "happy path, local database", + orgID: "01HSCAGDJ1XZ12Y06FESH4VEC2", + groupID: group.ID, + provider: &enums.Local, + }, + { + name: "missing group", + orgID: "01HSCAGDJ1XZ12Y06FESH4VEC3", + groupID: "notfound", + provider: &enums.Turso, + errorMsg: "group not found", + }, + { + name: "missing org id", + orgID: "", + groupID: group.ID, + 
provider: &enums.Turso, + errorMsg: "invalid or unparsable field: organization_id", + }, + } + + for _, tc := range testCases { + t.Run("Create "+tc.name, func(t *testing.T) { + g := geodeticclient.CreateDatabaseInput{ + OrganizationID: tc.orgID, + Provider: tc.provider, + GroupID: tc.groupID, + } + + if tc.region != "" { + g.Geo = lo.ToPtr(tc.region.String()) + } + + resp, err := suite.client.geodetic.CreateDatabase(context.Background(), g) + + if tc.errorMsg != "" { + require.Error(t, err) + assert.ErrorContains(t, err, tc.errorMsg) + assert.Nil(t, resp) + + return + } + + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.CreateDatabase) + + assert.Contains(t, resp.CreateDatabase.Database.Name, strings.ToLower(tc.orgID)) + assert.Equal(t, *tc.provider, resp.CreateDatabase.Database.Provider) + assert.Equal(t, tc.orgID, resp.CreateDatabase.Database.OrganizationID) + + (&DatabaseCleanup{client: suite.client, DatabaseID: resp.CreateDatabase.Database.ID}).MustDelete(context.Background(), t) + }) + } + + (&GroupCleanup{client: suite.client, GroupID: group.ID}).MustDelete(context.Background(), t) +} + +func (suite *GraphTestSuite) TestDeleteDatabase() { + t := suite.T() + + db := (&DatabaseBuilder{client: suite.client}).MustNew(context.Background(), t) + + testCases := []struct { + name string + dbName string + errorMsg string + }{ + { + name: "happy path database", + dbName: db.Name, + }, + { + name: "db does not exist", + dbName: "lost-ark", + errorMsg: "database not found", + }, + } + + for _, tc := range testCases { + t.Run("Delete "+tc.name, func(t *testing.T) { + resp, err := suite.client.geodetic.DeleteDatabase(context.Background(), tc.dbName) + + if tc.errorMsg != "" { + require.Error(t, err) + assert.ErrorContains(t, err, tc.errorMsg) + assert.Nil(t, resp) + + return + } + + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.DeleteDatabase) + + assert.NotEmpty(t, resp.DeleteDatabase.DeletedID) + }) + } + + (&GroupCleanup{client: suite.client, GroupID: db.GroupID}).MustDelete(context.Background(), t) +} diff --git a/internal/graphapi/doc.go b/internal/graphapi/doc.go new file mode 100644 index 0000000..df63230 --- /dev/null +++ b/internal/graphapi/doc.go @@ -0,0 +1,2 @@ +// Package graphapi does graph stuff +package graphapi diff --git a/internal/graphapi/ent.resolvers.go b/internal/graphapi/ent.resolvers.go new file mode 100644 index 0000000..2df9d08 --- /dev/null +++ b/internal/graphapi/ent.resolvers.go @@ -0,0 +1,39 @@ +package graphapi + +// This file will be automatically regenerated based on the schema, any resolver implementations +// will be copied through when generating and any unknown code will be moved to the end. +// Code generated by github.com/99designs/gqlgen + +import ( + "context" + "fmt" + + "entgo.io/contrib/entgql" + "github.com/datumforge/geodetic/internal/ent/generated" + _ "github.com/datumforge/geodetic/internal/ent/generated/runtime" +) + +// Node is the resolver for the node field. +func (r *queryResolver) Node(ctx context.Context, id string) (generated.Noder, error) { + panic(fmt.Errorf("not implemented: Node - node")) +} + +// Nodes is the resolver for the nodes field. +func (r *queryResolver) Nodes(ctx context.Context, ids []string) ([]generated.Noder, error) { + panic(fmt.Errorf("not implemented: Nodes - nodes")) +} + +// Databases is the resolver for the databases field. 
+func (r *queryResolver) Databases(ctx context.Context, after *entgql.Cursor[string], first *int, before *entgql.Cursor[string], last *int, where *generated.DatabaseWhereInput) (*generated.DatabaseConnection, error) { + return withTransactionalMutation(ctx).Database.Query().Paginate(ctx, after, first, before, last, generated.WithDatabaseFilter(where.Filter)) +} + +// Groups is the resolver for the groups field. +func (r *queryResolver) Groups(ctx context.Context, after *entgql.Cursor[string], first *int, before *entgql.Cursor[string], last *int, where *generated.GroupWhereInput) (*generated.GroupConnection, error) { + return withTransactionalMutation(ctx).Group.Query().Paginate(ctx, after, first, before, last, generated.WithGroupFilter(where.Filter)) +} + +// Query returns QueryResolver implementation. +func (r *Resolver) Query() QueryResolver { return &queryResolver{r} } + +type queryResolver struct{ *Resolver } diff --git a/internal/graphapi/errors.go b/internal/graphapi/errors.go new file mode 100644 index 0000000..926302b --- /dev/null +++ b/internal/graphapi/errors.go @@ -0,0 +1,15 @@ +package graphapi + +import ( + "errors" + "fmt" +) + +var ( + // ErrCascadeDelete is returned when an error occurs while performing cascade deletes on associated objects + ErrCascadeDelete = errors.New("error deleting associated objects") +) + +func newCascadeDeleteError(err error) error { + return fmt.Errorf("%w: %v", ErrCascadeDelete, err) +} diff --git a/internal/graphapi/gen_models.go b/internal/graphapi/gen_models.go new file mode 100644 index 0000000..d4a1c63 --- /dev/null +++ b/internal/graphapi/gen_models.go @@ -0,0 +1,43 @@ +// Code generated by github.com/99designs/gqlgen, DO NOT EDIT. + +package graphapi + +import ( + "github.com/datumforge/geodetic/internal/ent/generated" +) + +// Return response for createDatabase mutation +type DatabaseCreatePayload struct { + // Created database + Database *generated.Database `json:"database"` +} + +// Return response for deleteDatabase mutation +type DatabaseDeletePayload struct { + // Deleted database ID + DeletedID string `json:"deletedID"` +} + +// Return response for updateDatabase mutation +type DatabaseUpdatePayload struct { + // Updated database + Database *generated.Database `json:"database"` +} + +// Return response for createGroup mutation +type GroupCreatePayload struct { + // Created group + Group *generated.Group `json:"group"` +} + +// Return response for deleteGroup mutation +type GroupDeletePayload struct { + // Deleted group ID + DeletedID string `json:"deletedID"` +} + +// Return response for updateGroup mutation +type GroupUpdatePayload struct { + // Updated group + Group *generated.Group `json:"group"` +} diff --git a/internal/graphapi/gen_server.go b/internal/graphapi/gen_server.go new file mode 100644 index 0000000..8bbd2c4 --- /dev/null +++ b/internal/graphapi/gen_server.go @@ -0,0 +1,12491 @@ +// Code generated by github.com/99designs/gqlgen, DO NOT EDIT. 
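The generated server that follows is large but mechanical; the only pieces a caller touches are NewExecutableSchema, its Config, and the ResolverRoot interface that *Resolver satisfies via the Mutation() and Query() methods above. A minimal sketch of mounting it with gqlgen's default handler; the bare Resolver literal and the HTTP wiring are illustrative assumptions, not this project's actual serve path (a real Resolver needs its ent client and logger configured):

package main

import (
	"log"
	"net/http"

	"github.com/99designs/gqlgen/graphql/handler"

	"github.com/datumforge/geodetic/internal/graphapi"
)

func main() {
	// NewExecutableSchema stitches the generated execution code to the
	// hand-written resolvers through the ResolverRoot interface; Schema is
	// left nil so the embedded parsedSchema is used
	srv := handler.NewDefaultServer(graphapi.NewExecutableSchema(graphapi.Config{
		Resolvers: &graphapi.Resolver{},
	}))

	http.Handle("/query", srv)
	log.Fatal(http.ListenAndServe(":8080", nil))
}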
+ +package graphapi + +import ( + "bytes" + "context" + "errors" + "fmt" + "strconv" + "sync" + "sync/atomic" + "time" + + "entgo.io/contrib/entgql" + "github.com/99designs/gqlgen/graphql" + "github.com/99designs/gqlgen/graphql/introspection" + "github.com/datumforge/geodetic/internal/ent/generated" + "github.com/datumforge/geodetic/pkg/enums" + gqlparser "github.com/vektah/gqlparser/v2" + "github.com/vektah/gqlparser/v2/ast" +) + +// region ************************** generated!.gotpl ************************** + +// NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface. +func NewExecutableSchema(cfg Config) graphql.ExecutableSchema { + return &executableSchema{ + schema: cfg.Schema, + resolvers: cfg.Resolvers, + directives: cfg.Directives, + complexity: cfg.Complexity, + } +} + +type Config struct { + Schema *ast.Schema + Resolvers ResolverRoot + Directives DirectiveRoot + Complexity ComplexityRoot +} + +type ResolverRoot interface { + Mutation() MutationResolver + Query() QueryResolver +} + +type DirectiveRoot struct { +} + +type ComplexityRoot struct { + Database struct { + CreatedAt func(childComplexity int) int + CreatedBy func(childComplexity int) int + DeletedAt func(childComplexity int) int + DeletedBy func(childComplexity int) int + Dsn func(childComplexity int) int + Geo func(childComplexity int) int + Group func(childComplexity int) int + GroupID func(childComplexity int) int + ID func(childComplexity int) int + Name func(childComplexity int) int + OrganizationID func(childComplexity int) int + Provider func(childComplexity int) int + Status func(childComplexity int) int + UpdatedAt func(childComplexity int) int + UpdatedBy func(childComplexity int) int + } + + DatabaseConnection struct { + Edges func(childComplexity int) int + PageInfo func(childComplexity int) int + TotalCount func(childComplexity int) int + } + + DatabaseCreatePayload struct { + Database func(childComplexity int) int + } + + DatabaseDeletePayload struct { + DeletedID func(childComplexity int) int + } + + DatabaseEdge struct { + Cursor func(childComplexity int) int + Node func(childComplexity int) int + } + + DatabaseUpdatePayload struct { + Database func(childComplexity int) int + } + + Group struct { + CreatedAt func(childComplexity int) int + CreatedBy func(childComplexity int) int + Databases func(childComplexity int) int + DeletedAt func(childComplexity int) int + DeletedBy func(childComplexity int) int + Description func(childComplexity int) int + ID func(childComplexity int) int + Locations func(childComplexity int) int + Name func(childComplexity int) int + PrimaryLocation func(childComplexity int) int + Region func(childComplexity int) int + UpdatedAt func(childComplexity int) int + UpdatedBy func(childComplexity int) int + } + + GroupConnection struct { + Edges func(childComplexity int) int + PageInfo func(childComplexity int) int + TotalCount func(childComplexity int) int + } + + GroupCreatePayload struct { + Group func(childComplexity int) int + } + + GroupDeletePayload struct { + DeletedID func(childComplexity int) int + } + + GroupEdge struct { + Cursor func(childComplexity int) int + Node func(childComplexity int) int + } + + GroupUpdatePayload struct { + Group func(childComplexity int) int + } + + Mutation struct { + CreateDatabase func(childComplexity int, input generated.CreateDatabaseInput) int + CreateGroup func(childComplexity int, input generated.CreateGroupInput) int + DeleteDatabase func(childComplexity int, name string) int + DeleteGroup func(childComplexity 
int, name string) int + UpdateDatabase func(childComplexity int, name string, input generated.UpdateDatabaseInput) int + UpdateGroup func(childComplexity int, name string, input generated.UpdateGroupInput) int + } + + PageInfo struct { + EndCursor func(childComplexity int) int + HasNextPage func(childComplexity int) int + HasPreviousPage func(childComplexity int) int + StartCursor func(childComplexity int) int + } + + Query struct { + Database func(childComplexity int, name string) int + Databases func(childComplexity int, after *entgql.Cursor[string], first *int, before *entgql.Cursor[string], last *int, where *generated.DatabaseWhereInput) int + Group func(childComplexity int, name string) int + Groups func(childComplexity int, after *entgql.Cursor[string], first *int, before *entgql.Cursor[string], last *int, where *generated.GroupWhereInput) int + Node func(childComplexity int, id string) int + Nodes func(childComplexity int, ids []string) int + } +} + +type MutationResolver interface { + CreateDatabase(ctx context.Context, input generated.CreateDatabaseInput) (*DatabaseCreatePayload, error) + UpdateDatabase(ctx context.Context, name string, input generated.UpdateDatabaseInput) (*DatabaseUpdatePayload, error) + DeleteDatabase(ctx context.Context, name string) (*DatabaseDeletePayload, error) + CreateGroup(ctx context.Context, input generated.CreateGroupInput) (*GroupCreatePayload, error) + UpdateGroup(ctx context.Context, name string, input generated.UpdateGroupInput) (*GroupUpdatePayload, error) + DeleteGroup(ctx context.Context, name string) (*GroupDeletePayload, error) +} +type QueryResolver interface { + Node(ctx context.Context, id string) (generated.Noder, error) + Nodes(ctx context.Context, ids []string) ([]generated.Noder, error) + Databases(ctx context.Context, after *entgql.Cursor[string], first *int, before *entgql.Cursor[string], last *int, where *generated.DatabaseWhereInput) (*generated.DatabaseConnection, error) + Groups(ctx context.Context, after *entgql.Cursor[string], first *int, before *entgql.Cursor[string], last *int, where *generated.GroupWhereInput) (*generated.GroupConnection, error) + Database(ctx context.Context, name string) (*generated.Database, error) + Group(ctx context.Context, name string) (*generated.Group, error) +} + +type executableSchema struct { + schema *ast.Schema + resolvers ResolverRoot + directives DirectiveRoot + complexity ComplexityRoot +} + +func (e *executableSchema) Schema() *ast.Schema { + if e.schema != nil { + return e.schema + } + return parsedSchema +} + +func (e *executableSchema) Complexity(typeName, field string, childComplexity int, rawArgs map[string]interface{}) (int, bool) { + ec := executionContext{nil, e, 0, 0, nil} + _ = ec + switch typeName + "." 
+ field { + + case "Database.createdAt": + if e.complexity.Database.CreatedAt == nil { + break + } + + return e.complexity.Database.CreatedAt(childComplexity), true + + case "Database.createdBy": + if e.complexity.Database.CreatedBy == nil { + break + } + + return e.complexity.Database.CreatedBy(childComplexity), true + + case "Database.deletedAt": + if e.complexity.Database.DeletedAt == nil { + break + } + + return e.complexity.Database.DeletedAt(childComplexity), true + + case "Database.deletedBy": + if e.complexity.Database.DeletedBy == nil { + break + } + + return e.complexity.Database.DeletedBy(childComplexity), true + + case "Database.dsn": + if e.complexity.Database.Dsn == nil { + break + } + + return e.complexity.Database.Dsn(childComplexity), true + + case "Database.geo": + if e.complexity.Database.Geo == nil { + break + } + + return e.complexity.Database.Geo(childComplexity), true + + case "Database.group": + if e.complexity.Database.Group == nil { + break + } + + return e.complexity.Database.Group(childComplexity), true + + case "Database.groupID": + if e.complexity.Database.GroupID == nil { + break + } + + return e.complexity.Database.GroupID(childComplexity), true + + case "Database.id": + if e.complexity.Database.ID == nil { + break + } + + return e.complexity.Database.ID(childComplexity), true + + case "Database.name": + if e.complexity.Database.Name == nil { + break + } + + return e.complexity.Database.Name(childComplexity), true + + case "Database.organizationID": + if e.complexity.Database.OrganizationID == nil { + break + } + + return e.complexity.Database.OrganizationID(childComplexity), true + + case "Database.provider": + if e.complexity.Database.Provider == nil { + break + } + + return e.complexity.Database.Provider(childComplexity), true + + case "Database.status": + if e.complexity.Database.Status == nil { + break + } + + return e.complexity.Database.Status(childComplexity), true + + case "Database.updatedAt": + if e.complexity.Database.UpdatedAt == nil { + break + } + + return e.complexity.Database.UpdatedAt(childComplexity), true + + case "Database.updatedBy": + if e.complexity.Database.UpdatedBy == nil { + break + } + + return e.complexity.Database.UpdatedBy(childComplexity), true + + case "DatabaseConnection.edges": + if e.complexity.DatabaseConnection.Edges == nil { + break + } + + return e.complexity.DatabaseConnection.Edges(childComplexity), true + + case "DatabaseConnection.pageInfo": + if e.complexity.DatabaseConnection.PageInfo == nil { + break + } + + return e.complexity.DatabaseConnection.PageInfo(childComplexity), true + + case "DatabaseConnection.totalCount": + if e.complexity.DatabaseConnection.TotalCount == nil { + break + } + + return e.complexity.DatabaseConnection.TotalCount(childComplexity), true + + case "DatabaseCreatePayload.database": + if e.complexity.DatabaseCreatePayload.Database == nil { + break + } + + return e.complexity.DatabaseCreatePayload.Database(childComplexity), true + + case "DatabaseDeletePayload.deletedID": + if e.complexity.DatabaseDeletePayload.DeletedID == nil { + break + } + + return e.complexity.DatabaseDeletePayload.DeletedID(childComplexity), true + + case "DatabaseEdge.cursor": + if e.complexity.DatabaseEdge.Cursor == nil { + break + } + + return e.complexity.DatabaseEdge.Cursor(childComplexity), true + + case "DatabaseEdge.node": + if e.complexity.DatabaseEdge.Node == nil { + break + } + + return e.complexity.DatabaseEdge.Node(childComplexity), true + + case "DatabaseUpdatePayload.database": + if 
e.complexity.DatabaseUpdatePayload.Database == nil { + break + } + + return e.complexity.DatabaseUpdatePayload.Database(childComplexity), true + + case "Group.createdAt": + if e.complexity.Group.CreatedAt == nil { + break + } + + return e.complexity.Group.CreatedAt(childComplexity), true + + case "Group.createdBy": + if e.complexity.Group.CreatedBy == nil { + break + } + + return e.complexity.Group.CreatedBy(childComplexity), true + + case "Group.databases": + if e.complexity.Group.Databases == nil { + break + } + + return e.complexity.Group.Databases(childComplexity), true + + case "Group.deletedAt": + if e.complexity.Group.DeletedAt == nil { + break + } + + return e.complexity.Group.DeletedAt(childComplexity), true + + case "Group.deletedBy": + if e.complexity.Group.DeletedBy == nil { + break + } + + return e.complexity.Group.DeletedBy(childComplexity), true + + case "Group.description": + if e.complexity.Group.Description == nil { + break + } + + return e.complexity.Group.Description(childComplexity), true + + case "Group.id": + if e.complexity.Group.ID == nil { + break + } + + return e.complexity.Group.ID(childComplexity), true + + case "Group.locations": + if e.complexity.Group.Locations == nil { + break + } + + return e.complexity.Group.Locations(childComplexity), true + + case "Group.name": + if e.complexity.Group.Name == nil { + break + } + + return e.complexity.Group.Name(childComplexity), true + + case "Group.primaryLocation": + if e.complexity.Group.PrimaryLocation == nil { + break + } + + return e.complexity.Group.PrimaryLocation(childComplexity), true + + case "Group.region": + if e.complexity.Group.Region == nil { + break + } + + return e.complexity.Group.Region(childComplexity), true + + case "Group.updatedAt": + if e.complexity.Group.UpdatedAt == nil { + break + } + + return e.complexity.Group.UpdatedAt(childComplexity), true + + case "Group.updatedBy": + if e.complexity.Group.UpdatedBy == nil { + break + } + + return e.complexity.Group.UpdatedBy(childComplexity), true + + case "GroupConnection.edges": + if e.complexity.GroupConnection.Edges == nil { + break + } + + return e.complexity.GroupConnection.Edges(childComplexity), true + + case "GroupConnection.pageInfo": + if e.complexity.GroupConnection.PageInfo == nil { + break + } + + return e.complexity.GroupConnection.PageInfo(childComplexity), true + + case "GroupConnection.totalCount": + if e.complexity.GroupConnection.TotalCount == nil { + break + } + + return e.complexity.GroupConnection.TotalCount(childComplexity), true + + case "GroupCreatePayload.group": + if e.complexity.GroupCreatePayload.Group == nil { + break + } + + return e.complexity.GroupCreatePayload.Group(childComplexity), true + + case "GroupDeletePayload.deletedID": + if e.complexity.GroupDeletePayload.DeletedID == nil { + break + } + + return e.complexity.GroupDeletePayload.DeletedID(childComplexity), true + + case "GroupEdge.cursor": + if e.complexity.GroupEdge.Cursor == nil { + break + } + + return e.complexity.GroupEdge.Cursor(childComplexity), true + + case "GroupEdge.node": + if e.complexity.GroupEdge.Node == nil { + break + } + + return e.complexity.GroupEdge.Node(childComplexity), true + + case "GroupUpdatePayload.group": + if e.complexity.GroupUpdatePayload.Group == nil { + break + } + + return e.complexity.GroupUpdatePayload.Group(childComplexity), true + + case "Mutation.createDatabase": + if e.complexity.Mutation.CreateDatabase == nil { + break + } + + args, err := ec.field_Mutation_createDatabase_args(context.TODO(), rawArgs) + if err != nil 
{ + return 0, false + } + + return e.complexity.Mutation.CreateDatabase(childComplexity, args["input"].(generated.CreateDatabaseInput)), true + + case "Mutation.createGroup": + if e.complexity.Mutation.CreateGroup == nil { + break + } + + args, err := ec.field_Mutation_createGroup_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.CreateGroup(childComplexity, args["input"].(generated.CreateGroupInput)), true + + case "Mutation.deleteDatabase": + if e.complexity.Mutation.DeleteDatabase == nil { + break + } + + args, err := ec.field_Mutation_deleteDatabase_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.DeleteDatabase(childComplexity, args["name"].(string)), true + + case "Mutation.deleteGroup": + if e.complexity.Mutation.DeleteGroup == nil { + break + } + + args, err := ec.field_Mutation_deleteGroup_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.DeleteGroup(childComplexity, args["name"].(string)), true + + case "Mutation.updateDatabase": + if e.complexity.Mutation.UpdateDatabase == nil { + break + } + + args, err := ec.field_Mutation_updateDatabase_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateDatabase(childComplexity, args["name"].(string), args["input"].(generated.UpdateDatabaseInput)), true + + case "Mutation.updateGroup": + if e.complexity.Mutation.UpdateGroup == nil { + break + } + + args, err := ec.field_Mutation_updateGroup_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateGroup(childComplexity, args["name"].(string), args["input"].(generated.UpdateGroupInput)), true + + case "PageInfo.endCursor": + if e.complexity.PageInfo.EndCursor == nil { + break + } + + return e.complexity.PageInfo.EndCursor(childComplexity), true + + case "PageInfo.hasNextPage": + if e.complexity.PageInfo.HasNextPage == nil { + break + } + + return e.complexity.PageInfo.HasNextPage(childComplexity), true + + case "PageInfo.hasPreviousPage": + if e.complexity.PageInfo.HasPreviousPage == nil { + break + } + + return e.complexity.PageInfo.HasPreviousPage(childComplexity), true + + case "PageInfo.startCursor": + if e.complexity.PageInfo.StartCursor == nil { + break + } + + return e.complexity.PageInfo.StartCursor(childComplexity), true + + case "Query.database": + if e.complexity.Query.Database == nil { + break + } + + args, err := ec.field_Query_database_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Database(childComplexity, args["name"].(string)), true + + case "Query.databases": + if e.complexity.Query.Databases == nil { + break + } + + args, err := ec.field_Query_databases_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Databases(childComplexity, args["after"].(*entgql.Cursor[string]), args["first"].(*int), args["before"].(*entgql.Cursor[string]), args["last"].(*int), args["where"].(*generated.DatabaseWhereInput)), true + + case "Query.group": + if e.complexity.Query.Group == nil { + break + } + + args, err := ec.field_Query_group_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Group(childComplexity, args["name"].(string)), true + + case "Query.groups": + if e.complexity.Query.Groups == nil { + break + } + + args, err := ec.field_Query_groups_args(context.TODO(), rawArgs) + if 
err != nil { + return 0, false + } + + return e.complexity.Query.Groups(childComplexity, args["after"].(*entgql.Cursor[string]), args["first"].(*int), args["before"].(*entgql.Cursor[string]), args["last"].(*int), args["where"].(*generated.GroupWhereInput)), true + + case "Query.node": + if e.complexity.Query.Node == nil { + break + } + + args, err := ec.field_Query_node_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Node(childComplexity, args["id"].(string)), true + + case "Query.nodes": + if e.complexity.Query.Nodes == nil { + break + } + + args, err := ec.field_Query_nodes_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Nodes(childComplexity, args["ids"].([]string)), true + + } + return 0, false +} + +func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { + rc := graphql.GetOperationContext(ctx) + ec := executionContext{rc, e, 0, 0, make(chan graphql.DeferredResult)} + inputUnmarshalMap := graphql.BuildUnmarshalerMap( + ec.unmarshalInputCreateDatabaseInput, + ec.unmarshalInputCreateGroupInput, + ec.unmarshalInputDatabaseWhereInput, + ec.unmarshalInputGroupWhereInput, + ec.unmarshalInputUpdateDatabaseInput, + ec.unmarshalInputUpdateGroupInput, + ) + first := true + + switch rc.Operation.Operation { + case ast.Query: + return func(ctx context.Context) *graphql.Response { + var response graphql.Response + var data graphql.Marshaler + if first { + first = false + ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) + data = ec._Query(ctx, rc.Operation.SelectionSet) + } else { + if atomic.LoadInt32(&ec.pendingDeferred) > 0 { + result := <-ec.deferredResults + atomic.AddInt32(&ec.pendingDeferred, -1) + data = result.Result + response.Path = result.Path + response.Label = result.Label + response.Errors = result.Errors + } else { + return nil + } + } + var buf bytes.Buffer + data.MarshalGQL(&buf) + response.Data = buf.Bytes() + if atomic.LoadInt32(&ec.deferred) > 0 { + hasNext := atomic.LoadInt32(&ec.pendingDeferred) > 0 + response.HasNext = &hasNext + } + + return &response + } + case ast.Mutation: + return func(ctx context.Context) *graphql.Response { + if !first { + return nil + } + first = false + ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) + data := ec._Mutation(ctx, rc.Operation.SelectionSet) + var buf bytes.Buffer + data.MarshalGQL(&buf) + + return &graphql.Response{ + Data: buf.Bytes(), + } + } + + default: + return graphql.OneShot(graphql.ErrorResponse(ctx, "unsupported GraphQL operation")) + } +} + +type executionContext struct { + *graphql.OperationContext + *executableSchema + deferred int32 + pendingDeferred int32 + deferredResults chan graphql.DeferredResult +} + +func (ec *executionContext) processDeferredGroup(dg graphql.DeferredGroup) { + atomic.AddInt32(&ec.pendingDeferred, 1) + go func() { + ctx := graphql.WithFreshResponseContext(dg.Context) + dg.FieldSet.Dispatch(ctx) + ds := graphql.DeferredResult{ + Path: dg.Path, + Label: dg.Label, + Result: dg.FieldSet, + Errors: graphql.GetErrors(ctx), + } + // null fields should bubble up + if dg.FieldSet.Invalids > 0 { + ds.Result = graphql.Null + } + ec.deferredResults <- ds + }() +} + +func (ec *executionContext) introspectSchema() (*introspection.Schema, error) { + if ec.DisableIntrospection { + return nil, errors.New("introspection disabled") + } + return introspection.WrapSchema(ec.Schema()), nil +} + +func (ec *executionContext) introspectType(name string) (*introspection.Type, error) 
{ + if ec.DisableIntrospection { + return nil, errors.New("introspection disabled") + } + return introspection.WrapTypeFromDef(ec.Schema(), ec.Schema().Types[name]), nil +} + +var sources = []*ast.Source{ + {Name: "../../schema/database.graphql", Input: `extend type Query { + """ + Look up database by ID + """ + database( + """ + Name of the database + """ + name: String! + ): Database! +} + +extend type Mutation{ + """ + Create a new database + """ + createDatabase( + """ + values of the database + """ + input: CreateDatabaseInput! + ): DatabaseCreatePayload! + """ + Update an existing database + """ + updateDatabase( + """ + Name of the database + """ + name: String! + """ + New values for the database + """ + input: UpdateDatabaseInput! + ): DatabaseUpdatePayload! + """ + Delete an existing database + """ + deleteDatabase( + """ + Name of the database + """ + name: String! + ): DatabaseDeletePayload! +} + +""" +Return response for createDatabase mutation +""" +type DatabaseCreatePayload { + """ + Created database + """ + database: Database! +} + +""" +Return response for updateDatabase mutation +""" +type DatabaseUpdatePayload { + """ + Updated database + """ + database: Database! +} + +""" +Return response for deleteDatabase mutation +""" +type DatabaseDeletePayload { + """ + Deleted database ID + """ + deletedID: ID! +}`, BuiltIn: false}, + {Name: "../../schema/ent.graphql", Input: `directive @goField(forceResolver: Boolean, name: String) on FIELD_DEFINITION | INPUT_FIELD_DEFINITION +directive @goModel(model: String, models: [String!]) on OBJECT | INPUT_OBJECT | SCALAR | ENUM | INTERFACE | UNION +""" +CreateDatabaseInput is used for create Database object. +Input was generated by ent. +""" +input CreateDatabaseInput { + createdAt: Time + updatedAt: Time + createdBy: String + updatedBy: String + """ + the ID of the organization + """ + organizationID: String! + """ + the name to the database + """ + name: String! + """ + the geo location of the database + """ + geo: String + """ + the DSN to the database + """ + dsn: String! + """ + the auth token used to connect to the database + """ + token: String + """ + status of the database + """ + status: DatabaseDatabaseStatus + """ + provider of the database + """ + provider: DatabaseDatabaseProvider + groupID: ID! +} +""" +CreateGroupInput is used for create Group object. +Input was generated by ent. +""" +input CreateGroupInput { + createdAt: Time + updatedAt: Time + createdBy: String + updatedBy: String + """ + the name of the group in turso + """ + name: String! + """ + the description of the group + """ + description: String + """ + the primary of the group + """ + primaryLocation: String! + """ + the replica locations of the group + """ + locations: [String!] + """ + the auth token used to connect to the group + """ + token: String + """ + region the group + """ + region: GroupRegion + databaseIDs: [ID!] +} +""" +Define a Relay Cursor type: +https://relay.dev/graphql/connections.htm#sec-Cursor +""" +scalar Cursor +type Database implements Node { + id: ID! + createdAt: Time + updatedAt: Time + createdBy: String + updatedBy: String + deletedAt: Time + deletedBy: String + """ + the ID of the organization + """ + organizationID: String! + """ + the name to the database + """ + name: String! + """ + the geo location of the database + """ + geo: String + """ + the DSN to the database + """ + dsn: String! + """ + the ID of the group + """ + groupID: ID! + """ + status of the database + """ + status: DatabaseDatabaseStatus! 
+ """ + provider of the database + """ + provider: DatabaseDatabaseProvider! + group: Group! +} +""" +A connection to a list of items. +""" +type DatabaseConnection { + """ + A list of edges. + """ + edges: [DatabaseEdge] + """ + Information to aid in pagination. + """ + pageInfo: PageInfo! + """ + Identifies the total count of items in the connection. + """ + totalCount: Int! +} +""" +DatabaseDatabaseProvider is enum for the field provider +""" +enum DatabaseDatabaseProvider @goModel(model: "github.com/datumforge/geodetic/pkg/enums.DatabaseProvider") { + LOCAL + TURSO +} +""" +DatabaseDatabaseStatus is enum for the field status +""" +enum DatabaseDatabaseStatus @goModel(model: "github.com/datumforge/geodetic/pkg/enums.DatabaseStatus") { + ACTIVE + CREATING + DELETING + DELETED +} +""" +An edge in a connection. +""" +type DatabaseEdge { + """ + The item at the end of the edge. + """ + node: Database + """ + A cursor for use in pagination. + """ + cursor: Cursor! +} +""" +DatabaseWhereInput is used for filtering Database objects. +Input was generated by ent. +""" +input DatabaseWhereInput { + not: DatabaseWhereInput + and: [DatabaseWhereInput!] + or: [DatabaseWhereInput!] + """ + id field predicates + """ + id: ID + idNEQ: ID + idIn: [ID!] + idNotIn: [ID!] + idGT: ID + idGTE: ID + idLT: ID + idLTE: ID + idEqualFold: ID + idContainsFold: ID + """ + created_at field predicates + """ + createdAt: Time + createdAtNEQ: Time + createdAtIn: [Time!] + createdAtNotIn: [Time!] + createdAtGT: Time + createdAtGTE: Time + createdAtLT: Time + createdAtLTE: Time + createdAtIsNil: Boolean + createdAtNotNil: Boolean + """ + updated_at field predicates + """ + updatedAt: Time + updatedAtNEQ: Time + updatedAtIn: [Time!] + updatedAtNotIn: [Time!] + updatedAtGT: Time + updatedAtGTE: Time + updatedAtLT: Time + updatedAtLTE: Time + updatedAtIsNil: Boolean + updatedAtNotNil: Boolean + """ + created_by field predicates + """ + createdBy: String + createdByNEQ: String + createdByIn: [String!] + createdByNotIn: [String!] + createdByGT: String + createdByGTE: String + createdByLT: String + createdByLTE: String + createdByContains: String + createdByHasPrefix: String + createdByHasSuffix: String + createdByIsNil: Boolean + createdByNotNil: Boolean + createdByEqualFold: String + createdByContainsFold: String + """ + updated_by field predicates + """ + updatedBy: String + updatedByNEQ: String + updatedByIn: [String!] + updatedByNotIn: [String!] + updatedByGT: String + updatedByGTE: String + updatedByLT: String + updatedByLTE: String + updatedByContains: String + updatedByHasPrefix: String + updatedByHasSuffix: String + updatedByIsNil: Boolean + updatedByNotNil: Boolean + updatedByEqualFold: String + updatedByContainsFold: String + """ + deleted_at field predicates + """ + deletedAt: Time + deletedAtNEQ: Time + deletedAtIn: [Time!] + deletedAtNotIn: [Time!] + deletedAtGT: Time + deletedAtGTE: Time + deletedAtLT: Time + deletedAtLTE: Time + deletedAtIsNil: Boolean + deletedAtNotNil: Boolean + """ + deleted_by field predicates + """ + deletedBy: String + deletedByNEQ: String + deletedByIn: [String!] + deletedByNotIn: [String!] 
+ deletedByGT: String + deletedByGTE: String + deletedByLT: String + deletedByLTE: String + deletedByContains: String + deletedByHasPrefix: String + deletedByHasSuffix: String + deletedByIsNil: Boolean + deletedByNotNil: Boolean + deletedByEqualFold: String + deletedByContainsFold: String + """ + organization_id field predicates + """ + organizationID: String + organizationIDNEQ: String + organizationIDIn: [String!] + organizationIDNotIn: [String!] + organizationIDGT: String + organizationIDGTE: String + organizationIDLT: String + organizationIDLTE: String + organizationIDContains: String + organizationIDHasPrefix: String + organizationIDHasSuffix: String + organizationIDEqualFold: String + organizationIDContainsFold: String + """ + name field predicates + """ + name: String + nameNEQ: String + nameIn: [String!] + nameNotIn: [String!] + nameGT: String + nameGTE: String + nameLT: String + nameLTE: String + nameContains: String + nameHasPrefix: String + nameHasSuffix: String + nameEqualFold: String + nameContainsFold: String + """ + geo field predicates + """ + geo: String + geoNEQ: String + geoIn: [String!] + geoNotIn: [String!] + geoGT: String + geoGTE: String + geoLT: String + geoLTE: String + geoContains: String + geoHasPrefix: String + geoHasSuffix: String + geoIsNil: Boolean + geoNotNil: Boolean + geoEqualFold: String + geoContainsFold: String + """ + dsn field predicates + """ + dsn: String + dsnNEQ: String + dsnIn: [String!] + dsnNotIn: [String!] + dsnGT: String + dsnGTE: String + dsnLT: String + dsnLTE: String + dsnContains: String + dsnHasPrefix: String + dsnHasSuffix: String + dsnEqualFold: String + dsnContainsFold: String + """ + group_id field predicates + """ + groupID: ID + groupIDNEQ: ID + groupIDIn: [ID!] + groupIDNotIn: [ID!] + groupIDGT: ID + groupIDGTE: ID + groupIDLT: ID + groupIDLTE: ID + groupIDContains: ID + groupIDHasPrefix: ID + groupIDHasSuffix: ID + groupIDEqualFold: ID + groupIDContainsFold: ID + """ + status field predicates + """ + status: DatabaseDatabaseStatus + statusNEQ: DatabaseDatabaseStatus + statusIn: [DatabaseDatabaseStatus!] + statusNotIn: [DatabaseDatabaseStatus!] + """ + provider field predicates + """ + provider: DatabaseDatabaseProvider + providerNEQ: DatabaseDatabaseProvider + providerIn: [DatabaseDatabaseProvider!] + providerNotIn: [DatabaseDatabaseProvider!] + """ + group edge predicates + """ + hasGroup: Boolean + hasGroupWith: [GroupWhereInput!] +} +type Group implements Node { + id: ID! + createdAt: Time + updatedAt: Time + createdBy: String + updatedBy: String + deletedAt: Time + deletedBy: String + """ + the name of the group in turso + """ + name: String! + """ + the description of the group + """ + description: String + """ + the primary of the group + """ + primaryLocation: String! + """ + the replica locations of the group + """ + locations: [String!] + """ + region the group + """ + region: GroupRegion! + databases: [Database!] +} +""" +A connection to a list of items. +""" +type GroupConnection { + """ + A list of edges. + """ + edges: [GroupEdge] + """ + Information to aid in pagination. + """ + pageInfo: PageInfo! + """ + Identifies the total count of items in the connection. + """ + totalCount: Int! +} +""" +An edge in a connection. +""" +type GroupEdge { + """ + The item at the end of the edge. + """ + node: Group + """ + A cursor for use in pagination. + """ + cursor: Cursor! 
+} +""" +GroupRegion is enum for the field region +""" +enum GroupRegion @goModel(model: "github.com/datumforge/geodetic/pkg/enums.Region") { + AMER + EMEA + APAC +} +""" +GroupWhereInput is used for filtering Group objects. +Input was generated by ent. +""" +input GroupWhereInput { + not: GroupWhereInput + and: [GroupWhereInput!] + or: [GroupWhereInput!] + """ + id field predicates + """ + id: ID + idNEQ: ID + idIn: [ID!] + idNotIn: [ID!] + idGT: ID + idGTE: ID + idLT: ID + idLTE: ID + idEqualFold: ID + idContainsFold: ID + """ + created_at field predicates + """ + createdAt: Time + createdAtNEQ: Time + createdAtIn: [Time!] + createdAtNotIn: [Time!] + createdAtGT: Time + createdAtGTE: Time + createdAtLT: Time + createdAtLTE: Time + createdAtIsNil: Boolean + createdAtNotNil: Boolean + """ + updated_at field predicates + """ + updatedAt: Time + updatedAtNEQ: Time + updatedAtIn: [Time!] + updatedAtNotIn: [Time!] + updatedAtGT: Time + updatedAtGTE: Time + updatedAtLT: Time + updatedAtLTE: Time + updatedAtIsNil: Boolean + updatedAtNotNil: Boolean + """ + created_by field predicates + """ + createdBy: String + createdByNEQ: String + createdByIn: [String!] + createdByNotIn: [String!] + createdByGT: String + createdByGTE: String + createdByLT: String + createdByLTE: String + createdByContains: String + createdByHasPrefix: String + createdByHasSuffix: String + createdByIsNil: Boolean + createdByNotNil: Boolean + createdByEqualFold: String + createdByContainsFold: String + """ + updated_by field predicates + """ + updatedBy: String + updatedByNEQ: String + updatedByIn: [String!] + updatedByNotIn: [String!] + updatedByGT: String + updatedByGTE: String + updatedByLT: String + updatedByLTE: String + updatedByContains: String + updatedByHasPrefix: String + updatedByHasSuffix: String + updatedByIsNil: Boolean + updatedByNotNil: Boolean + updatedByEqualFold: String + updatedByContainsFold: String + """ + deleted_at field predicates + """ + deletedAt: Time + deletedAtNEQ: Time + deletedAtIn: [Time!] + deletedAtNotIn: [Time!] + deletedAtGT: Time + deletedAtGTE: Time + deletedAtLT: Time + deletedAtLTE: Time + deletedAtIsNil: Boolean + deletedAtNotNil: Boolean + """ + deleted_by field predicates + """ + deletedBy: String + deletedByNEQ: String + deletedByIn: [String!] + deletedByNotIn: [String!] + deletedByGT: String + deletedByGTE: String + deletedByLT: String + deletedByLTE: String + deletedByContains: String + deletedByHasPrefix: String + deletedByHasSuffix: String + deletedByIsNil: Boolean + deletedByNotNil: Boolean + deletedByEqualFold: String + deletedByContainsFold: String + """ + name field predicates + """ + name: String + nameNEQ: String + nameIn: [String!] + nameNotIn: [String!] + nameGT: String + nameGTE: String + nameLT: String + nameLTE: String + nameContains: String + nameHasPrefix: String + nameHasSuffix: String + nameEqualFold: String + nameContainsFold: String + """ + description field predicates + """ + description: String + descriptionNEQ: String + descriptionIn: [String!] + descriptionNotIn: [String!] + descriptionGT: String + descriptionGTE: String + descriptionLT: String + descriptionLTE: String + descriptionContains: String + descriptionHasPrefix: String + descriptionHasSuffix: String + descriptionIsNil: Boolean + descriptionNotNil: Boolean + descriptionEqualFold: String + descriptionContainsFold: String + """ + primary_location field predicates + """ + primaryLocation: String + primaryLocationNEQ: String + primaryLocationIn: [String!] + primaryLocationNotIn: [String!] 
+ primaryLocationGT: String + primaryLocationGTE: String + primaryLocationLT: String + primaryLocationLTE: String + primaryLocationContains: String + primaryLocationHasPrefix: String + primaryLocationHasSuffix: String + primaryLocationEqualFold: String + primaryLocationContainsFold: String + """ + region field predicates + """ + region: GroupRegion + regionNEQ: GroupRegion + regionIn: [GroupRegion!] + regionNotIn: [GroupRegion!] + """ + databases edge predicates + """ + hasDatabases: Boolean + hasDatabasesWith: [DatabaseWhereInput!] +} +""" +A valid JSON string. +""" +scalar JSON +""" +An object with an ID. +Follows the [Relay Global Object Identification Specification](https://relay.dev/graphql/objectidentification.htm) +""" +interface Node @goModel(model: "github.com/datumforge/geodetic/internal/ent/generated.Noder") { + """ + The id of the object. + """ + id: ID! +} +""" +Possible directions in which to order a list of items when provided an ` + "`" + `orderBy` + "`" + ` argument. +""" +enum OrderDirection { + """ + Specifies an ascending order for a given ` + "`" + `orderBy` + "`" + ` argument. + """ + ASC + """ + Specifies a descending order for a given ` + "`" + `orderBy` + "`" + ` argument. + """ + DESC +} +""" +Information about pagination in a connection. +https://relay.dev/graphql/connections.htm#sec-undefined.PageInfo +""" +type PageInfo { + """ + When paginating forwards, are there more items? + """ + hasNextPage: Boolean! + """ + When paginating backwards, are there more items? + """ + hasPreviousPage: Boolean! + """ + When paginating backwards, the cursor to continue. + """ + startCursor: Cursor + """ + When paginating forwards, the cursor to continue. + """ + endCursor: Cursor +} +type Query { + """ + Fetches an object given its ID. + """ + node( + """ + ID of the object. + """ + id: ID! + ): Node + """ + Lookup nodes by a list of IDs. + """ + nodes( + """ + The list of node IDs. + """ + ids: [ID!]! + ): [Node]! + databases( + """ + Returns the elements in the list that come after the specified cursor. + """ + after: Cursor + + """ + Returns the first _n_ elements from the list. + """ + first: Int + + """ + Returns the elements in the list that come before the specified cursor. + """ + before: Cursor + + """ + Returns the last _n_ elements from the list. + """ + last: Int + + """ + Filtering options for Databases returned from the connection. + """ + where: DatabaseWhereInput + ): DatabaseConnection! + groups( + """ + Returns the elements in the list that come after the specified cursor. + """ + after: Cursor + + """ + Returns the first _n_ elements from the list. + """ + first: Int + + """ + Returns the elements in the list that come before the specified cursor. + """ + before: Cursor + + """ + Returns the last _n_ elements from the list. + """ + last: Int + + """ + Filtering options for Groups returned from the connection. + """ + where: GroupWhereInput + ): GroupConnection! +} +""" +The builtin Time type +""" +scalar Time +""" +UpdateDatabaseInput is used for update Database object. +Input was generated by ent. 
+""" +input UpdateDatabaseInput { + updatedAt: Time + clearUpdatedAt: Boolean + updatedBy: String + clearUpdatedBy: Boolean + """ + the ID of the organization + """ + organizationID: String + """ + the name to the database + """ + name: String + """ + the geo location of the database + """ + geo: String + clearGeo: Boolean + """ + the DSN to the database + """ + dsn: String + """ + the auth token used to connect to the database + """ + token: String + clearToken: Boolean + """ + status of the database + """ + status: DatabaseDatabaseStatus + """ + provider of the database + """ + provider: DatabaseDatabaseProvider + groupID: ID +} +""" +UpdateGroupInput is used for update Group object. +Input was generated by ent. +""" +input UpdateGroupInput { + updatedAt: Time + clearUpdatedAt: Boolean + updatedBy: String + clearUpdatedBy: Boolean + """ + the name of the group in turso + """ + name: String + """ + the description of the group + """ + description: String + clearDescription: Boolean + """ + the primary of the group + """ + primaryLocation: String + """ + the replica locations of the group + """ + locations: [String!] + appendLocations: [String!] + clearLocations: Boolean + """ + the auth token used to connect to the group + """ + token: String + clearToken: Boolean + """ + region the group + """ + region: GroupRegion + addDatabaseIDs: [ID!] + removeDatabaseIDs: [ID!] + clearDatabases: Boolean +} +`, BuiltIn: false}, + {Name: "../../schema/group.graphql", Input: `extend type Query { + """ + Look up group by ID + """ + group( + """ + Name of the group + """ + name: String! + ): Group! +} + +extend type Mutation{ + """ + Create a new group + """ + createGroup( + """ + values of the group + """ + input: CreateGroupInput! + ): GroupCreatePayload! + """ + Update an existing group + """ + updateGroup( + """ + Name of the group + """ + name: String! + """ + New values for the group + """ + input: UpdateGroupInput! + ): GroupUpdatePayload! + """ + Delete an existing group + """ + deleteGroup( + """ + Name of the group + """ + name: String! + ): GroupDeletePayload! +} + +""" +Return response for createGroup mutation +""" +type GroupCreatePayload { + """ + Created group + """ + group: Group! +} + +""" +Return response for updateGroup mutation +""" +type GroupUpdatePayload { + """ + Updated group + """ + group: Group! +} + +""" +Return response for deleteGroup mutation +""" +type GroupDeletePayload { + """ + Deleted group ID + """ + deletedID: ID! +}`, BuiltIn: false}, +} +var parsedSchema = gqlparser.MustLoadSchema(sources...) 
+ +// endregion ************************** generated!.gotpl ************************** + +// region ***************************** args.gotpl ***************************** + +func (ec *executionContext) field_Mutation_createDatabase_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 generated.CreateDatabaseInput + if tmp, ok := rawArgs["input"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("input")) + arg0, err = ec.unmarshalNCreateDatabaseInput2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐCreateDatabaseInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_createGroup_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 generated.CreateGroupInput + if tmp, ok := rawArgs["input"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("input")) + arg0, err = ec.unmarshalNCreateGroupInput2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐCreateGroupInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_deleteDatabase_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["name"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + arg0, err = ec.unmarshalNString2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["name"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_deleteGroup_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["name"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + arg0, err = ec.unmarshalNString2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["name"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_updateDatabase_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["name"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + arg0, err = ec.unmarshalNString2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["name"] = arg0 + var arg1 generated.UpdateDatabaseInput + if tmp, ok := rawArgs["input"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("input")) + arg1, err = ec.unmarshalNUpdateDatabaseInput2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐUpdateDatabaseInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg1 + return args, nil +} + +func (ec *executionContext) field_Mutation_updateGroup_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["name"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + arg0, err = ec.unmarshalNString2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["name"] = arg0 + var arg1 
generated.UpdateGroupInput + if tmp, ok := rawArgs["input"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("input")) + arg1, err = ec.unmarshalNUpdateGroupInput2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐUpdateGroupInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg1 + return args, nil +} + +func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["name"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + arg0, err = ec.unmarshalNString2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["name"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_database_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["name"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + arg0, err = ec.unmarshalNString2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["name"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_databases_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *entgql.Cursor[string] + if tmp, ok := rawArgs["after"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("after")) + arg0, err = ec.unmarshalOCursor2ᚖentgoᚗioᚋcontribᚋentgqlᚐCursor(ctx, tmp) + if err != nil { + return nil, err + } + } + args["after"] = arg0 + var arg1 *int + if tmp, ok := rawArgs["first"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("first")) + arg1, err = ec.unmarshalOInt2ᚖint(ctx, tmp) + if err != nil { + return nil, err + } + } + args["first"] = arg1 + var arg2 *entgql.Cursor[string] + if tmp, ok := rawArgs["before"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("before")) + arg2, err = ec.unmarshalOCursor2ᚖentgoᚗioᚋcontribᚋentgqlᚐCursor(ctx, tmp) + if err != nil { + return nil, err + } + } + args["before"] = arg2 + var arg3 *int + if tmp, ok := rawArgs["last"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("last")) + arg3, err = ec.unmarshalOInt2ᚖint(ctx, tmp) + if err != nil { + return nil, err + } + } + args["last"] = arg3 + var arg4 *generated.DatabaseWhereInput + if tmp, ok := rawArgs["where"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("where")) + arg4, err = ec.unmarshalODatabaseWhereInput2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseWhereInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["where"] = arg4 + return args, nil +} + +func (ec *executionContext) field_Query_group_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["name"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + arg0, err = ec.unmarshalNString2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["name"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_groups_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} 
+ var arg0 *entgql.Cursor[string] + if tmp, ok := rawArgs["after"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("after")) + arg0, err = ec.unmarshalOCursor2ᚖentgoᚗioᚋcontribᚋentgqlᚐCursor(ctx, tmp) + if err != nil { + return nil, err + } + } + args["after"] = arg0 + var arg1 *int + if tmp, ok := rawArgs["first"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("first")) + arg1, err = ec.unmarshalOInt2ᚖint(ctx, tmp) + if err != nil { + return nil, err + } + } + args["first"] = arg1 + var arg2 *entgql.Cursor[string] + if tmp, ok := rawArgs["before"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("before")) + arg2, err = ec.unmarshalOCursor2ᚖentgoᚗioᚋcontribᚋentgqlᚐCursor(ctx, tmp) + if err != nil { + return nil, err + } + } + args["before"] = arg2 + var arg3 *int + if tmp, ok := rawArgs["last"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("last")) + arg3, err = ec.unmarshalOInt2ᚖint(ctx, tmp) + if err != nil { + return nil, err + } + } + args["last"] = arg3 + var arg4 *generated.GroupWhereInput + if tmp, ok := rawArgs["where"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("where")) + arg4, err = ec.unmarshalOGroupWhereInput2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupWhereInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["where"] = arg4 + return args, nil +} + +func (ec *executionContext) field_Query_node_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["id"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + arg0, err = ec.unmarshalNID2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["id"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_nodes_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 []string + if tmp, ok := rawArgs["ids"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("ids")) + arg0, err = ec.unmarshalNID2ᚕstringᚄ(ctx, tmp) + if err != nil { + return nil, err + } + } + args["ids"] = arg0 + return args, nil +} + +func (ec *executionContext) field___Type_enumValues_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 bool + if tmp, ok := rawArgs["includeDeprecated"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) + arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp) + if err != nil { + return nil, err + } + } + args["includeDeprecated"] = arg0 + return args, nil +} + +func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 bool + if tmp, ok := rawArgs["includeDeprecated"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) + arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp) + if err != nil { + return nil, err + } + } + args["includeDeprecated"] = arg0 + return args, nil +} + +// endregion ***************************** args.gotpl ***************************** + +// region ************************** directives.gotpl ************************** + +// endregion ************************** 
directives.gotpl ************************** + +// region **************************** field.gotpl ***************************** + +func (ec *executionContext) _Database_id(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_id(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type ID does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_createdAt(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_createdAt(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.CreatedAt, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(time.Time) + fc.Result = res + return ec.marshalOTime2timeᚐTime(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_createdAt(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Time does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_updatedAt(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_updatedAt(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.UpdatedAt, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(time.Time) + fc.Result = res + return ec.marshalOTime2timeᚐTime(ctx, 
field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_updatedAt(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Time does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_createdBy(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_createdBy(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.CreatedBy, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalOString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_createdBy(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_updatedBy(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_updatedBy(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.UpdatedBy, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalOString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_updatedBy(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_deletedAt(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_deletedAt(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) 
(interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DeletedAt, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(time.Time) + fc.Result = res + return ec.marshalOTime2timeᚐTime(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_deletedAt(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Time does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_deletedBy(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_deletedBy(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DeletedBy, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalOString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_deletedBy(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_organizationID(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_organizationID(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.OrganizationID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_organizationID(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_name(ctx context.Context, 
field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_geo(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_geo(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Geo, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalOString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_geo(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_dsn(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_dsn(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Dsn, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_dsn(ctx context.Context, field graphql.CollectedField) (fc 
*graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_groupID(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_groupID(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.GroupID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_groupID(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type ID does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_status(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_status(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Status, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(enums.DatabaseStatus) + fc.Result = res + return ec.marshalNDatabaseDatabaseStatus2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatus(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_status(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type DatabaseDatabaseStatus does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_provider(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_provider(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null 
+ } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Provider, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(enums.DatabaseProvider) + fc.Result = res + return ec.marshalNDatabaseDatabaseProvider2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProvider(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_provider(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type DatabaseDatabaseProvider does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Database_group(ctx context.Context, field graphql.CollectedField, obj *generated.Database) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Database_group(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Group(ctx) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*generated.Group) + fc.Result = res + return ec.marshalNGroup2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroup(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Database_group(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Database", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_Group_id(ctx, field) + case "createdAt": + return ec.fieldContext_Group_createdAt(ctx, field) + case "updatedAt": + return ec.fieldContext_Group_updatedAt(ctx, field) + case "createdBy": + return ec.fieldContext_Group_createdBy(ctx, field) + case "updatedBy": + return ec.fieldContext_Group_updatedBy(ctx, field) + case "deletedAt": + return ec.fieldContext_Group_deletedAt(ctx, field) + case "deletedBy": + return ec.fieldContext_Group_deletedBy(ctx, field) + case "name": + return ec.fieldContext_Group_name(ctx, field) + case "description": + return ec.fieldContext_Group_description(ctx, field) + case "primaryLocation": + return ec.fieldContext_Group_primaryLocation(ctx, field) + case "locations": + return ec.fieldContext_Group_locations(ctx, field) + case "region": + return ec.fieldContext_Group_region(ctx, field) + case "databases": + return ec.fieldContext_Group_databases(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Group", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _DatabaseConnection_edges(ctx context.Context, field 
graphql.CollectedField, obj *generated.DatabaseConnection) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_DatabaseConnection_edges(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Edges, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]*generated.DatabaseEdge) + fc.Result = res + return ec.marshalODatabaseEdge2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseEdge(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_DatabaseConnection_edges(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DatabaseConnection", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "node": + return ec.fieldContext_DatabaseEdge_node(ctx, field) + case "cursor": + return ec.fieldContext_DatabaseEdge_cursor(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type DatabaseEdge", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _DatabaseConnection_pageInfo(ctx context.Context, field graphql.CollectedField, obj *generated.DatabaseConnection) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_DatabaseConnection_pageInfo(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(entgql.PageInfo[string]) + fc.Result = res + return ec.marshalNPageInfo2entgoᚗioᚋcontribᚋentgqlᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_DatabaseConnection_pageInfo(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DatabaseConnection", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "hasNextPage": + return ec.fieldContext_PageInfo_hasNextPage(ctx, field) + case "hasPreviousPage": + return ec.fieldContext_PageInfo_hasPreviousPage(ctx, field) + case "startCursor": + return ec.fieldContext_PageInfo_startCursor(ctx, field) + case "endCursor": + return ec.fieldContext_PageInfo_endCursor(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type PageInfo", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _DatabaseConnection_totalCount(ctx context.Context, field graphql.CollectedField, obj *generated.DatabaseConnection) (ret graphql.Marshaler) { + fc, err := 
ec.fieldContext_DatabaseConnection_totalCount(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.TotalCount, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_DatabaseConnection_totalCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DatabaseConnection", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _DatabaseCreatePayload_database(ctx context.Context, field graphql.CollectedField, obj *DatabaseCreatePayload) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_DatabaseCreatePayload_database(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Database, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*generated.Database) + fc.Result = res + return ec.marshalNDatabase2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabase(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_DatabaseCreatePayload_database(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DatabaseCreatePayload", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_Database_id(ctx, field) + case "createdAt": + return ec.fieldContext_Database_createdAt(ctx, field) + case "updatedAt": + return ec.fieldContext_Database_updatedAt(ctx, field) + case "createdBy": + return ec.fieldContext_Database_createdBy(ctx, field) + case "updatedBy": + return ec.fieldContext_Database_updatedBy(ctx, field) + case "deletedAt": + return ec.fieldContext_Database_deletedAt(ctx, field) + case "deletedBy": + return ec.fieldContext_Database_deletedBy(ctx, field) + case "organizationID": + return ec.fieldContext_Database_organizationID(ctx, field) + case "name": + return ec.fieldContext_Database_name(ctx, field) + case "geo": + return ec.fieldContext_Database_geo(ctx, field) + case "dsn": + return ec.fieldContext_Database_dsn(ctx, field) + case "groupID": + return ec.fieldContext_Database_groupID(ctx, field) + case "status": + return 
ec.fieldContext_Database_status(ctx, field) + case "provider": + return ec.fieldContext_Database_provider(ctx, field) + case "group": + return ec.fieldContext_Database_group(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Database", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _DatabaseDeletePayload_deletedID(ctx context.Context, field graphql.CollectedField, obj *DatabaseDeletePayload) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_DatabaseDeletePayload_deletedID(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DeletedID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_DatabaseDeletePayload_deletedID(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DatabaseDeletePayload", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type ID does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _DatabaseEdge_node(ctx context.Context, field graphql.CollectedField, obj *generated.DatabaseEdge) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_DatabaseEdge_node(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Node, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*generated.Database) + fc.Result = res + return ec.marshalODatabase2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabase(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_DatabaseEdge_node(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DatabaseEdge", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_Database_id(ctx, field) + case "createdAt": + return ec.fieldContext_Database_createdAt(ctx, field) + case "updatedAt": + return ec.fieldContext_Database_updatedAt(ctx, field) + case "createdBy": + return ec.fieldContext_Database_createdBy(ctx, field) + case "updatedBy": + return ec.fieldContext_Database_updatedBy(ctx, field) + case "deletedAt": + return ec.fieldContext_Database_deletedAt(ctx, field) + case "deletedBy": + return ec.fieldContext_Database_deletedBy(ctx, field) + case "organizationID": + return 
ec.fieldContext_Database_organizationID(ctx, field) + case "name": + return ec.fieldContext_Database_name(ctx, field) + case "geo": + return ec.fieldContext_Database_geo(ctx, field) + case "dsn": + return ec.fieldContext_Database_dsn(ctx, field) + case "groupID": + return ec.fieldContext_Database_groupID(ctx, field) + case "status": + return ec.fieldContext_Database_status(ctx, field) + case "provider": + return ec.fieldContext_Database_provider(ctx, field) + case "group": + return ec.fieldContext_Database_group(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Database", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _DatabaseEdge_cursor(ctx context.Context, field graphql.CollectedField, obj *generated.DatabaseEdge) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_DatabaseEdge_cursor(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Cursor, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(entgql.Cursor[string]) + fc.Result = res + return ec.marshalNCursor2entgoᚗioᚋcontribᚋentgqlᚐCursor(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_DatabaseEdge_cursor(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DatabaseEdge", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Cursor does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _DatabaseUpdatePayload_database(ctx context.Context, field graphql.CollectedField, obj *DatabaseUpdatePayload) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_DatabaseUpdatePayload_database(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Database, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*generated.Database) + fc.Result = res + return ec.marshalNDatabase2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabase(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_DatabaseUpdatePayload_database(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DatabaseUpdatePayload", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_Database_id(ctx, field) + case 
"createdAt": + return ec.fieldContext_Database_createdAt(ctx, field) + case "updatedAt": + return ec.fieldContext_Database_updatedAt(ctx, field) + case "createdBy": + return ec.fieldContext_Database_createdBy(ctx, field) + case "updatedBy": + return ec.fieldContext_Database_updatedBy(ctx, field) + case "deletedAt": + return ec.fieldContext_Database_deletedAt(ctx, field) + case "deletedBy": + return ec.fieldContext_Database_deletedBy(ctx, field) + case "organizationID": + return ec.fieldContext_Database_organizationID(ctx, field) + case "name": + return ec.fieldContext_Database_name(ctx, field) + case "geo": + return ec.fieldContext_Database_geo(ctx, field) + case "dsn": + return ec.fieldContext_Database_dsn(ctx, field) + case "groupID": + return ec.fieldContext_Database_groupID(ctx, field) + case "status": + return ec.fieldContext_Database_status(ctx, field) + case "provider": + return ec.fieldContext_Database_provider(ctx, field) + case "group": + return ec.fieldContext_Database_group(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Database", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_id(ctx context.Context, field graphql.CollectedField, obj *generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_id(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Group_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type ID does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_createdAt(ctx context.Context, field graphql.CollectedField, obj *generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_createdAt(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.CreatedAt, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(time.Time) + fc.Result = res + return ec.marshalOTime2timeᚐTime(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Group_createdAt(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx 
context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Time does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_updatedAt(ctx context.Context, field graphql.CollectedField, obj *generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_updatedAt(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.UpdatedAt, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(time.Time) + fc.Result = res + return ec.marshalOTime2timeᚐTime(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Group_updatedAt(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Time does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_createdBy(ctx context.Context, field graphql.CollectedField, obj *generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_createdBy(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.CreatedBy, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalOString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Group_createdBy(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_updatedBy(ctx context.Context, field graphql.CollectedField, obj *generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_updatedBy(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.UpdatedBy, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalOString2string(ctx, field.Selections, res) +} + 
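+// Every _Group_* accessor above funnels through ec.ResolverMiddleware, so one
+// hook point serves plain struct fields and edge loaders alike. A hand-written
+// Query resolver that feeds this machinery can delegate straight to ent's
+// generated Relay pagination; a minimal sketch, assuming the entgql-generated
+// WithGroupFilter option and GroupWhereInput.Filter helper, plus an r.client
+// *generated.Client on the resolver root:
+//
+//	func (r *queryResolver) Groups(ctx context.Context, after *entgql.Cursor[string], first *int,
+//		before *entgql.Cursor[string], last *int, where *generated.GroupWhereInput,
+//	) (*generated.GroupConnection, error) {
+//		return r.client.Group.Query().
+//			Paginate(ctx, after, first, before, last, generated.WithGroupFilter(where.Filter))
+//	}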
+func (ec *executionContext) fieldContext_Group_updatedBy(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_deletedAt(ctx context.Context, field graphql.CollectedField, obj *generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_deletedAt(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DeletedAt, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(time.Time) + fc.Result = res + return ec.marshalOTime2timeᚐTime(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Group_deletedAt(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Time does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_deletedBy(ctx context.Context, field graphql.CollectedField, obj *generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_deletedBy(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DeletedBy, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalOString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Group_deletedBy(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_name(ctx context.Context, field graphql.CollectedField, obj *generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in 
children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Group_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_description(ctx context.Context, field graphql.CollectedField, obj *generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_description(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalOString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Group_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_primaryLocation(ctx context.Context, field graphql.CollectedField, obj *generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_primaryLocation(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PrimaryLocation, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Group_primaryLocation(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_locations(ctx context.Context, field graphql.CollectedField, obj 
*generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_locations(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Locations, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]string) + fc.Result = res + return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Group_locations(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_region(ctx context.Context, field graphql.CollectedField, obj *generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_region(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Region, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(enums.Region) + fc.Result = res + return ec.marshalNGroupRegion2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegion(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Group_region(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type GroupRegion does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Group_databases(ctx context.Context, field graphql.CollectedField, obj *generated.Group) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Group_databases(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Databases(ctx) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]*generated.Database) + fc.Result = res + return ec.marshalODatabase2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Group_databases(ctx context.Context, field 
graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Group", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_Database_id(ctx, field) + case "createdAt": + return ec.fieldContext_Database_createdAt(ctx, field) + case "updatedAt": + return ec.fieldContext_Database_updatedAt(ctx, field) + case "createdBy": + return ec.fieldContext_Database_createdBy(ctx, field) + case "updatedBy": + return ec.fieldContext_Database_updatedBy(ctx, field) + case "deletedAt": + return ec.fieldContext_Database_deletedAt(ctx, field) + case "deletedBy": + return ec.fieldContext_Database_deletedBy(ctx, field) + case "organizationID": + return ec.fieldContext_Database_organizationID(ctx, field) + case "name": + return ec.fieldContext_Database_name(ctx, field) + case "geo": + return ec.fieldContext_Database_geo(ctx, field) + case "dsn": + return ec.fieldContext_Database_dsn(ctx, field) + case "groupID": + return ec.fieldContext_Database_groupID(ctx, field) + case "status": + return ec.fieldContext_Database_status(ctx, field) + case "provider": + return ec.fieldContext_Database_provider(ctx, field) + case "group": + return ec.fieldContext_Database_group(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Database", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _GroupConnection_edges(ctx context.Context, field graphql.CollectedField, obj *generated.GroupConnection) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GroupConnection_edges(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Edges, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]*generated.GroupEdge) + fc.Result = res + return ec.marshalOGroupEdge2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupEdge(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GroupConnection_edges(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GroupConnection", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "node": + return ec.fieldContext_GroupEdge_node(ctx, field) + case "cursor": + return ec.fieldContext_GroupEdge_cursor(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type GroupEdge", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _GroupConnection_pageInfo(ctx context.Context, field graphql.CollectedField, obj *generated.GroupConnection) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GroupConnection_pageInfo(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx 
context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(entgql.PageInfo[string]) + fc.Result = res + return ec.marshalNPageInfo2entgoᚗioᚋcontribᚋentgqlᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GroupConnection_pageInfo(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GroupConnection", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "hasNextPage": + return ec.fieldContext_PageInfo_hasNextPage(ctx, field) + case "hasPreviousPage": + return ec.fieldContext_PageInfo_hasPreviousPage(ctx, field) + case "startCursor": + return ec.fieldContext_PageInfo_startCursor(ctx, field) + case "endCursor": + return ec.fieldContext_PageInfo_endCursor(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type PageInfo", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _GroupConnection_totalCount(ctx context.Context, field graphql.CollectedField, obj *generated.GroupConnection) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GroupConnection_totalCount(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.TotalCount, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GroupConnection_totalCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GroupConnection", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _GroupCreatePayload_group(ctx context.Context, field graphql.CollectedField, obj *GroupCreatePayload) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GroupCreatePayload_group(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Group, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*generated.Group) 
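+// (The unchecked type assertion above is safe by construction: the generated
+// resolver interface pins this field's return type to *generated.Group, so
+// resTmp is either that type or nil, and the nil case was handled earlier.)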
+ fc.Result = res + return ec.marshalNGroup2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroup(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GroupCreatePayload_group(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GroupCreatePayload", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_Group_id(ctx, field) + case "createdAt": + return ec.fieldContext_Group_createdAt(ctx, field) + case "updatedAt": + return ec.fieldContext_Group_updatedAt(ctx, field) + case "createdBy": + return ec.fieldContext_Group_createdBy(ctx, field) + case "updatedBy": + return ec.fieldContext_Group_updatedBy(ctx, field) + case "deletedAt": + return ec.fieldContext_Group_deletedAt(ctx, field) + case "deletedBy": + return ec.fieldContext_Group_deletedBy(ctx, field) + case "name": + return ec.fieldContext_Group_name(ctx, field) + case "description": + return ec.fieldContext_Group_description(ctx, field) + case "primaryLocation": + return ec.fieldContext_Group_primaryLocation(ctx, field) + case "locations": + return ec.fieldContext_Group_locations(ctx, field) + case "region": + return ec.fieldContext_Group_region(ctx, field) + case "databases": + return ec.fieldContext_Group_databases(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Group", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _GroupDeletePayload_deletedID(ctx context.Context, field graphql.CollectedField, obj *GroupDeletePayload) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GroupDeletePayload_deletedID(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DeletedID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GroupDeletePayload_deletedID(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GroupDeletePayload", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type ID does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _GroupEdge_node(ctx context.Context, field graphql.CollectedField, obj *generated.GroupEdge) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GroupEdge_node(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + 
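+		// edge nodes are read directly off the in-memory connection struct; no user-defined resolver runs here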
return obj.Node, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*generated.Group) + fc.Result = res + return ec.marshalOGroup2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroup(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GroupEdge_node(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GroupEdge", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_Group_id(ctx, field) + case "createdAt": + return ec.fieldContext_Group_createdAt(ctx, field) + case "updatedAt": + return ec.fieldContext_Group_updatedAt(ctx, field) + case "createdBy": + return ec.fieldContext_Group_createdBy(ctx, field) + case "updatedBy": + return ec.fieldContext_Group_updatedBy(ctx, field) + case "deletedAt": + return ec.fieldContext_Group_deletedAt(ctx, field) + case "deletedBy": + return ec.fieldContext_Group_deletedBy(ctx, field) + case "name": + return ec.fieldContext_Group_name(ctx, field) + case "description": + return ec.fieldContext_Group_description(ctx, field) + case "primaryLocation": + return ec.fieldContext_Group_primaryLocation(ctx, field) + case "locations": + return ec.fieldContext_Group_locations(ctx, field) + case "region": + return ec.fieldContext_Group_region(ctx, field) + case "databases": + return ec.fieldContext_Group_databases(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Group", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _GroupEdge_cursor(ctx context.Context, field graphql.CollectedField, obj *generated.GroupEdge) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GroupEdge_cursor(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Cursor, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(entgql.Cursor[string]) + fc.Result = res + return ec.marshalNCursor2entgoᚗioᚋcontribᚋentgqlᚐCursor(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GroupEdge_cursor(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GroupEdge", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Cursor does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _GroupUpdatePayload_group(ctx context.Context, field graphql.CollectedField, obj *GroupUpdatePayload) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GroupUpdatePayload_group(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = 
graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Group, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*generated.Group) + fc.Result = res + return ec.marshalNGroup2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroup(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GroupUpdatePayload_group(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GroupUpdatePayload", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_Group_id(ctx, field) + case "createdAt": + return ec.fieldContext_Group_createdAt(ctx, field) + case "updatedAt": + return ec.fieldContext_Group_updatedAt(ctx, field) + case "createdBy": + return ec.fieldContext_Group_createdBy(ctx, field) + case "updatedBy": + return ec.fieldContext_Group_updatedBy(ctx, field) + case "deletedAt": + return ec.fieldContext_Group_deletedAt(ctx, field) + case "deletedBy": + return ec.fieldContext_Group_deletedBy(ctx, field) + case "name": + return ec.fieldContext_Group_name(ctx, field) + case "description": + return ec.fieldContext_Group_description(ctx, field) + case "primaryLocation": + return ec.fieldContext_Group_primaryLocation(ctx, field) + case "locations": + return ec.fieldContext_Group_locations(ctx, field) + case "region": + return ec.fieldContext_Group_region(ctx, field) + case "databases": + return ec.fieldContext_Group_databases(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Group", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _Mutation_createDatabase(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Mutation_createDatabase(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().CreateDatabase(rctx, fc.Args["input"].(generated.CreateDatabaseInput)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*DatabaseCreatePayload) + fc.Result = res + return ec.marshalNDatabaseCreatePayload2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐDatabaseCreatePayload(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Mutation_createDatabase(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Mutation", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "database": + return ec.fieldContext_DatabaseCreatePayload_database(ctx, field) 
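+			// any other selection on DatabaseCreatePayload falls through to the unknown-field error below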
+ } + return nil, fmt.Errorf("no field named %q was found under type DatabaseCreatePayload", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Mutation_createDatabase_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Mutation_updateDatabase(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Mutation_updateDatabase(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().UpdateDatabase(rctx, fc.Args["name"].(string), fc.Args["input"].(generated.UpdateDatabaseInput)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*DatabaseUpdatePayload) + fc.Result = res + return ec.marshalNDatabaseUpdatePayload2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐDatabaseUpdatePayload(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Mutation_updateDatabase(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Mutation", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "database": + return ec.fieldContext_DatabaseUpdatePayload_database(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type DatabaseUpdatePayload", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Mutation_updateDatabase_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Mutation_deleteDatabase(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Mutation_deleteDatabase(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().DeleteDatabase(rctx, fc.Args["name"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*DatabaseDeletePayload) + fc.Result = res + return ec.marshalNDatabaseDeletePayload2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐDatabaseDeletePayload(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Mutation_deleteDatabase(ctx context.Context, 
field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Mutation", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "deletedID": + return ec.fieldContext_DatabaseDeletePayload_deletedID(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type DatabaseDeletePayload", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Mutation_deleteDatabase_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Mutation_createGroup(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Mutation_createGroup(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().CreateGroup(rctx, fc.Args["input"].(generated.CreateGroupInput)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*GroupCreatePayload) + fc.Result = res + return ec.marshalNGroupCreatePayload2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐGroupCreatePayload(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Mutation_createGroup(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Mutation", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "group": + return ec.fieldContext_GroupCreatePayload_group(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type GroupCreatePayload", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Mutation_createGroup_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Mutation_updateGroup(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Mutation_updateGroup(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().UpdateGroup(rctx, fc.Args["name"].(string), fc.Args["input"].(generated.UpdateGroupInput)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + 
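+			// flag the non-null violation only if the resolver has not already recorded a field error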
ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*GroupUpdatePayload) + fc.Result = res + return ec.marshalNGroupUpdatePayload2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐGroupUpdatePayload(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Mutation_updateGroup(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Mutation", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "group": + return ec.fieldContext_GroupUpdatePayload_group(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type GroupUpdatePayload", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Mutation_updateGroup_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Mutation_deleteGroup(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Mutation_deleteGroup(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().DeleteGroup(rctx, fc.Args["name"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*GroupDeletePayload) + fc.Result = res + return ec.marshalNGroupDeletePayload2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐGroupDeletePayload(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Mutation_deleteGroup(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Mutation", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "deletedID": + return ec.fieldContext_GroupDeletePayload_deletedID(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type GroupDeletePayload", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Mutation_deleteGroup_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _PageInfo_hasNextPage(ctx context.Context, field graphql.CollectedField, obj *entgql.PageInfo[string]) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_PageInfo_hasNextPage(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx 
context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.HasNextPage, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + fc.Result = res + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_PageInfo_hasNextPage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "PageInfo", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Boolean does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _PageInfo_hasPreviousPage(ctx context.Context, field graphql.CollectedField, obj *entgql.PageInfo[string]) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_PageInfo_hasPreviousPage(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.HasPreviousPage, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + fc.Result = res + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_PageInfo_hasPreviousPage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "PageInfo", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Boolean does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _PageInfo_startCursor(ctx context.Context, field graphql.CollectedField, obj *entgql.PageInfo[string]) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_PageInfo_startCursor(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.StartCursor, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*entgql.Cursor[string]) + fc.Result = res + return ec.marshalOCursor2ᚖentgoᚗioᚋcontribᚋentgqlᚐCursor(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_PageInfo_startCursor(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "PageInfo", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, 
errors.New("field of type Cursor does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _PageInfo_endCursor(ctx context.Context, field graphql.CollectedField, obj *entgql.PageInfo[string]) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_PageInfo_endCursor(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.EndCursor, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*entgql.Cursor[string]) + fc.Result = res + return ec.marshalOCursor2ᚖentgoᚗioᚋcontribᚋentgqlᚐCursor(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_PageInfo_endCursor(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "PageInfo", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Cursor does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Query_node(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_node(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Node(rctx, fc.Args["id"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(generated.Noder) + fc.Result = res + return ec.marshalONode2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐNoder(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_node(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE") + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_node_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query_nodes(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_nodes(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from 
middleware stack in children + return ec.resolvers.Query().Nodes(rctx, fc.Args["ids"].([]string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]generated.Noder) + fc.Result = res + return ec.marshalNNode2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐNoder(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_nodes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE") + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_nodes_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query_databases(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_databases(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Databases(rctx, fc.Args["after"].(*entgql.Cursor[string]), fc.Args["first"].(*int), fc.Args["before"].(*entgql.Cursor[string]), fc.Args["last"].(*int), fc.Args["where"].(*generated.DatabaseWhereInput)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*generated.DatabaseConnection) + fc.Result = res + return ec.marshalNDatabaseConnection2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseConnection(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_databases(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "edges": + return ec.fieldContext_DatabaseConnection_edges(ctx, field) + case "pageInfo": + return ec.fieldContext_DatabaseConnection_pageInfo(ctx, field) + case "totalCount": + return ec.fieldContext_DatabaseConnection_totalCount(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type DatabaseConnection", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_databases_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query_groups(ctx context.Context, field graphql.CollectedField) (ret 
graphql.Marshaler) { + fc, err := ec.fieldContext_Query_groups(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Groups(rctx, fc.Args["after"].(*entgql.Cursor[string]), fc.Args["first"].(*int), fc.Args["before"].(*entgql.Cursor[string]), fc.Args["last"].(*int), fc.Args["where"].(*generated.GroupWhereInput)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*generated.GroupConnection) + fc.Result = res + return ec.marshalNGroupConnection2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupConnection(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_groups(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "edges": + return ec.fieldContext_GroupConnection_edges(ctx, field) + case "pageInfo": + return ec.fieldContext_GroupConnection_pageInfo(ctx, field) + case "totalCount": + return ec.fieldContext_GroupConnection_totalCount(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type GroupConnection", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_groups_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query_database(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_database(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Database(rctx, fc.Args["name"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*generated.Database) + fc.Result = res + return ec.marshalNDatabase2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabase(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_database(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_Database_id(ctx, field) + case "createdAt": + return 
ec.fieldContext_Database_createdAt(ctx, field) + case "updatedAt": + return ec.fieldContext_Database_updatedAt(ctx, field) + case "createdBy": + return ec.fieldContext_Database_createdBy(ctx, field) + case "updatedBy": + return ec.fieldContext_Database_updatedBy(ctx, field) + case "deletedAt": + return ec.fieldContext_Database_deletedAt(ctx, field) + case "deletedBy": + return ec.fieldContext_Database_deletedBy(ctx, field) + case "organizationID": + return ec.fieldContext_Database_organizationID(ctx, field) + case "name": + return ec.fieldContext_Database_name(ctx, field) + case "geo": + return ec.fieldContext_Database_geo(ctx, field) + case "dsn": + return ec.fieldContext_Database_dsn(ctx, field) + case "groupID": + return ec.fieldContext_Database_groupID(ctx, field) + case "status": + return ec.fieldContext_Database_status(ctx, field) + case "provider": + return ec.fieldContext_Database_provider(ctx, field) + case "group": + return ec.fieldContext_Database_group(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Database", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_database_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query_group(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_group(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Group(rctx, fc.Args["name"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*generated.Group) + fc.Result = res + return ec.marshalNGroup2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroup(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_group(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_Group_id(ctx, field) + case "createdAt": + return ec.fieldContext_Group_createdAt(ctx, field) + case "updatedAt": + return ec.fieldContext_Group_updatedAt(ctx, field) + case "createdBy": + return ec.fieldContext_Group_createdBy(ctx, field) + case "updatedBy": + return ec.fieldContext_Group_updatedBy(ctx, field) + case "deletedAt": + return ec.fieldContext_Group_deletedAt(ctx, field) + case "deletedBy": + return ec.fieldContext_Group_deletedBy(ctx, field) + case "name": + return ec.fieldContext_Group_name(ctx, field) + case "description": + return ec.fieldContext_Group_description(ctx, field) + case "primaryLocation": + return ec.fieldContext_Group_primaryLocation(ctx, field) + case "locations": + return ec.fieldContext_Group_locations(ctx, field) + case "region": 
+ return ec.fieldContext_Group_region(ctx, field) + case "databases": + return ec.fieldContext_Group_databases(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Group", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_group_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query___type(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.introspectType(fc.Args["name"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*introspection.Type) + fc.Result = res + return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query___type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "kind": + return ec.fieldContext___Type_kind(ctx, field) + case "name": + return ec.fieldContext___Type_name(ctx, field) + case "description": + return ec.fieldContext___Type_description(ctx, field) + case "fields": + return ec.fieldContext___Type_fields(ctx, field) + case "interfaces": + return ec.fieldContext___Type_interfaces(ctx, field) + case "possibleTypes": + return ec.fieldContext___Type_possibleTypes(ctx, field) + case "enumValues": + return ec.fieldContext___Type_enumValues(ctx, field) + case "inputFields": + return ec.fieldContext___Type_inputFields(ctx, field) + case "ofType": + return ec.fieldContext___Type_ofType(ctx, field) + case "specifiedByURL": + return ec.fieldContext___Type_specifiedByURL(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query___type_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query___schema(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.introspectSchema() + }) + if err != nil { + ec.Error(ctx, err) + return 
graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*introspection.Schema) + fc.Result = res + return ec.marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query___schema(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "description": + return ec.fieldContext___Schema_description(ctx, field) + case "types": + return ec.fieldContext___Schema_types(ctx, field) + case "queryType": + return ec.fieldContext___Schema_queryType(ctx, field) + case "mutationType": + return ec.fieldContext___Schema_mutationType(ctx, field) + case "subscriptionType": + return ec.fieldContext___Schema_subscriptionType(ctx, field) + case "directives": + return ec.fieldContext___Schema_directives(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Schema", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___Directive_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Directive_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Directive_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Directive", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___Directive_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Directive_description(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Directive_description(ctx context.Context, field graphql.CollectedField) (fc 
*graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Directive", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___Directive_locations(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Directive_locations(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Locations, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]string) + fc.Result = res + return ec.marshalN__DirectiveLocation2ᚕstringᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Directive_locations(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Directive", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type __DirectiveLocation does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___Directive_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Directive_args(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Args, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]introspection.InputValue) + fc.Result = res + return ec.marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Directive_args(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Directive", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext___InputValue_name(ctx, field) + case "description": + return ec.fieldContext___InputValue_description(ctx, field) + case "type": + return ec.fieldContext___InputValue_type(ctx, field) + case "defaultValue": + return ec.fieldContext___InputValue_defaultValue(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __InputValue", field.Name) + }, + } + return 
fc, nil +} + +func (ec *executionContext) ___Directive_isRepeatable(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Directive_isRepeatable(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.IsRepeatable, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + fc.Result = res + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Directive_isRepeatable(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Directive", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Boolean does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___EnumValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___EnumValue_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___EnumValue_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__EnumValue", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___EnumValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___EnumValue_description(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + 
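+	// description is nullable in the introspection schema, so the optional *string marshaler is used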
return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___EnumValue_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__EnumValue", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___EnumValue_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___EnumValue_isDeprecated(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.IsDeprecated(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + fc.Result = res + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___EnumValue_isDeprecated(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__EnumValue", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Boolean does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___EnumValue_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___EnumValue_deprecationReason(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DeprecationReason(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___EnumValue_deprecationReason(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__EnumValue", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___Field_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Field_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = 
graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Field_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Field", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___Field_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Field_description(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Field_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Field", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___Field_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Field_args(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Args, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]introspection.InputValue) + fc.Result = res + return ec.marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Field_args(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: 
"__Field", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext___InputValue_name(ctx, field) + case "description": + return ec.fieldContext___InputValue_description(ctx, field) + case "type": + return ec.fieldContext___InputValue_type(ctx, field) + case "defaultValue": + return ec.fieldContext___InputValue_defaultValue(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __InputValue", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___Field_type(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Field_type(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Type, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*introspection.Type) + fc.Result = res + return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Field_type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Field", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "kind": + return ec.fieldContext___Type_kind(ctx, field) + case "name": + return ec.fieldContext___Type_name(ctx, field) + case "description": + return ec.fieldContext___Type_description(ctx, field) + case "fields": + return ec.fieldContext___Type_fields(ctx, field) + case "interfaces": + return ec.fieldContext___Type_interfaces(ctx, field) + case "possibleTypes": + return ec.fieldContext___Type_possibleTypes(ctx, field) + case "enumValues": + return ec.fieldContext___Type_enumValues(ctx, field) + case "inputFields": + return ec.fieldContext___Type_inputFields(ctx, field) + case "ofType": + return ec.fieldContext___Type_ofType(ctx, field) + case "specifiedByURL": + return ec.fieldContext___Type_specifiedByURL(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___Field_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Field_isDeprecated(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.IsDeprecated(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + 
ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + fc.Result = res + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Field_isDeprecated(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Field", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Boolean does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___Field_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Field_deprecationReason(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DeprecationReason(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Field_deprecationReason(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Field", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___InputValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___InputValue_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___InputValue_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__InputValue", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___InputValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___InputValue_description(ctx, 
field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___InputValue_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__InputValue", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___InputValue_type(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___InputValue_type(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Type, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*introspection.Type) + fc.Result = res + return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___InputValue_type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__InputValue", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "kind": + return ec.fieldContext___Type_kind(ctx, field) + case "name": + return ec.fieldContext___Type_name(ctx, field) + case "description": + return ec.fieldContext___Type_description(ctx, field) + case "fields": + return ec.fieldContext___Type_fields(ctx, field) + case "interfaces": + return ec.fieldContext___Type_interfaces(ctx, field) + case "possibleTypes": + return ec.fieldContext___Type_possibleTypes(ctx, field) + case "enumValues": + return ec.fieldContext___Type_enumValues(ctx, field) + case "inputFields": + return ec.fieldContext___Type_inputFields(ctx, field) + case "ofType": + return ec.fieldContext___Type_ofType(ctx, field) + case "specifiedByURL": + return ec.fieldContext___Type_specifiedByURL(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___InputValue_defaultValue(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___InputValue_defaultValue(ctx, field) + if err != nil 
{ + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DefaultValue, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___InputValue_defaultValue(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__InputValue", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___Schema_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Schema_description(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Schema_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Schema", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___Schema_types(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Schema_types(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Types(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]introspection.Type) + fc.Result = res + return ec.marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Schema_types(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Schema", + Field: 
field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "kind": + return ec.fieldContext___Type_kind(ctx, field) + case "name": + return ec.fieldContext___Type_name(ctx, field) + case "description": + return ec.fieldContext___Type_description(ctx, field) + case "fields": + return ec.fieldContext___Type_fields(ctx, field) + case "interfaces": + return ec.fieldContext___Type_interfaces(ctx, field) + case "possibleTypes": + return ec.fieldContext___Type_possibleTypes(ctx, field) + case "enumValues": + return ec.fieldContext___Type_enumValues(ctx, field) + case "inputFields": + return ec.fieldContext___Type_inputFields(ctx, field) + case "ofType": + return ec.fieldContext___Type_ofType(ctx, field) + case "specifiedByURL": + return ec.fieldContext___Type_specifiedByURL(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___Schema_queryType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Schema_queryType(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.QueryType(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*introspection.Type) + fc.Result = res + return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Schema_queryType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Schema", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "kind": + return ec.fieldContext___Type_kind(ctx, field) + case "name": + return ec.fieldContext___Type_name(ctx, field) + case "description": + return ec.fieldContext___Type_description(ctx, field) + case "fields": + return ec.fieldContext___Type_fields(ctx, field) + case "interfaces": + return ec.fieldContext___Type_interfaces(ctx, field) + case "possibleTypes": + return ec.fieldContext___Type_possibleTypes(ctx, field) + case "enumValues": + return ec.fieldContext___Type_enumValues(ctx, field) + case "inputFields": + return ec.fieldContext___Type_inputFields(ctx, field) + case "ofType": + return ec.fieldContext___Type_ofType(ctx, field) + case "specifiedByURL": + return ec.fieldContext___Type_specifiedByURL(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___Schema_mutationType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Schema_mutationType(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { 
+ if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.MutationType(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*introspection.Type) + fc.Result = res + return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Schema_mutationType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Schema", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "kind": + return ec.fieldContext___Type_kind(ctx, field) + case "name": + return ec.fieldContext___Type_name(ctx, field) + case "description": + return ec.fieldContext___Type_description(ctx, field) + case "fields": + return ec.fieldContext___Type_fields(ctx, field) + case "interfaces": + return ec.fieldContext___Type_interfaces(ctx, field) + case "possibleTypes": + return ec.fieldContext___Type_possibleTypes(ctx, field) + case "enumValues": + return ec.fieldContext___Type_enumValues(ctx, field) + case "inputFields": + return ec.fieldContext___Type_inputFields(ctx, field) + case "ofType": + return ec.fieldContext___Type_ofType(ctx, field) + case "specifiedByURL": + return ec.fieldContext___Type_specifiedByURL(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___Schema_subscriptionType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Schema_subscriptionType(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.SubscriptionType(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*introspection.Type) + fc.Result = res + return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Schema_subscriptionType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Schema", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "kind": + return ec.fieldContext___Type_kind(ctx, field) + case "name": + return ec.fieldContext___Type_name(ctx, field) + case "description": + return ec.fieldContext___Type_description(ctx, field) + case "fields": + return ec.fieldContext___Type_fields(ctx, field) + case "interfaces": + return ec.fieldContext___Type_interfaces(ctx, field) + case "possibleTypes": + return ec.fieldContext___Type_possibleTypes(ctx, field) + case 
"enumValues": + return ec.fieldContext___Type_enumValues(ctx, field) + case "inputFields": + return ec.fieldContext___Type_inputFields(ctx, field) + case "ofType": + return ec.fieldContext___Type_ofType(ctx, field) + case "specifiedByURL": + return ec.fieldContext___Type_specifiedByURL(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___Schema_directives(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Schema_directives(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Directives(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]introspection.Directive) + fc.Result = res + return ec.marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Schema_directives(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Schema", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext___Directive_name(ctx, field) + case "description": + return ec.fieldContext___Directive_description(ctx, field) + case "locations": + return ec.fieldContext___Directive_locations(ctx, field) + case "args": + return ec.fieldContext___Directive_args(ctx, field) + case "isRepeatable": + return ec.fieldContext___Directive_isRepeatable(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Directive", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___Type_kind(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Type_kind(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Kind(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalN__TypeKind2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Type_kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Type", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, 
errors.New("field of type __TypeKind does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___Type_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Type_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Type_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Type", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___Type_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Type_description(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Type_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Type", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) ___Type_fields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Type_fields(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Fields(fc.Args["includeDeprecated"].(bool)), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]introspection.Field) + fc.Result = res + return ec.marshalO__Field2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐFieldᚄ(ctx, 
field.Selections, res) +} + +func (ec *executionContext) fieldContext___Type_fields(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Type", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext___Field_name(ctx, field) + case "description": + return ec.fieldContext___Field_description(ctx, field) + case "args": + return ec.fieldContext___Field_args(ctx, field) + case "type": + return ec.fieldContext___Field_type(ctx, field) + case "isDeprecated": + return ec.fieldContext___Field_isDeprecated(ctx, field) + case "deprecationReason": + return ec.fieldContext___Field_deprecationReason(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Field", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field___Type_fields_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) ___Type_interfaces(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Type_interfaces(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Interfaces(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]introspection.Type) + fc.Result = res + return ec.marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Type_interfaces(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Type", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "kind": + return ec.fieldContext___Type_kind(ctx, field) + case "name": + return ec.fieldContext___Type_name(ctx, field) + case "description": + return ec.fieldContext___Type_description(ctx, field) + case "fields": + return ec.fieldContext___Type_fields(ctx, field) + case "interfaces": + return ec.fieldContext___Type_interfaces(ctx, field) + case "possibleTypes": + return ec.fieldContext___Type_possibleTypes(ctx, field) + case "enumValues": + return ec.fieldContext___Type_enumValues(ctx, field) + case "inputFields": + return ec.fieldContext___Type_inputFields(ctx, field) + case "ofType": + return ec.fieldContext___Type_ofType(ctx, field) + case "specifiedByURL": + return ec.fieldContext___Type_specifiedByURL(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___Type_possibleTypes(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) { + fc, err := 
ec.fieldContext___Type_possibleTypes(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PossibleTypes(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]introspection.Type) + fc.Result = res + return ec.marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Type_possibleTypes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Type", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "kind": + return ec.fieldContext___Type_kind(ctx, field) + case "name": + return ec.fieldContext___Type_name(ctx, field) + case "description": + return ec.fieldContext___Type_description(ctx, field) + case "fields": + return ec.fieldContext___Type_fields(ctx, field) + case "interfaces": + return ec.fieldContext___Type_interfaces(ctx, field) + case "possibleTypes": + return ec.fieldContext___Type_possibleTypes(ctx, field) + case "enumValues": + return ec.fieldContext___Type_enumValues(ctx, field) + case "inputFields": + return ec.fieldContext___Type_inputFields(ctx, field) + case "ofType": + return ec.fieldContext___Type_ofType(ctx, field) + case "specifiedByURL": + return ec.fieldContext___Type_specifiedByURL(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___Type_enumValues(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Type_enumValues(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.EnumValues(fc.Args["includeDeprecated"].(bool)), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]introspection.EnumValue) + fc.Result = res + return ec.marshalO__EnumValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Type_enumValues(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Type", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext___EnumValue_name(ctx, field) + case "description": + return ec.fieldContext___EnumValue_description(ctx, field) + case "isDeprecated": + return ec.fieldContext___EnumValue_isDeprecated(ctx, field) + case "deprecationReason": + 
return ec.fieldContext___EnumValue_deprecationReason(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __EnumValue", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field___Type_enumValues_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) ___Type_inputFields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Type_inputFields(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.InputFields(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]introspection.InputValue) + fc.Result = res + return ec.marshalO__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Type_inputFields(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Type", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext___InputValue_name(ctx, field) + case "description": + return ec.fieldContext___InputValue_description(ctx, field) + case "type": + return ec.fieldContext___InputValue_type(ctx, field) + case "defaultValue": + return ec.fieldContext___InputValue_defaultValue(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __InputValue", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___Type_ofType(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Type_ofType(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.OfType(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*introspection.Type) + fc.Result = res + return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Type_ofType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Type", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "kind": + return ec.fieldContext___Type_kind(ctx, field) + case "name": + return 
ec.fieldContext___Type_name(ctx, field) + case "description": + return ec.fieldContext___Type_description(ctx, field) + case "fields": + return ec.fieldContext___Type_fields(ctx, field) + case "interfaces": + return ec.fieldContext___Type_interfaces(ctx, field) + case "possibleTypes": + return ec.fieldContext___Type_possibleTypes(ctx, field) + case "enumValues": + return ec.fieldContext___Type_enumValues(ctx, field) + case "inputFields": + return ec.fieldContext___Type_inputFields(ctx, field) + case "ofType": + return ec.fieldContext___Type_ofType(ctx, field) + case "specifiedByURL": + return ec.fieldContext___Type_specifiedByURL(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) ___Type_specifiedByURL(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) { + fc, err := ec.fieldContext___Type_specifiedByURL(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.SpecifiedByURL(), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext___Type_specifiedByURL(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "__Type", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +// endregion **************************** field.gotpl ***************************** + +// region **************************** input.gotpl ***************************** + +func (ec *executionContext) unmarshalInputCreateDatabaseInput(ctx context.Context, obj interface{}) (generated.CreateDatabaseInput, error) { + var it generated.CreateDatabaseInput + asMap := map[string]interface{}{} + for k, v := range obj.(map[string]interface{}) { + asMap[k] = v + } + + fieldsInOrder := [...]string{"createdAt", "updatedAt", "createdBy", "updatedBy", "organizationID", "name", "geo", "dsn", "token", "status", "provider", "groupID"} + for _, k := range fieldsInOrder { + v, ok := asMap[k] + if !ok { + continue + } + switch k { + case "createdAt": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAt")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAt = data + case "updatedAt": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAt")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAt = data + case "createdBy": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdBy")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedBy = data + case "updatedBy": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedBy")) + data, err := 
ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedBy = data + case "organizationID": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationID")) + data, err := ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + it.OrganizationID = data + case "name": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + data, err := ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + it.Name = data + case "geo": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geo")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.Geo = data + case "dsn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsn")) + data, err := ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + it.Dsn = data + case "token": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("token")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.Token = data + case "status": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("status")) + data, err := ec.unmarshalODatabaseDatabaseStatus2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatus(ctx, v) + if err != nil { + return it, err + } + it.Status = data + case "provider": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("provider")) + data, err := ec.unmarshalODatabaseDatabaseProvider2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProvider(ctx, v) + if err != nil { + return it, err + } + it.Provider = data + case "groupID": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupID")) + data, err := ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + it.GroupID = data + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputCreateGroupInput(ctx context.Context, obj interface{}) (generated.CreateGroupInput, error) { + var it generated.CreateGroupInput + asMap := map[string]interface{}{} + for k, v := range obj.(map[string]interface{}) { + asMap[k] = v + } + + fieldsInOrder := [...]string{"createdAt", "updatedAt", "createdBy", "updatedBy", "name", "description", "primaryLocation", "locations", "token", "region", "databaseIDs"} + for _, k := range fieldsInOrder { + v, ok := asMap[k] + if !ok { + continue + } + switch k { + case "createdAt": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAt")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAt = data + case "updatedAt": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAt")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAt = data + case "createdBy": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdBy")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedBy = data + case "updatedBy": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedBy")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedBy = data + case "name": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + data, err := ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + it.Name = data + case "description": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("description")) + data, err := 
ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.Description = data + case "primaryLocation": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocation")) + data, err := ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + it.PrimaryLocation = data + case "locations": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("locations")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.Locations = data + case "token": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("token")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.Token = data + case "region": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("region")) + data, err := ec.unmarshalOGroupRegion2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegion(ctx, v) + if err != nil { + return it, err + } + it.Region = data + case "databaseIDs": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("databaseIDs")) + data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.DatabaseIDs = data + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputDatabaseWhereInput(ctx context.Context, obj interface{}) (generated.DatabaseWhereInput, error) { + var it generated.DatabaseWhereInput + asMap := map[string]interface{}{} + for k, v := range obj.(map[string]interface{}) { + asMap[k] = v + } + + fieldsInOrder := [...]string{"not", "and", "or", "id", "idNEQ", "idIn", "idNotIn", "idGT", "idGTE", "idLT", "idLTE", "idEqualFold", "idContainsFold", "createdAt", "createdAtNEQ", "createdAtIn", "createdAtNotIn", "createdAtGT", "createdAtGTE", "createdAtLT", "createdAtLTE", "createdAtIsNil", "createdAtNotNil", "updatedAt", "updatedAtNEQ", "updatedAtIn", "updatedAtNotIn", "updatedAtGT", "updatedAtGTE", "updatedAtLT", "updatedAtLTE", "updatedAtIsNil", "updatedAtNotNil", "createdBy", "createdByNEQ", "createdByIn", "createdByNotIn", "createdByGT", "createdByGTE", "createdByLT", "createdByLTE", "createdByContains", "createdByHasPrefix", "createdByHasSuffix", "createdByIsNil", "createdByNotNil", "createdByEqualFold", "createdByContainsFold", "updatedBy", "updatedByNEQ", "updatedByIn", "updatedByNotIn", "updatedByGT", "updatedByGTE", "updatedByLT", "updatedByLTE", "updatedByContains", "updatedByHasPrefix", "updatedByHasSuffix", "updatedByIsNil", "updatedByNotNil", "updatedByEqualFold", "updatedByContainsFold", "deletedAt", "deletedAtNEQ", "deletedAtIn", "deletedAtNotIn", "deletedAtGT", "deletedAtGTE", "deletedAtLT", "deletedAtLTE", "deletedAtIsNil", "deletedAtNotNil", "deletedBy", "deletedByNEQ", "deletedByIn", "deletedByNotIn", "deletedByGT", "deletedByGTE", "deletedByLT", "deletedByLTE", "deletedByContains", "deletedByHasPrefix", "deletedByHasSuffix", "deletedByIsNil", "deletedByNotNil", "deletedByEqualFold", "deletedByContainsFold", "organizationID", "organizationIDNEQ", "organizationIDIn", "organizationIDNotIn", "organizationIDGT", "organizationIDGTE", "organizationIDLT", "organizationIDLTE", "organizationIDContains", "organizationIDHasPrefix", "organizationIDHasSuffix", "organizationIDEqualFold", "organizationIDContainsFold", "name", "nameNEQ", "nameIn", "nameNotIn", "nameGT", "nameGTE", "nameLT", "nameLTE", "nameContains", "nameHasPrefix", "nameHasSuffix", "nameEqualFold", "nameContainsFold", "geo", "geoNEQ", "geoIn", "geoNotIn", "geoGT", "geoGTE", "geoLT", "geoLTE", "geoContains", "geoHasPrefix", 
"geoHasSuffix", "geoIsNil", "geoNotNil", "geoEqualFold", "geoContainsFold", "dsn", "dsnNEQ", "dsnIn", "dsnNotIn", "dsnGT", "dsnGTE", "dsnLT", "dsnLTE", "dsnContains", "dsnHasPrefix", "dsnHasSuffix", "dsnEqualFold", "dsnContainsFold", "groupID", "groupIDNEQ", "groupIDIn", "groupIDNotIn", "groupIDGT", "groupIDGTE", "groupIDLT", "groupIDLTE", "groupIDContains", "groupIDHasPrefix", "groupIDHasSuffix", "groupIDEqualFold", "groupIDContainsFold", "status", "statusNEQ", "statusIn", "statusNotIn", "provider", "providerNEQ", "providerIn", "providerNotIn", "hasGroup", "hasGroupWith"} + for _, k := range fieldsInOrder { + v, ok := asMap[k] + if !ok { + continue + } + switch k { + case "not": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("not")) + data, err := ec.unmarshalODatabaseWhereInput2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseWhereInput(ctx, v) + if err != nil { + return it, err + } + it.Not = data + case "and": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("and")) + data, err := ec.unmarshalODatabaseWhereInput2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseWhereInputᚄ(ctx, v) + if err != nil { + return it, err + } + it.And = data + case "or": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("or")) + data, err := ec.unmarshalODatabaseWhereInput2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseWhereInputᚄ(ctx, v) + if err != nil { + return it, err + } + it.Or = data + case "id": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.ID = data + case "idNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idNEQ")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDNEQ = data + case "idIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idIn")) + data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.IDIn = data + case "idNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idNotIn")) + data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.IDNotIn = data + case "idGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idGT")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDGT = data + case "idGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idGTE")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDGTE = data + case "idLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idLT")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDLT = data + case "idLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idLTE")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDLTE = data + case "idEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idEqualFold")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDEqualFold = data + case "idContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idContainsFold")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDContainsFold = data + case "createdAt": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAt")) + data, 
err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAt = data + case "createdAtNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtNEQ")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtNEQ = data + case "createdAtIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtIn")) + data, err := ec.unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtIn = data + case "createdAtNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtNotIn")) + data, err := ec.unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtNotIn = data + case "createdAtGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtGT")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtGT = data + case "createdAtGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtGTE")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtGTE = data + case "createdAtLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtLT")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtLT = data + case "createdAtLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtLTE")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtLTE = data + case "createdAtIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtIsNil = data + case "createdAtNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtNotNil = data + case "updatedAt": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAt")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAt = data + case "updatedAtNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtNEQ")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtNEQ = data + case "updatedAtIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtIn")) + data, err := ec.unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtIn = data + case "updatedAtNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtNotIn")) + data, err := ec.unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtNotIn = data + case "updatedAtGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtGT")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtGT = data + case "updatedAtGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtGTE")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtGTE = data + case "updatedAtLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtLT")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err 
!= nil { + return it, err + } + it.UpdatedAtLT = data + case "updatedAtLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtLTE")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtLTE = data + case "updatedAtIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtIsNil = data + case "updatedAtNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtNotNil = data + case "createdBy": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdBy")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedBy = data + case "createdByNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByNEQ = data + case "createdByIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.CreatedByIn = data + case "createdByNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.CreatedByNotIn = data + case "createdByGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByGT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByGT = data + case "createdByGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByGTE = data + case "createdByLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByLT = data + case "createdByLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByLTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByLTE = data + case "createdByContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByContains")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByContains = data + case "createdByHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByHasPrefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByHasPrefix = data + case "createdByHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByHasSuffix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByHasSuffix = data + case "createdByIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.CreatedByIsNil = data + case "createdByNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil 
{ + return it, err + } + it.CreatedByNotNil = data + case "createdByEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByEqualFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByEqualFold = data + case "createdByContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByContainsFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByContainsFold = data + case "updatedBy": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedBy")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedBy = data + case "updatedByNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByNEQ = data + case "updatedByIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByIn = data + case "updatedByNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByNotIn = data + case "updatedByGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByGT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByGT = data + case "updatedByGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByGTE = data + case "updatedByLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByLT = data + case "updatedByLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByLTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByLTE = data + case "updatedByContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByContains")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByContains = data + case "updatedByHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByHasPrefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByHasPrefix = data + case "updatedByHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByHasSuffix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByHasSuffix = data + case "updatedByIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByIsNil = data + case "updatedByNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByNotNil = data + case "updatedByEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByEqualFold")) + data, err := 
ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByEqualFold = data + case "updatedByContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByContainsFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByContainsFold = data + case "deletedAt": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAt")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.DeletedAt = data + case "deletedAtNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtNEQ")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtNEQ = data + case "deletedAtIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtIn")) + data, err := ec.unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtIn = data + case "deletedAtNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtNotIn")) + data, err := ec.unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtNotIn = data + case "deletedAtGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtGT")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtGT = data + case "deletedAtGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtGTE")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtGTE = data + case "deletedAtLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtLT")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtLT = data + case "deletedAtLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtLTE")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtLTE = data + case "deletedAtIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtIsNil = data + case "deletedAtNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtNotNil = data + case "deletedBy": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedBy")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedBy = data + case "deletedByNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByNEQ = data + case "deletedByIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.DeletedByIn = data + case "deletedByNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.DeletedByNotIn = data + case "deletedByGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByGT")) + data, err := 
ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByGT = data + case "deletedByGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByGTE = data + case "deletedByLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByLT = data + case "deletedByLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByLTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByLTE = data + case "deletedByContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByContains")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByContains = data + case "deletedByHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByHasPrefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByHasPrefix = data + case "deletedByHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByHasSuffix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByHasSuffix = data + case "deletedByIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.DeletedByIsNil = data + case "deletedByNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.DeletedByNotNil = data + case "deletedByEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByEqualFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByEqualFold = data + case "deletedByContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByContainsFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByContainsFold = data + case "organizationID": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationID")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.OrganizationID = data + case "organizationIDNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationIDNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.OrganizationIDNEQ = data + case "organizationIDIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationIDIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.OrganizationIDIn = data + case "organizationIDNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationIDNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.OrganizationIDNotIn = data + case "organizationIDGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationIDGT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.OrganizationIDGT = data + case 
"organizationIDGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationIDGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.OrganizationIDGTE = data + case "organizationIDLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationIDLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.OrganizationIDLT = data + case "organizationIDLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationIDLTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.OrganizationIDLTE = data + case "organizationIDContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationIDContains")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.OrganizationIDContains = data + case "organizationIDHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationIDHasPrefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.OrganizationIDHasPrefix = data + case "organizationIDHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationIDHasSuffix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.OrganizationIDHasSuffix = data + case "organizationIDEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationIDEqualFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.OrganizationIDEqualFold = data + case "organizationIDContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationIDContainsFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.OrganizationIDContainsFold = data + case "name": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.Name = data + case "nameNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameNEQ = data + case "nameIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.NameIn = data + case "nameNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.NameNotIn = data + case "nameGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameGT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameGT = data + case "nameGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameGTE = data + case "nameLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameLT = data + case "nameLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameLTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + 
it.NameLTE = data + case "nameContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameContains")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameContains = data + case "nameHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameHasPrefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameHasPrefix = data + case "nameHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameHasSuffix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameHasSuffix = data + case "nameEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameEqualFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameEqualFold = data + case "nameContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameContainsFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameContainsFold = data + case "geo": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geo")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.Geo = data + case "geoNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GeoNEQ = data + case "geoIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.GeoIn = data + case "geoNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.GeoNotIn = data + case "geoGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoGT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GeoGT = data + case "geoGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GeoGTE = data + case "geoLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GeoLT = data + case "geoLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoLTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GeoLTE = data + case "geoContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoContains")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GeoContains = data + case "geoHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoHasPrefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GeoHasPrefix = data + case "geoHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoHasSuffix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GeoHasSuffix = data + case "geoIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + 
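+			// geoIsNil matches databases whose optional geo field has no value set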
it.GeoIsNil = data + case "geoNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.GeoNotNil = data + case "geoEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoEqualFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GeoEqualFold = data + case "geoContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geoContainsFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GeoContainsFold = data + case "dsn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsn")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.Dsn = data + case "dsnNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsnNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DsnNEQ = data + case "dsnIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsnIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.DsnIn = data + case "dsnNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsnNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.DsnNotIn = data + case "dsnGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsnGT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DsnGT = data + case "dsnGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsnGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DsnGTE = data + case "dsnLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsnLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DsnLT = data + case "dsnLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsnLTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DsnLTE = data + case "dsnContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsnContains")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DsnContains = data + case "dsnHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsnHasPrefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DsnHasPrefix = data + case "dsnHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsnHasSuffix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DsnHasSuffix = data + case "dsnEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsnEqualFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DsnEqualFold = data + case "dsnContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsnContainsFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DsnContainsFold = data + case "groupID": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupID")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GroupID = 
data + case "groupIDNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupIDNEQ")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GroupIDNEQ = data + case "groupIDIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupIDIn")) + data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.GroupIDIn = data + case "groupIDNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupIDNotIn")) + data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.GroupIDNotIn = data + case "groupIDGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupIDGT")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GroupIDGT = data + case "groupIDGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupIDGTE")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GroupIDGTE = data + case "groupIDLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupIDLT")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GroupIDLT = data + case "groupIDLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupIDLTE")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GroupIDLTE = data + case "groupIDContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupIDContains")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GroupIDContains = data + case "groupIDHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupIDHasPrefix")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GroupIDHasPrefix = data + case "groupIDHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupIDHasSuffix")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GroupIDHasSuffix = data + case "groupIDEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupIDEqualFold")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GroupIDEqualFold = data + case "groupIDContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupIDContainsFold")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.GroupIDContainsFold = data + case "status": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("status")) + data, err := ec.unmarshalODatabaseDatabaseStatus2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatus(ctx, v) + if err != nil { + return it, err + } + it.Status = data + case "statusNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("statusNEQ")) + data, err := ec.unmarshalODatabaseDatabaseStatus2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatus(ctx, v) + if err != nil { + return it, err + } + it.StatusNEQ = data + case "statusIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("statusIn")) + data, err := ec.unmarshalODatabaseDatabaseStatus2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatusᚄ(ctx, v) + if err != nil { + return it, err + } + it.StatusIn = data + case "statusNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("statusNotIn")) + data, err := 
ec.unmarshalODatabaseDatabaseStatus2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatusᚄ(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.StatusNotIn = data
+		case "provider":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("provider"))
+			data, err := ec.unmarshalODatabaseDatabaseProvider2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProvider(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Provider = data
+		case "providerNEQ":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("providerNEQ"))
+			data, err := ec.unmarshalODatabaseDatabaseProvider2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProvider(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.ProviderNEQ = data
+		case "providerIn":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("providerIn"))
+			data, err := ec.unmarshalODatabaseDatabaseProvider2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProviderᚄ(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.ProviderIn = data
+		case "providerNotIn":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("providerNotIn"))
+			data, err := ec.unmarshalODatabaseDatabaseProvider2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProviderᚄ(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.ProviderNotIn = data
+		case "hasGroup":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("hasGroup"))
+			data, err := ec.unmarshalOBoolean2ᚖbool(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.HasGroup = data
+		case "hasGroupWith":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("hasGroupWith"))
+			data, err := ec.unmarshalOGroupWhereInput2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupWhereInputᚄ(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.HasGroupWith = data
+		}
+	}
+
+	return it, nil
+}
+
+func (ec *executionContext) unmarshalInputGroupWhereInput(ctx context.Context, obj interface{}) (generated.GroupWhereInput, error) {
+	var it generated.GroupWhereInput
+	asMap := map[string]interface{}{}
+	for k, v := range obj.(map[string]interface{}) {
+		asMap[k] = v
+	}
+
+	fieldsInOrder := [...]string{"not", "and", "or", "id", "idNEQ", "idIn", "idNotIn", "idGT", "idGTE", "idLT", "idLTE", "idEqualFold", "idContainsFold", "createdAt", "createdAtNEQ", "createdAtIn", "createdAtNotIn", "createdAtGT", "createdAtGTE", "createdAtLT", "createdAtLTE", "createdAtIsNil", "createdAtNotNil", "updatedAt", "updatedAtNEQ", "updatedAtIn", "updatedAtNotIn", "updatedAtGT", "updatedAtGTE", "updatedAtLT", "updatedAtLTE", "updatedAtIsNil", "updatedAtNotNil", "createdBy", "createdByNEQ", "createdByIn", "createdByNotIn", "createdByGT", "createdByGTE", "createdByLT", "createdByLTE", "createdByContains", "createdByHasPrefix", "createdByHasSuffix", "createdByIsNil", "createdByNotNil", "createdByEqualFold", "createdByContainsFold", "updatedBy", "updatedByNEQ", "updatedByIn", "updatedByNotIn", "updatedByGT", "updatedByGTE", "updatedByLT", "updatedByLTE", "updatedByContains", "updatedByHasPrefix", "updatedByHasSuffix", "updatedByIsNil", "updatedByNotNil", "updatedByEqualFold", "updatedByContainsFold", "deletedAt", "deletedAtNEQ", "deletedAtIn", "deletedAtNotIn", "deletedAtGT", "deletedAtGTE", "deletedAtLT", "deletedAtLTE", "deletedAtIsNil", "deletedAtNotNil", "deletedBy", "deletedByNEQ", "deletedByIn", "deletedByNotIn", "deletedByGT", "deletedByGTE", "deletedByLT", "deletedByLTE", "deletedByContains", "deletedByHasPrefix", "deletedByHasSuffix", "deletedByIsNil", "deletedByNotNil", "deletedByEqualFold",
"deletedByContainsFold", "name", "nameNEQ", "nameIn", "nameNotIn", "nameGT", "nameGTE", "nameLT", "nameLTE", "nameContains", "nameHasPrefix", "nameHasSuffix", "nameEqualFold", "nameContainsFold", "description", "descriptionNEQ", "descriptionIn", "descriptionNotIn", "descriptionGT", "descriptionGTE", "descriptionLT", "descriptionLTE", "descriptionContains", "descriptionHasPrefix", "descriptionHasSuffix", "descriptionIsNil", "descriptionNotNil", "descriptionEqualFold", "descriptionContainsFold", "primaryLocation", "primaryLocationNEQ", "primaryLocationIn", "primaryLocationNotIn", "primaryLocationGT", "primaryLocationGTE", "primaryLocationLT", "primaryLocationLTE", "primaryLocationContains", "primaryLocationHasPrefix", "primaryLocationHasSuffix", "primaryLocationEqualFold", "primaryLocationContainsFold", "region", "regionNEQ", "regionIn", "regionNotIn", "hasDatabases", "hasDatabasesWith"} + for _, k := range fieldsInOrder { + v, ok := asMap[k] + if !ok { + continue + } + switch k { + case "not": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("not")) + data, err := ec.unmarshalOGroupWhereInput2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupWhereInput(ctx, v) + if err != nil { + return it, err + } + it.Not = data + case "and": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("and")) + data, err := ec.unmarshalOGroupWhereInput2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupWhereInputᚄ(ctx, v) + if err != nil { + return it, err + } + it.And = data + case "or": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("or")) + data, err := ec.unmarshalOGroupWhereInput2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupWhereInputᚄ(ctx, v) + if err != nil { + return it, err + } + it.Or = data + case "id": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.ID = data + case "idNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idNEQ")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDNEQ = data + case "idIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idIn")) + data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.IDIn = data + case "idNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idNotIn")) + data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.IDNotIn = data + case "idGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idGT")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDGT = data + case "idGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idGTE")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDGTE = data + case "idLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idLT")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDLT = data + case "idLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idLTE")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDLTE = data + case "idEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idEqualFold")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDEqualFold = data + case "idContainsFold": 
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("idContainsFold")) + data, err := ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.IDContainsFold = data + case "createdAt": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAt")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAt = data + case "createdAtNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtNEQ")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtNEQ = data + case "createdAtIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtIn")) + data, err := ec.unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtIn = data + case "createdAtNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtNotIn")) + data, err := ec.unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtNotIn = data + case "createdAtGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtGT")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtGT = data + case "createdAtGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtGTE")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtGTE = data + case "createdAtLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtLT")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtLT = data + case "createdAtLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtLTE")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtLTE = data + case "createdAtIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtIsNil = data + case "createdAtNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdAtNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.CreatedAtNotNil = data + case "updatedAt": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAt")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAt = data + case "updatedAtNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtNEQ")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtNEQ = data + case "updatedAtIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtIn")) + data, err := ec.unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtIn = data + case "updatedAtNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtNotIn")) + data, err := ec.unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtNotIn = data + case "updatedAtGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtGT")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtGT = data + case "updatedAtGTE": + ctx := graphql.WithPathContext(ctx, 
graphql.NewPathWithField("updatedAtGTE")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtGTE = data + case "updatedAtLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtLT")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtLT = data + case "updatedAtLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtLTE")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtLTE = data + case "updatedAtIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtIsNil = data + case "updatedAtNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAtNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.UpdatedAtNotNil = data + case "createdBy": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdBy")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedBy = data + case "createdByNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByNEQ = data + case "createdByIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.CreatedByIn = data + case "createdByNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.CreatedByNotIn = data + case "createdByGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByGT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByGT = data + case "createdByGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByGTE = data + case "createdByLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByLT = data + case "createdByLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByLTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByLTE = data + case "createdByContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByContains")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByContains = data + case "createdByHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByHasPrefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByHasPrefix = data + case "createdByHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByHasSuffix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByHasSuffix = data + case "createdByIsNil": + ctx := graphql.WithPathContext(ctx, 
graphql.NewPathWithField("createdByIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.CreatedByIsNil = data + case "createdByNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.CreatedByNotNil = data + case "createdByEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByEqualFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByEqualFold = data + case "createdByContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("createdByContainsFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.CreatedByContainsFold = data + case "updatedBy": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedBy")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedBy = data + case "updatedByNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByNEQ = data + case "updatedByIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByIn = data + case "updatedByNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByNotIn = data + case "updatedByGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByGT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByGT = data + case "updatedByGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByGTE = data + case "updatedByLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByLT = data + case "updatedByLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByLTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByLTE = data + case "updatedByContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByContains")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByContains = data + case "updatedByHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByHasPrefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByHasPrefix = data + case "updatedByHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByHasSuffix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByHasSuffix = data + case "updatedByIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByIsNil = data + case "updatedByNotNil": + 
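+			// updatedByNotNil selects rows whose updatedBy audit field is set (non-null)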
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByNotNil = data + case "updatedByEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByEqualFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByEqualFold = data + case "updatedByContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedByContainsFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.UpdatedByContainsFold = data + case "deletedAt": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAt")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.DeletedAt = data + case "deletedAtNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtNEQ")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtNEQ = data + case "deletedAtIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtIn")) + data, err := ec.unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtIn = data + case "deletedAtNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtNotIn")) + data, err := ec.unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtNotIn = data + case "deletedAtGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtGT")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtGT = data + case "deletedAtGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtGTE")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtGTE = data + case "deletedAtLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtLT")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtLT = data + case "deletedAtLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtLTE")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtLTE = data + case "deletedAtIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtIsNil = data + case "deletedAtNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedAtNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.DeletedAtNotNil = data + case "deletedBy": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedBy")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedBy = data + case "deletedByNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByNEQ = data + case "deletedByIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.DeletedByIn = data + case "deletedByNotIn": + ctx := 
graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.DeletedByNotIn = data + case "deletedByGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByGT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByGT = data + case "deletedByGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByGTE = data + case "deletedByLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByLT = data + case "deletedByLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByLTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByLTE = data + case "deletedByContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByContains")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByContains = data + case "deletedByHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByHasPrefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByHasPrefix = data + case "deletedByHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByHasSuffix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByHasSuffix = data + case "deletedByIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.DeletedByIsNil = data + case "deletedByNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.DeletedByNotNil = data + case "deletedByEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByEqualFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByEqualFold = data + case "deletedByContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("deletedByContainsFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DeletedByContainsFold = data + case "name": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.Name = data + case "nameNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameNEQ = data + case "nameIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.NameIn = data + case "nameNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.NameNotIn = data + case "nameGT": + ctx := graphql.WithPathContext(ctx, 
graphql.NewPathWithField("nameGT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameGT = data + case "nameGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameGTE = data + case "nameLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameLT = data + case "nameLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameLTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameLTE = data + case "nameContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameContains")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameContains = data + case "nameHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameHasPrefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameHasPrefix = data + case "nameHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameHasSuffix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameHasSuffix = data + case "nameEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameEqualFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameEqualFold = data + case "nameContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nameContainsFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.NameContainsFold = data + case "description": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("description")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.Description = data + case "descriptionNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DescriptionNEQ = data + case "descriptionIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.DescriptionIn = data + case "descriptionNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.DescriptionNotIn = data + case "descriptionGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionGT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DescriptionGT = data + case "descriptionGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DescriptionGTE = data + case "descriptionLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DescriptionLT = data + case "descriptionLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionLTE")) + data, err := 
ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DescriptionLTE = data + case "descriptionContains": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionContains")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DescriptionContains = data + case "descriptionHasPrefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionHasPrefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DescriptionHasPrefix = data + case "descriptionHasSuffix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionHasSuffix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DescriptionHasSuffix = data + case "descriptionIsNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionIsNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.DescriptionIsNil = data + case "descriptionNotNil": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionNotNil")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.DescriptionNotNil = data + case "descriptionEqualFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionEqualFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DescriptionEqualFold = data + case "descriptionContainsFold": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("descriptionContainsFold")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.DescriptionContainsFold = data + case "primaryLocation": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocation")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.PrimaryLocation = data + case "primaryLocationNEQ": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocationNEQ")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.PrimaryLocationNEQ = data + case "primaryLocationIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocationIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.PrimaryLocationIn = data + case "primaryLocationNotIn": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocationNotIn")) + data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.PrimaryLocationNotIn = data + case "primaryLocationGT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocationGT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.PrimaryLocationGT = data + case "primaryLocationGTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocationGTE")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.PrimaryLocationGTE = data + case "primaryLocationLT": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocationLT")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.PrimaryLocationLT = data + case "primaryLocationLTE": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocationLTE")) + data, err := 
ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.PrimaryLocationLTE = data
+		case "primaryLocationContains":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocationContains"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.PrimaryLocationContains = data
+		case "primaryLocationHasPrefix":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocationHasPrefix"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.PrimaryLocationHasPrefix = data
+		case "primaryLocationHasSuffix":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocationHasSuffix"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.PrimaryLocationHasSuffix = data
+		case "primaryLocationEqualFold":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocationEqualFold"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.PrimaryLocationEqualFold = data
+		case "primaryLocationContainsFold":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocationContainsFold"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.PrimaryLocationContainsFold = data
+		case "region":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("region"))
+			data, err := ec.unmarshalOGroupRegion2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegion(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Region = data
+		case "regionNEQ":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("regionNEQ"))
+			data, err := ec.unmarshalOGroupRegion2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegion(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.RegionNEQ = data
+		case "regionIn":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("regionIn"))
+			data, err := ec.unmarshalOGroupRegion2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegionᚄ(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.RegionIn = data
+		case "regionNotIn":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("regionNotIn"))
+			data, err := ec.unmarshalOGroupRegion2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegionᚄ(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.RegionNotIn = data
+		case "hasDatabases":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("hasDatabases"))
+			data, err := ec.unmarshalOBoolean2ᚖbool(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.HasDatabases = data
+		case "hasDatabasesWith":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("hasDatabasesWith"))
+			data, err := ec.unmarshalODatabaseWhereInput2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseWhereInputᚄ(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.HasDatabasesWith = data
+		}
+	}
+
+	return it, nil
+}
+
+func (ec *executionContext) unmarshalInputUpdateDatabaseInput(ctx context.Context, obj interface{}) (generated.UpdateDatabaseInput, error) {
+	var it generated.UpdateDatabaseInput
+	asMap := map[string]interface{}{}
+	for k, v := range obj.(map[string]interface{}) {
+		asMap[k] = v
+	}
+
+	fieldsInOrder := [...]string{"updatedAt", "clearUpdatedAt", "updatedBy", "clearUpdatedBy", "organizationID", "name", "geo", "clearGeo", "dsn", "token", "clearToken", "status", "provider", "groupID"}
+	for _, k := range fieldsInOrder {
+		v, ok := asMap[k]
+		if !ok {
+			continue
+		}
+		switch k {
+		case "updatedAt":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAt"))
+			data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.UpdatedAt = data
+		case "clearUpdatedAt":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clearUpdatedAt"))
+			data, err := ec.unmarshalOBoolean2bool(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.ClearUpdatedAt = data
+		case "updatedBy":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedBy"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.UpdatedBy = data
+		case "clearUpdatedBy":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clearUpdatedBy"))
+			data, err := ec.unmarshalOBoolean2bool(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.ClearUpdatedBy = data
+		case "organizationID":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("organizationID"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.OrganizationID = data
+		case "name":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Name = data
+		case "geo":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("geo"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Geo = data
+		case "clearGeo":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clearGeo"))
+			data, err := ec.unmarshalOBoolean2bool(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.ClearGeo = data
+		case "dsn":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dsn"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Dsn = data
+		case "token":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("token"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Token = data
+		case "clearToken":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clearToken"))
+			data, err := ec.unmarshalOBoolean2bool(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.ClearToken = data
+		case "status":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("status"))
+			data, err := ec.unmarshalODatabaseDatabaseStatus2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatus(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Status = data
+		case "provider":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("provider"))
+			data, err := ec.unmarshalODatabaseDatabaseProvider2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProvider(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Provider = data
+		case "groupID":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupID"))
+			data, err := ec.unmarshalOID2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.GroupID = data
+		}
+	}
+
+	return it, nil
+}
+
+func (ec *executionContext) unmarshalInputUpdateGroupInput(ctx context.Context, obj interface{}) (generated.UpdateGroupInput, error) {
+	var it generated.UpdateGroupInput
+	asMap := map[string]interface{}{}
+	for k, v := range obj.(map[string]interface{}) {
+		asMap[k] = v
+	}
+
+	fieldsInOrder := [...]string{"updatedAt", "clearUpdatedAt", "updatedBy", "clearUpdatedBy", "name", "description", "clearDescription", "primaryLocation", "locations", "appendLocations", "clearLocations", "token", "clearToken", "region", "addDatabaseIDs", "removeDatabaseIDs", "clearDatabases"}
+	for _, k := range fieldsInOrder {
+		v, ok := asMap[k]
+		if !ok {
+			continue
+		}
+		switch k {
+		case "updatedAt":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedAt"))
+			data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.UpdatedAt = data
+		case "clearUpdatedAt":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clearUpdatedAt"))
+			data, err := ec.unmarshalOBoolean2bool(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.ClearUpdatedAt = data
+		case "updatedBy":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("updatedBy"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.UpdatedBy = data
+		case "clearUpdatedBy":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clearUpdatedBy"))
+			data, err := ec.unmarshalOBoolean2bool(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.ClearUpdatedBy = data
+		case "name":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Name = data
+		case "description":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("description"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Description = data
+		case "clearDescription":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clearDescription"))
+			data, err := ec.unmarshalOBoolean2bool(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.ClearDescription = data
+		case "primaryLocation":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("primaryLocation"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.PrimaryLocation = data
+		case "locations":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("locations"))
+			data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Locations = data
+		case "appendLocations":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("appendLocations"))
+			data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.AppendLocations = data
+		case "clearLocations":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clearLocations"))
+			data, err := ec.unmarshalOBoolean2bool(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.ClearLocations = data
+		case "token":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("token"))
+			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Token = data
+		case "clearToken":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clearToken"))
+			data, err := ec.unmarshalOBoolean2bool(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.ClearToken = data
+		case "region":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("region"))
+			data, err := ec.unmarshalOGroupRegion2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegion(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Region = data
+		case "addDatabaseIDs":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("addDatabaseIDs"))
+			data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.AddDatabaseIDs = data
+		case "removeDatabaseIDs":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("removeDatabaseIDs"))
+			data, err :=
ec.unmarshalOID2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.RemoveDatabaseIDs = data + case "clearDatabases": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clearDatabases")) + data, err := ec.unmarshalOBoolean2bool(ctx, v) + if err != nil { + return it, err + } + it.ClearDatabases = data + } + } + + return it, nil +} + +// endregion **************************** input.gotpl ***************************** + +// region ************************** interface.gotpl *************************** + +func (ec *executionContext) _Node(ctx context.Context, sel ast.SelectionSet, obj generated.Noder) graphql.Marshaler { + switch obj := (obj).(type) { + case nil: + return graphql.Null + case *generated.Database: + if obj == nil { + return graphql.Null + } + return ec._Database(ctx, sel, obj) + case *generated.Group: + if obj == nil { + return graphql.Null + } + return ec._Group(ctx, sel, obj) + default: + panic(fmt.Errorf("unexpected type %T", obj)) + } +} + +// endregion ************************** interface.gotpl *************************** + +// region **************************** object.gotpl **************************** + +var databaseImplementors = []string{"Database", "Node"} + +func (ec *executionContext) _Database(ctx context.Context, sel ast.SelectionSet, obj *generated.Database) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, databaseImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Database") + case "id": + out.Values[i] = ec._Database_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "createdAt": + out.Values[i] = ec._Database_createdAt(ctx, field, obj) + case "updatedAt": + out.Values[i] = ec._Database_updatedAt(ctx, field, obj) + case "createdBy": + out.Values[i] = ec._Database_createdBy(ctx, field, obj) + case "updatedBy": + out.Values[i] = ec._Database_updatedBy(ctx, field, obj) + case "deletedAt": + out.Values[i] = ec._Database_deletedAt(ctx, field, obj) + case "deletedBy": + out.Values[i] = ec._Database_deletedBy(ctx, field, obj) + case "organizationID": + out.Values[i] = ec._Database_organizationID(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "name": + out.Values[i] = ec._Database_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "geo": + out.Values[i] = ec._Database_geo(ctx, field, obj) + case "dsn": + out.Values[i] = ec._Database_dsn(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "groupID": + out.Values[i] = ec._Database_groupID(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "status": + out.Values[i] = ec._Database_status(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "provider": + out.Values[i] = ec._Database_provider(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "group": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Database_group(ctx, field, obj) + if res == graphql.Null { + 
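// The _Node function above resolves the Relay Node interface with a plain
// type switch over the concrete ent types (Database, Group), panicking on
// anything unexpected. A self-contained sketch of the same dispatch shape,
// using a local Noder interface and invented types rather than the generated
// ones:
package main

import "fmt"

type Noder interface{ IsNode() }

type Database struct{ ID string }
type Group struct{ ID string }

func (Database) IsNode() {}
func (Group) IsNode()    {}

func typename(obj Noder) string {
	switch obj := obj.(type) {
	case nil:
		return "null"
	case *Database:
		return "Database:" + obj.ID
	case *Group:
		return "Group:" + obj.ID
	default:
		panic(fmt.Errorf("unexpected type %T", obj))
	}
}

func main() {
	fmt.Println(typename(&Group{ID: "grp_1"})) // Group:grp_1
}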
atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var databaseConnectionImplementors = []string{"DatabaseConnection"} + +func (ec *executionContext) _DatabaseConnection(ctx context.Context, sel ast.SelectionSet, obj *generated.DatabaseConnection) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, databaseConnectionImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("DatabaseConnection") + case "edges": + out.Values[i] = ec._DatabaseConnection_edges(ctx, field, obj) + case "pageInfo": + out.Values[i] = ec._DatabaseConnection_pageInfo(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "totalCount": + out.Values[i] = ec._DatabaseConnection_totalCount(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var databaseCreatePayloadImplementors = []string{"DatabaseCreatePayload"} + +func (ec *executionContext) _DatabaseCreatePayload(ctx context.Context, sel ast.SelectionSet, obj *DatabaseCreatePayload) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, databaseCreatePayloadImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("DatabaseCreatePayload") + case "database": + out.Values[i] = ec._DatabaseCreatePayload_database(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var databaseDeletePayloadImplementors = []string{"DatabaseDeletePayload"} + +func (ec *executionContext) _DatabaseDeletePayload(ctx context.Context, 
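// The *Connection marshalers above (edges, pageInfo, totalCount) serialize
// the Relay pagination envelope that entgql generates. A sketch of the wire
// shape a DatabaseConnection query produces, expressed as plain Go structs
// for illustration (field names match the cases above; the node payload and
// cursor value are invented):
package main

import (
	"encoding/json"
	"os"
)

type PageInfo struct {
	HasNextPage     bool    `json:"hasNextPage"`
	HasPreviousPage bool    `json:"hasPreviousPage"`
	StartCursor     *string `json:"startCursor"`
	EndCursor       *string `json:"endCursor"`
}

type Edge[T any] struct {
	Node   T      `json:"node"`
	Cursor string `json:"cursor"` // opaque cursor, base64-encoded in entgql
}

type Connection[T any] struct {
	Edges      []Edge[T] `json:"edges"`
	PageInfo   PageInfo  `json:"pageInfo"`
	TotalCount int       `json:"totalCount"`
}

type DatabaseNode struct {
	ID string `json:"id"`
}

func main() {
	c := Connection[DatabaseNode]{
		Edges:      []Edge[DatabaseNode]{{Node: DatabaseNode{ID: "db_1"}, Cursor: "opaque"}},
		TotalCount: 1,
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(c)
}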
sel ast.SelectionSet, obj *DatabaseDeletePayload) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, databaseDeletePayloadImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("DatabaseDeletePayload") + case "deletedID": + out.Values[i] = ec._DatabaseDeletePayload_deletedID(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var databaseEdgeImplementors = []string{"DatabaseEdge"} + +func (ec *executionContext) _DatabaseEdge(ctx context.Context, sel ast.SelectionSet, obj *generated.DatabaseEdge) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, databaseEdgeImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("DatabaseEdge") + case "node": + out.Values[i] = ec._DatabaseEdge_node(ctx, field, obj) + case "cursor": + out.Values[i] = ec._DatabaseEdge_cursor(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var databaseUpdatePayloadImplementors = []string{"DatabaseUpdatePayload"} + +func (ec *executionContext) _DatabaseUpdatePayload(ctx context.Context, sel ast.SelectionSet, obj *DatabaseUpdatePayload) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, databaseUpdatePayloadImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("DatabaseUpdatePayload") + case "database": + out.Values[i] = ec._DatabaseUpdatePayload_database(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var groupImplementors = []string{"Group", "Node"} + +func (ec *executionContext) _Group(ctx context.Context, sel ast.SelectionSet, obj *generated.Group) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, groupImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + 
out.Values[i] = graphql.MarshalString("Group") + case "id": + out.Values[i] = ec._Group_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "createdAt": + out.Values[i] = ec._Group_createdAt(ctx, field, obj) + case "updatedAt": + out.Values[i] = ec._Group_updatedAt(ctx, field, obj) + case "createdBy": + out.Values[i] = ec._Group_createdBy(ctx, field, obj) + case "updatedBy": + out.Values[i] = ec._Group_updatedBy(ctx, field, obj) + case "deletedAt": + out.Values[i] = ec._Group_deletedAt(ctx, field, obj) + case "deletedBy": + out.Values[i] = ec._Group_deletedBy(ctx, field, obj) + case "name": + out.Values[i] = ec._Group_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "description": + out.Values[i] = ec._Group_description(ctx, field, obj) + case "primaryLocation": + out.Values[i] = ec._Group_primaryLocation(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "locations": + out.Values[i] = ec._Group_locations(ctx, field, obj) + case "region": + out.Values[i] = ec._Group_region(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "databases": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Group_databases(ctx, field, obj) + return res + } + + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var groupConnectionImplementors = []string{"GroupConnection"} + +func (ec *executionContext) _GroupConnection(ctx context.Context, sel ast.SelectionSet, obj *generated.GroupConnection) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, groupConnectionImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("GroupConnection") + case "edges": + out.Values[i] = ec._GroupConnection_edges(ctx, field, obj) + case "pageInfo": + out.Values[i] = ec._GroupConnection_pageInfo(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "totalCount": + out.Values[i] = ec._GroupConnection_totalCount(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, 
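// Wherever a schema-required field (id, name, region, ...) marshals to
// graphql.Null, the generated code bumps out.Invalids - atomically, because
// edge resolvers may run concurrently - and the whole object collapses to
// null, implementing GraphQL non-null error propagation. A minimal sketch of
// that counter pattern:
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var invalids uint32
	var wg sync.WaitGroup
	for _, fieldOK := range []bool{true, false, true} {
		wg.Add(1)
		go func(ok bool) {
			defer wg.Done()
			if !ok { // a required field resolved to null
				atomic.AddUint32(&invalids, 1)
			}
		}(fieldOK)
	}
	wg.Wait()
	fmt.Println(invalids > 0) // true: the enclosing object becomes null
}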
int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var groupCreatePayloadImplementors = []string{"GroupCreatePayload"} + +func (ec *executionContext) _GroupCreatePayload(ctx context.Context, sel ast.SelectionSet, obj *GroupCreatePayload) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, groupCreatePayloadImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("GroupCreatePayload") + case "group": + out.Values[i] = ec._GroupCreatePayload_group(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var groupDeletePayloadImplementors = []string{"GroupDeletePayload"} + +func (ec *executionContext) _GroupDeletePayload(ctx context.Context, sel ast.SelectionSet, obj *GroupDeletePayload) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, groupDeletePayloadImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("GroupDeletePayload") + case "deletedID": + out.Values[i] = ec._GroupDeletePayload_deletedID(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var groupEdgeImplementors = []string{"GroupEdge"} + +func (ec *executionContext) _GroupEdge(ctx context.Context, sel ast.SelectionSet, obj *generated.GroupEdge) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, groupEdgeImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("GroupEdge") + case "node": + out.Values[i] = ec._GroupEdge_node(ctx, field, obj) + case "cursor": + out.Values[i] = ec._GroupEdge_cursor(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var groupUpdatePayloadImplementors = []string{"GroupUpdatePayload"} + +func (ec *executionContext) _GroupUpdatePayload(ctx 
context.Context, sel ast.SelectionSet, obj *GroupUpdatePayload) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, groupUpdatePayloadImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("GroupUpdatePayload") + case "group": + out.Values[i] = ec._GroupUpdatePayload_group(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var mutationImplementors = []string{"Mutation"} + +func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, mutationImplementors) + ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{ + Object: "Mutation", + }) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{ + Object: field.Name, + Field: field, + }) + + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Mutation") + case "createDatabase": + out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { + return ec._Mutation_createDatabase(ctx, field) + }) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "updateDatabase": + out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { + return ec._Mutation_updateDatabase(ctx, field) + }) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "deleteDatabase": + out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { + return ec._Mutation_deleteDatabase(ctx, field) + }) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "createGroup": + out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { + return ec._Mutation_createGroup(ctx, field) + }) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "updateGroup": + out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { + return ec._Mutation_updateGroup(ctx, field) + }) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "deleteGroup": + out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { + return ec._Mutation_deleteGroup(ctx, field) + }) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var 
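// Unlike the Query fields further below, the Mutation cases above run
// sequentially (no out.Concurrently), matching the GraphQL requirement that
// root mutation fields execute serially; each resolver is also wrapped in
// OperationContext.RootResolverMiddleware. A sketch of that wrapping shape
// with a hypothetical logging middleware (names invented, not gqlgen API):
package main

import (
	"context"
	"fmt"
)

type resolver func(context.Context) string

// rootMiddleware brackets each root field resolver, the hook point gqlgen
// exposes for tracing, auth checks, and the like.
func rootMiddleware(name string, next resolver) resolver {
	return func(ctx context.Context) string {
		fmt.Println("enter", name)
		defer fmt.Println("leave", name)
		return next(ctx)
	}
}

func main() {
	ctx := context.Background()
	// Serial, in selection order: mutation side effects must not race.
	for _, name := range []string{"createGroup", "createDatabase"} {
		run := rootMiddleware(name, func(context.Context) string { return "ok" })
		fmt.Println(run(ctx))
	}
}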
pageInfoImplementors = []string{"PageInfo"} + +func (ec *executionContext) _PageInfo(ctx context.Context, sel ast.SelectionSet, obj *entgql.PageInfo[string]) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, pageInfoImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("PageInfo") + case "hasNextPage": + out.Values[i] = ec._PageInfo_hasNextPage(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "hasPreviousPage": + out.Values[i] = ec._PageInfo_hasPreviousPage(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "startCursor": + out.Values[i] = ec._PageInfo_startCursor(ctx, field, obj) + case "endCursor": + out.Values[i] = ec._PageInfo_endCursor(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var queryImplementors = []string{"Query"} + +func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, queryImplementors) + ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{ + Object: "Query", + }) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{ + Object: field.Name, + Field: field, + }) + + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Query") + case "node": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_node(ctx, field) + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "nodes": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_nodes(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "databases": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_databases(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + 
return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "groups": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_groups(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "database": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_database(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "group": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_group(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "__type": + out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { + return ec._Query___type(ctx, field) + }) + case "__schema": + out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { + return ec._Query___schema(ctx, field) + }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var __DirectiveImplementors = []string{"__Directive"} + +func (ec *executionContext) ___Directive(ctx context.Context, sel ast.SelectionSet, obj *introspection.Directive) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, __DirectiveImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("__Directive") + case "name": + out.Values[i] = ec.___Directive_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "description": + out.Values[i] 
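// Each Query root field above rebinds `field`, wraps its resolver in a
// deferred recover() so one panicking resolver becomes a field error instead
// of killing the whole request, and hands the closure to out.Concurrently.
// A compact sketch of that panic-isolation pattern:
package main

import (
	"fmt"
	"sync"
)

// safeResolve converts a panic in fn into an error string for that one
// field, mirroring the defer/recover blocks in the generated resolvers.
func safeResolve(name string, fn func() string) (res string) {
	defer func() {
		if r := recover(); r != nil {
			res = fmt.Sprintf("%s: error: %v", name, r)
		}
	}()
	return fn()
}

func main() {
	var wg sync.WaitGroup
	out := make([]string, 2)
	jobs := []func() string{
		func() string { return "databases: ok" },
		func() string { panic("boom") },
	}
	for i, fn := range jobs {
		wg.Add(1)
		go func(i int, fn func() string) {
			defer wg.Done()
			out[i] = safeResolve(fmt.Sprintf("field%d", i), fn)
		}(i, fn)
	}
	wg.Wait()
	fmt.Println(out) // the panicking field errors; the other still resolves
}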
= ec.___Directive_description(ctx, field, obj) + case "locations": + out.Values[i] = ec.___Directive_locations(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "args": + out.Values[i] = ec.___Directive_args(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "isRepeatable": + out.Values[i] = ec.___Directive_isRepeatable(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var __EnumValueImplementors = []string{"__EnumValue"} + +func (ec *executionContext) ___EnumValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.EnumValue) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, __EnumValueImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("__EnumValue") + case "name": + out.Values[i] = ec.___EnumValue_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "description": + out.Values[i] = ec.___EnumValue_description(ctx, field, obj) + case "isDeprecated": + out.Values[i] = ec.___EnumValue_isDeprecated(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "deprecationReason": + out.Values[i] = ec.___EnumValue_deprecationReason(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var __FieldImplementors = []string{"__Field"} + +func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, __FieldImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("__Field") + case "name": + out.Values[i] = ec.___Field_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "description": + out.Values[i] = ec.___Field_description(ctx, field, obj) + case "args": + out.Values[i] = ec.___Field_args(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "type": + out.Values[i] = ec.___Field_type(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "isDeprecated": + out.Values[i] = ec.___Field_isDeprecated(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "deprecationReason": + out.Values[i] = ec.___Field_deprecationReason(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, 
int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var __InputValueImplementors = []string{"__InputValue"} + +func (ec *executionContext) ___InputValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.InputValue) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, __InputValueImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("__InputValue") + case "name": + out.Values[i] = ec.___InputValue_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "description": + out.Values[i] = ec.___InputValue_description(ctx, field, obj) + case "type": + out.Values[i] = ec.___InputValue_type(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "defaultValue": + out.Values[i] = ec.___InputValue_defaultValue(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var __SchemaImplementors = []string{"__Schema"} + +func (ec *executionContext) ___Schema(ctx context.Context, sel ast.SelectionSet, obj *introspection.Schema) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, __SchemaImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("__Schema") + case "description": + out.Values[i] = ec.___Schema_description(ctx, field, obj) + case "types": + out.Values[i] = ec.___Schema_types(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "queryType": + out.Values[i] = ec.___Schema_queryType(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "mutationType": + out.Values[i] = ec.___Schema_mutationType(ctx, field, obj) + case "subscriptionType": + out.Values[i] = ec.___Schema_subscriptionType(ctx, field, obj) + case "directives": + out.Values[i] = ec.___Schema_directives(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var __TypeImplementors = []string{"__Type"} + +func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, obj *introspection.Type) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, __TypeImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("__Type") + case 
"kind": + out.Values[i] = ec.___Type_kind(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "name": + out.Values[i] = ec.___Type_name(ctx, field, obj) + case "description": + out.Values[i] = ec.___Type_description(ctx, field, obj) + case "fields": + out.Values[i] = ec.___Type_fields(ctx, field, obj) + case "interfaces": + out.Values[i] = ec.___Type_interfaces(ctx, field, obj) + case "possibleTypes": + out.Values[i] = ec.___Type_possibleTypes(ctx, field, obj) + case "enumValues": + out.Values[i] = ec.___Type_enumValues(ctx, field, obj) + case "inputFields": + out.Values[i] = ec.___Type_inputFields(ctx, field, obj) + case "ofType": + out.Values[i] = ec.___Type_ofType(ctx, field, obj) + case "specifiedByURL": + out.Values[i] = ec.___Type_specifiedByURL(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +// endregion **************************** object.gotpl **************************** + +// region ***************************** type.gotpl ***************************** + +func (ec *executionContext) unmarshalNBoolean2bool(ctx context.Context, v interface{}) (bool, error) { + res, err := graphql.UnmarshalBoolean(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler { + res := graphql.MarshalBoolean(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + } + return res +} + +func (ec *executionContext) unmarshalNCreateDatabaseInput2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐCreateDatabaseInput(ctx context.Context, v interface{}) (generated.CreateDatabaseInput, error) { + res, err := ec.unmarshalInputCreateDatabaseInput(ctx, v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) unmarshalNCreateGroupInput2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐCreateGroupInput(ctx context.Context, v interface{}) (generated.CreateGroupInput, error) { + res, err := ec.unmarshalInputCreateGroupInput(ctx, v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) unmarshalNCursor2entgoᚗioᚋcontribᚋentgqlᚐCursor(ctx context.Context, v interface{}) (entgql.Cursor[string], error) { + var res entgql.Cursor[string] + err := res.UnmarshalGQL(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNCursor2entgoᚗioᚋcontribᚋentgqlᚐCursor(ctx context.Context, sel ast.SelectionSet, v entgql.Cursor[string]) graphql.Marshaler { + return v +} + +func (ec *executionContext) marshalNDatabase2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabase(ctx context.Context, sel ast.SelectionSet, v generated.Database) graphql.Marshaler { + return ec._Database(ctx, sel, &v) +} + +func (ec *executionContext) marshalNDatabase2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabase(ctx context.Context, sel ast.SelectionSet, v *generated.Database) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the 
schema does not allow") + } + return graphql.Null + } + return ec._Database(ctx, sel, v) +} + +func (ec *executionContext) marshalNDatabaseConnection2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseConnection(ctx context.Context, sel ast.SelectionSet, v generated.DatabaseConnection) graphql.Marshaler { + return ec._DatabaseConnection(ctx, sel, &v) +} + +func (ec *executionContext) marshalNDatabaseConnection2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseConnection(ctx context.Context, sel ast.SelectionSet, v *generated.DatabaseConnection) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._DatabaseConnection(ctx, sel, v) +} + +func (ec *executionContext) marshalNDatabaseCreatePayload2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐDatabaseCreatePayload(ctx context.Context, sel ast.SelectionSet, v DatabaseCreatePayload) graphql.Marshaler { + return ec._DatabaseCreatePayload(ctx, sel, &v) +} + +func (ec *executionContext) marshalNDatabaseCreatePayload2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐDatabaseCreatePayload(ctx context.Context, sel ast.SelectionSet, v *DatabaseCreatePayload) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._DatabaseCreatePayload(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNDatabaseDatabaseProvider2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProvider(ctx context.Context, v interface{}) (enums.DatabaseProvider, error) { + var res enums.DatabaseProvider + err := res.UnmarshalGQL(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNDatabaseDatabaseProvider2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProvider(ctx context.Context, sel ast.SelectionSet, v enums.DatabaseProvider) graphql.Marshaler { + return v +} + +func (ec *executionContext) unmarshalNDatabaseDatabaseStatus2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatus(ctx context.Context, v interface{}) (enums.DatabaseStatus, error) { + var res enums.DatabaseStatus + err := res.UnmarshalGQL(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNDatabaseDatabaseStatus2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatus(ctx context.Context, sel ast.SelectionSet, v enums.DatabaseStatus) graphql.Marshaler { + return v +} + +func (ec *executionContext) marshalNDatabaseDeletePayload2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐDatabaseDeletePayload(ctx context.Context, sel ast.SelectionSet, v DatabaseDeletePayload) graphql.Marshaler { + return ec._DatabaseDeletePayload(ctx, sel, &v) +} + +func (ec *executionContext) marshalNDatabaseDeletePayload2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐDatabaseDeletePayload(ctx context.Context, sel ast.SelectionSet, v *DatabaseDeletePayload) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._DatabaseDeletePayload(ctx, sel, v) +} + +func (ec *executionContext) marshalNDatabaseUpdatePayload2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐDatabaseUpdatePayload(ctx context.Context, sel ast.SelectionSet, v DatabaseUpdatePayload) 
graphql.Marshaler { + return ec._DatabaseUpdatePayload(ctx, sel, &v) +} + +func (ec *executionContext) marshalNDatabaseUpdatePayload2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐDatabaseUpdatePayload(ctx context.Context, sel ast.SelectionSet, v *DatabaseUpdatePayload) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._DatabaseUpdatePayload(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNDatabaseWhereInput2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseWhereInput(ctx context.Context, v interface{}) (*generated.DatabaseWhereInput, error) { + res, err := ec.unmarshalInputDatabaseWhereInput(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNGroup2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroup(ctx context.Context, sel ast.SelectionSet, v generated.Group) graphql.Marshaler { + return ec._Group(ctx, sel, &v) +} + +func (ec *executionContext) marshalNGroup2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroup(ctx context.Context, sel ast.SelectionSet, v *generated.Group) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._Group(ctx, sel, v) +} + +func (ec *executionContext) marshalNGroupConnection2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupConnection(ctx context.Context, sel ast.SelectionSet, v generated.GroupConnection) graphql.Marshaler { + return ec._GroupConnection(ctx, sel, &v) +} + +func (ec *executionContext) marshalNGroupConnection2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupConnection(ctx context.Context, sel ast.SelectionSet, v *generated.GroupConnection) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._GroupConnection(ctx, sel, v) +} + +func (ec *executionContext) marshalNGroupCreatePayload2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐGroupCreatePayload(ctx context.Context, sel ast.SelectionSet, v GroupCreatePayload) graphql.Marshaler { + return ec._GroupCreatePayload(ctx, sel, &v) +} + +func (ec *executionContext) marshalNGroupCreatePayload2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐGroupCreatePayload(ctx context.Context, sel ast.SelectionSet, v *GroupCreatePayload) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._GroupCreatePayload(ctx, sel, v) +} + +func (ec *executionContext) marshalNGroupDeletePayload2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐGroupDeletePayload(ctx context.Context, sel ast.SelectionSet, v GroupDeletePayload) graphql.Marshaler { + return ec._GroupDeletePayload(ctx, sel, &v) +} + +func (ec *executionContext) marshalNGroupDeletePayload2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐGroupDeletePayload(ctx context.Context, sel ast.SelectionSet, v *GroupDeletePayload) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does 
not allow") + } + return graphql.Null + } + return ec._GroupDeletePayload(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNGroupRegion2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegion(ctx context.Context, v interface{}) (enums.Region, error) { + var res enums.Region + err := res.UnmarshalGQL(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNGroupRegion2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegion(ctx context.Context, sel ast.SelectionSet, v enums.Region) graphql.Marshaler { + return v +} + +func (ec *executionContext) marshalNGroupUpdatePayload2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐGroupUpdatePayload(ctx context.Context, sel ast.SelectionSet, v GroupUpdatePayload) graphql.Marshaler { + return ec._GroupUpdatePayload(ctx, sel, &v) +} + +func (ec *executionContext) marshalNGroupUpdatePayload2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋgraphapiᚐGroupUpdatePayload(ctx context.Context, sel ast.SelectionSet, v *GroupUpdatePayload) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._GroupUpdatePayload(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNGroupWhereInput2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupWhereInput(ctx context.Context, v interface{}) (*generated.GroupWhereInput, error) { + res, err := ec.unmarshalInputGroupWhereInput(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) unmarshalNID2string(ctx context.Context, v interface{}) (string, error) { + res, err := graphql.UnmarshalString(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNID2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { + res := graphql.MarshalString(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + } + return res +} + +func (ec *executionContext) unmarshalNID2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) { + var vSlice []interface{} + if v != nil { + vSlice = graphql.CoerceList(v) + } + var err error + res := make([]string, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNID2string(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalNID2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNID2string(ctx, sel, v[i]) + } + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) unmarshalNInt2int(ctx context.Context, v interface{}) (int, error) { + res, err := graphql.UnmarshalInt(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNInt2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler { + res := graphql.MarshalInt(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + } + return res +} + +func (ec *executionContext) marshalNNode2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐNoder(ctx 
context.Context, sel ast.SelectionSet, v []generated.Noder) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalONode2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐNoder(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + return ret +} + +func (ec *executionContext) marshalNPageInfo2entgoᚗioᚋcontribᚋentgqlᚐPageInfo(ctx context.Context, sel ast.SelectionSet, v entgql.PageInfo[string]) graphql.Marshaler { + return ec._PageInfo(ctx, sel, &v) +} + +func (ec *executionContext) unmarshalNString2string(ctx context.Context, v interface{}) (string, error) { + res, err := graphql.UnmarshalString(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { + res := graphql.MarshalString(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + } + return res +} + +func (ec *executionContext) unmarshalNTime2timeᚐTime(ctx context.Context, v interface{}) (time.Time, error) { + res, err := graphql.UnmarshalTime(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNTime2timeᚐTime(ctx context.Context, sel ast.SelectionSet, v time.Time) graphql.Marshaler { + res := graphql.MarshalTime(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + } + return res +} + +func (ec *executionContext) unmarshalNUpdateDatabaseInput2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐUpdateDatabaseInput(ctx context.Context, v interface{}) (generated.UpdateDatabaseInput, error) { + res, err := ec.unmarshalInputUpdateDatabaseInput(ctx, v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) unmarshalNUpdateGroupInput2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐUpdateGroupInput(ctx context.Context, v interface{}) (generated.UpdateGroupInput, error) { + res, err := ec.unmarshalInputUpdateGroupInput(ctx, v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx context.Context, sel ast.SelectionSet, v introspection.Directive) graphql.Marshaler { + return ec.___Directive(ctx, sel, &v) +} + +func (ec *executionContext) marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Directive) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = 
ec.marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) unmarshalN__DirectiveLocation2string(ctx context.Context, v interface{}) (string, error) { + res, err := graphql.UnmarshalString(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalN__DirectiveLocation2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { + res := graphql.MarshalString(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + } + return res +} + +func (ec *executionContext) unmarshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) { + var vSlice []interface{} + if v != nil { + vSlice = graphql.CoerceList(v) + } + var err error + res := make([]string, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalN__DirectiveLocation2string(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__DirectiveLocation2string(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx context.Context, sel ast.SelectionSet, v introspection.EnumValue) graphql.Marshaler { + return ec.___EnumValue(ctx, sel, &v) +} + +func (ec *executionContext) marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx context.Context, sel ast.SelectionSet, v introspection.Field) graphql.Marshaler { + return ec.___Field(ctx, sel, &v) +} + +func (ec *executionContext) marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx context.Context, sel ast.SelectionSet, v introspection.InputValue) graphql.Marshaler { + return ec.___InputValue(ctx, sel, &v) +} + +func (ec *executionContext) marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = 
ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v introspection.Type) graphql.Marshaler { + return ec.___Type(ctx, sel, &v) +} + +func (ec *executionContext) marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec.___Type(ctx, sel, v) +} + +func (ec *executionContext) unmarshalN__TypeKind2string(ctx context.Context, v interface{}) (string, error) { + res, err := graphql.UnmarshalString(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { + res := graphql.MarshalString(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + } + return res +} + +func (ec *executionContext) unmarshalOBoolean2bool(ctx context.Context, v interface{}) (bool, error) { + res, err := graphql.UnmarshalBoolean(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalOBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler { + res := graphql.MarshalBoolean(v) + return res +} + +func (ec *executionContext) unmarshalOBoolean2ᚖbool(ctx context.Context, v interface{}) (*bool, error) { + if v == nil { + return nil, nil + } + res, err := graphql.UnmarshalBoolean(v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalOBoolean2ᚖbool(ctx context.Context, sel ast.SelectionSet, v *bool) graphql.Marshaler { + if v == nil { + return graphql.Null + } + res := graphql.MarshalBoolean(*v) + return res +} + +func (ec *executionContext) unmarshalOCursor2ᚖentgoᚗioᚋcontribᚋentgqlᚐCursor(ctx context.Context, v interface{}) (*entgql.Cursor[string], error) { + if v == nil { + return nil, nil + } + var res = new(entgql.Cursor[string]) + err := res.UnmarshalGQL(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) 
marshalOCursor2ᚖentgoᚗioᚋcontribᚋentgqlᚐCursor(ctx context.Context, sel ast.SelectionSet, v *entgql.Cursor[string]) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return v +} + +func (ec *executionContext) marshalODatabase2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseᚄ(ctx context.Context, sel ast.SelectionSet, v []*generated.Database) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNDatabase2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabase(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalODatabase2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabase(ctx context.Context, sel ast.SelectionSet, v *generated.Database) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._Database(ctx, sel, v) +} + +func (ec *executionContext) unmarshalODatabaseDatabaseProvider2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProviderᚄ(ctx context.Context, v interface{}) ([]enums.DatabaseProvider, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + vSlice = graphql.CoerceList(v) + } + var err error + res := make([]enums.DatabaseProvider, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNDatabaseDatabaseProvider2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProvider(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalODatabaseDatabaseProvider2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProviderᚄ(ctx context.Context, sel ast.SelectionSet, v []enums.DatabaseProvider) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNDatabaseDatabaseProvider2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProvider(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) unmarshalODatabaseDatabaseProvider2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProvider(ctx context.Context, v interface{}) (*enums.DatabaseProvider, error) { + if v == nil { + return nil, nil + } + var res = new(enums.DatabaseProvider) + err := res.UnmarshalGQL(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalODatabaseDatabaseProvider2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseProvider(ctx context.Context, 
sel ast.SelectionSet, v *enums.DatabaseProvider) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return v +} + +func (ec *executionContext) unmarshalODatabaseDatabaseStatus2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatusᚄ(ctx context.Context, v interface{}) ([]enums.DatabaseStatus, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + vSlice = graphql.CoerceList(v) + } + var err error + res := make([]enums.DatabaseStatus, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNDatabaseDatabaseStatus2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatus(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalODatabaseDatabaseStatus2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatusᚄ(ctx context.Context, sel ast.SelectionSet, v []enums.DatabaseStatus) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNDatabaseDatabaseStatus2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatus(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) unmarshalODatabaseDatabaseStatus2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatus(ctx context.Context, v interface{}) (*enums.DatabaseStatus, error) { + if v == nil { + return nil, nil + } + var res = new(enums.DatabaseStatus) + err := res.UnmarshalGQL(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalODatabaseDatabaseStatus2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐDatabaseStatus(ctx context.Context, sel ast.SelectionSet, v *enums.DatabaseStatus) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return v +} + +func (ec *executionContext) marshalODatabaseEdge2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseEdge(ctx context.Context, sel ast.SelectionSet, v []*generated.DatabaseEdge) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalODatabaseEdge2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseEdge(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + return ret +} + +func (ec *executionContext) marshalODatabaseEdge2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseEdge(ctx context.Context, sel ast.SelectionSet, v *generated.DatabaseEdge) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._DatabaseEdge(ctx, sel, v) +} + +func (ec 
*executionContext) unmarshalODatabaseWhereInput2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseWhereInputᚄ(ctx context.Context, v interface{}) ([]*generated.DatabaseWhereInput, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + vSlice = graphql.CoerceList(v) + } + var err error + res := make([]*generated.DatabaseWhereInput, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNDatabaseWhereInput2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseWhereInput(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) unmarshalODatabaseWhereInput2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐDatabaseWhereInput(ctx context.Context, v interface{}) (*generated.DatabaseWhereInput, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalInputDatabaseWhereInput(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalOGroup2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroup(ctx context.Context, sel ast.SelectionSet, v *generated.Group) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._Group(ctx, sel, v) +} + +func (ec *executionContext) marshalOGroupEdge2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupEdge(ctx context.Context, sel ast.SelectionSet, v []*generated.GroupEdge) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalOGroupEdge2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupEdge(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + return ret +} + +func (ec *executionContext) marshalOGroupEdge2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupEdge(ctx context.Context, sel ast.SelectionSet, v *generated.GroupEdge) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._GroupEdge(ctx, sel, v) +} + +func (ec *executionContext) unmarshalOGroupRegion2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegionᚄ(ctx context.Context, v interface{}) ([]enums.Region, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + vSlice = graphql.CoerceList(v) + } + var err error + res := make([]enums.Region, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNGroupRegion2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegion(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOGroupRegion2ᚕgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegionᚄ(ctx context.Context, sel ast.SelectionSet, v []enums.Region) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + 
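+		// Each element below is marshaled in its own goroutine unless the slice has
+		// exactly one element; the deferred recover turns a panic while marshaling
+		// into a GraphQL error and nils out the whole array, and wg.Wait joins the
+		// workers before the result is returned.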
f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNGroupRegion2githubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegion(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) unmarshalOGroupRegion2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegion(ctx context.Context, v interface{}) (*enums.Region, error) { + if v == nil { + return nil, nil + } + var res = new(enums.Region) + err := res.UnmarshalGQL(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalOGroupRegion2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋpkgᚋenumsᚐRegion(ctx context.Context, sel ast.SelectionSet, v *enums.Region) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return v +} + +func (ec *executionContext) unmarshalOGroupWhereInput2ᚕᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupWhereInputᚄ(ctx context.Context, v interface{}) ([]*generated.GroupWhereInput, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + vSlice = graphql.CoerceList(v) + } + var err error + res := make([]*generated.GroupWhereInput, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNGroupWhereInput2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupWhereInput(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) unmarshalOGroupWhereInput2ᚖgithubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐGroupWhereInput(ctx context.Context, v interface{}) (*generated.GroupWhereInput, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalInputGroupWhereInput(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) unmarshalOID2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + vSlice = graphql.CoerceList(v) + } + var err error + res := make([]string, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNID2string(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOID2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNID2string(ctx, sel, v[i]) + } + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) unmarshalOID2ᚖstring(ctx context.Context, v interface{}) (*string, error) { + if v == nil { + return nil, nil + } + res, err := graphql.UnmarshalString(v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalOID2ᚖstring(ctx context.Context, sel ast.SelectionSet, v *string) graphql.Marshaler { + if v == nil { + return graphql.Null + } + res := graphql.MarshalString(*v) + return res +} + +func (ec *executionContext) unmarshalOInt2ᚖint(ctx context.Context, v interface{}) (*int, error) { + if v == nil { + return nil, nil + } + res, err := graphql.UnmarshalInt(v) + return &res, graphql.ErrorOnPath(ctx, err) +} + 
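+// Note on naming: these generated coercion helpers follow gqlgen's conventions.
+// marshalN*/unmarshalN* back schema non-null types and raise a field error when
+// the value is nil, while marshalO*/unmarshalO* back nullable types and map nil
+// to graphql.Null. The runic characters encode the Go type in the function name
+// (ᚗ for '.', ᚋ for '/', ᚖ for a pointer, ᚕ for a slice, and a trailing ᚄ for
+// non-null list elements).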
+func (ec *executionContext) marshalOInt2ᚖint(ctx context.Context, sel ast.SelectionSet, v *int) graphql.Marshaler { + if v == nil { + return graphql.Null + } + res := graphql.MarshalInt(*v) + return res +} + +func (ec *executionContext) marshalONode2githubᚗcomᚋdatumforgeᚋgeodeticᚋinternalᚋentᚋgeneratedᚐNoder(ctx context.Context, sel ast.SelectionSet, v generated.Noder) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._Node(ctx, sel, v) +} + +func (ec *executionContext) unmarshalOString2string(ctx context.Context, v interface{}) (string, error) { + res, err := graphql.UnmarshalString(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalOString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { + res := graphql.MarshalString(v) + return res +} + +func (ec *executionContext) unmarshalOString2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + vSlice = graphql.CoerceList(v) + } + var err error + res := make([]string, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNString2string(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOString2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNString2string(ctx, sel, v[i]) + } + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) unmarshalOString2ᚖstring(ctx context.Context, v interface{}) (*string, error) { + if v == nil { + return nil, nil + } + res, err := graphql.UnmarshalString(v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalOString2ᚖstring(ctx context.Context, sel ast.SelectionSet, v *string) graphql.Marshaler { + if v == nil { + return graphql.Null + } + res := graphql.MarshalString(*v) + return res +} + +func (ec *executionContext) unmarshalOTime2timeᚐTime(ctx context.Context, v interface{}) (time.Time, error) { + res, err := graphql.UnmarshalTime(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalOTime2timeᚐTime(ctx context.Context, sel ast.SelectionSet, v time.Time) graphql.Marshaler { + res := graphql.MarshalTime(v) + return res +} + +func (ec *executionContext) unmarshalOTime2ᚕtimeᚐTimeᚄ(ctx context.Context, v interface{}) ([]time.Time, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + vSlice = graphql.CoerceList(v) + } + var err error + res := make([]time.Time, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNTime2timeᚐTime(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOTime2ᚕtimeᚐTimeᚄ(ctx context.Context, sel ast.SelectionSet, v []time.Time) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNTime2timeᚐTime(ctx, sel, v[i]) + } + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) unmarshalOTime2ᚖtimeᚐTime(ctx context.Context, v 
interface{}) (*time.Time, error) { + if v == nil { + return nil, nil + } + res, err := graphql.UnmarshalTime(v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalOTime2ᚖtimeᚐTime(ctx context.Context, sel ast.SelectionSet, v *time.Time) graphql.Marshaler { + if v == nil { + return graphql.Null + } + res := graphql.MarshalTime(*v) + return res +} + +func (ec *executionContext) marshalO__EnumValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.EnumValue) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalO__Field2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐFieldᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Field) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalO__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx context.Context, sel ast.SelectionSet, v *introspection.Schema) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec.___Schema(ctx, sel, v) +} + +func (ec *executionContext) 
marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec.___Type(ctx, sel, v) +} + +// endregion ***************************** type.gotpl ***************************** diff --git a/internal/graphapi/group.resolvers.go b/internal/graphapi/group.resolvers.go new file mode 100644 index 0000000..d81fce4 --- /dev/null +++ b/internal/graphapi/group.resolvers.go @@ -0,0 +1,97 @@ +package graphapi + +// This file will be automatically regenerated based on the schema, any resolver implementations +// will be copied through when generating and any unknown code will be moved to the end. +// Code generated by github.com/99designs/gqlgen + +import ( + "context" + + "github.com/datumforge/datum/pkg/rout" + "github.com/datumforge/geodetic/internal/ent/generated" + "github.com/datumforge/geodetic/internal/ent/generated/group" +) + +// CreateGroup is the resolver for the createGroup field. +func (r *mutationResolver) CreateGroup(ctx context.Context, input generated.CreateGroupInput) (*GroupCreatePayload, error) { + group, err := withTransactionalMutation(ctx).Group.Create().SetInput(input).Save(ctx) + if err != nil { + if generated.IsConstraintError(err) { + constraintError := err.(*generated.ConstraintError) + + r.logger.Debugw("constraint error", "error", constraintError.Error()) + + return nil, constraintError + } + + if generated.IsValidationError(err) { + ve := err.(*generated.ValidationError) + + return nil, rout.InvalidField(ve.Name) + } + + r.logger.Errorw("failed to create group", "error", err) + + return nil, err + } + + return &GroupCreatePayload{Group: group}, err +} + +// UpdateGroup is the resolver for the updateGroup field. +func (r *mutationResolver) UpdateGroup(ctx context.Context, name string, input generated.UpdateGroupInput) (*GroupUpdatePayload, error) { + group, err := withTransactionalMutation(ctx).Group. + Query(). + Where(group.NameEQ(name)). + Only(ctx) + if err != nil { + r.logger.Errorw("failed to get group", "error", err) + + return nil, err + } + + g, err := group.Update(). + SetInput(input). + Save(ctx) + if err != nil { + r.logger.Errorw("failed to update group", "error", err) + + return nil, err + } + + return &GroupUpdatePayload{Group: g}, nil +} + +// DeleteGroup is the resolver for the deleteGroup field. 
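+// It looks the group up by name, runs the generated GroupEdgeCleanup to remove
+// dependent edges (returning a cascade-delete error on failure), and only then
+// deletes the group itself.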
+func (r *mutationResolver) DeleteGroup(ctx context.Context, name string) (*GroupDeletePayload, error) { + group, err := withTransactionalMutation(ctx).Group.Query().Where(group.NameEQ(name)).Only(ctx) + if err != nil { + r.logger.Errorw("failed to get group", "error", err) + + return nil, err + } + + if err := generated.GroupEdgeCleanup(ctx, group.ID); err != nil { + return nil, newCascadeDeleteError(err) + } + + if err := withTransactionalMutation(ctx).Group.DeleteOneID(group.ID).Exec(ctx); err != nil { + r.logger.Errorw("failed to delete group", "error", err) + + return nil, err + } + + return &GroupDeletePayload{DeletedID: group.ID}, nil +} + +// Group is the resolver for the group field. +func (r *queryResolver) Group(ctx context.Context, name string) (*generated.Group, error) { + group, err := withTransactionalMutation(ctx).Group.Query().Where(group.NameEQ(name)).Only(ctx) + if err != nil { + r.logger.Errorw("failed to get group", "error", err) + + return nil, err + } + + return group, err +} diff --git a/internal/graphapi/group_test.go b/internal/graphapi/group_test.go new file mode 100644 index 0000000..bcab95c --- /dev/null +++ b/internal/graphapi/group_test.go @@ -0,0 +1,202 @@ +package graphapi_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ent "github.com/datumforge/geodetic/internal/ent/generated" + "github.com/datumforge/geodetic/pkg/geodeticclient" +) + +func (suite *GraphTestSuite) TestQueryGroup() { + t := suite.T() + + group := (&GroupBuilder{client: suite.client}).MustNew(context.Background(), t) + + testCases := []struct { + name string + query string + expected *ent.Group + errorMsg string + }{ + { + name: "happy path group", + query: group.Name, + expected: group, + }, + { + name: "group not found", + query: "notfound", + expected: nil, + errorMsg: "group not found", + }, + } + + for _, tc := range testCases { + t.Run("Get "+tc.name, func(t *testing.T) { + resp, err := suite.client.geodetic.GetGroup(context.Background(), tc.query) + + if tc.errorMsg != "" { + require.Error(t, err) + assert.ErrorContains(t, err, tc.errorMsg) + assert.Nil(t, resp) + + return + } + + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Group) + }) + } + + (&GroupCleanup{client: suite.client, GroupID: group.ID}).MustDelete(context.Background(), t) +} + +func (suite *GraphTestSuite) TestListGroups() { + t := suite.T() + + group1 := (&GroupBuilder{client: suite.client}).MustNew(context.Background(), t) + group2 := (&GroupBuilder{client: suite.client}).MustNew(context.Background(), t) + + t.Run("List Groups", func(t *testing.T) { + resp, err := suite.client.geodetic.GetAllGroups(context.Background()) + + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Groups) + + assert.Len(t, resp.Groups.Edges, 2) + + group1Found := false + group2Found := false + + for _, g := range resp.Groups.Edges { + if g.Node.Name == group1.Name { + group1Found = true + } else if g.Node.Name == group2.Name { + group2Found = true + } + } + + assert.True(t, group1Found) + assert.True(t, group2Found) + }) + + (&GroupCleanup{client: suite.client, GroupID: group1.ID}).MustDelete(context.Background(), t) + (&GroupCleanup{client: suite.client, GroupID: group2.ID}).MustDelete(context.Background(), t) +} + +func (suite *GraphTestSuite) TestCreateGroup() { + t := suite.T() + + groupIDs := []string{} + + testCases := []struct { + name string + groupName string + loc string + errorMsg string + }{ + { + 
name: "happy path group", + groupName: "indiana-jones", + loc: "den", + }, + { + name: "group already exists", + groupName: "indiana-jones", + loc: "den", + errorMsg: "constraint failed", + }, + { + name: "empty group name", + groupName: "", + loc: "den", + errorMsg: "invalid or unparsable field: name", + }, + { + name: "empty location", + groupName: "lost-ark", + loc: "", + errorMsg: "invalid or unparsable field: primary_location", + }, + } + + for _, tc := range testCases { + t.Run("Create "+tc.name, func(t *testing.T) { + g := geodeticclient.CreateGroupInput{ + Name: tc.groupName, + PrimaryLocation: tc.loc, + } + + resp, err := suite.client.geodetic.CreateGroup(context.Background(), g) + + if tc.errorMsg != "" { + require.Error(t, err) + assert.ErrorContains(t, err, tc.errorMsg) + assert.Nil(t, resp) + + return + } + + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.CreateGroup) + + assert.Equal(t, tc.groupName, resp.CreateGroup.Group.Name) + + groupIDs = append(groupIDs, resp.CreateGroup.Group.ID) + }) + } + + // Cleanup groups + for _, id := range groupIDs { + (&GroupCleanup{client: suite.client, GroupID: id}).MustDelete(context.Background(), t) + } +} + +func (suite *GraphTestSuite) TestDeleteGroup() { + t := suite.T() + + group := (&GroupBuilder{client: suite.client}).MustNew(context.Background(), t) + + testCases := []struct { + name string + groupName string + errorMsg string + }{ + { + name: "happy path group", + groupName: group.Name, + }, + { + name: "group does not exist", + groupName: "raiders", + errorMsg: "group not found", + }, + } + + for _, tc := range testCases { + t.Run("Delete "+tc.name, func(t *testing.T) { + resp, err := suite.client.geodetic.DeleteGroup(context.Background(), tc.groupName) + + if tc.errorMsg != "" { + require.Error(t, err) + assert.ErrorContains(t, err, tc.errorMsg) + assert.Nil(t, resp) + + return + } + + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.DeleteGroup) + + assert.NotEmpty(t, resp.DeleteGroup.DeletedID) + }) + } +} diff --git a/internal/graphapi/helpers.go b/internal/graphapi/helpers.go new file mode 100644 index 0000000..22cc55c --- /dev/null +++ b/internal/graphapi/helpers.go @@ -0,0 +1,23 @@ +package graphapi + +import ( + "context" + + "github.com/99designs/gqlgen/graphql" + + ent "github.com/datumforge/geodetic/internal/ent/generated" +) + +// withTransactionalMutation automatically wrap the GraphQL mutations with a database transaction. +// This allows the ent.Client to commit at the end, or rollback the transaction in case of a GraphQL error. 
+func withTransactionalMutation(ctx context.Context) *ent.Client { //nolint:unused
+	return ent.FromContext(ctx)
+}
+
+// injectClient adds the db client to the context to be used with transactional mutations
+func injectClient(client *ent.Client) graphql.OperationMiddleware {
+	return func(ctx context.Context, next graphql.OperationHandler) graphql.ResponseHandler {
+		ctx = ent.NewContext(ctx, client)
+		return next(ctx)
+	}
+}
diff --git a/internal/graphapi/models_test.go b/internal/graphapi/models_test.go
new file mode 100644
index 0000000..efd6edc
--- /dev/null
+++ b/internal/graphapi/models_test.go
@@ -0,0 +1,109 @@
+package graphapi_test
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/brianvoe/gofakeit/v7"
+
+	"github.com/datumforge/datum/pkg/utils/ulids"
+
+	ent "github.com/datumforge/geodetic/internal/ent/generated"
+	"github.com/datumforge/geodetic/pkg/enums"
+)
+
+type GroupBuilder struct {
+	client *client
+
+	// Fields
+	Name     string
+	Location string
+	Region   enums.Region
+}
+
+type GroupCleanup struct {
+	client *client
+
+	// Fields
+	GroupID string
+}
+
+type DatabaseBuilder struct {
+	client *client
+
+	// Fields
+	Name    string
+	OrgID   string
+	DSN     string
+	GroupID string
+}
+
+type DatabaseCleanup struct {
+	client *client
+
+	// Fields
+	DatabaseID string
+}
+
+// MustNew on the group builder is used to create groups in the database
+func (g *GroupBuilder) MustNew(ctx context.Context, t *testing.T) *ent.Group {
+	if g.Name == "" {
+		g.Name = gofakeit.AppName()
+	}
+
+	if g.Location == "" {
+		g.Location = "den"
+	}
+
+	if g.Region == "" {
+		g.Region = enums.Amer
+	}
+
+	group := g.client.db.Group.Create().
+		SetName(g.Name).
+		SetPrimaryLocation(g.Location).
+		SetRegion(g.Region).
+		SaveX(ctx)
+
+	return group
+}
+
+// MustDelete is used to clean up groups in the database
+func (g *GroupCleanup) MustDelete(ctx context.Context, t *testing.T) {
+	g.client.db.Group.DeleteOneID(g.GroupID).ExecX(ctx)
+}
+
+// MustNew on the database builder is used to create databases in the database
+func (d *DatabaseBuilder) MustNew(ctx context.Context, t *testing.T) *ent.Database {
+	if d.Name == "" {
+		d.Name = gofakeit.AppName()
+	}
+
+	if d.OrgID == "" {
+		d.OrgID = ulids.New().String()
+	}
+
+	if d.DSN == "" {
+		d.DSN = fmt.Sprintf("https://%s.turso.com", gofakeit.AppName())
+	}
+
+	if d.GroupID == "" {
+		group := (&GroupBuilder{client: d.client}).MustNew(ctx, t)
+		d.GroupID = group.ID
+	}
+
+	db := d.client.db.Database.Create().
+		SetName(d.Name).
+		SetOrganizationID(d.OrgID).
+		SetDsn(d.DSN).
+		SetGroupID(d.GroupID).
+		SaveX(ctx)
+
+	return db
+}
+
+// MustDelete is used to clean up databases in the database
+func (d *DatabaseCleanup) MustDelete(ctx context.Context, t *testing.T) {
+	d.client.db.Database.DeleteOneID(d.DatabaseID).ExecX(ctx)
+}
diff --git a/internal/graphapi/resolver.go b/internal/graphapi/resolver.go
new file mode 100644
index 0000000..ad73ce6
--- /dev/null
+++ b/internal/graphapi/resolver.go
@@ -0,0 +1,163 @@
+package graphapi
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	"entgo.io/contrib/entgql"
+	"github.com/99designs/gqlgen/graphql/handler"
+	"github.com/99designs/gqlgen/graphql/handler/extension"
+	"github.com/99designs/gqlgen/graphql/handler/lru"
+	"github.com/99designs/gqlgen/graphql/handler/transport"
+	echo "github.com/datumforge/echox"
+	"github.com/gorilla/websocket"
+	"github.com/ravilushqa/otelgqlgen"
+	"github.com/wundergraph/graphql-go-tools/pkg/playground"
+	"go.uber.org/zap"
+
+	ent "github.com/datumforge/geodetic/internal/ent/generated"
+)
+
+// This file will not be regenerated automatically.
+//
+// It serves as dependency injection for your app; add any dependencies you require here.
+
+const (
+	ActionGet    = "get"
+	ActionUpdate = "update"
+	ActionDelete = "delete"
+	ActionCreate = "create"
+)
+
+var (
+	graphPath      = "query"
+	playgroundPath = "playground"
+
+	graphFullPath = fmt.Sprintf("/%s", graphPath)
+)
+
+// Resolver provides a graph response resolver
+type Resolver struct {
+	client *ent.Client
+	logger *zap.SugaredLogger
+}
+
+// NewResolver returns a resolver configured with the given ent client
+func NewResolver(client *ent.Client) *Resolver {
+	return &Resolver{
+		client: client,
+	}
+}
+
+func (r Resolver) WithLogger(l *zap.SugaredLogger) *Resolver {
+	r.logger = l
+
+	return &r
+}
+
+// Handler is an http handler wrapping a Resolver
+type Handler struct {
+	r               *Resolver
+	graphqlHandler  *handler.Server
+	playground      *playground.Playground
+	middleware      []echo.MiddlewareFunc
+}
+
+// Handler returns an http handler for a graph resolver
+func (r *Resolver) Handler(withPlayground bool) *Handler {
+	srv := handler.NewDefaultServer(
+		NewExecutableSchema(
+			Config{
+				Resolvers: r,
+			},
+		),
+	)
+
+	srv.AddTransport(transport.Websocket{
+		KeepAlivePingInterval: 10 * time.Second, // nolint: gomnd
+		Upgrader: websocket.Upgrader{
+			CheckOrigin: func(r *http.Request) bool {
+				return true
+			},
+		},
+	})
+	srv.AddTransport(transport.Options{})
+	srv.AddTransport(transport.GET{})
+	srv.AddTransport(transport.POST{})
+	srv.AddTransport(transport.MultipartForm{})
+
+	srv.SetQueryCache(lru.New(1000)) // nolint:gomnd
+
+	srv.Use(extension.Introspection{})
+	srv.Use(extension.AutomaticPersistedQuery{
+		Cache: lru.New(100), // nolint:gomnd
+	})
+	// add transactional db client
+	WithTransactions(srv, r.client)
+
+	srv.Use(otelgqlgen.Middleware())
+
+	h := &Handler{
+		r:              r,
+		graphqlHandler: srv,
+	}
+
+	if withPlayground {
+		h.playground = playground.New(playground.Config{
+			PathPrefix:          "/",
+			PlaygroundPath:      playgroundPath,
+			GraphqlEndpointPath: graphFullPath,
+		})
+	}
+
+	return h
+}
+
+// WithTransactions adds the transactioner to the ent db client
+func WithTransactions(h *handler.Server, c *ent.Client) {
+	// setup transactional db client
+	h.AroundOperations(injectClient(c))
+	h.Use(entgql.Transactioner{TxOpener: c})
+}
+
+// Handler returns the http.HandlerFunc for the GraphAPI
+func (h *Handler) Handler() http.HandlerFunc {
+	return h.graphqlHandler.ServeHTTP
+}
+
+// Routes for the server
+func (h *Handler) Routes(e *echo.Group) {
+	e.Use(h.middleware...)
+
+	// Create the default POST graph endpoint
+	e.POST("/"+graphPath, func(c echo.Context) error {
+		h.graphqlHandler.ServeHTTP(c.Response(), c.Request())
+		return nil
+	})
+
+	// Create a GET query endpoint in order to create short queries with a query string
+	e.GET("/"+graphPath, func(c echo.Context) error {
+		h.graphqlHandler.ServeHTTP(c.Response(), c.Request())
+		return nil
+	})
+
+	if h.playground != nil {
+		handlers, err := h.playground.Handlers()
+		if err != nil {
+			h.r.logger.Fatalw("error configuring playground handlers", "error", err)
+			return
+		}
+
+		for i := range handlers {
+			// copy the handler for this iteration so the closure below captures a
+			// stable value rather than the shared loop variable
+			hCopy := handlers[i].Handler
+
+			e.GET(handlers[i].Path, func(c echo.Context) error {
+				hCopy.ServeHTTP(c.Response(), c.Request())
+				return nil
+			})
+		}
+	}
+}
diff --git a/internal/graphapi/tools_test.go b/internal/graphapi/tools_test.go
new file mode 100644
index 0000000..41d1c7d
--- /dev/null
+++ b/internal/graphapi/tools_test.go
@@ -0,0 +1,137 @@
+package graphapi_test
+
+import (
+	"context"
+	"log"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/99designs/gqlgen/graphql/handler"
+	"github.com/Yamashou/gqlgenc/clientv2"
+	"github.com/datumforge/datum/pkg/testutils"
+	"github.com/datumforge/go-turso"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zaptest"
+
+	ent "github.com/datumforge/geodetic/internal/ent/generated"
+	"github.com/datumforge/geodetic/internal/entdb"
+	"github.com/datumforge/geodetic/internal/graphapi"
+	"github.com/datumforge/geodetic/pkg/geodeticclient"
+)
+
+// TestGraphTestSuite runs all the tests in the GraphTestSuite
+func TestGraphTestSuite(t *testing.T) {
+	suite.Run(t, new(GraphTestSuite))
+}
+
+// GraphTestSuite handles the setup and teardown between tests
+type GraphTestSuite struct {
+	suite.Suite
+	client *client
+	tc     *testutils.TC
+}
+
+// client contains all the clients the tests need to interact with
+type client struct {
+	db       *ent.Client
+	geodetic geodeticclient.GeodeticClient
+}
+
+type graphClient struct {
+	srvURL     string
+	httpClient *http.Client
+}
+
+func (suite *GraphTestSuite) SetupSuite() {
+	ctx := context.Background()
+
+	suite.tc = entdb.NewTestContainer(ctx)
+}
+
+func (suite *GraphTestSuite) SetupTest() {
+	t := suite.T()
+
+	ctx := context.Background()
+
+	// setup logger
+	logger := zap.NewNop().Sugar()
+
+	// setup mock turso client
+	tc := turso.NewMockClient()
+
+	opts := []ent.Option{
+		ent.Logger(*logger),
+		ent.Turso(tc),
+	}
+
+	// create database connection
+	db, err := entdb.NewTestClient(ctx, suite.tc, opts)
+	if err != nil {
+		require.NoError(t, err, "failed opening connection to database")
+	}
+
+	// assign values
+	c := &client{
+		db:       db,
+		geodetic: graphTestClient(t, db),
+	}
+
+	suite.client = c
+}

+func (suite *GraphTestSuite) TearDownTest() {
+	if err := suite.client.db.Close(); err != nil {
+		log.Fatalf("failed to close database: %s", err)
+	}
+}
+
+func (suite *GraphTestSuite) TearDownSuite() {
+	if suite.tc.Container != nil {
+		if err := suite.tc.Container.Terminate(context.Background()); err != nil {
+			log.Fatalf("failed to terminate container: %s", err)
+		}
+	}
+}
+
+func graphTestClient(t *testing.T, c *ent.Client) geodeticclient.GeodeticClient {
+	logger := zaptest.NewLogger(t, zaptest.Level(zap.ErrorLevel)).Sugar()
+
+	srv := handler.NewDefaultServer(
+		graphapi.NewExecutableSchema(
+			graphapi.Config{Resolvers: 
graphapi.NewResolver(c).WithLogger(logger)}, + )) + + graphapi.WithTransactions(srv, c) + + g := &graphClient{ + srvURL: "query", + httpClient: &http.Client{Transport: localRoundTripper{handler: srv}}, + } + + // set options + opt := &clientv2.Options{ + ParseDataAlongWithErrors: false, + } + + // setup interceptors + i := geodeticclient.WithEmptyInterceptor() + + return geodeticclient.NewClient(g.httpClient, g.srvURL, opt, i) +} + +// localRoundTripper is an http.RoundTripper that executes HTTP transactions +// by using handler directly, instead of going over an HTTP connection. +type localRoundTripper struct { + handler http.Handler +} + +func (l localRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + w := httptest.NewRecorder() + l.handler.ServeHTTP(w, req) + + return w.Result(), nil +} diff --git a/internal/httpserve/config/config.go b/internal/httpserve/config/config.go new file mode 100644 index 0000000..0c49b24 --- /dev/null +++ b/internal/httpserve/config/config.go @@ -0,0 +1,102 @@ +package config + +import ( + "crypto/tls" + "net/http" + "time" + + echo "github.com/datumforge/echox" + "go.uber.org/zap" + "golang.org/x/crypto/acme" + "golang.org/x/crypto/acme/autocert" + + "github.com/datumforge/datum/pkg/sessions" + + "github.com/datumforge/geodetic/config" + "github.com/datumforge/geodetic/internal/httpserve/handlers" +) + +var ( + // DefaultConfigRefresh sets the default interval to refresh the config. + DefaultConfigRefresh = 10 * time.Minute + // DefaultTLSConfig is the default TLS config used when HTTPS is enabled + DefaultTLSConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + }, + } +) + +type ( + // Config contains the configuration for the datum server + Config struct { + // add all the configuration settings for the datum server + Settings config.Config + + // Logger contains the logger used by echo functions + Logger *zap.SugaredLogger + // Routes contains the handler functions + Routes []http.Handler + // DefaultMiddleware to enable on the echo server used on all requests + DefaultMiddleware []echo.MiddlewareFunc + // GraphMiddleware to enable on the echo server used on graph requests + GraphMiddleware []echo.MiddlewareFunc + // Handler contains the required settings for REST handlers including ready checks and JWT keys + Handler handlers.Handler + // SessionConfig manages sessions for users + SessionConfig *sessions.SessionConfig + } +) + +// Ensure that *Config implements ConfigProvider interface. +var _ ConfigProvider = &Config{} + +// GetConfig implements ConfigProvider. +func (c *Config) GetConfig() (*Config, error) { + return c, nil +} + +// WithTLSDefaults sets tls default settings assuming a default cert and key file location. 
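+// A usage sketch (the cfg variable is illustrative):
+//
+//	cfg = cfg.WithTLSDefaults() // enables TLS 1.2+ with DefaultTLSConfig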
+func (c Config) WithTLSDefaults() Config {
+	return c.WithDefaultTLSConfig()
+}
+
+// WithDefaultTLSConfig sets the default TLS Configuration
+func (c Config) WithDefaultTLSConfig() Config {
+	c.Settings.Server.TLS.Enabled = true
+	c.Settings.Server.TLS.Config = DefaultTLSConfig
+
+	return c
+}
+
+// WithTLSCerts sets the TLS Cert and Key locations
+func (c *Config) WithTLSCerts(certFile, certKey string) *Config {
+	c.Settings.Server.TLS.CertFile = certFile
+	c.Settings.Server.TLS.CertKey = certKey
+
+	return c
+}
+
+// WithAutoCert generates a letsencrypt certificate; a valid host must be provided
+func (c *Config) WithAutoCert(host string) *Config {
+	autoTLSManager := autocert.Manager{
+		Prompt: autocert.AcceptTOS,
+		// Cache certificates to avoid issues with rate limits (https://letsencrypt.org/docs/rate-limits)
+		Cache:      autocert.DirCache("/var/www/.cache"),
+		HostPolicy: autocert.HostWhitelist(host),
+	}
+
+	c.Settings.Server.TLS.Enabled = true
+	c.Settings.Server.TLS.Config = DefaultTLSConfig
+
+	c.Settings.Server.TLS.Config.GetCertificate = autoTLSManager.GetCertificate
+	c.Settings.Server.TLS.Config.NextProtos = []string{acme.ALPNProto}
+
+	return c
+}
diff --git a/internal/httpserve/config/configprovider.go b/internal/httpserve/config/configprovider.go
new file mode 100644
index 0000000..3d63f69
--- /dev/null
+++ b/internal/httpserve/config/configprovider.go
@@ -0,0 +1,7 @@
+package config
+
+// ConfigProvider serves as a common interface to read echo server configuration
+type ConfigProvider interface {
+	// GetConfig returns the server configuration
+	GetConfig() (*Config, error)
+}
diff --git a/internal/httpserve/config/configproviderrefresh.go b/internal/httpserve/config/configproviderrefresh.go
new file mode 100644
index 0000000..32a65b2
--- /dev/null
+++ b/internal/httpserve/config/configproviderrefresh.go
@@ -0,0 +1,92 @@
+package config
+
+import (
+	"sync"
+	"time"
+)
+
+// ConfigProviderWithRefresh is a config provider that holds the current configuration
+// and refreshes it from the underlying provider periodically at a specified interval
+type ConfigProviderWithRefresh struct {
+	sync.RWMutex
+
+	config *Config
+
+	configProvider ConfigProvider
+
+	refreshInterval time.Duration
+
+	ticker *time.Ticker
+	stop   chan bool
+}
+
+// NewConfigProviderWithRefresh creates a new instance of ConfigProviderWithRefresh from the given provider
+func NewConfigProviderWithRefresh(cfgProvider ConfigProvider) (*ConfigProviderWithRefresh, error) {
+	cfg, err := cfgProvider.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	cfgRefresh := &ConfigProviderWithRefresh{
+		config:          cfg,
+		configProvider:  cfgProvider,
+		refreshInterval: cfg.Settings.RefreshInterval,
+	}
+	cfgRefresh.initialize()
+
+	return cfgRefresh, nil
+}
+
+// GetConfig retrieves the current echo server configuration; it acquires a read lock to ensure thread safety and returns the `config` field
+func (s *ConfigProviderWithRefresh) GetConfig() (*Config, error) {
+	s.RLock()
+	defer s.RUnlock()
+
+	return s.config, nil
+}
+
+// initialize the config provider with refresh
+func (s *ConfigProviderWithRefresh) initialize() {
+	if s.refreshInterval != 0 {
+		s.stop = make(chan bool)
+		s.ticker = time.NewTicker(s.refreshInterval)
+
+		go s.refreshConfig()
+	}
+}
+
+func (s *ConfigProviderWithRefresh) refreshConfig() {
+	for {
+		select {
+		case <-s.stop:
+			// Close was called; exit the refresh goroutine entirely (a bare break
+			// would only leave the select and keep the loop spinning)
+			return
+		case <-s.ticker.C:
+		}
+
+		newConfig, err := s.configProvider.GetConfig()
+		if err != nil {
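+			// keep serving the previous configuration when a refresh fails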
+			s.config.Logger.Error("failed to load new server configuration")
+			continue
+		}
+
+		s.config.Logger.Info("loaded new server configuration")
+
+		s.Lock()
+		s.config = newConfig
+		s.Unlock()
+	}
+}
+
+// Close stops the automatic refresh of the configuration.
+// It stops the ticker that triggers the refresh and closes the stop channel,
+// which signals the goroutine to stop refreshing the configuration
+func (s *ConfigProviderWithRefresh) Close() {
+	if s.ticker != nil {
+		s.ticker.Stop()
+	}
+
+	if s.stop != nil {
+		s.stop <- true
+		close(s.stop)
+	}
+}
diff --git a/internal/httpserve/config/doc.go b/internal/httpserve/config/doc.go
new file mode 100644
index 0000000..7c130bc
--- /dev/null
+++ b/internal/httpserve/config/doc.go
@@ -0,0 +1,2 @@
+// Package config holds the echo server configuration utilities
+package config
diff --git a/internal/httpserve/handlers/doc.go b/internal/httpserve/handlers/doc.go
new file mode 100644
index 0000000..de283b7
--- /dev/null
+++ b/internal/httpserve/handlers/doc.go
@@ -0,0 +1,2 @@
+// Package handlers contains custom handler functions
+package handlers
diff --git a/internal/httpserve/handlers/handlers.go b/internal/httpserve/handlers/handlers.go
new file mode 100644
index 0000000..1d03105
--- /dev/null
+++ b/internal/httpserve/handlers/handlers.go
@@ -0,0 +1,29 @@
+package handlers
+
+import (
+	echo "github.com/datumforge/echox"
+	"github.com/redis/go-redis/v9"
+	"go.uber.org/zap"
+
+	"github.com/datumforge/datum/pkg/sessions"
+
+	ent "github.com/datumforge/geodetic/internal/ent/generated"
+)
+
+// Handler contains configuration options for handlers
+type Handler struct {
+	// IsTest is a flag to determine if the application is running in test mode and will mock external calls
+	IsTest bool
+	// DBClient to interact with the generated ent schema
+	DBClient *ent.Client
+	// RedisClient to interact with redis
+	RedisClient *redis.Client
+	// Logger provides the zap logger to do logging things from the handlers
+	Logger *zap.SugaredLogger
+	// ReadyChecks is a set of checkFuncs to determine if the application is "ready" upon startup
+	ReadyChecks Checks
+	// SessionConfig to handle sessions
+	SessionConfig *sessions.SessionConfig
+	// AuthMiddleware contains the middleware to be used for authenticated endpoints
+	AuthMiddleware []echo.MiddlewareFunc
+}
diff --git a/internal/httpserve/handlers/readiness.go b/internal/httpserve/handlers/readiness.go
new file mode 100644
index 0000000..4ce49af
--- /dev/null
+++ b/internal/httpserve/handlers/readiness.go
@@ -0,0 +1,57 @@
+package handlers
+
+import (
+	"context"
+	"net/http"
+
+	echo "github.com/datumforge/echox"
+)
+
+// StatusReply returns server status
+type StatusReply struct {
+	Status map[string]string `json:"status"`
+}
+
+// CheckFunc is a function that can be used to check the status of a service
+type CheckFunc func(ctx context.Context) error
+
+type Checks struct {
+	checks map[string]CheckFunc
+}
+
+// AddReadinessCheck will accept a function to be run during calls to the readiness
+// endpoint. These functions should accept a context and only return an error. When
+// adding a readiness check, a name is also provided; this name is used when
+// returning the state of all the checks.
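+//
+// For example (a sketch; the check name and function are illustrative):
+//
+//	h.AddReadinessCheck("db", func(ctx context.Context) error {
+//		return dbClient.PingContext(ctx)
+//	})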
+// When adding a readiness check, a name is also provided; this name will be used
+// when returning the state of all the checks
+func (h *Handler) AddReadinessCheck(name string, f CheckFunc) {
+	// if the checks map is nil, create it before trying to add
+	if h.ReadyChecks.checks == nil {
+		h.ReadyChecks.checks = map[string]CheckFunc{}
+	}
+
+	h.ReadyChecks.checks[name] = f
+}
+
+// ReadyHandler runs all registered readiness checks and reports the status of each by name
+func (c *Checks) ReadyHandler(ctx echo.Context) error {
+	failed := false
+	status := map[string]string{}
+
+	for name, check := range c.checks {
+		if err := check(ctx.Request().Context()); err != nil {
+			failed = true
+			status[name] = err.Error()
+		} else {
+			status[name] = "OK"
+		}
+	}
+
+	if failed {
+		return ctx.JSON(http.StatusServiceUnavailable, status)
+	}
+
+	out := &StatusReply{
+		Status: status,
+	}
+
+	return ctx.JSON(http.StatusOK, out)
+}
diff --git a/internal/httpserve/route/base.go b/internal/httpserve/route/base.go
new file mode 100644
index 0000000..f5bf834
--- /dev/null
+++ b/internal/httpserve/route/base.go
@@ -0,0 +1,46 @@
+package route
+
+import (
+	"net/http"
+
+	echo "github.com/datumforge/echox"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+
+	"github.com/datumforge/geodetic/internal/httpserve/handlers"
+)
+
+func registerLivenessHandler(router *echo.Echo) (err error) {
+	_, err = router.AddRoute(echo.Route{
+		Method: http.MethodGet,
+		Path:   "/livez",
+		Handler: func(c echo.Context) error {
+			return c.JSON(http.StatusOK, echo.Map{
+				"status": "UP",
+			})
+		},
+	}.ForGroup(unversioned, mw))
+
+	return
+}
+
+func registerReadinessHandler(router *echo.Echo, h *handlers.Handler) (err error) {
+	_, err = router.AddRoute(echo.Route{
+		Method: http.MethodGet,
+		Path:   "/ready",
+		Handler: func(c echo.Context) error {
+			return h.ReadyChecks.ReadyHandler(c)
+		},
+	}.ForGroup(unversioned, mw))
+
+	return
+}
+
+func registerMetricsHandler(router *echo.Echo) (err error) {
+	_, err = router.AddRoute(echo.Route{
+		Method:  http.MethodGet,
+		Path:    "/metrics",
+		Handler: echo.WrapHandler(promhttp.Handler()),
+	}.ForGroup(unversioned, mw))
+
+	return
+}
diff --git a/internal/httpserve/route/doc.go b/internal/httpserve/route/doc.go
new file mode 100644
index 0000000..b008eab
--- /dev/null
+++ b/internal/httpserve/route/doc.go
@@ -0,0 +1,2 @@
+// Package route holds the routes and route groups
+package route
diff --git a/internal/httpserve/route/openapi.json b/internal/httpserve/route/openapi.json
new file mode 100644
index 0000000..93de96c
--- /dev/null
+++ b/internal/httpserve/route/openapi.json
@@ -0,0 +1,1038 @@
+{
+  "openapi": "3.1.0",
+  "info": {
+    "title": "Datum OpenAPI 3.1.0 Specifications",
+    "description": "Programmatic interfaces for interacting with Datum Services",
+    "termsOfService": "https://datum.net/tos",
+    "contact": {
+      "name": "Datum Support",
+      "url": "https://datum.net/support",
+      "email": "support@datum.net"
+    },
+    "license": {
+      "name": "Apache 2.0",
+      "url": "https://www.apache.org/licenses/LICENSE-2.0"
+    },
+    "version": "1.0.1"
+  },
+  "servers": [
+    {
+      "url": "https://api.datum.net/v1",
+      "description": "Datum Production API Endpoint"
+    },
+    {
+      "url": "http://localhost:17608/v1",
+      "description": "http localhost endpoint for testing purposes"
+    }
+  ],
+  "paths": {
+    "/databases": {
+      "get": {
+        "tags": [
+          "Database"
+        ],
+        "summary": "List Databases",
+        "description": "List Databases.",
+        "operationId": "listDatabase",
+        "parameters": [
+          {
+            "name": "page",
+            "in": "query",
+            "description": "what page to render",
+            "schema": {
+              "type": "integer",
+              "minimum": 1
+ } + }, + { + "name": "itemsPerPage", + "in": "query", + "description": "item count to render per page", + "schema": { + "type": "integer", + "maximum": 255, + "minimum": 1 + } + } + ], + "responses": { + "200": { + "description": "result Database list", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Database" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + }, + "post": { + "tags": [ + "Database" + ], + "summary": "Create a new Database", + "description": "Creates a new Database and persists it to storage.", + "operationId": "createDatabase", + "requestBody": { + "description": "Database to create", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "organization_id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "geo": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "group_id": { + "type": "string" + }, + "token": { + "type": "string" + }, + "status": { + "type": "string", + "enum": [ + "ACTIVE", + "CREATING", + "DELETING", + "DELETED" + ], + "default": "CREATING" + }, + "provider": { + "type": "string", + "enum": [ + "LOCAL", + "TURSO" + ], + "default": "LOCAL" + }, + "group": { + "type": "string" + } + }, + "required": [ + "organization_id", + "name", + "dsn", + "group_id", + "status", + "provider", + "group" + ] + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Database created", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Database" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + } + }, + "/databases/{id}": { + "get": { + "tags": [ + "Database" + ], + "summary": "Find a Database by ID", + "description": "Finds the Database with the requested ID and returns it.", + "operationId": "readDatabase", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Database", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Database with requested ID was found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Database" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + }, + "delete": { + "tags": [ + "Database" + ], + "summary": "Deletes a Database by ID", + "description": "Deletes the Database with the requested ID.", + "operationId": "deleteDatabase", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Database", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "Database with requested ID was deleted" + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + }, + "patch": { + "tags": [ + "Database" + ], + "summary": "Updates a Database", + "description": "Updates a Database and 
persists changes to storage.", + "operationId": "updateDatabase", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Database", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "description": "Database properties to update", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "organization_id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "geo": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "group_id": { + "type": "string" + }, + "token": { + "type": "string" + }, + "status": { + "type": "string", + "enum": [ + "ACTIVE", + "CREATING", + "DELETING", + "DELETED" + ], + "default": "CREATING" + }, + "provider": { + "type": "string", + "enum": [ + "LOCAL", + "TURSO" + ], + "default": "LOCAL" + }, + "group": { + "type": "string" + } + } + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Database updated", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Database" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + } + }, + "/databases/{id}/group": { + "get": { + "tags": [ + "Database" + ], + "summary": "Find the attached Group", + "description": "Find the attached Group of the Database with the given ID", + "operationId": "readDatabaseGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Database", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Group attached to Database with requested ID was found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + } + }, + "/groups": { + "get": { + "tags": [ + "Group" + ], + "summary": "List Groups", + "description": "List Groups.", + "operationId": "listGroup", + "parameters": [ + { + "name": "page", + "in": "query", + "description": "what page to render", + "schema": { + "type": "integer", + "minimum": 1 + } + }, + { + "name": "itemsPerPage", + "in": "query", + "description": "item count to render per page", + "schema": { + "type": "integer", + "maximum": 255, + "minimum": 1 + } + } + ], + "responses": { + "200": { + "description": "result Group list", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Group" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + }, + "post": { + "tags": [ + "Group" + ], + "summary": "Create a new Group", + "description": "Creates a new Group and persists it to storage.", + "operationId": "createGroup", + "requestBody": { + "description": "Group to create", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "primary_location": { + "type": "string" + }, + "locations": { + "type": 
"array", + "items": { + "type": "string" + } + }, + "token": { + "type": "string" + }, + "region": { + "type": "string", + "enum": [ + "AMER", + "EMEA", + "APAC" + ], + "default": "AMER" + }, + "databases": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "name", + "primary_location", + "region" + ] + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Group created", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + } + }, + "/groups/{id}": { + "get": { + "tags": [ + "Group" + ], + "summary": "Find a Group by ID", + "description": "Finds the Group with the requested ID and returns it.", + "operationId": "readGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Group", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Group with requested ID was found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + }, + "delete": { + "tags": [ + "Group" + ], + "summary": "Deletes a Group by ID", + "description": "Deletes the Group with the requested ID.", + "operationId": "deleteGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Group", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "Group with requested ID was deleted" + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + }, + "patch": { + "tags": [ + "Group" + ], + "summary": "Updates a Group", + "description": "Updates a Group and persists changes to storage.", + "operationId": "updateGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Group", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "description": "Group properties to update", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "primary_location": { + "type": "string" + }, + "locations": { + "type": "array", + "items": { + "type": "string" + } + }, + "token": { + "type": "string" + }, + "region": { + "type": "string", + "enum": [ + "AMER", + "EMEA", + "APAC" + ], + "default": "AMER" + }, + "databases": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Group updated", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + } + }, + "/groups/{id}/databases": { + "get": { + "tags": [ + "Group" + 
], + "summary": "List attached Databases", + "description": "List attached Databases.", + "operationId": "listGroupDatabases", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Group", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "page", + "in": "query", + "description": "what page to render", + "schema": { + "type": "integer" + } + }, + { + "name": "itemsPerPage", + "in": "query", + "description": "item count to render per page", + "schema": { + "type": "integer" + } + } + ], + "responses": { + "200": { + "description": "result Groups list", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Database" + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/400" + }, + "404": { + "$ref": "#/components/responses/404" + }, + "409": { + "$ref": "#/components/responses/409" + }, + "500": { + "$ref": "#/components/responses/500" + } + } + } + } + }, + "components": { + "schemas": { + "Database": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + }, + "created_by": { + "type": "string" + }, + "updated_by": { + "type": "string" + }, + "deleted_at": { + "type": "string", + "format": "date-time" + }, + "deleted_by": { + "type": "string" + }, + "organization_id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "geo": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "group_id": { + "type": "string" + }, + "token": { + "type": "string" + }, + "status": { + "type": "string", + "enum": [ + "ACTIVE", + "CREATING", + "DELETING", + "DELETED" + ], + "default": "CREATING" + }, + "provider": { + "type": "string", + "enum": [ + "LOCAL", + "TURSO" + ], + "default": "LOCAL" + }, + "group": { + "$ref": "#/components/schemas/Group" + } + }, + "required": [ + "id", + "organization_id", + "name", + "dsn", + "group_id", + "status", + "provider", + "group" + ] + }, + "Group": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + }, + "created_by": { + "type": "string" + }, + "updated_by": { + "type": "string" + }, + "deleted_at": { + "type": "string", + "format": "date-time" + }, + "deleted_by": { + "type": "string" + }, + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "primary_location": { + "type": "string" + }, + "locations": { + "type": "array", + "items": { + "type": "string" + } + }, + "token": { + "type": "string" + }, + "region": { + "type": "string", + "enum": [ + "AMER", + "EMEA", + "APAC" + ], + "default": "AMER" + }, + "databases": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Database" + } + } + }, + "required": [ + "id", + "name", + "primary_location", + "region" + ] + } + }, + "responses": { + "400": { + "description": "invalid input, data invalid", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer" + }, + "status": { + "type": "string" + }, + "errors": {} + }, + "required": [ + "code", + "status" + ] + } + } + } + }, + "403": { + "description": "insufficient permissions", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer" + }, + "status": { + 
"type": "string" + }, + "errors": {} + }, + "required": [ + "code", + "status" + ] + } + } + } + }, + "404": { + "description": "resource not found", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer" + }, + "status": { + "type": "string" + }, + "errors": {} + }, + "required": [ + "code", + "status" + ] + } + } + } + }, + "409": { + "description": "conflicting resources", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer" + }, + "status": { + "type": "string" + }, + "errors": {} + }, + "required": [ + "code", + "status" + ] + } + } + } + }, + "500": { + "description": "unexpected error", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer" + }, + "status": { + "type": "string" + }, + "errors": {} + }, + "required": [ + "code", + "status" + ] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/internal/httpserve/route/routes.go b/internal/httpserve/route/routes.go new file mode 100644 index 0000000..c1e843c --- /dev/null +++ b/internal/httpserve/route/routes.go @@ -0,0 +1,84 @@ +package route + +import ( + "time" + + echo "github.com/datumforge/echox" + "github.com/datumforge/echox/middleware" + + "github.com/datumforge/datum/pkg/middleware/ratelimit" + + "github.com/datumforge/geodetic/internal/httpserve/handlers" +) + +const ( + V1Version = "v1" + unversioned = "" +) + +var ( + mw = []echo.MiddlewareFunc{middleware.Recover()} + + restrictedRateLimit = &ratelimit.Config{ + RateLimit: 1, + BurstLimit: 1, + ExpiresIn: 15 * time.Minute, //nolint:gomnd + } + restrictedEndpointsMW = []echo.MiddlewareFunc{} +) + +type Route struct { + Method string + Path string + Handler echo.HandlerFunc + Middlewares []echo.MiddlewareFunc + + Name string +} + +// RegisterRoutes with the echo routers +func RegisterRoutes(router *echo.Echo, h *handlers.Handler) error { + // Middleware for restricted endpoints + restrictedEndpointsMW = append(restrictedEndpointsMW, mw...) 
+	restrictedEndpointsMW = append(restrictedEndpointsMW, ratelimit.RateLimiterWithConfig(restrictedRateLimit)) // add restricted ratelimit middleware
+
+	// routeHandlers that take the router and handler as input
+	routeHandlers := []func(*echo.Echo, *handlers.Handler) error{
+		// add handlers here
+		registerReadinessHandler,
+	}
+
+	for _, route := range routeHandlers {
+		if err := route(router, h); err != nil {
+			return err
+		}
+	}
+
+	// register additional handlers that only require router input
+	additionalHandlers := []func(*echo.Echo) error{
+		registerLivenessHandler,
+		registerMetricsHandler,
+	}
+
+	for _, route := range additionalHandlers {
+		if err := route(router); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// RegisterRoute registers a route with the echo server given a method, path, and handler definition
+func (r *Route) RegisterRoute(router *echo.Echo) (err error) {
+	_, err = router.AddRoute(echo.Route{
+		Method:      r.Method,
+		Path:        r.Path,
+		Handler:     r.Handler,
+		Middlewares: r.Middlewares,
+
+		Name: r.Name,
+	})
+
+	return
+}
diff --git a/internal/httpserve/server/doc.go b/internal/httpserve/server/doc.go
new file mode 100644
index 0000000..63321b5
--- /dev/null
+++ b/internal/httpserve/server/doc.go
@@ -0,0 +1,2 @@
+// Package server contains the server functions
+package server
diff --git a/internal/httpserve/server/errors.go b/internal/httpserve/server/errors.go
new file mode 100644
index 0000000..c65089f
--- /dev/null
+++ b/internal/httpserve/server/errors.go
@@ -0,0 +1,11 @@
+package server
+
+import "errors"
+
+var (
+	// ErrCertFileMissing is returned when https is enabled but no cert file is provided
+	ErrCertFileMissing = errors.New("no cert file found")
+
+	// ErrKeyFileMissing is returned when https is enabled but no key file is provided
+	ErrKeyFileMissing = errors.New("no key file found")
+)
diff --git a/internal/httpserve/server/server.go b/internal/httpserve/server/server.go
new file mode 100644
index 0000000..60dd1b4
--- /dev/null
+++ b/internal/httpserve/server/server.go
@@ -0,0 +1,90 @@
+package server
+
+import (
+	"context"
+
+	echo "github.com/datumforge/echox"
+	"go.uber.org/zap"
+
+	echodebug "github.com/datumforge/datum/pkg/middleware/debug"
+
+	"github.com/datumforge/geodetic/internal/httpserve/config"
+	"github.com/datumforge/geodetic/internal/httpserve/route"
+)
+
+// Server holds the echo server configuration, logger, and registered handlers
+type Server struct {
+	// config contains the base server settings
+	config config.Config
+	// logger contains the zap logger
+	logger *zap.SugaredLogger
+	// handlers contains additional handlers to register with the echo server
+	handlers []handler
+}
+
+// handler is the interface an additional HTTP handler must satisfy
+type handler interface {
+	Routes(*echo.Group)
+}
+
+// AddHandler provides the ability to add additional HTTP handlers that process
+// requests. The handler that is provided should have a Routes(*echo.Group)
+// function, which allows the routes to be added to the server.
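+// Handlers added here are registered under the graph middleware group when
+// StartEchoServer runs.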
+func (s *Server) AddHandler(r handler) { + s.handlers = append(s.handlers, r) +} + +// NewServer returns a new Server configuration +func NewServer(c config.Config, l *zap.SugaredLogger) *Server { + return &Server{ + config: c, + logger: l, + } +} + +// StartEchoServer creates and starts the echo server with configured middleware and handlers +func (s *Server) StartEchoServer(ctx context.Context) error { + srv := echo.New() + + sc := echo.StartConfig{ + HideBanner: true, + HidePort: true, + Address: s.config.Settings.Server.Listen, + GracefulTimeout: s.config.Settings.Server.ShutdownGracePeriod, + GracefulContext: ctx, + } + + srv.Debug = s.config.Settings.Server.Debug + + if srv.Debug { + srv.Use(echodebug.BodyDump(s.logger)) + } + + for _, m := range s.config.DefaultMiddleware { + srv.Use(m) + } + + // Add base routes to the server + if err := route.RegisterRoutes(srv, &s.config.Handler); err != nil { + return err + } + + // Registers additional routes for the graph endpoints with middleware defined + for _, handler := range s.handlers { + handler.Routes(srv.Group("", s.config.GraphMiddleware...)) + } + + // Print routes on startup + routes := srv.Router().Routes() + for _, r := range routes { + s.logger.Infow("registered route", "route", r.Path(), "method", r.Method()) + } + + // if TLS is enabled, start new echo server with TLS + if s.config.Settings.Server.TLS.Enabled { + s.logger.Infow("starting in https mode") + + return sc.StartTLS(srv, s.config.Settings.Server.TLS.CertFile, s.config.Settings.Server.TLS.CertKey) + } + + // otherwise, start without TLS + return sc.Start(srv) +} diff --git a/internal/httpserve/server/validate.go b/internal/httpserve/server/validate.go new file mode 100644 index 0000000..375fc88 --- /dev/null +++ b/internal/httpserve/server/validate.go @@ -0,0 +1,14 @@ +package server + +// GetCertFiles for https enabled echo server and ensure the values are set +func GetCertFiles(certFile, keyFile string) (string, string, error) { + if certFile == "" { + return "", "", ErrCertFileMissing + } + + if keyFile == "" { + return "", "", ErrKeyFileMissing + } + + return certFile, keyFile, nil +} diff --git a/internal/httpserve/serveropts/doc.go b/internal/httpserve/serveropts/doc.go new file mode 100644 index 0000000..116a06a --- /dev/null +++ b/internal/httpserve/serveropts/doc.go @@ -0,0 +1,2 @@ +// Package serveropts contains an echo server options wrapper +package serveropts diff --git a/internal/httpserve/serveropts/option.go b/internal/httpserve/serveropts/option.go new file mode 100644 index 0000000..1ff0d08 --- /dev/null +++ b/internal/httpserve/serveropts/option.go @@ -0,0 +1,224 @@ +package serveropts + +import ( + echoprometheus "github.com/datumforge/echo-prometheus/v5" + echo "github.com/datumforge/echox" + "github.com/datumforge/echox/middleware" + "github.com/datumforge/echozap" + "github.com/datumforge/entx" + "github.com/redis/go-redis/v9" + "go.uber.org/zap" + + "github.com/datumforge/geodetic/internal/ent/generated" + "github.com/datumforge/geodetic/internal/graphapi" + "github.com/datumforge/geodetic/internal/httpserve/config" + "github.com/datumforge/geodetic/internal/httpserve/server" + + "github.com/datumforge/datum/pkg/cache" + authmw "github.com/datumforge/datum/pkg/middleware/auth" + "github.com/datumforge/datum/pkg/middleware/cachecontrol" + "github.com/datumforge/datum/pkg/middleware/cors" + "github.com/datumforge/datum/pkg/middleware/echocontext" + "github.com/datumforge/datum/pkg/middleware/mime" + 
"github.com/datumforge/datum/pkg/middleware/ratelimit" + "github.com/datumforge/datum/pkg/middleware/redirect" + "github.com/datumforge/datum/pkg/middleware/secure" + "github.com/datumforge/datum/pkg/sessions" +) + +type ServerOption interface { + apply(*ServerOptions) +} + +type applyFunc struct { + applyInternal func(*ServerOptions) +} + +func (fso *applyFunc) apply(s *ServerOptions) { + fso.applyInternal(s) +} + +func newApplyFunc(apply func(option *ServerOptions)) *applyFunc { + return &applyFunc{ + applyInternal: apply, + } +} + +// WithConfigProvider supplies the config for the server +func WithConfigProvider(cfgProvider config.ConfigProvider) ServerOption { + return newApplyFunc(func(s *ServerOptions) { + s.ConfigProvider = cfgProvider + }) +} + +// WithLogger supplies the logger for the server +func WithLogger(l *zap.SugaredLogger) ServerOption { + return newApplyFunc(func(s *ServerOptions) { + // Add logger to main config + s.Config.Logger = l + // Add logger to the handlers config + s.Config.Handler.Logger = l + }) +} + +// WithHTTPS sets up TLS config settings for the server +func WithHTTPS() ServerOption { + return newApplyFunc(func(s *ServerOptions) { + if !s.Config.Settings.Server.TLS.Enabled { + // this is set to enabled by WithServer + // if TLS is not enabled, move on + return + } + + s.Config.WithTLSDefaults() + + if !s.Config.Settings.Server.TLS.AutoCert { + s.Config.WithTLSCerts(s.Config.Settings.Server.TLS.CertFile, s.Config.Settings.Server.TLS.CertKey) + } + }) +} + +// WithReadyChecks adds readiness checks to the server +func WithReadyChecks(c *entx.EntClientConfig, r *redis.Client) ServerOption { + return newApplyFunc(func(s *ServerOptions) { + // Always add a check to the primary db connection + s.Config.Handler.AddReadinessCheck("db_primary", entx.Healthcheck(c.GetPrimaryDB())) + + // Check the secondary db, if enabled + if s.Config.Settings.DB.MultiWrite { + s.Config.Handler.AddReadinessCheck("db_secondary", entx.Healthcheck(c.GetSecondaryDB())) + } + + // Check the connection to redis, if enabled + if s.Config.Settings.Redis.Enabled { + s.Config.Handler.AddReadinessCheck("redis", cache.Healthcheck(r)) + } + }) +} + +// WithGraphRoute adds the graph handler to the server +func WithGraphRoute(srv *server.Server, c *generated.Client) ServerOption { + return newApplyFunc(func(s *ServerOptions) { + // Setup Graph API Handlers + r := graphapi.NewResolver(c). 
WithLogger(s.Config.Logger.Named("resolvers"))
+
+		handler := r.Handler(s.Config.Settings.Server.Dev)
+
+		// Add Graph Handler
+		srv.AddHandler(handler)
+	})
+}
+
+// WithMiddleware adds the middleware to the server
+func WithMiddleware() ServerOption {
+	return newApplyFunc(func(s *ServerOptions) {
+		// Initialize middleware if nil
+		if s.Config.DefaultMiddleware == nil {
+			s.Config.DefaultMiddleware = []echo.MiddlewareFunc{}
+		}
+
+		// default middleware
+		s.Config.DefaultMiddleware = append(s.Config.DefaultMiddleware,
+			middleware.RequestID(), // add request id
+			middleware.Recover(),   // recover server from any panic/fatal error gracefully
+			middleware.LoggerWithConfig(middleware.LoggerConfig{
+				Format: "remote_ip=${remote_ip}, method=${method}, uri=${uri}, status=${status}, session=${header:Set-Cookie}, host=${host}, referer=${referer}, user_agent=${user_agent}, route=${route}, path=${path}, auth=${header:Authorization}\n",
+			}),
+			echoprometheus.MetricsMiddleware(), // add prometheus metrics
+			echozap.ZapLogger(s.Config.Logger.Desugar()), // add zap logger, middleware requires the "regular" zap logger
+			echocontext.EchoContextToContextMiddleware(), // adds echo context to parent
+			mime.NewWithConfig(mime.Config{DefaultContentType: echo.MIMEApplicationJSONCharsetUTF8}), // add mime middleware
+		)
+	})
+}
+
+// WithSessionManager sets up the default session manager with a 10 minute ttl
+// with persistence to redis
+func WithSessionManager(rc *redis.Client) ServerOption {
+	return newApplyFunc(func(s *ServerOptions) {
+		cc := sessions.DefaultCookieConfig
+
+		// In order for things to work in dev mode with localhost
+		// we need to set the debug cookie config
+		if s.Config.Settings.Server.Dev {
+			cc = &sessions.DebugOnlyCookieConfig
+		} else {
+			cc.Name = sessions.DefaultCookieName
+		}
+
+		sm := sessions.NewCookieStore[map[string]any](cc,
+			[]byte(s.Config.Settings.Sessions.SigningKey),
+			[]byte(s.Config.Settings.Sessions.EncryptionKey),
+		)
+
+		// add session middleware; this has to be added after the authMiddleware so we have the user id
+		// when we get to the session.
+		// This is also added here so it is only added to the graph routes;
+		// REST routes are expected to add the session middleware, as required
+		sessionConfig := sessions.NewSessionConfig(
+			sm,
+			sessions.WithPersistence(rc),
+			sessions.WithLogger(s.Config.Logger),
+			sessions.WithSkipperFunc(authmw.SessionSkipperFunc),
+		)
+
+		// set cookie config to be used
+		sessionConfig.CookieConfig = cc
+
+		// Make the cookie session store available
+		// to graph and REST endpoints
+		s.Config.Handler.SessionConfig = &sessionConfig
+		s.Config.SessionConfig = &sessionConfig
+
+		s.Config.GraphMiddleware = append(s.Config.GraphMiddleware,
+			sessions.LoadAndSaveWithConfig(sessionConfig),
+		)
+	})
+}
+
+// WithRateLimiter sets up the rate limiter for the server
+func WithRateLimiter() ServerOption {
+	return newApplyFunc(func(s *ServerOptions) {
+		if s.Config.Settings.Ratelimit.Enabled {
+			s.Config.DefaultMiddleware = append(s.Config.DefaultMiddleware, ratelimit.RateLimiterWithConfig(&s.Config.Settings.Ratelimit))
+		}
+	})
+}
+
+// WithSecureMW sets up the secure middleware for the server
+func WithSecureMW() ServerOption {
+	return newApplyFunc(func(s *ServerOptions) {
+		if s.Config.Settings.Server.Secure.Enabled {
+			s.Config.DefaultMiddleware = append(s.Config.DefaultMiddleware, secure.Secure(&s.Config.Settings.Server.Secure))
+		}
+	})
+}
+
+// WithRedirects sets up the redirects for the server
+func WithRedirects() ServerOption {
+	return newApplyFunc(func(s *ServerOptions) {
+		if s.Config.Settings.Server.Redirects.Enabled {
+			redirects := s.Config.Settings.Server.Redirects
+			s.Config.DefaultMiddleware = append(s.Config.DefaultMiddleware, redirect.NewWithConfig(redirects))
+		}
+	})
+}
+
+// WithCacheHeaders sets up the cache control headers for the server
+func WithCacheHeaders() ServerOption {
+	return newApplyFunc(func(s *ServerOptions) {
+		if s.Config.Settings.Server.CacheControl.Enabled {
+			cacheConfig := s.Config.Settings.Server.CacheControl
+			s.Config.DefaultMiddleware = append(s.Config.DefaultMiddleware, cachecontrol.NewWithConfig(cacheConfig))
+		}
+	})
+}
+
+// WithCORS sets up the CORS middleware for the server
+func WithCORS() ServerOption {
+	return newApplyFunc(func(s *ServerOptions) {
+		if s.Config.Settings.Server.CORS.Enabled {
+			s.Config.DefaultMiddleware = append(s.Config.DefaultMiddleware, cors.New(s.Config.Settings.Server.CORS.AllowOrigins))
+		}
+	})
+}
diff --git a/internal/httpserve/serveropts/server.go b/internal/httpserve/serveropts/server.go
new file mode 100644
index 0000000..9f6ef60
--- /dev/null
+++ b/internal/httpserve/serveropts/server.go
@@ -0,0 +1,37 @@
+package serveropts
+
+import (
+	"github.com/datumforge/geodetic/config"
+	serverconfig "github.com/datumforge/geodetic/internal/httpserve/config"
+)
+
+// ServerOptions holds the config provider and the server configuration being assembled
+type ServerOptions struct {
+	ConfigProvider serverconfig.ConfigProvider
+	Config         serverconfig.Config
+}
+
+// NewServerOptions loads the koanf config and applies the given server options to it
+func NewServerOptions(opts []ServerOption, cfgLoc string) *ServerOptions {
+	// load koanf config
+	c, err := config.Load(&cfgLoc)
+	if err != nil {
+		panic(err)
+	}
+
+	so := &ServerOptions{
+		Config: serverconfig.Config{
+			Settings: *c,
+		},
+	}
+
+	for _, opt := range opts {
+		opt.apply(so)
+	}
+
+	return so
+}
+
+// AddServerOptions applies a server option after the initial setup;
+// use this when the needed information is not yet available when NewServerOptions is called
+func (so *ServerOptions) AddServerOptions(opt ServerOption) {
+	opt.apply(so)
+}
diff --git a/jsonschema/DOC.md b/jsonschema/DOC.md
new file mode 100644
index 0000000..6d00124
--- /dev/null
+++ b/jsonschema/DOC.md
@@ -0,0 +1,286 @@
+# object
+
+Config contains the configuration for the datum server
+
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**refresh\_interval**|`integer`|RefreshInterval determines how often to reload the config<br/>||
+|[**server**](#server)|`object`|Server settings for the echo server<br/>|yes|
+|[**db**](#db)|`object`||yes|
+|[**turso**](#turso)|`object`||yes|
+|[**redis**](#redis)|`object`|||
+|[**tracer**](#tracer)|`object`|||
+|[**sessions**](#sessions)|`object`|||
+|[**ratelimit**](#ratelimit)|`object`|||
+
+**Additional Properties:** not allowed
+
+## server: object
+
+Server settings for the echo server
+
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**debug**|`boolean`|Debug enables debug mode for the server<br/>|no|
+|**dev**|`boolean`|Dev enables echo's dev mode options<br/>|no|
+|**listen**|`string`|Listen sets the listen address to serve the echo server on<br/>|yes|
+|**shutdown\_grace\_period**|`integer`|ShutdownGracePeriod sets the grace period for in flight requests before shutting down<br/>|no|
+|**read\_timeout**|`integer`|ReadTimeout sets the maximum duration for reading the entire request including the body<br/>|no|
+|**write\_timeout**|`integer`|WriteTimeout sets the maximum duration before timing out writes of the response<br/>|no|
+|**idle\_timeout**|`integer`|IdleTimeout sets the maximum amount of time to wait for the next request when keep-alives are enabled<br/>|no|
+|**read\_header\_timeout**|`integer`|ReadHeaderTimeout sets the amount of time allowed to read request headers<br/>|no|
+|[**tls**](#servertls)|`object`|TLS settings for the server for secure connections<br/>|no|
+|[**cors**](#servercors)|`object`||no|
+|[**secure**](#serversecure)|`object`||no|
+|[**redirects**](#serverredirects)|`object`||no|
+|[**cacheControl**](#servercachecontrol)|`object`||no|
+|[**mime**](#servermime)|`object`||no|
+
+**Additional Properties:** not allowed
+
+### server\.tls: object
+
+TLS settings for the server for secure connections
+
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|Enabled turns on TLS settings for the server<br/>||
+|**cert\_file**|`string`|CertFile location for the TLS server<br/>||
+|**cert\_key**|`string`|CertKey file location for the TLS server<br/>||
+|**auto\_cert**|`boolean`|AutoCert generates the cert with letsencrypt, this does not work on localhost<br/>||
+
+**Additional Properties:** not allowed
+
+### server\.cors: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|[**prefixes**](#servercorsprefixes)|`object`|||
+|[**allowOrigins**](#servercorsalloworigins)|`string[]`|||
+|**cookieInsecure**|`boolean`|||
+
+**Additional Properties:** not allowed
+
+#### server\.cors\.prefixes: object
+
+**Additional Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+
+
+#### server\.cors\.allowOrigins: array
+
+**Items**
+
+**Item Type:** `string`
+
+### server\.secure: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|**xssprotection**|`string`|||
+|**contenttypenosniff**|`string`|||
+|**xframeoptions**|`string`|||
+|**hstspreloadenabled**|`boolean`|||
+|**hstsmaxage**|`integer`|||
+|**contentsecuritypolicy**|`string`|||
+|**referrerpolicy**|`string`|||
+|**cspreportonly**|`boolean`|||
+
+**Additional Properties:** not allowed
+
+### server\.redirects: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|[**redirects**](#serverredirectsredirects)|`object`|||
+|**code**|`integer`|||
+
+**Additional Properties:** not allowed
+
+#### server\.redirects\.redirects: object
+
+**Additional Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+
+
+### server\.cacheControl: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|[**noCacheHeaders**](#servercachecontrolnocacheheaders)|`object`|||
+|[**etagHeaders**](#servercachecontroletagheaders)|`string[]`|||
+
+**Additional Properties:** not allowed
+
+#### server\.cacheControl\.noCacheHeaders: object
+
+**Additional Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+
+
+#### server\.cacheControl\.etagHeaders: array
+
+**Items**
+
+**Item Type:** `string`
+
+### server\.mime: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|**mimeTypesFile**|`string`|||
+|**defaultContentType**|`string`|||
+
+**Additional Properties:** not allowed
+
+## db: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**debug**|`boolean`|debug enables printing the debug database logs<br/>|no|
+|**databaseName**|`string`|the name of the database to use with otel tracing<br/>|no|
+|**driverName**|`string`|sql driver name<br/>|no|
+|**multiWrite**|`boolean`|enables writing to two databases simultaneously<br/>|no|
+|**primaryDbSource**|`string`|dsn of the primary database<br/>|yes|
+|**secondaryDbSource**|`string`|dsn of the secondary database if multi-write is enabled<br/>|no|
+|**cacheTTL**|`integer`|cache results for subsequent requests<br/>|no|
+|**runMigrations**|`boolean`|run migrations on startup<br/>|no|
+
+**Additional Properties:** not allowed
+
+## turso: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**token**|`string`||yes|
+|**base\_url**|`string`||yes|
+|**org\_name**|`string`||yes|
+
+**Additional Properties:** not allowed
+
+## redis: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|**address**|`string`|||
+|**name**|`string`|||
+|**username**|`string`|||
+|**password**|`string`|||
+|**db**|`integer`|||
+|**dialTimeout**|`integer`|||
+|**readTimeout**|`integer`|||
+|**writeTimeout**|`integer`|||
+|**maxRetries**|`integer`|||
+|**minIdleConns**|`integer`|||
+|**maxIdleConns**|`integer`|||
+|**maxActiveConns**|`integer`|||
+
+**Additional Properties:** not allowed
+
+## tracer: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|**provider**|`string`|||
+|**environment**|`string`|||
+|[**stdout**](#tracerstdout)|`object`|||
+|[**otlp**](#tracerotlp)|`object`|||
+
+**Additional Properties:** not allowed
+
+### tracer\.stdout: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**pretty**|`boolean`|||
+|**disableTimestamp**|`boolean`|||
+
+**Additional Properties:** not allowed
+
+### tracer\.otlp: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**endpoint**|`string`|||
+|**insecure**|`boolean`|||
+|**certificate**|`string`|||
+|[**headers**](#tracerotlpheaders)|`string[]`|||
+|**compression**|`string`|||
+|**timeout**|`integer`|||
+
+**Additional Properties:** not allowed
+
+#### tracer\.otlp\.headers: array
+
+**Items**
+
+**Item Type:** `string`
+
+## sessions: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**signingKey**|`string`|||
+|**encryptionKey**|`string`|||
+
+**Additional Properties:** not allowed
+
+## ratelimit: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|**limit**|`number`|||
+|**burst**|`integer`|||
+|**expires**|`integer`|||
+
+**Additional Properties:** not allowed
+
diff --git a/jsonschema/Taskfile.yml b/jsonschema/Taskfile.yml
new file mode 100644
index 0000000..c1db1e8
--- /dev/null
+++ b/jsonschema/Taskfile.yml
@@ -0,0 +1,36 @@
+version: '3'
+
+tasks:
+  install:
+    desc: install dependencies
+    cmds:
+      - npm install jsonschema2mk --global
+
+  generate:
+    desc: generate the jsonschema and documentation
+    cmds:
+      - task: schema
+      - task: docs
+
+  schema:
+    desc: generate a new jsonschema and corresponding config/config.example.yaml
+    cmds:
+      - go run jsonschema/schema_generator.go
+
+  docs:
+    desc: generate documentation from the jsonschema
+    cmds:
+      - npx jsonschema2mk --schema jsonschema/geodetic.config.json > jsonschema/api-docs.md
+
+  ci:
+    desc: a task that runs during CI to confirm there are no changes after running generate
+    cmds:
+      - task: generate
+      - "git config --global --add safe.directory /workdir"
+      - |
+        status=$(git status --porcelain)
+        if [ -n "$status" ]; then
+          echo "detected git diff after running generate; please re-run tasks"
+          echo "$status"
+          exit 1
+        fi
\ No newline at end of file
diff --git a/jsonschema/api-docs.md b/jsonschema/api-docs.md
new file mode 100644
index 0000000..c0558b6
--- /dev/null
+++ b/jsonschema/api-docs.md
@@ -0,0 +1,287 @@
+# object
+
+Config contains the configuration for the datum server
+
+
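+A minimal illustrative configuration (a hand-written sketch with placeholder
+values, covering only the required fields from the tables below):
+
+```yaml
+server:
+  listen: ":17608"
+db:
+  primaryDbSource: "file:geodetic.db"
+turso:
+  token: "<turso api token>"
+  baseUrl: "<turso api base url>"
+  orgName: "<turso org name>"
+```
+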
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**refresh\_interval**|`integer`|RefreshInterval determines how often to reload the config<br/>||
+|[**server**](#server)|`object`|Server settings for the echo server<br/>|yes|
+|[**db**](#db)|`object`||yes|
+|[**turso**](#turso)|`object`||yes|
+|[**redis**](#redis)|`object`|||
+|[**tracer**](#tracer)|`object`|||
+|[**sessions**](#sessions)|`object`|||
+|[**ratelimit**](#ratelimit)|`object`|||
+
+**Additional Properties:** not allowed
+
+## server: object
+
+Server settings for the echo server
+
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**debug**|`boolean`|Debug enables debug mode for the server<br/>|no|
+|**dev**|`boolean`|Dev enables echo's dev mode options<br/>|no|
+|**listen**|`string`|Listen sets the listen address to serve the echo server on<br/>|yes|
+|**shutdown\_grace\_period**|`integer`|ShutdownGracePeriod sets the grace period for in flight requests before shutting down<br/>|no|
+|**read\_timeout**|`integer`|ReadTimeout sets the maximum duration for reading the entire request including the body<br/>|no|
+|**write\_timeout**|`integer`|WriteTimeout sets the maximum duration before timing out writes of the response<br/>|no|
+|**idle\_timeout**|`integer`|IdleTimeout sets the maximum amount of time to wait for the next request when keep-alives are enabled<br/>|no|
+|**read\_header\_timeout**|`integer`|ReadHeaderTimeout sets the amount of time allowed to read request headers<br/>|no|
+|[**tls**](#servertls)|`object`|TLS settings for the server for secure connections<br/>|no|
+|[**cors**](#servercors)|`object`||no|
+|[**secure**](#serversecure)|`object`||no|
+|[**redirects**](#serverredirects)|`object`||no|
+|[**cacheControl**](#servercachecontrol)|`object`||no|
+|[**mime**](#servermime)|`object`||no|
+
+**Additional Properties:** not allowed
+
+### server\.tls: object
+
+TLS settings for the server for secure connections
+
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|Enabled turns on TLS settings for the server<br/>||
+|**cert\_file**|`string`|CertFile location for the TLS server<br/>||
+|**cert\_key**|`string`|CertKey file location for the TLS server<br/>||
+|**auto\_cert**|`boolean`|AutoCert generates the cert with letsencrypt, this does not work on localhost<br/>||
+
+**Additional Properties:** not allowed
+
+### server\.cors: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|[**prefixes**](#servercorsprefixes)|`object`|||
+|[**allowOrigins**](#servercorsalloworigins)|`string[]`|||
+|**cookieInsecure**|`boolean`|||
+
+**Additional Properties:** not allowed
+
+#### server\.cors\.prefixes: object
+
+**Additional Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+
+
+#### server\.cors\.allowOrigins: array
+
+**Items**
+
+**Item Type:** `string`
+
+### server\.secure: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|**xssprotection**|`string`|||
+|**contenttypenosniff**|`string`|||
+|**xframeoptions**|`string`|||
+|**hstspreloadenabled**|`boolean`|||
+|**hstsmaxage**|`integer`|||
+|**contentsecuritypolicy**|`string`|||
+|**referrerpolicy**|`string`|||
+|**cspreportonly**|`boolean`|||
+
+**Additional Properties:** not allowed
+
+### server\.redirects: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|[**redirects**](#serverredirectsredirects)|`object`|||
+|**code**|`integer`|||
+
+**Additional Properties:** not allowed
+
+#### server\.redirects\.redirects: object
+
+**Additional Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+
+
+### server\.cacheControl: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|[**noCacheHeaders**](#servercachecontrolnocacheheaders)|`object`|||
+|[**etagHeaders**](#servercachecontroletagheaders)|`string[]`|||
+
+**Additional Properties:** not allowed
+
+#### server\.cacheControl\.noCacheHeaders: object
+
+**Additional Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+
+
+#### server\.cacheControl\.etagHeaders: array
+
+**Items**
+
+**Item Type:** `string`
+
+### server\.mime: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|**mimeTypesFile**|`string`|||
+|**defaultContentType**|`string`|||
+
+**Additional Properties:** not allowed
+
+## db: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**debug**|`boolean`|debug enables printing the debug database logs<br/>|no|
+|**databaseName**|`string`|the name of the database to use with otel tracing<br/>|no|
+|**driverName**|`string`|sql driver name<br/>|no|
+|**multiWrite**|`boolean`|enables writing to two databases simultaneously<br/>|no|
+|**primaryDbSource**|`string`|dsn of the primary database<br/>|yes|
+|**secondaryDbSource**|`string`|dsn of the secondary database if multi-write is enabled<br/>|no|
+|**cacheTTL**|`integer`|cache results for subsequent requests<br/>|no|
+|**runMigrations**|`boolean`|run migrations on startup<br/>|no|
+|**migrationProvider**|`string`|migration provider to use for running migrations<br/>|no|
+
+**Additional Properties:** not allowed
+
+## turso: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**token**|`string`||yes|
+|**baseUrl**|`string`||yes|
+|**orgName**|`string`||yes|
+
+**Additional Properties:** not allowed
+
+## redis: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|**address**|`string`|||
+|**name**|`string`|||
+|**username**|`string`|||
+|**password**|`string`|||
+|**db**|`integer`|||
+|**dialTimeout**|`integer`|||
+|**readTimeout**|`integer`|||
+|**writeTimeout**|`integer`|||
+|**maxRetries**|`integer`|||
+|**minIdleConns**|`integer`|||
+|**maxIdleConns**|`integer`|||
+|**maxActiveConns**|`integer`|||
+
+**Additional Properties:** not allowed
+
+## tracer: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|**provider**|`string`|||
+|**environment**|`string`|||
+|[**stdout**](#tracerstdout)|`object`|||
+|[**otlp**](#tracerotlp)|`object`|||
+
+**Additional Properties:** not allowed
+
+### tracer\.stdout: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**pretty**|`boolean`|||
+|**disableTimestamp**|`boolean`|||
+
+**Additional Properties:** not allowed
+
+### tracer\.otlp: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**endpoint**|`string`|||
+|**insecure**|`boolean`|||
+|**certificate**|`string`|||
+|[**headers**](#tracerotlpheaders)|`string[]`|||
+|**compression**|`string`|||
+|**timeout**|`integer`|||
+
+**Additional Properties:** not allowed
+
+#### tracer\.otlp\.headers: array
+
+**Items**
+
+**Item Type:** `string`
+
+## sessions: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**signingKey**|`string`|||
+|**encryptionKey**|`string`|||
+
+**Additional Properties:** not allowed
+
+## ratelimit: object
+
+**Properties**
+
+|Name|Type|Description|Required|
+|----|----|-----------|--------|
+|**enabled**|`boolean`|||
+|**limit**|`number`|||
+|**burst**|`integer`|||
+|**expires**|`integer`|||
+
+**Additional Properties:** not allowed
+
diff --git a/jsonschema/envparse/doc.go b/jsonschema/envparse/doc.go
new file mode 100644
index 0000000..c666efa
--- /dev/null
+++ b/jsonschema/envparse/doc.go
@@ -0,0 +1,2 @@
+// Package envparse provides a way to parse environment variables from a struct
+package envparse
diff --git a/jsonschema/envparse/parse.go b/jsonschema/envparse/parse.go
new file mode 100644
index 0000000..ab43ba2
--- /dev/null
+++ b/jsonschema/envparse/parse.go
@@ -0,0 +1,126 @@
+package envparse
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// ErrInvalidSpecification indicates that a specification is of the wrong type.
+var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
+
+type Config struct {
+	// FieldTagName is the name of the struct tag to use for the field name
+	FieldTagName string
+	// Skipper is the value of the tag to skip parsing of the field
+	Skipper string
+}
+
+// varInfo maintains information about the configuration variable
+type varInfo struct {
+	FieldName string
+	FullPath  string
+	Key       string
+	Type      reflect.Type
+	Tags      reflect.StructTag
+}
+
+// GatherEnvInfo gathers information about the specified struct, including defaults and environment variable names.
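+// Keys are built by joining the prefix and the field tag name with an underscore
+// and upcasing the result; for example, the prefix "GEODETIC" and the nested
+// field "server.listen" produce the key GEODETIC_SERVER_LISTEN.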
+func (c Config) GatherEnvInfo(prefix string, spec interface{}) ([]varInfo, error) { + s := reflect.ValueOf(spec) + + // Ensure the specification is a pointer to a struct + if s.Kind() != reflect.Ptr { + return nil, ErrInvalidSpecification + } + + s = s.Elem() + if s.Kind() != reflect.Struct { + return nil, ErrInvalidSpecification + } + + typeOfSpec := s.Type() + + // Create a slice to hold the information about the configuration variables + var infos []varInfo + + // Iterate over the struct fields + for i := range s.NumField() { + f := s.Field(i) + ftype := typeOfSpec.Field(i) + + if !f.CanSet() { + continue + } + + for f.Kind() == reflect.Ptr { + if f.IsNil() { + if f.Type().Elem().Kind() != reflect.Struct { + // nil pointer to a non-struct: leave it alone + break + } + + // nil pointer to struct: create a zero instance + f.Set(reflect.New(f.Type().Elem())) + } + + f = f.Elem() + } + + // Capture information about the config variable + fieldName := c.getFieldName(ftype) + if fieldName == c.Skipper { + continue + } + + info := varInfo{ + FieldName: fieldName, + FullPath: ftype.Name, + Type: ftype.Type, + Tags: ftype.Tag, + } + + // Default to the field name as the env var name (will be upcased) + info.Key = info.FieldName + + if prefix != "" { + info.Key = fmt.Sprintf("%s_%s", prefix, info.Key) + info.FullPath = fmt.Sprintf("%s.%s", strings.ToLower(strings.Replace(prefix, "_", ".", -1)), info.FieldName) // nolint: gocritic + } + + info.Key = strings.ToUpper(info.Key) + infos = append(infos, info) + + if f.Kind() == reflect.Struct { + innerPrefix := prefix + + if !ftype.Anonymous { + innerPrefix = info.Key + } + + embeddedPtr := f.Addr().Interface() + + // Recursively gather information about the embedded struct + embeddedInfos, err := c.GatherEnvInfo(innerPrefix, embeddedPtr) + if err != nil { + return nil, err + } + + infos = append(infos[:len(infos)-1], embeddedInfos...) 
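+			// the append above replaces the struct's own placeholder entry with the
+			// expanded entries gathered from its fields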
+ + continue + } + } + + return infos, nil +} + +func (c Config) getFieldName(ftype reflect.StructField) string { + if ftype.Tag.Get(c.FieldTagName) != "" { + return ftype.Tag.Get(c.FieldTagName) + } + + // default to skip if the koanf tag is not present + return c.Skipper +} diff --git a/jsonschema/geodetic.config.json b/jsonschema/geodetic.config.json new file mode 100644 index 0000000..a3dc934 --- /dev/null +++ b/jsonschema/geodetic.config.json @@ -0,0 +1,443 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://github.com/datumforge/geodetic/config/config.-config", + "$defs": { + "[]string": { + "items": { + "type": "string" + }, + "type": "array" + }, + "cache.Config": { + "properties": { + "enabled": { + "type": "boolean" + }, + "address": { + "type": "string" + }, + "name": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "db": { + "type": "integer" + }, + "dialTimeout": { + "type": "integer" + }, + "readTimeout": { + "type": "integer" + }, + "writeTimeout": { + "type": "integer" + }, + "maxRetries": { + "type": "integer" + }, + "minIdleConns": { + "type": "integer" + }, + "maxIdleConns": { + "type": "integer" + }, + "maxActiveConns": { + "type": "integer" + } + }, + "additionalProperties": false, + "type": "object" + }, + "cachecontrol.Config": { + "properties": { + "enabled": { + "type": "boolean" + }, + "noCacheHeaders": { + "$ref": "#/$defs/map[string]string" + }, + "etagHeaders": { + "$ref": "#/$defs/[]string" + } + }, + "additionalProperties": false, + "type": "object" + }, + "config.Server": { + "properties": { + "debug": { + "type": "boolean", + "description": "Debug enables debug mode for the server" + }, + "dev": { + "type": "boolean", + "description": "Dev enables echo's dev mode options" + }, + "listen": { + "type": "string", + "description": "Listen sets the listen address to serve the echo server on" + }, + "shutdown_grace_period": { + "type": "integer", + "description": "ShutdownGracePeriod sets the grace period for in flight requests before shutting down" + }, + "read_timeout": { + "type": "integer", + "description": "ReadTimeout sets the maximum duration for reading the entire request including the body" + }, + "write_timeout": { + "type": "integer", + "description": "WriteTimeout sets the maximum duration before timing out writes of the response" + }, + "idle_timeout": { + "type": "integer", + "description": "IdleTimeout sets the maximum amount of time to wait for the next request when keep-alives are enabled" + }, + "read_header_timeout": { + "type": "integer", + "description": "ReadHeaderTimeout sets the amount of time allowed to read request headers" + }, + "tls": { + "$ref": "#/$defs/config.TLS", + "description": "TLS contains the tls configuration settings" + }, + "cors": { + "$ref": "#/$defs/cors.Config", + "description": "CORS contains settings to allow cross origin settings and insecure cookies" + }, + "secure": { + "$ref": "#/$defs/secure.Config", + "description": "Secure contains settings for the secure middleware" + }, + "redirects": { + "$ref": "#/$defs/redirect.Config", + "description": "Redirect contains settings for the redirect middleware" + }, + "cacheControl": { + "$ref": "#/$defs/cachecontrol.Config", + "description": "CacheControl contains settings for the cache control middleware" + }, + "mime": { + "$ref": "#/$defs/mime.Config", + "description": "Mime contains settings for the mime middleware" + } + }, + "additionalProperties": false, + "type": "object", 
+ "required": [ + "listen" + ], + "description": "Server settings for the echo server" + }, + "config.TLS": { + "properties": { + "enabled": { + "type": "boolean", + "description": "Enabled turns on TLS settings for the server" + }, + "cert_file": { + "type": "string", + "description": "CertFile location for the TLS server" + }, + "cert_key": { + "type": "string", + "description": "CertKey file location for the TLS server" + }, + "auto_cert": { + "type": "boolean", + "description": "AutoCert generates the cert with letsencrypt, this does not work on localhost" + } + }, + "additionalProperties": false, + "type": "object", + "description": "TLS settings for the server for secure connections" + }, + "cors.Config": { + "properties": { + "enabled": { + "type": "boolean" + }, + "prefixes": { + "$ref": "#/$defs/map[string][]string" + }, + "allowOrigins": { + "$ref": "#/$defs/[]string" + }, + "cookieInsecure": { + "type": "boolean" + } + }, + "additionalProperties": false, + "type": "object" + }, + "entx.Config": { + "properties": { + "debug": { + "type": "boolean", + "description": "debug enables printing the debug database logs" + }, + "databaseName": { + "type": "string", + "description": "the name of the database to use with otel tracing" + }, + "driverName": { + "type": "string", + "description": "sql driver name" + }, + "multiWrite": { + "type": "boolean", + "description": "enables writing to two databases simultaneously" + }, + "primaryDbSource": { + "type": "string", + "description": "dsn of the primary database" + }, + "secondaryDbSource": { + "type": "string", + "description": "dsn of the secondary database if multi-write is enabled" + }, + "cacheTTL": { + "type": "integer", + "description": "cache results for subsequent requests" + }, + "runMigrations": { + "type": "boolean", + "description": "run migrations on startup" + }, + "migrationProvider": { + "type": "string", + "description": "migration provider to use for running migrations" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "primaryDbSource" + ] + }, + "map[string][]string": { + "additionalProperties": { + "$ref": "#/$defs/[]string" + }, + "type": "object" + }, + "map[string]string": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "mime.Config": { + "properties": { + "enabled": { + "type": "boolean" + }, + "mimeTypesFile": { + "type": "string" + }, + "defaultContentType": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + }, + "otelx.Config": { + "properties": { + "enabled": { + "type": "boolean" + }, + "provider": { + "type": "string" + }, + "environment": { + "type": "string" + }, + "stdout": { + "$ref": "#/$defs/otelx.StdOut" + }, + "otlp": { + "$ref": "#/$defs/otelx.OTLP" + } + }, + "additionalProperties": false, + "type": "object" + }, + "otelx.OTLP": { + "properties": { + "endpoint": { + "type": "string" + }, + "insecure": { + "type": "boolean" + }, + "certificate": { + "type": "string" + }, + "headers": { + "$ref": "#/$defs/[]string" + }, + "compression": { + "type": "string" + }, + "timeout": { + "type": "integer" + } + }, + "additionalProperties": false, + "type": "object" + }, + "otelx.StdOut": { + "properties": { + "pretty": { + "type": "boolean" + }, + "disableTimestamp": { + "type": "boolean" + } + }, + "additionalProperties": false, + "type": "object" + }, + "ratelimit.Config": { + "properties": { + "enabled": { + "type": "boolean" + }, + "limit": { + "type": "number" + }, + "burst": { + "type": "integer" + }, + 
"expires": { + "type": "integer" + } + }, + "additionalProperties": false, + "type": "object" + }, + "redirect.Config": { + "properties": { + "enabled": { + "type": "boolean" + }, + "redirects": { + "$ref": "#/$defs/map[string]string" + }, + "code": { + "type": "integer" + } + }, + "additionalProperties": false, + "type": "object" + }, + "secure.Config": { + "properties": { + "enabled": { + "type": "boolean" + }, + "xssprotection": { + "type": "string" + }, + "contenttypenosniff": { + "type": "string" + }, + "xframeoptions": { + "type": "string" + }, + "hstspreloadenabled": { + "type": "boolean" + }, + "hstsmaxage": { + "type": "integer" + }, + "contentsecuritypolicy": { + "type": "string" + }, + "referrerpolicy": { + "type": "string" + }, + "cspreportonly": { + "type": "boolean" + } + }, + "additionalProperties": false, + "type": "object" + }, + "sessions.Config": { + "properties": { + "signingKey": { + "type": "string" + }, + "encryptionKey": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + }, + "turso.Config": { + "properties": { + "token": { + "type": "string" + }, + "baseUrl": { + "type": "string" + }, + "orgName": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "token", + "baseUrl", + "orgName" + ] + } + }, + "properties": { + "refresh_interval": { + "type": "integer", + "description": "RefreshInterval determines how often to reload the config" + }, + "server": { + "$ref": "#/$defs/config.Server", + "description": "Server contains the echo server settings" + }, + "db": { + "$ref": "#/$defs/entx.Config", + "description": "DB contains the database configuration for the ent client" + }, + "turso": { + "$ref": "#/$defs/turso.Config", + "description": "Turso contains the configuration for the turso client" + }, + "redis": { + "$ref": "#/$defs/cache.Config", + "description": "Redis contains the redis configuration for the key-value store" + }, + "tracer": { + "$ref": "#/$defs/otelx.Config", + "description": "Tracer contains the tracing config for opentelemetry" + }, + "sessions": { + "$ref": "#/$defs/sessions.Config", + "description": "Sessions config for user sessions and cookies" + }, + "ratelimit": { + "$ref": "#/$defs/ratelimit.Config", + "description": "Ratelimit contains the configuration for the rate limiter" + } + }, + "additionalProperties": false, + "type": "object", + "description": "Config contains the configuration for the datum server" +} \ No newline at end of file diff --git a/jsonschema/schema_generator.go b/jsonschema/schema_generator.go new file mode 100644 index 0000000..2752f08 --- /dev/null +++ b/jsonschema/schema_generator.go @@ -0,0 +1,163 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + + "github.com/invopop/jsonschema" + "github.com/invopop/yaml" + "github.com/mcuadros/go-defaults" + + "github.com/datumforge/geodetic/config" + "github.com/datumforge/geodetic/jsonschema/envparse" +) + +// const values used for the schema generator +const ( + repoName = "github.com/datumforge/geodetic/" + tagName = "koanf" + skipper = "-" + defaultTag = "default" + jsonSchemaPath = "./jsonschema/geodetic.config.json" + yamlConfigPath = "./config/config.example.yaml" + envConfigPath = "./config/.env.example" + configMapPath = "./config/configmap.yaml" + varPrefix = "GEODETIC" + ownerReadWrite = 0600 +) + +// includedPackages is a list of packages to include in the schema generation +// that contain Go comments to be added to the schema +// any external packages must use 
the jsonschema description tags to add comments
+var includedPackages = []string{
+	"./config",
+	"./internal/entdb",
+	"./internal/httpserve/handlers",
+}
+
+// schemaConfig represents the configuration for the schema generator
+type schemaConfig struct {
+	// jsonSchemaPath represents the file path of the JSON schema to be generated
+	jsonSchemaPath string
+	// yamlConfigPath is the file path to the YAML configuration to be generated
+	yamlConfigPath string
+	// envConfigPath is the file path to the environment variable configuration to be generated
+	envConfigPath string
+	// configMapPath is the file path to the kubernetes config map configuration to be generated
+	configMapPath string
+}
+
+func main() {
+	c := schemaConfig{
+		jsonSchemaPath: jsonSchemaPath,
+		yamlConfigPath: yamlConfigPath,
+		envConfigPath:  envConfigPath,
+		configMapPath:  configMapPath,
+	}
+
+	if err := generateSchema(c, &config.Config{}); err != nil {
+		panic(err)
+	}
+}
+
+// generateSchema generates a JSON schema and a YAML schema based on the provided schemaConfig and structure
+func generateSchema(c schemaConfig, structure interface{}) error {
+	// override the default name to use the prefixed pkg name
+	r := jsonschema.Reflector{Namer: namePkg}
+	r.ExpandedStruct = true
+	// set `jsonschema:required` tag to true to generate required fields
+	r.RequiredFromJSONSchemaTags = true
+	// set the tag name to `koanf` for the koanf struct tags
+	r.FieldNameTag = tagName
+
+	// add go comments to the schema
+	for _, pkg := range includedPackages {
+		if err := r.AddGoComments(repoName, pkg); err != nil {
+			panic(err.Error())
+		}
+	}
+
+	s := r.Reflect(structure)
+
+	// generate the json schema
+	data, err := json.MarshalIndent(s, "", "  ")
+	if err != nil {
+		panic(err.Error())
+	}
+
+	if err = os.WriteFile(c.jsonSchemaPath, data, ownerReadWrite); err != nil {
+		panic(err.Error())
+	}
+
+	// generate the yaml config example with defaults applied
+	yamlConfig := &config.Config{}
+	defaults.SetDefaults(yamlConfig)
+
+	// this uses the `json` tag to generate the yaml schema
+	yamlSchema, err := yaml.Marshal(yamlConfig)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	if err = os.WriteFile(c.yamlConfigPath, yamlSchema, ownerReadWrite); err != nil {
+		panic(err.Error())
+	}
+
+	cp := envparse.Config{
+		FieldTagName: tagName,
+		Skipper:      skipper,
+	}
+
+	out, err := cp.GatherEnvInfo(varPrefix, &config.Config{})
+	if err != nil {
+		panic(err.Error())
+	}
+
+	// generate the environment variables from the config
+	envSchema := ""
+	configMapSchema := "\n"
+
+	for _, k := range out {
+		defaultVal := k.Tags.Get(defaultTag)
+
+		envSchema += fmt.Sprintf("%s=\"%s\"\n", k.Key, defaultVal)
+
+		// if there is no default value, take the value from values.yaml as-is
+		if defaultVal == "" {
+			configMapSchema += fmt.Sprintf("  %s: {{ .Values.%s }}\n", k.Key, k.FullPath)
+		} else {
+			if k.Type.Kind() == reflect.String {
+				defaultVal = "\"" + defaultVal + "\"" // add quotes to the string
+			}
+
+			// pipe through helm's `default` function so the tag value is used
+			// when the corresponding values.yaml entry is unset
+			configMapSchema += fmt.Sprintf("  %s: {{ .Values.%s | default %s }}\n", k.Key, k.FullPath, defaultVal)
+		}
+	}
+
+	// write the environment variables to a file
+	if err = os.WriteFile(c.envConfigPath, []byte(envSchema), ownerReadWrite); err != nil {
+		panic(err.Error())
+	}
+
+	// Get the configmap header
+	cm, err := os.ReadFile("./jsonschema/templates/configmap.tmpl")
+	if err != nil {
+		panic(err.Error())
+	}
+
+	// append the configmap schema to the header
+	cm = append(cm, []byte(configMapSchema)...)
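+
+	// As an illustration (hypothetical default value; the exact env key format
+	// is assumed from the GEODETIC prefix): a string field tagged
+	// `koanf:"listen"` with `default:":1337"` nested under the `server` key
+	// would presumably emit the following lines into the two generated files:
+	//
+	//	.env:       GEODETIC_SERVER_LISTEN=":1337"
+	//	configmap:  GEODETIC_SERVER_LISTEN: {{ .Values.server.listen | default ":1337" }}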
+
+	// write the configmap to a file
+	if err = os.WriteFile(c.configMapPath, cm, ownerReadWrite); err != nil {
+		panic(err.Error())
+	}
+
+	return nil
+}
+
+func namePkg(r reflect.Type) string {
+	return r.String()
+}
diff --git a/jsonschema/templates/configmap.tmpl b/jsonschema/templates/configmap.tmpl
new file mode 100644
index 0000000..52cdcd4
--- /dev/null
+++ b/jsonschema/templates/configmap.tmpl
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . -}}-config
+  {{ $labels := include "common.tplvalues.merge" (dict "values" ( list .Values.api.commonLabels (include "common.labels.standard" .) ) "context" . ) }}
+  labels: {{- include "common.tplvalues.render" ( dict "value" $labels "context" $) | nindent 4 }}
+  {{- if .Values.api.commonAnnotations }}
+  {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.api.commonAnnotations ) "context" . ) }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+  {{- end }}
+data:
\ No newline at end of file
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..63a7a42
--- /dev/null
+++ b/main.go
@@ -0,0 +1,11 @@
+// Package main is the entry point for the geodetic service
+package main
+
+import (
+	"github.com/datumforge/geodetic/cmd"
+	_ "github.com/datumforge/geodetic/internal/ent/generated/runtime"
+)
+
+func main() {
+	cmd.Execute()
+}
diff --git a/pkg/enums/doc.go b/pkg/enums/doc.go
new file mode 100644
index 0000000..ccc083a
--- /dev/null
+++ b/pkg/enums/doc.go
@@ -0,0 +1,2 @@
+// Package enums defines the enum types shared by the geodetic API and its clients
+package enums
diff --git a/pkg/enums/provider.go b/pkg/enums/provider.go
new file mode 100644
index 0000000..4e47ea2
--- /dev/null
+++ b/pkg/enums/provider.go
@@ -0,0 +1,59 @@
+package enums
+
+import (
+	"fmt"
+	"io"
+	"strings"
+)
+
+// DatabaseProvider is an enum for the provider hosting a database
+type DatabaseProvider string
+
+var (
+	Local           DatabaseProvider = "LOCAL"
+	Turso           DatabaseProvider = "TURSO"
+	InvalidProvider DatabaseProvider = "INVALID"
+)
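+
+// A sketch of typical round-trips (illustrative only, not part of the package API):
+//
+//	p := ToDatabaseProvider("turso") // case-insensitive: returns &Turso
+//	p.String()                       // "TURSO"
+//	ToDatabaseProvider("sqlite")     // unknown input returns &InvalidProvider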
+
+// Values returns a slice of strings that represents all the possible values of the DatabaseProvider enum.
+// Possible values are "LOCAL" and "TURSO"
+func (DatabaseProvider) Values() (kinds []string) {
+	for _, s := range []DatabaseProvider{Local, Turso} {
+		kinds = append(kinds, string(s))
+	}
+
+	return
+}
+
+// String returns the DatabaseProvider as a string
+func (r DatabaseProvider) String() string {
+	return string(r)
+}
+
+// ToDatabaseProvider returns the database provider enum based on string input
+func ToDatabaseProvider(p string) *DatabaseProvider {
+	switch p := strings.ToUpper(p); p {
+	case Local.String():
+		return &Local
+	case Turso.String():
+		return &Turso
+	default:
+		return &InvalidProvider
+	}
+}
+
+// MarshalGQL implements the Marshaler interface for gqlgen
+func (r DatabaseProvider) MarshalGQL(w io.Writer) {
+	_, _ = w.Write([]byte(`"` + r.String() + `"`))
+}
+
+// UnmarshalGQL implements the Unmarshaler interface for gqlgen
+func (r *DatabaseProvider) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("wrong type for DatabaseProvider, got: %T", v) //nolint:goerr113
+	}
+
+	*r = DatabaseProvider(str)
+
+	return nil
+}
diff --git a/pkg/enums/provider_test.go b/pkg/enums/provider_test.go
new file mode 100644
index 0000000..e113351
--- /dev/null
+++ b/pkg/enums/provider_test.go
@@ -0,0 +1,37 @@
+package enums_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/datumforge/geodetic/pkg/enums"
+)
+
+func TestToDatabaseProvider(t *testing.T) {
+	testCases := []struct {
+		input    string
+		expected enums.DatabaseProvider
+	}{
+		{
+			input:    "local",
+			expected: enums.Local,
+		},
+		{
+			input:    "Turso",
+			expected: enums.Turso,
+		},
+		{
+			input:    "UNKNOWN",
+			expected: enums.InvalidProvider,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("Convert %s to DatabaseProvider", tc.input), func(t *testing.T) {
+			result := enums.ToDatabaseProvider(tc.input)
+			assert.Equal(t, tc.expected, *result)
+		})
+	}
+}
diff --git a/pkg/enums/region.go b/pkg/enums/region.go
new file mode 100644
index 0000000..5f0cf7a
--- /dev/null
+++ b/pkg/enums/region.go
@@ -0,0 +1,62 @@
+package enums
+
+import (
+	"fmt"
+	"io"
+	"strings"
+)
+
+// Region is an enum for the geographic region of a database group
+type Region string
+
+var (
+	Amer          Region = "AMER"
+	Emea          Region = "EMEA"
+	Apac          Region = "APAC"
+	InvalidRegion Region = "INVALID"
+)
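+
+// Illustrative usage (a sketch, not part of the package API):
+//
+//	r := ToRegion("emea") // case-insensitive: returns &Emea
+//	r.String()            // "EMEA"
+//	ToRegion("mars")      // unknown input returns &InvalidRegion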
+
+// Values returns a slice of strings that represents all the possible values of the Region enum.
+// Possible values are "AMER", "EMEA", and "APAC"
+func (Region) Values() (kinds []string) {
+	for _, s := range []Region{Amer, Emea, Apac} {
+		kinds = append(kinds, string(s))
+	}
+
+	return
+}
+
+// String returns the Region as a string
+func (r Region) String() string {
+	return string(r)
+}
+
+// ToRegion returns the region enum based on string input
+func ToRegion(p string) *Region {
+	switch p := strings.ToUpper(p); p {
+	case Amer.String():
+		return &Amer
+	case Emea.String():
+		return &Emea
+	case Apac.String():
+		return &Apac
+	default:
+		return &InvalidRegion
+	}
+}
+
+// MarshalGQL implements the Marshaler interface for gqlgen
+func (r Region) MarshalGQL(w io.Writer) {
+	_, _ = w.Write([]byte(`"` + r.String() + `"`))
+}
+
+// UnmarshalGQL implements the Unmarshaler interface for gqlgen
+func (r *Region) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("wrong type for Region, got: %T", v) //nolint:goerr113
+	}
+
+	*r = Region(str)
+
+	return nil
+}
diff --git a/pkg/enums/region_test.go b/pkg/enums/region_test.go
new file mode 100644
index 0000000..5d656d8
--- /dev/null
+++ b/pkg/enums/region_test.go
@@ -0,0 +1,41 @@
+package enums_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/datumforge/geodetic/pkg/enums"
+)
+
+func TestToRegion(t *testing.T) {
+	testCases := []struct {
+		input    string
+		expected enums.Region
+	}{
+		{
+			input:    "amer",
+			expected: enums.Amer,
+		},
+		{
+			input:    "EMEA",
+			expected: enums.Emea,
+		},
+		{
+			input:    "Apac",
+			expected: enums.Apac,
+		},
+		{
+			input:    "UNKNOWN",
+			expected: enums.InvalidRegion,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("Convert %s to Region", tc.input), func(t *testing.T) {
+			result := enums.ToRegion(tc.input)
+			assert.Equal(t, tc.expected, *result)
+		})
+	}
+}
diff --git a/pkg/enums/status.go b/pkg/enums/status.go
new file mode 100644
index 0000000..8dbf8cb
--- /dev/null
+++ b/pkg/enums/status.go
@@ -0,0 +1,65 @@
+package enums
+
+import (
+	"fmt"
+	"io"
+	"strings"
+)
+
+// DatabaseStatus is an enum for the provisioning status of a database
+type DatabaseStatus string
+
+var (
+	Active        DatabaseStatus = "ACTIVE"
+	Creating      DatabaseStatus = "CREATING"
+	Deleting      DatabaseStatus = "DELETING"
+	Deleted       DatabaseStatus = "DELETED"
+	InvalidStatus DatabaseStatus = "INVALID"
+)
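+
+// The status values roughly trace a database's provisioning lifecycle; an
+// illustrative sketch (this package only defines the values and does not
+// enforce the ordering):
+//
+//	CREATING -> ACTIVE -> DELETING -> DELETED
+//
+//	s := ToDatabaseStatus("creating") // case-insensitive: returns &Creating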
+
+// Values returns a slice of strings that represents all the possible values of the DatabaseStatus enum.
+// Possible values are "ACTIVE", "CREATING", "DELETING", and "DELETED".
+func (DatabaseStatus) Values() (kinds []string) {
+	for _, s := range []DatabaseStatus{Active, Creating, Deleting, Deleted} {
+		kinds = append(kinds, string(s))
+	}
+
+	return
+}
+
+// String returns the DatabaseStatus as a string
+func (r DatabaseStatus) String() string {
+	return string(r)
+}
+
+// ToDatabaseStatus returns the database status enum based on string input
+func ToDatabaseStatus(r string) *DatabaseStatus {
+	switch r := strings.ToUpper(r); r {
+	case Active.String():
+		return &Active
+	case Creating.String():
+		return &Creating
+	case Deleting.String():
+		return &Deleting
+	case Deleted.String():
+		return &Deleted
+	default:
+		return &InvalidStatus
+	}
+}
+
+// MarshalGQL implements the Marshaler interface for gqlgen
+func (r DatabaseStatus) MarshalGQL(w io.Writer) {
+	_, _ = w.Write([]byte(`"` + r.String() + `"`))
+}
+
+// UnmarshalGQL implements the Unmarshaler interface for gqlgen
+func (r *DatabaseStatus) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("wrong type for DatabaseStatus, got: %T", v) //nolint:goerr113
+	}
+
+	*r = DatabaseStatus(str)
+
+	return nil
+}
diff --git a/pkg/enums/status_test.go b/pkg/enums/status_test.go
new file mode 100644
index 0000000..444bf97
--- /dev/null
+++ b/pkg/enums/status_test.go
@@ -0,0 +1,45 @@
+package enums_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/datumforge/geodetic/pkg/enums"
+)
+
+func TestToDatabaseStatus(t *testing.T) {
+	testCases := []struct {
+		input    string
+		expected enums.DatabaseStatus
+	}{
+		{
+			input:    "active",
+			expected: enums.Active,
+		},
+		{
+			input:    "deleted",
+			expected: enums.Deleted,
+		},
+		{
+			input:    "DELETING",
+			expected: enums.Deleting,
+		},
+		{
+			input:    "creating",
+			expected: enums.Creating,
+		},
+		{
+			input:    "UNKNOWN",
+			expected: enums.InvalidStatus,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("Convert %s to DatabaseStatus", tc.input), func(t *testing.T) {
+			result := enums.ToDatabaseStatus(tc.input)
+			assert.Equal(t, tc.expected, *result)
+		})
+	}
+}
diff --git a/pkg/geodeticclient/config.go b/pkg/geodeticclient/config.go
new file mode 100644
index 0000000..8d9412e
--- /dev/null
+++ b/pkg/geodeticclient/config.go
@@ -0,0 +1,44 @@
+package geodeticclient
+
+import (
+	"net/http"
+
+	"github.com/Yamashou/gqlgenc/clientv2"
+)
+
+// Config holds the settings used to construct a geodetic client
+type Config struct {
+	// Enabled is a flag to enable the geodetic client
+	Enabled bool `json:"enabled" koanf:"enabled" jsonschema:"description=Enable the geodetic client" default:"true"`
+	// BaseURL is the base url for the geodetic service
+	BaseURL string `json:"baseUrl" koanf:"baseUrl" jsonschema:"description=Base URL for the geodetic service" default:"http://localhost:1337"`
+	// Endpoint is the endpoint for the graphql api
+	Endpoint string `json:"endpoint" koanf:"endpoint" jsonschema:"description=Endpoint for the graphql api" default:"query"`
+	// Debug is a flag to enable debug mode
+	Debug bool `json:"debug" koanf:"debug" jsonschema:"description=Enable debug mode" default:"false"`
+}
+
+// NewDefaultClient creates a new default geodetic client based on the config
+func (c Config) NewDefaultClient() GeodeticClient {
+	i := WithEmptyInterceptor()
+	interceptors := []clientv2.RequestInterceptor{i}
+
+	if c.Debug {
+		interceptors = append(interceptors, WithLoggingInterceptor())
+	}
+
+	return c.NewClientWithInterceptors(interceptors)
+}
+
+// NewClientWithInterceptors creates a new geodetic client with the provided interceptors
+func (c Config) 
NewClientWithInterceptors(i []clientv2.RequestInterceptor) GeodeticClient { + h := http.DefaultClient + + // set options + opts := &clientv2.Options{ + ParseDataAlongWithErrors: false, + } + + gc := NewClient(h, c.BaseURL, opts, i...) + + return gc +} diff --git a/pkg/geodeticclient/doc.go b/pkg/geodeticclient/doc.go new file mode 100644 index 0000000..0e9831a --- /dev/null +++ b/pkg/geodeticclient/doc.go @@ -0,0 +1,2 @@ +// Package geodeticclient contains the client to interact with the geodetic api +package geodeticclient diff --git a/pkg/geodeticclient/graphclient.go b/pkg/geodeticclient/graphclient.go new file mode 100644 index 0000000..4d3cf22 --- /dev/null +++ b/pkg/geodeticclient/graphclient.go @@ -0,0 +1,747 @@ +// Code generated by github.com/Yamashou/gqlgenc, DO NOT EDIT. + +package geodeticclient + +import ( + "context" + "net/http" + + "github.com/Yamashou/gqlgenc/clientv2" + "github.com/datumforge/geodetic/pkg/enums" +) + +type GeodeticClient interface { + GetDatabase(ctx context.Context, name string, interceptors ...clientv2.RequestInterceptor) (*GetDatabase, error) + GetAllDatabases(ctx context.Context, interceptors ...clientv2.RequestInterceptor) (*GetAllDatabases, error) + CreateDatabase(ctx context.Context, input CreateDatabaseInput, interceptors ...clientv2.RequestInterceptor) (*CreateDatabase, error) + DeleteDatabase(ctx context.Context, name string, interceptors ...clientv2.RequestInterceptor) (*DeleteDatabase, error) + GetGroup(ctx context.Context, name string, interceptors ...clientv2.RequestInterceptor) (*GetGroup, error) + GetAllGroups(ctx context.Context, interceptors ...clientv2.RequestInterceptor) (*GetAllGroups, error) + CreateGroup(ctx context.Context, input CreateGroupInput, interceptors ...clientv2.RequestInterceptor) (*CreateGroup, error) + DeleteGroup(ctx context.Context, name string, interceptors ...clientv2.RequestInterceptor) (*DeleteGroup, error) +} + +type Client struct { + Client *clientv2.Client +} + +func NewClient(cli *http.Client, baseURL string, options *clientv2.Options, interceptors ...clientv2.RequestInterceptor) GeodeticClient { + return &Client{Client: clientv2.NewClient(cli, baseURL, options, interceptors...)} +} + +type GetDatabase_Database struct { + ID string "json:\"id\" graphql:\"id\"" + Name string "json:\"name\" graphql:\"name\"" + OrganizationID string "json:\"organizationID\" graphql:\"organizationID\"" + Provider enums.DatabaseProvider "json:\"provider\" graphql:\"provider\"" + Status enums.DatabaseStatus "json:\"status\" graphql:\"status\"" + Dsn string "json:\"dsn\" graphql:\"dsn\"" + Geo *string "json:\"geo,omitempty\" graphql:\"geo\"" +} + +func (t *GetDatabase_Database) GetID() string { + if t == nil { + t = &GetDatabase_Database{} + } + return t.ID +} +func (t *GetDatabase_Database) GetName() string { + if t == nil { + t = &GetDatabase_Database{} + } + return t.Name +} +func (t *GetDatabase_Database) GetOrganizationID() string { + if t == nil { + t = &GetDatabase_Database{} + } + return t.OrganizationID +} +func (t *GetDatabase_Database) GetProvider() *enums.DatabaseProvider { + if t == nil { + t = &GetDatabase_Database{} + } + return &t.Provider +} +func (t *GetDatabase_Database) GetStatus() *enums.DatabaseStatus { + if t == nil { + t = &GetDatabase_Database{} + } + return &t.Status +} +func (t *GetDatabase_Database) GetDsn() string { + if t == nil { + t = &GetDatabase_Database{} + } + return t.Dsn +} +func (t *GetDatabase_Database) GetGeo() *string { + if t == nil { + t = &GetDatabase_Database{} + } + return t.Geo +} + 
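+// Example usage (an illustrative sketch, not generated code; it assumes a
+// geodetic server is reachable at the configured BaseURL):
+//
+//	cfg := Config{Enabled: true, BaseURL: "http://localhost:1337", Endpoint: "query"}
+//	gc := cfg.NewDefaultClient()
+//	resp, err := gc.GetDatabase(context.Background(), "my-database")
+//	if err != nil {
+//		// handle transport or graphql errors
+//	}
+//	fmt.Println(resp.Database.Dsn, resp.Database.Status)
+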
+type GetAllDatabases_Databases_Edges_Node struct { + ID string "json:\"id\" graphql:\"id\"" + Name string "json:\"name\" graphql:\"name\"" + OrganizationID string "json:\"organizationID\" graphql:\"organizationID\"" + Provider enums.DatabaseProvider "json:\"provider\" graphql:\"provider\"" + Status enums.DatabaseStatus "json:\"status\" graphql:\"status\"" + Dsn string "json:\"dsn\" graphql:\"dsn\"" + Geo *string "json:\"geo,omitempty\" graphql:\"geo\"" +} + +func (t *GetAllDatabases_Databases_Edges_Node) GetID() string { + if t == nil { + t = &GetAllDatabases_Databases_Edges_Node{} + } + return t.ID +} +func (t *GetAllDatabases_Databases_Edges_Node) GetName() string { + if t == nil { + t = &GetAllDatabases_Databases_Edges_Node{} + } + return t.Name +} +func (t *GetAllDatabases_Databases_Edges_Node) GetOrganizationID() string { + if t == nil { + t = &GetAllDatabases_Databases_Edges_Node{} + } + return t.OrganizationID +} +func (t *GetAllDatabases_Databases_Edges_Node) GetProvider() *enums.DatabaseProvider { + if t == nil { + t = &GetAllDatabases_Databases_Edges_Node{} + } + return &t.Provider +} +func (t *GetAllDatabases_Databases_Edges_Node) GetStatus() *enums.DatabaseStatus { + if t == nil { + t = &GetAllDatabases_Databases_Edges_Node{} + } + return &t.Status +} +func (t *GetAllDatabases_Databases_Edges_Node) GetDsn() string { + if t == nil { + t = &GetAllDatabases_Databases_Edges_Node{} + } + return t.Dsn +} +func (t *GetAllDatabases_Databases_Edges_Node) GetGeo() *string { + if t == nil { + t = &GetAllDatabases_Databases_Edges_Node{} + } + return t.Geo +} + +type GetAllDatabases_Databases_Edges struct { + Node *GetAllDatabases_Databases_Edges_Node "json:\"node,omitempty\" graphql:\"node\"" +} + +func (t *GetAllDatabases_Databases_Edges) GetNode() *GetAllDatabases_Databases_Edges_Node { + if t == nil { + t = &GetAllDatabases_Databases_Edges{} + } + return t.Node +} + +type GetAllDatabases_Databases struct { + Edges []*GetAllDatabases_Databases_Edges "json:\"edges,omitempty\" graphql:\"edges\"" +} + +func (t *GetAllDatabases_Databases) GetEdges() []*GetAllDatabases_Databases_Edges { + if t == nil { + t = &GetAllDatabases_Databases{} + } + return t.Edges +} + +type CreateDatabase_CreateDatabase_Database struct { + ID string "json:\"id\" graphql:\"id\"" + Name string "json:\"name\" graphql:\"name\"" + OrganizationID string "json:\"organizationID\" graphql:\"organizationID\"" + Provider enums.DatabaseProvider "json:\"provider\" graphql:\"provider\"" + Status enums.DatabaseStatus "json:\"status\" graphql:\"status\"" + Dsn string "json:\"dsn\" graphql:\"dsn\"" + Geo *string "json:\"geo,omitempty\" graphql:\"geo\"" +} + +func (t *CreateDatabase_CreateDatabase_Database) GetID() string { + if t == nil { + t = &CreateDatabase_CreateDatabase_Database{} + } + return t.ID +} +func (t *CreateDatabase_CreateDatabase_Database) GetName() string { + if t == nil { + t = &CreateDatabase_CreateDatabase_Database{} + } + return t.Name +} +func (t *CreateDatabase_CreateDatabase_Database) GetOrganizationID() string { + if t == nil { + t = &CreateDatabase_CreateDatabase_Database{} + } + return t.OrganizationID +} +func (t *CreateDatabase_CreateDatabase_Database) GetProvider() *enums.DatabaseProvider { + if t == nil { + t = &CreateDatabase_CreateDatabase_Database{} + } + return &t.Provider +} +func (t *CreateDatabase_CreateDatabase_Database) GetStatus() *enums.DatabaseStatus { + if t == nil { + t = &CreateDatabase_CreateDatabase_Database{} + } + return &t.Status +} +func (t 
*CreateDatabase_CreateDatabase_Database) GetDsn() string { + if t == nil { + t = &CreateDatabase_CreateDatabase_Database{} + } + return t.Dsn +} +func (t *CreateDatabase_CreateDatabase_Database) GetGeo() *string { + if t == nil { + t = &CreateDatabase_CreateDatabase_Database{} + } + return t.Geo +} + +type CreateDatabase_CreateDatabase struct { + Database CreateDatabase_CreateDatabase_Database "json:\"database\" graphql:\"database\"" +} + +func (t *CreateDatabase_CreateDatabase) GetDatabase() *CreateDatabase_CreateDatabase_Database { + if t == nil { + t = &CreateDatabase_CreateDatabase{} + } + return &t.Database +} + +type DeleteDatabase_DeleteDatabase struct { + DeletedID string "json:\"deletedID\" graphql:\"deletedID\"" +} + +func (t *DeleteDatabase_DeleteDatabase) GetDeletedID() string { + if t == nil { + t = &DeleteDatabase_DeleteDatabase{} + } + return t.DeletedID +} + +type GetGroup_Group struct { + ID string "json:\"id\" graphql:\"id\"" + Name string "json:\"name\" graphql:\"name\"" + Description *string "json:\"description,omitempty\" graphql:\"description\"" + PrimaryLocation string "json:\"primaryLocation\" graphql:\"primaryLocation\"" + Locations []string "json:\"locations,omitempty\" graphql:\"locations\"" + Region enums.Region "json:\"region\" graphql:\"region\"" +} + +func (t *GetGroup_Group) GetID() string { + if t == nil { + t = &GetGroup_Group{} + } + return t.ID +} +func (t *GetGroup_Group) GetName() string { + if t == nil { + t = &GetGroup_Group{} + } + return t.Name +} +func (t *GetGroup_Group) GetDescription() *string { + if t == nil { + t = &GetGroup_Group{} + } + return t.Description +} +func (t *GetGroup_Group) GetPrimaryLocation() string { + if t == nil { + t = &GetGroup_Group{} + } + return t.PrimaryLocation +} +func (t *GetGroup_Group) GetLocations() []string { + if t == nil { + t = &GetGroup_Group{} + } + return t.Locations +} +func (t *GetGroup_Group) GetRegion() *enums.Region { + if t == nil { + t = &GetGroup_Group{} + } + return &t.Region +} + +type GetAllGroups_Groups_Edges_Node struct { + ID string "json:\"id\" graphql:\"id\"" + Name string "json:\"name\" graphql:\"name\"" + Description *string "json:\"description,omitempty\" graphql:\"description\"" + PrimaryLocation string "json:\"primaryLocation\" graphql:\"primaryLocation\"" + Locations []string "json:\"locations,omitempty\" graphql:\"locations\"" + Region enums.Region "json:\"region\" graphql:\"region\"" +} + +func (t *GetAllGroups_Groups_Edges_Node) GetID() string { + if t == nil { + t = &GetAllGroups_Groups_Edges_Node{} + } + return t.ID +} +func (t *GetAllGroups_Groups_Edges_Node) GetName() string { + if t == nil { + t = &GetAllGroups_Groups_Edges_Node{} + } + return t.Name +} +func (t *GetAllGroups_Groups_Edges_Node) GetDescription() *string { + if t == nil { + t = &GetAllGroups_Groups_Edges_Node{} + } + return t.Description +} +func (t *GetAllGroups_Groups_Edges_Node) GetPrimaryLocation() string { + if t == nil { + t = &GetAllGroups_Groups_Edges_Node{} + } + return t.PrimaryLocation +} +func (t *GetAllGroups_Groups_Edges_Node) GetLocations() []string { + if t == nil { + t = &GetAllGroups_Groups_Edges_Node{} + } + return t.Locations +} +func (t *GetAllGroups_Groups_Edges_Node) GetRegion() *enums.Region { + if t == nil { + t = &GetAllGroups_Groups_Edges_Node{} + } + return &t.Region +} + +type GetAllGroups_Groups_Edges struct { + Node *GetAllGroups_Groups_Edges_Node "json:\"node,omitempty\" graphql:\"node\"" +} + +func (t *GetAllGroups_Groups_Edges) GetNode() *GetAllGroups_Groups_Edges_Node { + if t 
== nil { + t = &GetAllGroups_Groups_Edges{} + } + return t.Node +} + +type GetAllGroups_Groups struct { + Edges []*GetAllGroups_Groups_Edges "json:\"edges,omitempty\" graphql:\"edges\"" +} + +func (t *GetAllGroups_Groups) GetEdges() []*GetAllGroups_Groups_Edges { + if t == nil { + t = &GetAllGroups_Groups{} + } + return t.Edges +} + +type CreateGroup_CreateGroup_Group struct { + ID string "json:\"id\" graphql:\"id\"" + Name string "json:\"name\" graphql:\"name\"" + Description *string "json:\"description,omitempty\" graphql:\"description\"" + PrimaryLocation string "json:\"primaryLocation\" graphql:\"primaryLocation\"" + Locations []string "json:\"locations,omitempty\" graphql:\"locations\"" + Region enums.Region "json:\"region\" graphql:\"region\"" +} + +func (t *CreateGroup_CreateGroup_Group) GetID() string { + if t == nil { + t = &CreateGroup_CreateGroup_Group{} + } + return t.ID +} +func (t *CreateGroup_CreateGroup_Group) GetName() string { + if t == nil { + t = &CreateGroup_CreateGroup_Group{} + } + return t.Name +} +func (t *CreateGroup_CreateGroup_Group) GetDescription() *string { + if t == nil { + t = &CreateGroup_CreateGroup_Group{} + } + return t.Description +} +func (t *CreateGroup_CreateGroup_Group) GetPrimaryLocation() string { + if t == nil { + t = &CreateGroup_CreateGroup_Group{} + } + return t.PrimaryLocation +} +func (t *CreateGroup_CreateGroup_Group) GetLocations() []string { + if t == nil { + t = &CreateGroup_CreateGroup_Group{} + } + return t.Locations +} +func (t *CreateGroup_CreateGroup_Group) GetRegion() *enums.Region { + if t == nil { + t = &CreateGroup_CreateGroup_Group{} + } + return &t.Region +} + +type CreateGroup_CreateGroup struct { + Group CreateGroup_CreateGroup_Group "json:\"group\" graphql:\"group\"" +} + +func (t *CreateGroup_CreateGroup) GetGroup() *CreateGroup_CreateGroup_Group { + if t == nil { + t = &CreateGroup_CreateGroup{} + } + return &t.Group +} + +type DeleteGroup_DeleteGroup struct { + DeletedID string "json:\"deletedID\" graphql:\"deletedID\"" +} + +func (t *DeleteGroup_DeleteGroup) GetDeletedID() string { + if t == nil { + t = &DeleteGroup_DeleteGroup{} + } + return t.DeletedID +} + +type GetDatabase struct { + Database GetDatabase_Database "json:\"database\" graphql:\"database\"" +} + +func (t *GetDatabase) GetDatabase() *GetDatabase_Database { + if t == nil { + t = &GetDatabase{} + } + return &t.Database +} + +type GetAllDatabases struct { + Databases GetAllDatabases_Databases "json:\"databases\" graphql:\"databases\"" +} + +func (t *GetAllDatabases) GetDatabases() *GetAllDatabases_Databases { + if t == nil { + t = &GetAllDatabases{} + } + return &t.Databases +} + +type CreateDatabase struct { + CreateDatabase CreateDatabase_CreateDatabase "json:\"createDatabase\" graphql:\"createDatabase\"" +} + +func (t *CreateDatabase) GetCreateDatabase() *CreateDatabase_CreateDatabase { + if t == nil { + t = &CreateDatabase{} + } + return &t.CreateDatabase +} + +type DeleteDatabase struct { + DeleteDatabase DeleteDatabase_DeleteDatabase "json:\"deleteDatabase\" graphql:\"deleteDatabase\"" +} + +func (t *DeleteDatabase) GetDeleteDatabase() *DeleteDatabase_DeleteDatabase { + if t == nil { + t = &DeleteDatabase{} + } + return &t.DeleteDatabase +} + +type GetGroup struct { + Group GetGroup_Group "json:\"group\" graphql:\"group\"" +} + +func (t *GetGroup) GetGroup() *GetGroup_Group { + if t == nil { + t = &GetGroup{} + } + return &t.Group +} + +type GetAllGroups struct { + Groups GetAllGroups_Groups "json:\"groups\" graphql:\"groups\"" +} + +func (t 
*GetAllGroups) GetGroups() *GetAllGroups_Groups { + if t == nil { + t = &GetAllGroups{} + } + return &t.Groups +} + +type CreateGroup struct { + CreateGroup CreateGroup_CreateGroup "json:\"createGroup\" graphql:\"createGroup\"" +} + +func (t *CreateGroup) GetCreateGroup() *CreateGroup_CreateGroup { + if t == nil { + t = &CreateGroup{} + } + return &t.CreateGroup +} + +type DeleteGroup struct { + DeleteGroup DeleteGroup_DeleteGroup "json:\"deleteGroup\" graphql:\"deleteGroup\"" +} + +func (t *DeleteGroup) GetDeleteGroup() *DeleteGroup_DeleteGroup { + if t == nil { + t = &DeleteGroup{} + } + return &t.DeleteGroup +} + +const GetDatabaseDocument = `query GetDatabase ($name: String!) { + database(name: $name) { + id + name + organizationID + provider + status + dsn + geo + } +} +` + +func (c *Client) GetDatabase(ctx context.Context, name string, interceptors ...clientv2.RequestInterceptor) (*GetDatabase, error) { + vars := map[string]interface{}{ + "name": name, + } + + var res GetDatabase + if err := c.Client.Post(ctx, "GetDatabase", GetDatabaseDocument, &res, vars, interceptors...); err != nil { + if c.Client.ParseDataWhenErrors { + return &res, err + } + + return nil, err + } + + return &res, nil +} + +const GetAllDatabasesDocument = `query GetAllDatabases { + databases { + edges { + node { + id + name + organizationID + provider + status + dsn + geo + } + } + } +} +` + +func (c *Client) GetAllDatabases(ctx context.Context, interceptors ...clientv2.RequestInterceptor) (*GetAllDatabases, error) { + vars := map[string]interface{}{} + + var res GetAllDatabases + if err := c.Client.Post(ctx, "GetAllDatabases", GetAllDatabasesDocument, &res, vars, interceptors...); err != nil { + if c.Client.ParseDataWhenErrors { + return &res, err + } + + return nil, err + } + + return &res, nil +} + +const CreateDatabaseDocument = `mutation CreateDatabase ($input: CreateDatabaseInput!) { + createDatabase(input: $input) { + database { + id + name + organizationID + provider + status + dsn + geo + } + } +} +` + +func (c *Client) CreateDatabase(ctx context.Context, input CreateDatabaseInput, interceptors ...clientv2.RequestInterceptor) (*CreateDatabase, error) { + vars := map[string]interface{}{ + "input": input, + } + + var res CreateDatabase + if err := c.Client.Post(ctx, "CreateDatabase", CreateDatabaseDocument, &res, vars, interceptors...); err != nil { + if c.Client.ParseDataWhenErrors { + return &res, err + } + + return nil, err + } + + return &res, nil +} + +const DeleteDatabaseDocument = `mutation DeleteDatabase ($name: String!) { + deleteDatabase(name: $name) { + deletedID + } +} +` + +func (c *Client) DeleteDatabase(ctx context.Context, name string, interceptors ...clientv2.RequestInterceptor) (*DeleteDatabase, error) { + vars := map[string]interface{}{ + "name": name, + } + + var res DeleteDatabase + if err := c.Client.Post(ctx, "DeleteDatabase", DeleteDatabaseDocument, &res, vars, interceptors...); err != nil { + if c.Client.ParseDataWhenErrors { + return &res, err + } + + return nil, err + } + + return &res, nil +} + +const GetGroupDocument = `query GetGroup ($name: String!) 
{ + group(name: $name) { + id + name + description + primaryLocation + locations + region + } +} +` + +func (c *Client) GetGroup(ctx context.Context, name string, interceptors ...clientv2.RequestInterceptor) (*GetGroup, error) { + vars := map[string]interface{}{ + "name": name, + } + + var res GetGroup + if err := c.Client.Post(ctx, "GetGroup", GetGroupDocument, &res, vars, interceptors...); err != nil { + if c.Client.ParseDataWhenErrors { + return &res, err + } + + return nil, err + } + + return &res, nil +} + +const GetAllGroupsDocument = `query GetAllGroups { + groups { + edges { + node { + id + name + description + primaryLocation + locations + region + } + } + } +} +` + +func (c *Client) GetAllGroups(ctx context.Context, interceptors ...clientv2.RequestInterceptor) (*GetAllGroups, error) { + vars := map[string]interface{}{} + + var res GetAllGroups + if err := c.Client.Post(ctx, "GetAllGroups", GetAllGroupsDocument, &res, vars, interceptors...); err != nil { + if c.Client.ParseDataWhenErrors { + return &res, err + } + + return nil, err + } + + return &res, nil +} + +const CreateGroupDocument = `mutation CreateGroup ($input: CreateGroupInput!) { + createGroup(input: $input) { + group { + id + name + description + primaryLocation + locations + region + } + } +} +` + +func (c *Client) CreateGroup(ctx context.Context, input CreateGroupInput, interceptors ...clientv2.RequestInterceptor) (*CreateGroup, error) { + vars := map[string]interface{}{ + "input": input, + } + + var res CreateGroup + if err := c.Client.Post(ctx, "CreateGroup", CreateGroupDocument, &res, vars, interceptors...); err != nil { + if c.Client.ParseDataWhenErrors { + return &res, err + } + + return nil, err + } + + return &res, nil +} + +const DeleteGroupDocument = `mutation DeleteGroup ($name: String!) 
{
+  deleteGroup(name: $name) {
+    deletedID
+  }
+}
+`
+
+func (c *Client) DeleteGroup(ctx context.Context, name string, interceptors ...clientv2.RequestInterceptor) (*DeleteGroup, error) {
+	vars := map[string]interface{}{
+		"name": name,
+	}
+
+	var res DeleteGroup
+	if err := c.Client.Post(ctx, "DeleteGroup", DeleteGroupDocument, &res, vars, interceptors...); err != nil {
+		if c.Client.ParseDataWhenErrors {
+			return &res, err
+		}
+
+		return nil, err
+	}
+
+	return &res, nil
+}
+
+var DocumentOperationNames = map[string]string{
+	GetDatabaseDocument:     "GetDatabase",
+	GetAllDatabasesDocument: "GetAllDatabases",
+	CreateDatabaseDocument:  "CreateDatabase",
+	DeleteDatabaseDocument:  "DeleteDatabase",
+	GetGroupDocument:        "GetGroup",
+	GetAllGroupsDocument:    "GetAllGroups",
+	CreateGroupDocument:     "CreateGroup",
+	DeleteGroupDocument:     "DeleteGroup",
+}
diff --git a/pkg/geodeticclient/interceptor.go b/pkg/geodeticclient/interceptor.go
new file mode 100644
index 0000000..b8e21a2
--- /dev/null
+++ b/pkg/geodeticclient/interceptor.go
@@ -0,0 +1,55 @@
+package geodeticclient
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/Yamashou/gqlgenc/clientv2"
+
+	"github.com/datumforge/datum/pkg/sessions"
+)
+
+// WithAuthorization adds the authorization header and session to the client request
+func WithAuthorization(accessToken string, session string) clientv2.RequestInterceptor {
+	return func(
+		ctx context.Context,
+		req *http.Request,
+		gqlInfo *clientv2.GQLRequestInfo,
+		res interface{},
+		next clientv2.RequestInterceptorFunc,
+	) error {
+		// set the authorization header if it's not already set
+		h := req.Header.Get("Authorization")
+		if h == "" {
+			req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", accessToken))
+		}
+
+		// add session cookie
+		if strings.Contains(req.Host, "localhost") {
+			req.AddCookie(sessions.NewDevSessionCookie(session))
+		} else {
+			req.AddCookie(sessions.NewSessionCookie(session))
+		}
+
+		return next(ctx, req, gqlInfo, res)
+	}
+}
+
+// WithLoggingInterceptor adds a http debug logging interceptor
+func WithLoggingInterceptor() clientv2.RequestInterceptor {
+	return func(ctx context.Context, req *http.Request, gqlInfo *clientv2.GQLRequestInfo, res interface{}, next clientv2.RequestInterceptorFunc) error {
+		// note: this prints the header map and the body reader value, not the
+		// body contents
+		fmt.Println("Request header sent:", req.Header)
+		fmt.Println("Request body sent:", req.Body)
+
+		return next(ctx, req, gqlInfo, res)
+	}
+}
+
+// WithEmptyInterceptor adds an empty interceptor
+func WithEmptyInterceptor() clientv2.RequestInterceptor {
+	return func(ctx context.Context, req *http.Request, gqlInfo *clientv2.GQLRequestInfo, res interface{}, next clientv2.RequestInterceptorFunc) error {
+		return next(ctx, req, gqlInfo, res)
+	}
+}
diff --git a/pkg/geodeticclient/models.go b/pkg/geodeticclient/models.go
new file mode 100644
index 0000000..ec95aae
--- /dev/null
+++ b/pkg/geodeticclient/models.go
@@ -0,0 +1,634 @@
+// Code generated by github.com/99designs/gqlgen, DO NOT EDIT.
+
+package geodeticclient
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+	"time"
+
+	"github.com/datumforge/geodetic/pkg/enums"
+)
+
+// CreateDatabaseInput is used for create Database object.
+// Input was generated by ent.
+type CreateDatabaseInput struct { + CreatedAt *time.Time `json:"createdAt,omitempty"` + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + CreatedBy *string `json:"createdBy,omitempty"` + UpdatedBy *string `json:"updatedBy,omitempty"` + // the ID of the organization + OrganizationID string `json:"organizationID"` + // the name to the database + Name string `json:"name"` + // the geo location of the database + Geo *string `json:"geo,omitempty"` + // the DSN to the database + Dsn string `json:"dsn"` + // the auth token used to connect to the database + Token *string `json:"token,omitempty"` + // status of the database + Status *enums.DatabaseStatus `json:"status,omitempty"` + // provider of the database + Provider *enums.DatabaseProvider `json:"provider,omitempty"` + GroupID string `json:"groupID"` +} + +// CreateGroupInput is used for create Group object. +// Input was generated by ent. +type CreateGroupInput struct { + CreatedAt *time.Time `json:"createdAt,omitempty"` + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + CreatedBy *string `json:"createdBy,omitempty"` + UpdatedBy *string `json:"updatedBy,omitempty"` + // the name of the group in turso + Name string `json:"name"` + // the description of the group + Description *string `json:"description,omitempty"` + // the primary of the group + PrimaryLocation string `json:"primaryLocation"` + // the replica locations of the group + Locations []string `json:"locations,omitempty"` + // the auth token used to connect to the group + Token *string `json:"token,omitempty"` + // region the group + Region *enums.Region `json:"region,omitempty"` + DatabaseIDs []string `json:"databaseIDs,omitempty"` +} + +type Database struct { + ID string `json:"id"` + CreatedAt *time.Time `json:"createdAt,omitempty"` + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + CreatedBy *string `json:"createdBy,omitempty"` + UpdatedBy *string `json:"updatedBy,omitempty"` + DeletedAt *time.Time `json:"deletedAt,omitempty"` + DeletedBy *string `json:"deletedBy,omitempty"` + // the ID of the organization + OrganizationID string `json:"organizationID"` + // the name to the database + Name string `json:"name"` + // the geo location of the database + Geo *string `json:"geo,omitempty"` + // the DSN to the database + Dsn string `json:"dsn"` + // the ID of the group + GroupID string `json:"groupID"` + // status of the database + Status enums.DatabaseStatus `json:"status"` + // provider of the database + Provider enums.DatabaseProvider `json:"provider"` + Group *Group `json:"group"` +} + +func (Database) IsNode() {} + +// A connection to a list of items. +type DatabaseConnection struct { + // A list of edges. + Edges []*DatabaseEdge `json:"edges,omitempty"` + // Information to aid in pagination. + PageInfo *PageInfo `json:"pageInfo"` + // Identifies the total count of items in the connection. + TotalCount int64 `json:"totalCount"` +} + +// Return response for createDatabase mutation +type DatabaseCreatePayload struct { + // Created database + Database *Database `json:"database"` +} + +// Return response for deleteDatabase mutation +type DatabaseDeletePayload struct { + // Deleted database ID + DeletedID string `json:"deletedID"` +} + +// An edge in a connection. +type DatabaseEdge struct { + // The item at the end of the edge. + Node *Database `json:"node,omitempty"` + // A cursor for use in pagination. 
+ Cursor string `json:"cursor"` +} + +// Return response for updateDatabase mutation +type DatabaseUpdatePayload struct { + // Updated database + Database *Database `json:"database"` +} + +// DatabaseWhereInput is used for filtering Database objects. +// Input was generated by ent. +type DatabaseWhereInput struct { + Not *DatabaseWhereInput `json:"not,omitempty"` + And []*DatabaseWhereInput `json:"and,omitempty"` + Or []*DatabaseWhereInput `json:"or,omitempty"` + // id field predicates + ID *string `json:"id,omitempty"` + IDNeq *string `json:"idNEQ,omitempty"` + IDIn []string `json:"idIn,omitempty"` + IDNotIn []string `json:"idNotIn,omitempty"` + IDGt *string `json:"idGT,omitempty"` + IDGte *string `json:"idGTE,omitempty"` + IDLt *string `json:"idLT,omitempty"` + IDLte *string `json:"idLTE,omitempty"` + IDEqualFold *string `json:"idEqualFold,omitempty"` + IDContainsFold *string `json:"idContainsFold,omitempty"` + // created_at field predicates + CreatedAt *time.Time `json:"createdAt,omitempty"` + CreatedAtNeq *time.Time `json:"createdAtNEQ,omitempty"` + CreatedAtIn []*time.Time `json:"createdAtIn,omitempty"` + CreatedAtNotIn []*time.Time `json:"createdAtNotIn,omitempty"` + CreatedAtGt *time.Time `json:"createdAtGT,omitempty"` + CreatedAtGte *time.Time `json:"createdAtGTE,omitempty"` + CreatedAtLt *time.Time `json:"createdAtLT,omitempty"` + CreatedAtLte *time.Time `json:"createdAtLTE,omitempty"` + CreatedAtIsNil *bool `json:"createdAtIsNil,omitempty"` + CreatedAtNotNil *bool `json:"createdAtNotNil,omitempty"` + // updated_at field predicates + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + UpdatedAtNeq *time.Time `json:"updatedAtNEQ,omitempty"` + UpdatedAtIn []*time.Time `json:"updatedAtIn,omitempty"` + UpdatedAtNotIn []*time.Time `json:"updatedAtNotIn,omitempty"` + UpdatedAtGt *time.Time `json:"updatedAtGT,omitempty"` + UpdatedAtGte *time.Time `json:"updatedAtGTE,omitempty"` + UpdatedAtLt *time.Time `json:"updatedAtLT,omitempty"` + UpdatedAtLte *time.Time `json:"updatedAtLTE,omitempty"` + UpdatedAtIsNil *bool `json:"updatedAtIsNil,omitempty"` + UpdatedAtNotNil *bool `json:"updatedAtNotNil,omitempty"` + // created_by field predicates + CreatedBy *string `json:"createdBy,omitempty"` + CreatedByNeq *string `json:"createdByNEQ,omitempty"` + CreatedByIn []string `json:"createdByIn,omitempty"` + CreatedByNotIn []string `json:"createdByNotIn,omitempty"` + CreatedByGt *string `json:"createdByGT,omitempty"` + CreatedByGte *string `json:"createdByGTE,omitempty"` + CreatedByLt *string `json:"createdByLT,omitempty"` + CreatedByLte *string `json:"createdByLTE,omitempty"` + CreatedByContains *string `json:"createdByContains,omitempty"` + CreatedByHasPrefix *string `json:"createdByHasPrefix,omitempty"` + CreatedByHasSuffix *string `json:"createdByHasSuffix,omitempty"` + CreatedByIsNil *bool `json:"createdByIsNil,omitempty"` + CreatedByNotNil *bool `json:"createdByNotNil,omitempty"` + CreatedByEqualFold *string `json:"createdByEqualFold,omitempty"` + CreatedByContainsFold *string `json:"createdByContainsFold,omitempty"` + // updated_by field predicates + UpdatedBy *string `json:"updatedBy,omitempty"` + UpdatedByNeq *string `json:"updatedByNEQ,omitempty"` + UpdatedByIn []string `json:"updatedByIn,omitempty"` + UpdatedByNotIn []string `json:"updatedByNotIn,omitempty"` + UpdatedByGt *string `json:"updatedByGT,omitempty"` + UpdatedByGte *string `json:"updatedByGTE,omitempty"` + UpdatedByLt *string `json:"updatedByLT,omitempty"` + UpdatedByLte *string `json:"updatedByLTE,omitempty"` + UpdatedByContains 
*string `json:"updatedByContains,omitempty"` + UpdatedByHasPrefix *string `json:"updatedByHasPrefix,omitempty"` + UpdatedByHasSuffix *string `json:"updatedByHasSuffix,omitempty"` + UpdatedByIsNil *bool `json:"updatedByIsNil,omitempty"` + UpdatedByNotNil *bool `json:"updatedByNotNil,omitempty"` + UpdatedByEqualFold *string `json:"updatedByEqualFold,omitempty"` + UpdatedByContainsFold *string `json:"updatedByContainsFold,omitempty"` + // deleted_at field predicates + DeletedAt *time.Time `json:"deletedAt,omitempty"` + DeletedAtNeq *time.Time `json:"deletedAtNEQ,omitempty"` + DeletedAtIn []*time.Time `json:"deletedAtIn,omitempty"` + DeletedAtNotIn []*time.Time `json:"deletedAtNotIn,omitempty"` + DeletedAtGt *time.Time `json:"deletedAtGT,omitempty"` + DeletedAtGte *time.Time `json:"deletedAtGTE,omitempty"` + DeletedAtLt *time.Time `json:"deletedAtLT,omitempty"` + DeletedAtLte *time.Time `json:"deletedAtLTE,omitempty"` + DeletedAtIsNil *bool `json:"deletedAtIsNil,omitempty"` + DeletedAtNotNil *bool `json:"deletedAtNotNil,omitempty"` + // deleted_by field predicates + DeletedBy *string `json:"deletedBy,omitempty"` + DeletedByNeq *string `json:"deletedByNEQ,omitempty"` + DeletedByIn []string `json:"deletedByIn,omitempty"` + DeletedByNotIn []string `json:"deletedByNotIn,omitempty"` + DeletedByGt *string `json:"deletedByGT,omitempty"` + DeletedByGte *string `json:"deletedByGTE,omitempty"` + DeletedByLt *string `json:"deletedByLT,omitempty"` + DeletedByLte *string `json:"deletedByLTE,omitempty"` + DeletedByContains *string `json:"deletedByContains,omitempty"` + DeletedByHasPrefix *string `json:"deletedByHasPrefix,omitempty"` + DeletedByHasSuffix *string `json:"deletedByHasSuffix,omitempty"` + DeletedByIsNil *bool `json:"deletedByIsNil,omitempty"` + DeletedByNotNil *bool `json:"deletedByNotNil,omitempty"` + DeletedByEqualFold *string `json:"deletedByEqualFold,omitempty"` + DeletedByContainsFold *string `json:"deletedByContainsFold,omitempty"` + // organization_id field predicates + OrganizationID *string `json:"organizationID,omitempty"` + OrganizationIDNeq *string `json:"organizationIDNEQ,omitempty"` + OrganizationIDIn []string `json:"organizationIDIn,omitempty"` + OrganizationIDNotIn []string `json:"organizationIDNotIn,omitempty"` + OrganizationIDGt *string `json:"organizationIDGT,omitempty"` + OrganizationIDGte *string `json:"organizationIDGTE,omitempty"` + OrganizationIDLt *string `json:"organizationIDLT,omitempty"` + OrganizationIDLte *string `json:"organizationIDLTE,omitempty"` + OrganizationIDContains *string `json:"organizationIDContains,omitempty"` + OrganizationIDHasPrefix *string `json:"organizationIDHasPrefix,omitempty"` + OrganizationIDHasSuffix *string `json:"organizationIDHasSuffix,omitempty"` + OrganizationIDEqualFold *string `json:"organizationIDEqualFold,omitempty"` + OrganizationIDContainsFold *string `json:"organizationIDContainsFold,omitempty"` + // name field predicates + Name *string `json:"name,omitempty"` + NameNeq *string `json:"nameNEQ,omitempty"` + NameIn []string `json:"nameIn,omitempty"` + NameNotIn []string `json:"nameNotIn,omitempty"` + NameGt *string `json:"nameGT,omitempty"` + NameGte *string `json:"nameGTE,omitempty"` + NameLt *string `json:"nameLT,omitempty"` + NameLte *string `json:"nameLTE,omitempty"` + NameContains *string `json:"nameContains,omitempty"` + NameHasPrefix *string `json:"nameHasPrefix,omitempty"` + NameHasSuffix *string `json:"nameHasSuffix,omitempty"` + NameEqualFold *string `json:"nameEqualFold,omitempty"` + NameContainsFold *string 
`json:"nameContainsFold,omitempty"` + // geo field predicates + Geo *string `json:"geo,omitempty"` + GeoNeq *string `json:"geoNEQ,omitempty"` + GeoIn []string `json:"geoIn,omitempty"` + GeoNotIn []string `json:"geoNotIn,omitempty"` + GeoGt *string `json:"geoGT,omitempty"` + GeoGte *string `json:"geoGTE,omitempty"` + GeoLt *string `json:"geoLT,omitempty"` + GeoLte *string `json:"geoLTE,omitempty"` + GeoContains *string `json:"geoContains,omitempty"` + GeoHasPrefix *string `json:"geoHasPrefix,omitempty"` + GeoHasSuffix *string `json:"geoHasSuffix,omitempty"` + GeoIsNil *bool `json:"geoIsNil,omitempty"` + GeoNotNil *bool `json:"geoNotNil,omitempty"` + GeoEqualFold *string `json:"geoEqualFold,omitempty"` + GeoContainsFold *string `json:"geoContainsFold,omitempty"` + // dsn field predicates + Dsn *string `json:"dsn,omitempty"` + DsnNeq *string `json:"dsnNEQ,omitempty"` + DsnIn []string `json:"dsnIn,omitempty"` + DsnNotIn []string `json:"dsnNotIn,omitempty"` + DsnGt *string `json:"dsnGT,omitempty"` + DsnGte *string `json:"dsnGTE,omitempty"` + DsnLt *string `json:"dsnLT,omitempty"` + DsnLte *string `json:"dsnLTE,omitempty"` + DsnContains *string `json:"dsnContains,omitempty"` + DsnHasPrefix *string `json:"dsnHasPrefix,omitempty"` + DsnHasSuffix *string `json:"dsnHasSuffix,omitempty"` + DsnEqualFold *string `json:"dsnEqualFold,omitempty"` + DsnContainsFold *string `json:"dsnContainsFold,omitempty"` + // group_id field predicates + GroupID *string `json:"groupID,omitempty"` + GroupIDNeq *string `json:"groupIDNEQ,omitempty"` + GroupIDIn []string `json:"groupIDIn,omitempty"` + GroupIDNotIn []string `json:"groupIDNotIn,omitempty"` + GroupIDGt *string `json:"groupIDGT,omitempty"` + GroupIDGte *string `json:"groupIDGTE,omitempty"` + GroupIDLt *string `json:"groupIDLT,omitempty"` + GroupIDLte *string `json:"groupIDLTE,omitempty"` + GroupIDContains *string `json:"groupIDContains,omitempty"` + GroupIDHasPrefix *string `json:"groupIDHasPrefix,omitempty"` + GroupIDHasSuffix *string `json:"groupIDHasSuffix,omitempty"` + GroupIDEqualFold *string `json:"groupIDEqualFold,omitempty"` + GroupIDContainsFold *string `json:"groupIDContainsFold,omitempty"` + // status field predicates + Status *enums.DatabaseStatus `json:"status,omitempty"` + StatusNeq *enums.DatabaseStatus `json:"statusNEQ,omitempty"` + StatusIn []enums.DatabaseStatus `json:"statusIn,omitempty"` + StatusNotIn []enums.DatabaseStatus `json:"statusNotIn,omitempty"` + // provider field predicates + Provider *enums.DatabaseProvider `json:"provider,omitempty"` + ProviderNeq *enums.DatabaseProvider `json:"providerNEQ,omitempty"` + ProviderIn []enums.DatabaseProvider `json:"providerIn,omitempty"` + ProviderNotIn []enums.DatabaseProvider `json:"providerNotIn,omitempty"` + // group edge predicates + HasGroup *bool `json:"hasGroup,omitempty"` + HasGroupWith []*GroupWhereInput `json:"hasGroupWith,omitempty"` +} + +type Group struct { + ID string `json:"id"` + CreatedAt *time.Time `json:"createdAt,omitempty"` + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + CreatedBy *string `json:"createdBy,omitempty"` + UpdatedBy *string `json:"updatedBy,omitempty"` + DeletedAt *time.Time `json:"deletedAt,omitempty"` + DeletedBy *string `json:"deletedBy,omitempty"` + // the name of the group in turso + Name string `json:"name"` + // the description of the group + Description *string `json:"description,omitempty"` + // the primary of the group + PrimaryLocation string `json:"primaryLocation"` + // the replica locations of the group + Locations []string 
`json:"locations,omitempty"` + // region the group + Region enums.Region `json:"region"` + Databases []*Database `json:"databases,omitempty"` +} + +func (Group) IsNode() {} + +// A connection to a list of items. +type GroupConnection struct { + // A list of edges. + Edges []*GroupEdge `json:"edges,omitempty"` + // Information to aid in pagination. + PageInfo *PageInfo `json:"pageInfo"` + // Identifies the total count of items in the connection. + TotalCount int64 `json:"totalCount"` +} + +// Return response for createGroup mutation +type GroupCreatePayload struct { + // Created group + Group *Group `json:"group"` +} + +// Return response for deleteGroup mutation +type GroupDeletePayload struct { + // Deleted group ID + DeletedID string `json:"deletedID"` +} + +// An edge in a connection. +type GroupEdge struct { + // The item at the end of the edge. + Node *Group `json:"node,omitempty"` + // A cursor for use in pagination. + Cursor string `json:"cursor"` +} + +// Return response for updateGroup mutation +type GroupUpdatePayload struct { + // Updated group + Group *Group `json:"group"` +} + +// GroupWhereInput is used for filtering Group objects. +// Input was generated by ent. +type GroupWhereInput struct { + Not *GroupWhereInput `json:"not,omitempty"` + And []*GroupWhereInput `json:"and,omitempty"` + Or []*GroupWhereInput `json:"or,omitempty"` + // id field predicates + ID *string `json:"id,omitempty"` + IDNeq *string `json:"idNEQ,omitempty"` + IDIn []string `json:"idIn,omitempty"` + IDNotIn []string `json:"idNotIn,omitempty"` + IDGt *string `json:"idGT,omitempty"` + IDGte *string `json:"idGTE,omitempty"` + IDLt *string `json:"idLT,omitempty"` + IDLte *string `json:"idLTE,omitempty"` + IDEqualFold *string `json:"idEqualFold,omitempty"` + IDContainsFold *string `json:"idContainsFold,omitempty"` + // created_at field predicates + CreatedAt *time.Time `json:"createdAt,omitempty"` + CreatedAtNeq *time.Time `json:"createdAtNEQ,omitempty"` + CreatedAtIn []*time.Time `json:"createdAtIn,omitempty"` + CreatedAtNotIn []*time.Time `json:"createdAtNotIn,omitempty"` + CreatedAtGt *time.Time `json:"createdAtGT,omitempty"` + CreatedAtGte *time.Time `json:"createdAtGTE,omitempty"` + CreatedAtLt *time.Time `json:"createdAtLT,omitempty"` + CreatedAtLte *time.Time `json:"createdAtLTE,omitempty"` + CreatedAtIsNil *bool `json:"createdAtIsNil,omitempty"` + CreatedAtNotNil *bool `json:"createdAtNotNil,omitempty"` + // updated_at field predicates + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + UpdatedAtNeq *time.Time `json:"updatedAtNEQ,omitempty"` + UpdatedAtIn []*time.Time `json:"updatedAtIn,omitempty"` + UpdatedAtNotIn []*time.Time `json:"updatedAtNotIn,omitempty"` + UpdatedAtGt *time.Time `json:"updatedAtGT,omitempty"` + UpdatedAtGte *time.Time `json:"updatedAtGTE,omitempty"` + UpdatedAtLt *time.Time `json:"updatedAtLT,omitempty"` + UpdatedAtLte *time.Time `json:"updatedAtLTE,omitempty"` + UpdatedAtIsNil *bool `json:"updatedAtIsNil,omitempty"` + UpdatedAtNotNil *bool `json:"updatedAtNotNil,omitempty"` + // created_by field predicates + CreatedBy *string `json:"createdBy,omitempty"` + CreatedByNeq *string `json:"createdByNEQ,omitempty"` + CreatedByIn []string `json:"createdByIn,omitempty"` + CreatedByNotIn []string `json:"createdByNotIn,omitempty"` + CreatedByGt *string `json:"createdByGT,omitempty"` + CreatedByGte *string `json:"createdByGTE,omitempty"` + CreatedByLt *string `json:"createdByLT,omitempty"` + CreatedByLte *string `json:"createdByLTE,omitempty"` + CreatedByContains *string 
`json:"createdByContains,omitempty"` + CreatedByHasPrefix *string `json:"createdByHasPrefix,omitempty"` + CreatedByHasSuffix *string `json:"createdByHasSuffix,omitempty"` + CreatedByIsNil *bool `json:"createdByIsNil,omitempty"` + CreatedByNotNil *bool `json:"createdByNotNil,omitempty"` + CreatedByEqualFold *string `json:"createdByEqualFold,omitempty"` + CreatedByContainsFold *string `json:"createdByContainsFold,omitempty"` + // updated_by field predicates + UpdatedBy *string `json:"updatedBy,omitempty"` + UpdatedByNeq *string `json:"updatedByNEQ,omitempty"` + UpdatedByIn []string `json:"updatedByIn,omitempty"` + UpdatedByNotIn []string `json:"updatedByNotIn,omitempty"` + UpdatedByGt *string `json:"updatedByGT,omitempty"` + UpdatedByGte *string `json:"updatedByGTE,omitempty"` + UpdatedByLt *string `json:"updatedByLT,omitempty"` + UpdatedByLte *string `json:"updatedByLTE,omitempty"` + UpdatedByContains *string `json:"updatedByContains,omitempty"` + UpdatedByHasPrefix *string `json:"updatedByHasPrefix,omitempty"` + UpdatedByHasSuffix *string `json:"updatedByHasSuffix,omitempty"` + UpdatedByIsNil *bool `json:"updatedByIsNil,omitempty"` + UpdatedByNotNil *bool `json:"updatedByNotNil,omitempty"` + UpdatedByEqualFold *string `json:"updatedByEqualFold,omitempty"` + UpdatedByContainsFold *string `json:"updatedByContainsFold,omitempty"` + // deleted_at field predicates + DeletedAt *time.Time `json:"deletedAt,omitempty"` + DeletedAtNeq *time.Time `json:"deletedAtNEQ,omitempty"` + DeletedAtIn []*time.Time `json:"deletedAtIn,omitempty"` + DeletedAtNotIn []*time.Time `json:"deletedAtNotIn,omitempty"` + DeletedAtGt *time.Time `json:"deletedAtGT,omitempty"` + DeletedAtGte *time.Time `json:"deletedAtGTE,omitempty"` + DeletedAtLt *time.Time `json:"deletedAtLT,omitempty"` + DeletedAtLte *time.Time `json:"deletedAtLTE,omitempty"` + DeletedAtIsNil *bool `json:"deletedAtIsNil,omitempty"` + DeletedAtNotNil *bool `json:"deletedAtNotNil,omitempty"` + // deleted_by field predicates + DeletedBy *string `json:"deletedBy,omitempty"` + DeletedByNeq *string `json:"deletedByNEQ,omitempty"` + DeletedByIn []string `json:"deletedByIn,omitempty"` + DeletedByNotIn []string `json:"deletedByNotIn,omitempty"` + DeletedByGt *string `json:"deletedByGT,omitempty"` + DeletedByGte *string `json:"deletedByGTE,omitempty"` + DeletedByLt *string `json:"deletedByLT,omitempty"` + DeletedByLte *string `json:"deletedByLTE,omitempty"` + DeletedByContains *string `json:"deletedByContains,omitempty"` + DeletedByHasPrefix *string `json:"deletedByHasPrefix,omitempty"` + DeletedByHasSuffix *string `json:"deletedByHasSuffix,omitempty"` + DeletedByIsNil *bool `json:"deletedByIsNil,omitempty"` + DeletedByNotNil *bool `json:"deletedByNotNil,omitempty"` + DeletedByEqualFold *string `json:"deletedByEqualFold,omitempty"` + DeletedByContainsFold *string `json:"deletedByContainsFold,omitempty"` + // name field predicates + Name *string `json:"name,omitempty"` + NameNeq *string `json:"nameNEQ,omitempty"` + NameIn []string `json:"nameIn,omitempty"` + NameNotIn []string `json:"nameNotIn,omitempty"` + NameGt *string `json:"nameGT,omitempty"` + NameGte *string `json:"nameGTE,omitempty"` + NameLt *string `json:"nameLT,omitempty"` + NameLte *string `json:"nameLTE,omitempty"` + NameContains *string `json:"nameContains,omitempty"` + NameHasPrefix *string `json:"nameHasPrefix,omitempty"` + NameHasSuffix *string `json:"nameHasSuffix,omitempty"` + NameEqualFold *string `json:"nameEqualFold,omitempty"` + NameContainsFold *string `json:"nameContainsFold,omitempty"` + // 
description field predicates + Description *string `json:"description,omitempty"` + DescriptionNeq *string `json:"descriptionNEQ,omitempty"` + DescriptionIn []string `json:"descriptionIn,omitempty"` + DescriptionNotIn []string `json:"descriptionNotIn,omitempty"` + DescriptionGt *string `json:"descriptionGT,omitempty"` + DescriptionGte *string `json:"descriptionGTE,omitempty"` + DescriptionLt *string `json:"descriptionLT,omitempty"` + DescriptionLte *string `json:"descriptionLTE,omitempty"` + DescriptionContains *string `json:"descriptionContains,omitempty"` + DescriptionHasPrefix *string `json:"descriptionHasPrefix,omitempty"` + DescriptionHasSuffix *string `json:"descriptionHasSuffix,omitempty"` + DescriptionIsNil *bool `json:"descriptionIsNil,omitempty"` + DescriptionNotNil *bool `json:"descriptionNotNil,omitempty"` + DescriptionEqualFold *string `json:"descriptionEqualFold,omitempty"` + DescriptionContainsFold *string `json:"descriptionContainsFold,omitempty"` + // primary_location field predicates + PrimaryLocation *string `json:"primaryLocation,omitempty"` + PrimaryLocationNeq *string `json:"primaryLocationNEQ,omitempty"` + PrimaryLocationIn []string `json:"primaryLocationIn,omitempty"` + PrimaryLocationNotIn []string `json:"primaryLocationNotIn,omitempty"` + PrimaryLocationGt *string `json:"primaryLocationGT,omitempty"` + PrimaryLocationGte *string `json:"primaryLocationGTE,omitempty"` + PrimaryLocationLt *string `json:"primaryLocationLT,omitempty"` + PrimaryLocationLte *string `json:"primaryLocationLTE,omitempty"` + PrimaryLocationContains *string `json:"primaryLocationContains,omitempty"` + PrimaryLocationHasPrefix *string `json:"primaryLocationHasPrefix,omitempty"` + PrimaryLocationHasSuffix *string `json:"primaryLocationHasSuffix,omitempty"` + PrimaryLocationEqualFold *string `json:"primaryLocationEqualFold,omitempty"` + PrimaryLocationContainsFold *string `json:"primaryLocationContainsFold,omitempty"` + // region field predicates + Region *enums.Region `json:"region,omitempty"` + RegionNeq *enums.Region `json:"regionNEQ,omitempty"` + RegionIn []enums.Region `json:"regionIn,omitempty"` + RegionNotIn []enums.Region `json:"regionNotIn,omitempty"` + // databases edge predicates + HasDatabases *bool `json:"hasDatabases,omitempty"` + HasDatabasesWith []*DatabaseWhereInput `json:"hasDatabasesWith,omitempty"` +} + +type Mutation struct { +} + +// Information about pagination in a connection. +// https://relay.dev/graphql/connections.htm#sec-undefined.PageInfo +type PageInfo struct { + // When paginating forwards, are there more items? + HasNextPage bool `json:"hasNextPage"` + // When paginating backwards, are there more items? + HasPreviousPage bool `json:"hasPreviousPage"` + // When paginating backwards, the cursor to continue. + StartCursor *string `json:"startCursor,omitempty"` + // When paginating forwards, the cursor to continue. + EndCursor *string `json:"endCursor,omitempty"` +} + +type Query struct { +} + +// UpdateDatabaseInput is used for update Database object. +// Input was generated by ent. 
+type UpdateDatabaseInput struct {
+	UpdatedAt      *time.Time `json:"updatedAt,omitempty"`
+	ClearUpdatedAt *bool      `json:"clearUpdatedAt,omitempty"`
+	UpdatedBy      *string    `json:"updatedBy,omitempty"`
+	ClearUpdatedBy *bool      `json:"clearUpdatedBy,omitempty"`
+	// the ID of the organization
+	OrganizationID *string `json:"organizationID,omitempty"`
+	// the name to the database
+	Name *string `json:"name,omitempty"`
+	// the geo location of the database
+	Geo      *string `json:"geo,omitempty"`
+	ClearGeo *bool   `json:"clearGeo,omitempty"`
+	// the DSN to the database
+	Dsn *string `json:"dsn,omitempty"`
+	// the auth token used to connect to the database
+	Token      *string `json:"token,omitempty"`
+	ClearToken *bool   `json:"clearToken,omitempty"`
+	// status of the database
+	Status *enums.DatabaseStatus `json:"status,omitempty"`
+	// provider of the database
+	Provider *enums.DatabaseProvider `json:"provider,omitempty"`
+	GroupID  *string                 `json:"groupID,omitempty"`
+}
+
+// UpdateGroupInput is used for update Group object.
+// Input was generated by ent.
+type UpdateGroupInput struct {
+	UpdatedAt      *time.Time `json:"updatedAt,omitempty"`
+	ClearUpdatedAt *bool      `json:"clearUpdatedAt,omitempty"`
+	UpdatedBy      *string    `json:"updatedBy,omitempty"`
+	ClearUpdatedBy *bool      `json:"clearUpdatedBy,omitempty"`
+	// the name of the group in turso
+	Name *string `json:"name,omitempty"`
+	// the description of the group
+	Description      *string `json:"description,omitempty"`
+	ClearDescription *bool   `json:"clearDescription,omitempty"`
+	// the primary of the group
+	PrimaryLocation *string `json:"primaryLocation,omitempty"`
+	// the replica locations of the group
+	Locations       []string `json:"locations,omitempty"`
+	AppendLocations []string `json:"appendLocations,omitempty"`
+	ClearLocations  *bool    `json:"clearLocations,omitempty"`
+	// the auth token used to connect to the group
+	Token      *string `json:"token,omitempty"`
+	ClearToken *bool   `json:"clearToken,omitempty"`
+	// region the group
+	Region            *enums.Region `json:"region,omitempty"`
+	AddDatabaseIDs    []string      `json:"addDatabaseIDs,omitempty"`
+	RemoveDatabaseIDs []string      `json:"removeDatabaseIDs,omitempty"`
+	ClearDatabases    *bool         `json:"clearDatabases,omitempty"`
+}
+
+// Possible directions in which to order a list of items when provided an `orderBy` argument.
+type OrderDirection string
+
+const (
+	// Specifies an ascending order for a given `orderBy` argument.
+	OrderDirectionAsc OrderDirection = "ASC"
+	// Specifies a descending order for a given `orderBy` argument.
+	OrderDirectionDesc OrderDirection = "DESC"
+)
+
+var AllOrderDirection = []OrderDirection{
+	OrderDirectionAsc,
+	OrderDirectionDesc,
+}
+
+func (e OrderDirection) IsValid() bool {
+	switch e {
+	case OrderDirectionAsc, OrderDirectionDesc:
+		return true
+	}
+	return false
+}
+
+func (e OrderDirection) String() string {
+	return string(e)
+}
+
+func (e *OrderDirection) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("enums must be strings")
+	}
+
+	*e = OrderDirection(str)
+	if !e.IsValid() {
+		return fmt.Errorf("%s is not a valid OrderDirection", str)
+	}
+	return nil
+}
+
+func (e OrderDirection) MarshalGQL(w io.Writer) {
+	fmt.Fprint(w, strconv.Quote(e.String()))
+}
diff --git a/query/.gitkeep b/query/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/query/database.graphql b/query/database.graphql
new file mode 100644
index 0000000..7f5b37b
--- /dev/null
+++ b/query/database.graphql
@@ -0,0 +1,47 @@
+query GetDatabase($name: String!)
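+# Sketch, not part of the generated operations: per gqlgenc.yml, gqlgenc
+# compiles each named operation in this file into a typed Go client method.
+# The method names below are assumptions derived from the operation names:
+#
+#   db, err := client.GetDatabase(ctx, "my-db")   // single database by name
+#   all, err := client.GetAllDatabases(ctx)       // unfiltered connection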
{ + database(name: $name) { + id + name + organizationID + provider + status + dsn + geo + } +} + +query GetAllDatabases { + databases { + edges { + node { + id + name + organizationID + provider + status + dsn + geo + } + } + } +} + +mutation CreateDatabase($input: CreateDatabaseInput!) { + createDatabase(input: $input) { + database { + id + name + organizationID + provider + status + dsn + geo + } + } +} + +mutation DeleteDatabase($name: String!) { + deleteDatabase(name: $name) { + deletedID + } +} \ No newline at end of file diff --git a/query/group.graphql b/query/group.graphql new file mode 100644 index 0000000..cc0c317 --- /dev/null +++ b/query/group.graphql @@ -0,0 +1,44 @@ +query GetGroup($name: String!) { + group(name: $name) { + id + name + description + primaryLocation + locations + region + } +} + +query GetAllGroups { + groups { + edges { + node { + id + name + description + primaryLocation + locations + region + } + } + } +} + +mutation CreateGroup($input: CreateGroupInput!) { + createGroup(input: $input) { + group { + id + name + description + primaryLocation + locations + region + } + } +} + +mutation DeleteGroup($name: String!) { + deleteGroup(name: $name) { + deletedID + } +} \ No newline at end of file diff --git a/renovate.json b/renovate.json new file mode 100644 index 0000000..1ef04ec --- /dev/null +++ b/renovate.json @@ -0,0 +1,8 @@ +{ + "extends": [ + "config:base" + ], + "postUpdateOptions": [ + "gomodTidy" + ] +} \ No newline at end of file diff --git a/schema.graphql b/schema.graphql new file mode 100644 index 0000000..20dfe47 --- /dev/null +++ b/schema.graphql @@ -0,0 +1,971 @@ +directive @goField(forceResolver: Boolean, name: String) on FIELD_DEFINITION | INPUT_FIELD_DEFINITION +directive @goModel(model: String, models: [String!]) on OBJECT | INPUT_OBJECT | SCALAR | ENUM | INTERFACE | UNION +""" +CreateDatabaseInput is used for create Database object. +Input was generated by ent. +""" +input CreateDatabaseInput { + createdAt: Time + updatedAt: Time + createdBy: String + updatedBy: String + """ + the ID of the organization + """ + organizationID: String! + """ + the name to the database + """ + name: String! + """ + the geo location of the database + """ + geo: String + """ + the DSN to the database + """ + dsn: String! + """ + the auth token used to connect to the database + """ + token: String + """ + status of the database + """ + status: DatabaseDatabaseStatus + """ + provider of the database + """ + provider: DatabaseDatabaseProvider + groupID: ID! +} +""" +CreateGroupInput is used for create Group object. +Input was generated by ent. +""" +input CreateGroupInput { + createdAt: Time + updatedAt: Time + createdBy: String + updatedBy: String + """ + the name of the group in turso + """ + name: String! + """ + the description of the group + """ + description: String + """ + the primary of the group + """ + primaryLocation: String! + """ + the replica locations of the group + """ + locations: [String!] + """ + the auth token used to connect to the group + """ + token: String + """ + region the group + """ + region: GroupRegion + databaseIDs: [ID!] +} +""" +Define a Relay Cursor type: +https://relay.dev/graphql/connections.htm#sec-Cursor +""" +scalar Cursor +type Database implements Node { + id: ID! + createdAt: Time + updatedAt: Time + createdBy: String + updatedBy: String + deletedAt: Time + deletedBy: String + """ + the ID of the organization + """ + organizationID: String! + """ + the name to the database + """ + name: String! 
+ """ + the geo location of the database + """ + geo: String + """ + the DSN to the database + """ + dsn: String! + """ + the ID of the group + """ + groupID: ID! + """ + status of the database + """ + status: DatabaseDatabaseStatus! + """ + provider of the database + """ + provider: DatabaseDatabaseProvider! + group: Group! +} +""" +A connection to a list of items. +""" +type DatabaseConnection { + """ + A list of edges. + """ + edges: [DatabaseEdge] + """ + Information to aid in pagination. + """ + pageInfo: PageInfo! + """ + Identifies the total count of items in the connection. + """ + totalCount: Int! +} +""" +Return response for createDatabase mutation +""" +type DatabaseCreatePayload { + """ + Created database + """ + database: Database! +} +""" +DatabaseDatabaseProvider is enum for the field provider +""" +enum DatabaseDatabaseProvider @goModel(model: "github.com/datumforge/geodetic/pkg/enums.DatabaseProvider") { + LOCAL + TURSO +} +""" +DatabaseDatabaseStatus is enum for the field status +""" +enum DatabaseDatabaseStatus @goModel(model: "github.com/datumforge/geodetic/pkg/enums.DatabaseStatus") { + ACTIVE + CREATING + DELETING + DELETED +} +""" +Return response for deleteDatabase mutation +""" +type DatabaseDeletePayload { + """ + Deleted database ID + """ + deletedID: ID! +} +""" +An edge in a connection. +""" +type DatabaseEdge { + """ + The item at the end of the edge. + """ + node: Database + """ + A cursor for use in pagination. + """ + cursor: Cursor! +} +""" +Return response for updateDatabase mutation +""" +type DatabaseUpdatePayload { + """ + Updated database + """ + database: Database! +} +""" +DatabaseWhereInput is used for filtering Database objects. +Input was generated by ent. +""" +input DatabaseWhereInput { + not: DatabaseWhereInput + and: [DatabaseWhereInput!] + or: [DatabaseWhereInput!] + """ + id field predicates + """ + id: ID + idNEQ: ID + idIn: [ID!] + idNotIn: [ID!] + idGT: ID + idGTE: ID + idLT: ID + idLTE: ID + idEqualFold: ID + idContainsFold: ID + """ + created_at field predicates + """ + createdAt: Time + createdAtNEQ: Time + createdAtIn: [Time!] + createdAtNotIn: [Time!] + createdAtGT: Time + createdAtGTE: Time + createdAtLT: Time + createdAtLTE: Time + createdAtIsNil: Boolean + createdAtNotNil: Boolean + """ + updated_at field predicates + """ + updatedAt: Time + updatedAtNEQ: Time + updatedAtIn: [Time!] + updatedAtNotIn: [Time!] + updatedAtGT: Time + updatedAtGTE: Time + updatedAtLT: Time + updatedAtLTE: Time + updatedAtIsNil: Boolean + updatedAtNotNil: Boolean + """ + created_by field predicates + """ + createdBy: String + createdByNEQ: String + createdByIn: [String!] + createdByNotIn: [String!] + createdByGT: String + createdByGTE: String + createdByLT: String + createdByLTE: String + createdByContains: String + createdByHasPrefix: String + createdByHasSuffix: String + createdByIsNil: Boolean + createdByNotNil: Boolean + createdByEqualFold: String + createdByContainsFold: String + """ + updated_by field predicates + """ + updatedBy: String + updatedByNEQ: String + updatedByIn: [String!] + updatedByNotIn: [String!] + updatedByGT: String + updatedByGTE: String + updatedByLT: String + updatedByLTE: String + updatedByContains: String + updatedByHasPrefix: String + updatedByHasSuffix: String + updatedByIsNil: Boolean + updatedByNotNil: Boolean + updatedByEqualFold: String + updatedByContainsFold: String + """ + deleted_at field predicates + """ + deletedAt: Time + deletedAtNEQ: Time + deletedAtIn: [Time!] + deletedAtNotIn: [Time!] 
+ deletedAtGT: Time + deletedAtGTE: Time + deletedAtLT: Time + deletedAtLTE: Time + deletedAtIsNil: Boolean + deletedAtNotNil: Boolean + """ + deleted_by field predicates + """ + deletedBy: String + deletedByNEQ: String + deletedByIn: [String!] + deletedByNotIn: [String!] + deletedByGT: String + deletedByGTE: String + deletedByLT: String + deletedByLTE: String + deletedByContains: String + deletedByHasPrefix: String + deletedByHasSuffix: String + deletedByIsNil: Boolean + deletedByNotNil: Boolean + deletedByEqualFold: String + deletedByContainsFold: String + """ + organization_id field predicates + """ + organizationID: String + organizationIDNEQ: String + organizationIDIn: [String!] + organizationIDNotIn: [String!] + organizationIDGT: String + organizationIDGTE: String + organizationIDLT: String + organizationIDLTE: String + organizationIDContains: String + organizationIDHasPrefix: String + organizationIDHasSuffix: String + organizationIDEqualFold: String + organizationIDContainsFold: String + """ + name field predicates + """ + name: String + nameNEQ: String + nameIn: [String!] + nameNotIn: [String!] + nameGT: String + nameGTE: String + nameLT: String + nameLTE: String + nameContains: String + nameHasPrefix: String + nameHasSuffix: String + nameEqualFold: String + nameContainsFold: String + """ + geo field predicates + """ + geo: String + geoNEQ: String + geoIn: [String!] + geoNotIn: [String!] + geoGT: String + geoGTE: String + geoLT: String + geoLTE: String + geoContains: String + geoHasPrefix: String + geoHasSuffix: String + geoIsNil: Boolean + geoNotNil: Boolean + geoEqualFold: String + geoContainsFold: String + """ + dsn field predicates + """ + dsn: String + dsnNEQ: String + dsnIn: [String!] + dsnNotIn: [String!] + dsnGT: String + dsnGTE: String + dsnLT: String + dsnLTE: String + dsnContains: String + dsnHasPrefix: String + dsnHasSuffix: String + dsnEqualFold: String + dsnContainsFold: String + """ + group_id field predicates + """ + groupID: ID + groupIDNEQ: ID + groupIDIn: [ID!] + groupIDNotIn: [ID!] + groupIDGT: ID + groupIDGTE: ID + groupIDLT: ID + groupIDLTE: ID + groupIDContains: ID + groupIDHasPrefix: ID + groupIDHasSuffix: ID + groupIDEqualFold: ID + groupIDContainsFold: ID + """ + status field predicates + """ + status: DatabaseDatabaseStatus + statusNEQ: DatabaseDatabaseStatus + statusIn: [DatabaseDatabaseStatus!] + statusNotIn: [DatabaseDatabaseStatus!] + """ + provider field predicates + """ + provider: DatabaseDatabaseProvider + providerNEQ: DatabaseDatabaseProvider + providerIn: [DatabaseDatabaseProvider!] + providerNotIn: [DatabaseDatabaseProvider!] + """ + group edge predicates + """ + hasGroup: Boolean + hasGroupWith: [GroupWhereInput!] +} +type Group implements Node { + id: ID! + createdAt: Time + updatedAt: Time + createdBy: String + updatedBy: String + deletedAt: Time + deletedBy: String + """ + the name of the group in turso + """ + name: String! + """ + the description of the group + """ + description: String + """ + the primary of the group + """ + primaryLocation: String! + """ + the replica locations of the group + """ + locations: [String!] + """ + region the group + """ + region: GroupRegion! + databases: [Database!] +} +""" +A connection to a list of items. +""" +type GroupConnection { + """ + A list of edges. + """ + edges: [GroupEdge] + """ + Information to aid in pagination. + """ + pageInfo: PageInfo! + """ + Identifies the total count of items in the connection. + """ + totalCount: Int! 
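+  # Illustrative only — paging this connection with the relay arguments
+  # defined on Query.groups below; feed pageInfo.endCursor back in as
+  # `after` to fetch the next page (the page size of 10 is arbitrary):
+  #
+  #   query { groups(first: 10) {
+  #     edges { node { id name } cursor }
+  #     pageInfo { hasNextPage endCursor }
+  #     totalCount
+  #   } }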
+} +""" +Return response for createGroup mutation +""" +type GroupCreatePayload { + """ + Created group + """ + group: Group! +} +""" +Return response for deleteGroup mutation +""" +type GroupDeletePayload { + """ + Deleted group ID + """ + deletedID: ID! +} +""" +An edge in a connection. +""" +type GroupEdge { + """ + The item at the end of the edge. + """ + node: Group + """ + A cursor for use in pagination. + """ + cursor: Cursor! +} +""" +GroupRegion is enum for the field region +""" +enum GroupRegion @goModel(model: "github.com/datumforge/geodetic/pkg/enums.Region") { + AMER + EMEA + APAC +} +""" +Return response for updateGroup mutation +""" +type GroupUpdatePayload { + """ + Updated group + """ + group: Group! +} +""" +GroupWhereInput is used for filtering Group objects. +Input was generated by ent. +""" +input GroupWhereInput { + not: GroupWhereInput + and: [GroupWhereInput!] + or: [GroupWhereInput!] + """ + id field predicates + """ + id: ID + idNEQ: ID + idIn: [ID!] + idNotIn: [ID!] + idGT: ID + idGTE: ID + idLT: ID + idLTE: ID + idEqualFold: ID + idContainsFold: ID + """ + created_at field predicates + """ + createdAt: Time + createdAtNEQ: Time + createdAtIn: [Time!] + createdAtNotIn: [Time!] + createdAtGT: Time + createdAtGTE: Time + createdAtLT: Time + createdAtLTE: Time + createdAtIsNil: Boolean + createdAtNotNil: Boolean + """ + updated_at field predicates + """ + updatedAt: Time + updatedAtNEQ: Time + updatedAtIn: [Time!] + updatedAtNotIn: [Time!] + updatedAtGT: Time + updatedAtGTE: Time + updatedAtLT: Time + updatedAtLTE: Time + updatedAtIsNil: Boolean + updatedAtNotNil: Boolean + """ + created_by field predicates + """ + createdBy: String + createdByNEQ: String + createdByIn: [String!] + createdByNotIn: [String!] + createdByGT: String + createdByGTE: String + createdByLT: String + createdByLTE: String + createdByContains: String + createdByHasPrefix: String + createdByHasSuffix: String + createdByIsNil: Boolean + createdByNotNil: Boolean + createdByEqualFold: String + createdByContainsFold: String + """ + updated_by field predicates + """ + updatedBy: String + updatedByNEQ: String + updatedByIn: [String!] + updatedByNotIn: [String!] + updatedByGT: String + updatedByGTE: String + updatedByLT: String + updatedByLTE: String + updatedByContains: String + updatedByHasPrefix: String + updatedByHasSuffix: String + updatedByIsNil: Boolean + updatedByNotNil: Boolean + updatedByEqualFold: String + updatedByContainsFold: String + """ + deleted_at field predicates + """ + deletedAt: Time + deletedAtNEQ: Time + deletedAtIn: [Time!] + deletedAtNotIn: [Time!] + deletedAtGT: Time + deletedAtGTE: Time + deletedAtLT: Time + deletedAtLTE: Time + deletedAtIsNil: Boolean + deletedAtNotNil: Boolean + """ + deleted_by field predicates + """ + deletedBy: String + deletedByNEQ: String + deletedByIn: [String!] + deletedByNotIn: [String!] + deletedByGT: String + deletedByGTE: String + deletedByLT: String + deletedByLTE: String + deletedByContains: String + deletedByHasPrefix: String + deletedByHasSuffix: String + deletedByIsNil: Boolean + deletedByNotNil: Boolean + deletedByEqualFold: String + deletedByContainsFold: String + """ + name field predicates + """ + name: String + nameNEQ: String + nameIn: [String!] + nameNotIn: [String!] 
+ nameGT: String + nameGTE: String + nameLT: String + nameLTE: String + nameContains: String + nameHasPrefix: String + nameHasSuffix: String + nameEqualFold: String + nameContainsFold: String + """ + description field predicates + """ + description: String + descriptionNEQ: String + descriptionIn: [String!] + descriptionNotIn: [String!] + descriptionGT: String + descriptionGTE: String + descriptionLT: String + descriptionLTE: String + descriptionContains: String + descriptionHasPrefix: String + descriptionHasSuffix: String + descriptionIsNil: Boolean + descriptionNotNil: Boolean + descriptionEqualFold: String + descriptionContainsFold: String + """ + primary_location field predicates + """ + primaryLocation: String + primaryLocationNEQ: String + primaryLocationIn: [String!] + primaryLocationNotIn: [String!] + primaryLocationGT: String + primaryLocationGTE: String + primaryLocationLT: String + primaryLocationLTE: String + primaryLocationContains: String + primaryLocationHasPrefix: String + primaryLocationHasSuffix: String + primaryLocationEqualFold: String + primaryLocationContainsFold: String + """ + region field predicates + """ + region: GroupRegion + regionNEQ: GroupRegion + regionIn: [GroupRegion!] + regionNotIn: [GroupRegion!] + """ + databases edge predicates + """ + hasDatabases: Boolean + hasDatabasesWith: [DatabaseWhereInput!] +} +""" +A valid JSON string. +""" +scalar JSON +type Mutation { + """ + Create a new database + """ + createDatabase( + """ + values of the database + """ + input: CreateDatabaseInput! + ): DatabaseCreatePayload! + """ + Update an existing database + """ + updateDatabase( + """ + Name of the database + """ + name: String! + + """ + New values for the database + """ + input: UpdateDatabaseInput! + ): DatabaseUpdatePayload! + """ + Delete an existing database + """ + deleteDatabase( + """ + Name of the database + """ + name: String! + ): DatabaseDeletePayload! + """ + Create a new group + """ + createGroup( + """ + values of the group + """ + input: CreateGroupInput! + ): GroupCreatePayload! + """ + Update an existing group + """ + updateGroup( + """ + Name of the group + """ + name: String! + + """ + New values for the group + """ + input: UpdateGroupInput! + ): GroupUpdatePayload! + """ + Delete an existing group + """ + deleteGroup( + """ + Name of the group + """ + name: String! + ): GroupDeletePayload! +} +""" +An object with an ID. +Follows the [Relay Global Object Identification Specification](https://relay.dev/graphql/objectidentification.htm) +""" +interface Node @goModel(model: "github.com/datumforge/geodetic/internal/ent/generated.Noder") { + """ + The id of the object. + """ + id: ID! +} +""" +Possible directions in which to order a list of items when provided an `orderBy` argument. +""" +enum OrderDirection { + """ + Specifies an ascending order for a given `orderBy` argument. + """ + ASC + """ + Specifies a descending order for a given `orderBy` argument. + """ + DESC +} +""" +Information about pagination in a connection. +https://relay.dev/graphql/connections.htm#sec-undefined.PageInfo +""" +type PageInfo { + """ + When paginating forwards, are there more items? + """ + hasNextPage: Boolean! + """ + When paginating backwards, are there more items? + """ + hasPreviousPage: Boolean! + """ + When paginating backwards, the cursor to continue. + """ + startCursor: Cursor + """ + When paginating forwards, the cursor to continue. + """ + endCursor: Cursor +} +type Query { + """ + Fetches an object given its ID. + """ + node( + """ + ID of the object. 
+ """ + id: ID! + ): Node + """ + Lookup nodes by a list of IDs. + """ + nodes( + """ + The list of node IDs. + """ + ids: [ID!]! + ): [Node]! + databases( + """ + Returns the elements in the list that come after the specified cursor. + """ + after: Cursor + + """ + Returns the first _n_ elements from the list. + """ + first: Int + + """ + Returns the elements in the list that come before the specified cursor. + """ + before: Cursor + + """ + Returns the last _n_ elements from the list. + """ + last: Int + + """ + Filtering options for Databases returned from the connection. + """ + where: DatabaseWhereInput + ): DatabaseConnection! + groups( + """ + Returns the elements in the list that come after the specified cursor. + """ + after: Cursor + + """ + Returns the first _n_ elements from the list. + """ + first: Int + + """ + Returns the elements in the list that come before the specified cursor. + """ + before: Cursor + + """ + Returns the last _n_ elements from the list. + """ + last: Int + + """ + Filtering options for Groups returned from the connection. + """ + where: GroupWhereInput + ): GroupConnection! + """ + Look up database by ID + """ + database( + """ + Name of the database + """ + name: String! + ): Database! + """ + Look up group by ID + """ + group( + """ + Name of the group + """ + name: String! + ): Group! +} +""" +The builtin Time type +""" +scalar Time +""" +UpdateDatabaseInput is used for update Database object. +Input was generated by ent. +""" +input UpdateDatabaseInput { + updatedAt: Time + clearUpdatedAt: Boolean + updatedBy: String + clearUpdatedBy: Boolean + """ + the ID of the organization + """ + organizationID: String + """ + the name to the database + """ + name: String + """ + the geo location of the database + """ + geo: String + clearGeo: Boolean + """ + the DSN to the database + """ + dsn: String + """ + the auth token used to connect to the database + """ + token: String + clearToken: Boolean + """ + status of the database + """ + status: DatabaseDatabaseStatus + """ + provider of the database + """ + provider: DatabaseDatabaseProvider + groupID: ID +} +""" +UpdateGroupInput is used for update Group object. +Input was generated by ent. +""" +input UpdateGroupInput { + updatedAt: Time + clearUpdatedAt: Boolean + updatedBy: String + clearUpdatedBy: Boolean + """ + the name of the group in turso + """ + name: String + """ + the description of the group + """ + description: String + clearDescription: Boolean + """ + the primary of the group + """ + primaryLocation: String + """ + the replica locations of the group + """ + locations: [String!] + appendLocations: [String!] + clearLocations: Boolean + """ + the auth token used to connect to the group + """ + token: String + clearToken: Boolean + """ + region the group + """ + region: GroupRegion + addDatabaseIDs: [ID!] + removeDatabaseIDs: [ID!] + clearDatabases: Boolean +} diff --git a/schema/.gitkeep b/schema/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/schema/database.graphql b/schema/database.graphql new file mode 100644 index 0000000..c84a65d --- /dev/null +++ b/schema/database.graphql @@ -0,0 +1,75 @@ +extend type Query { + """ + Look up database by ID + """ + database( + """ + Name of the database + """ + name: String! + ): Database! +} + +extend type Mutation{ + """ + Create a new database + """ + createDatabase( + """ + values of the database + """ + input: CreateDatabaseInput! + ): DatabaseCreatePayload! 
+ """ + Update an existing database + """ + updateDatabase( + """ + Name of the database + """ + name: String! + """ + New values for the database + """ + input: UpdateDatabaseInput! + ): DatabaseUpdatePayload! + """ + Delete an existing database + """ + deleteDatabase( + """ + Name of the database + """ + name: String! + ): DatabaseDeletePayload! +} + +""" +Return response for createDatabase mutation +""" +type DatabaseCreatePayload { + """ + Created database + """ + database: Database! +} + +""" +Return response for updateDatabase mutation +""" +type DatabaseUpdatePayload { + """ + Updated database + """ + database: Database! +} + +""" +Return response for deleteDatabase mutation +""" +type DatabaseDeletePayload { + """ + Deleted database ID + """ + deletedID: ID! +} \ No newline at end of file diff --git a/schema/ent.graphql b/schema/ent.graphql new file mode 100644 index 0000000..e51083f --- /dev/null +++ b/schema/ent.graphql @@ -0,0 +1,833 @@ +directive @goField(forceResolver: Boolean, name: String) on FIELD_DEFINITION | INPUT_FIELD_DEFINITION +directive @goModel(model: String, models: [String!]) on OBJECT | INPUT_OBJECT | SCALAR | ENUM | INTERFACE | UNION +""" +CreateDatabaseInput is used for create Database object. +Input was generated by ent. +""" +input CreateDatabaseInput { + createdAt: Time + updatedAt: Time + createdBy: String + updatedBy: String + """ + the ID of the organization + """ + organizationID: String! + """ + the name to the database + """ + name: String! + """ + the geo location of the database + """ + geo: String + """ + the DSN to the database + """ + dsn: String! + """ + the auth token used to connect to the database + """ + token: String + """ + status of the database + """ + status: DatabaseDatabaseStatus + """ + provider of the database + """ + provider: DatabaseDatabaseProvider + groupID: ID! +} +""" +CreateGroupInput is used for create Group object. +Input was generated by ent. +""" +input CreateGroupInput { + createdAt: Time + updatedAt: Time + createdBy: String + updatedBy: String + """ + the name of the group in turso + """ + name: String! + """ + the description of the group + """ + description: String + """ + the primary of the group + """ + primaryLocation: String! + """ + the replica locations of the group + """ + locations: [String!] + """ + the auth token used to connect to the group + """ + token: String + """ + region the group + """ + region: GroupRegion + databaseIDs: [ID!] +} +""" +Define a Relay Cursor type: +https://relay.dev/graphql/connections.htm#sec-Cursor +""" +scalar Cursor +type Database implements Node { + id: ID! + createdAt: Time + updatedAt: Time + createdBy: String + updatedBy: String + deletedAt: Time + deletedBy: String + """ + the ID of the organization + """ + organizationID: String! + """ + the name to the database + """ + name: String! + """ + the geo location of the database + """ + geo: String + """ + the DSN to the database + """ + dsn: String! + """ + the ID of the group + """ + groupID: ID! + """ + status of the database + """ + status: DatabaseDatabaseStatus! + """ + provider of the database + """ + provider: DatabaseDatabaseProvider! + group: Group! +} +""" +A connection to a list of items. +""" +type DatabaseConnection { + """ + A list of edges. + """ + edges: [DatabaseEdge] + """ + Information to aid in pagination. + """ + pageInfo: PageInfo! + """ + Identifies the total count of items in the connection. + """ + totalCount: Int! 
+} +""" +DatabaseDatabaseProvider is enum for the field provider +""" +enum DatabaseDatabaseProvider @goModel(model: "github.com/datumforge/geodetic/pkg/enums.DatabaseProvider") { + LOCAL + TURSO +} +""" +DatabaseDatabaseStatus is enum for the field status +""" +enum DatabaseDatabaseStatus @goModel(model: "github.com/datumforge/geodetic/pkg/enums.DatabaseStatus") { + ACTIVE + CREATING + DELETING + DELETED +} +""" +An edge in a connection. +""" +type DatabaseEdge { + """ + The item at the end of the edge. + """ + node: Database + """ + A cursor for use in pagination. + """ + cursor: Cursor! +} +""" +DatabaseWhereInput is used for filtering Database objects. +Input was generated by ent. +""" +input DatabaseWhereInput { + not: DatabaseWhereInput + and: [DatabaseWhereInput!] + or: [DatabaseWhereInput!] + """ + id field predicates + """ + id: ID + idNEQ: ID + idIn: [ID!] + idNotIn: [ID!] + idGT: ID + idGTE: ID + idLT: ID + idLTE: ID + idEqualFold: ID + idContainsFold: ID + """ + created_at field predicates + """ + createdAt: Time + createdAtNEQ: Time + createdAtIn: [Time!] + createdAtNotIn: [Time!] + createdAtGT: Time + createdAtGTE: Time + createdAtLT: Time + createdAtLTE: Time + createdAtIsNil: Boolean + createdAtNotNil: Boolean + """ + updated_at field predicates + """ + updatedAt: Time + updatedAtNEQ: Time + updatedAtIn: [Time!] + updatedAtNotIn: [Time!] + updatedAtGT: Time + updatedAtGTE: Time + updatedAtLT: Time + updatedAtLTE: Time + updatedAtIsNil: Boolean + updatedAtNotNil: Boolean + """ + created_by field predicates + """ + createdBy: String + createdByNEQ: String + createdByIn: [String!] + createdByNotIn: [String!] + createdByGT: String + createdByGTE: String + createdByLT: String + createdByLTE: String + createdByContains: String + createdByHasPrefix: String + createdByHasSuffix: String + createdByIsNil: Boolean + createdByNotNil: Boolean + createdByEqualFold: String + createdByContainsFold: String + """ + updated_by field predicates + """ + updatedBy: String + updatedByNEQ: String + updatedByIn: [String!] + updatedByNotIn: [String!] + updatedByGT: String + updatedByGTE: String + updatedByLT: String + updatedByLTE: String + updatedByContains: String + updatedByHasPrefix: String + updatedByHasSuffix: String + updatedByIsNil: Boolean + updatedByNotNil: Boolean + updatedByEqualFold: String + updatedByContainsFold: String + """ + deleted_at field predicates + """ + deletedAt: Time + deletedAtNEQ: Time + deletedAtIn: [Time!] + deletedAtNotIn: [Time!] + deletedAtGT: Time + deletedAtGTE: Time + deletedAtLT: Time + deletedAtLTE: Time + deletedAtIsNil: Boolean + deletedAtNotNil: Boolean + """ + deleted_by field predicates + """ + deletedBy: String + deletedByNEQ: String + deletedByIn: [String!] + deletedByNotIn: [String!] + deletedByGT: String + deletedByGTE: String + deletedByLT: String + deletedByLTE: String + deletedByContains: String + deletedByHasPrefix: String + deletedByHasSuffix: String + deletedByIsNil: Boolean + deletedByNotNil: Boolean + deletedByEqualFold: String + deletedByContainsFold: String + """ + organization_id field predicates + """ + organizationID: String + organizationIDNEQ: String + organizationIDIn: [String!] + organizationIDNotIn: [String!] 
+ organizationIDGT: String + organizationIDGTE: String + organizationIDLT: String + organizationIDLTE: String + organizationIDContains: String + organizationIDHasPrefix: String + organizationIDHasSuffix: String + organizationIDEqualFold: String + organizationIDContainsFold: String + """ + name field predicates + """ + name: String + nameNEQ: String + nameIn: [String!] + nameNotIn: [String!] + nameGT: String + nameGTE: String + nameLT: String + nameLTE: String + nameContains: String + nameHasPrefix: String + nameHasSuffix: String + nameEqualFold: String + nameContainsFold: String + """ + geo field predicates + """ + geo: String + geoNEQ: String + geoIn: [String!] + geoNotIn: [String!] + geoGT: String + geoGTE: String + geoLT: String + geoLTE: String + geoContains: String + geoHasPrefix: String + geoHasSuffix: String + geoIsNil: Boolean + geoNotNil: Boolean + geoEqualFold: String + geoContainsFold: String + """ + dsn field predicates + """ + dsn: String + dsnNEQ: String + dsnIn: [String!] + dsnNotIn: [String!] + dsnGT: String + dsnGTE: String + dsnLT: String + dsnLTE: String + dsnContains: String + dsnHasPrefix: String + dsnHasSuffix: String + dsnEqualFold: String + dsnContainsFold: String + """ + group_id field predicates + """ + groupID: ID + groupIDNEQ: ID + groupIDIn: [ID!] + groupIDNotIn: [ID!] + groupIDGT: ID + groupIDGTE: ID + groupIDLT: ID + groupIDLTE: ID + groupIDContains: ID + groupIDHasPrefix: ID + groupIDHasSuffix: ID + groupIDEqualFold: ID + groupIDContainsFold: ID + """ + status field predicates + """ + status: DatabaseDatabaseStatus + statusNEQ: DatabaseDatabaseStatus + statusIn: [DatabaseDatabaseStatus!] + statusNotIn: [DatabaseDatabaseStatus!] + """ + provider field predicates + """ + provider: DatabaseDatabaseProvider + providerNEQ: DatabaseDatabaseProvider + providerIn: [DatabaseDatabaseProvider!] + providerNotIn: [DatabaseDatabaseProvider!] + """ + group edge predicates + """ + hasGroup: Boolean + hasGroupWith: [GroupWhereInput!] +} +type Group implements Node { + id: ID! + createdAt: Time + updatedAt: Time + createdBy: String + updatedBy: String + deletedAt: Time + deletedBy: String + """ + the name of the group in turso + """ + name: String! + """ + the description of the group + """ + description: String + """ + the primary of the group + """ + primaryLocation: String! + """ + the replica locations of the group + """ + locations: [String!] + """ + region the group + """ + region: GroupRegion! + databases: [Database!] +} +""" +A connection to a list of items. +""" +type GroupConnection { + """ + A list of edges. + """ + edges: [GroupEdge] + """ + Information to aid in pagination. + """ + pageInfo: PageInfo! + """ + Identifies the total count of items in the connection. + """ + totalCount: Int! +} +""" +An edge in a connection. +""" +type GroupEdge { + """ + The item at the end of the edge. + """ + node: Group + """ + A cursor for use in pagination. + """ + cursor: Cursor! +} +""" +GroupRegion is enum for the field region +""" +enum GroupRegion @goModel(model: "github.com/datumforge/geodetic/pkg/enums.Region") { + AMER + EMEA + APAC +} +""" +GroupWhereInput is used for filtering Group objects. +Input was generated by ent. +""" +input GroupWhereInput { + not: GroupWhereInput + and: [GroupWhereInput!] + or: [GroupWhereInput!] + """ + id field predicates + """ + id: ID + idNEQ: ID + idIn: [ID!] + idNotIn: [ID!] 
+ idGT: ID + idGTE: ID + idLT: ID + idLTE: ID + idEqualFold: ID + idContainsFold: ID + """ + created_at field predicates + """ + createdAt: Time + createdAtNEQ: Time + createdAtIn: [Time!] + createdAtNotIn: [Time!] + createdAtGT: Time + createdAtGTE: Time + createdAtLT: Time + createdAtLTE: Time + createdAtIsNil: Boolean + createdAtNotNil: Boolean + """ + updated_at field predicates + """ + updatedAt: Time + updatedAtNEQ: Time + updatedAtIn: [Time!] + updatedAtNotIn: [Time!] + updatedAtGT: Time + updatedAtGTE: Time + updatedAtLT: Time + updatedAtLTE: Time + updatedAtIsNil: Boolean + updatedAtNotNil: Boolean + """ + created_by field predicates + """ + createdBy: String + createdByNEQ: String + createdByIn: [String!] + createdByNotIn: [String!] + createdByGT: String + createdByGTE: String + createdByLT: String + createdByLTE: String + createdByContains: String + createdByHasPrefix: String + createdByHasSuffix: String + createdByIsNil: Boolean + createdByNotNil: Boolean + createdByEqualFold: String + createdByContainsFold: String + """ + updated_by field predicates + """ + updatedBy: String + updatedByNEQ: String + updatedByIn: [String!] + updatedByNotIn: [String!] + updatedByGT: String + updatedByGTE: String + updatedByLT: String + updatedByLTE: String + updatedByContains: String + updatedByHasPrefix: String + updatedByHasSuffix: String + updatedByIsNil: Boolean + updatedByNotNil: Boolean + updatedByEqualFold: String + updatedByContainsFold: String + """ + deleted_at field predicates + """ + deletedAt: Time + deletedAtNEQ: Time + deletedAtIn: [Time!] + deletedAtNotIn: [Time!] + deletedAtGT: Time + deletedAtGTE: Time + deletedAtLT: Time + deletedAtLTE: Time + deletedAtIsNil: Boolean + deletedAtNotNil: Boolean + """ + deleted_by field predicates + """ + deletedBy: String + deletedByNEQ: String + deletedByIn: [String!] + deletedByNotIn: [String!] + deletedByGT: String + deletedByGTE: String + deletedByLT: String + deletedByLTE: String + deletedByContains: String + deletedByHasPrefix: String + deletedByHasSuffix: String + deletedByIsNil: Boolean + deletedByNotNil: Boolean + deletedByEqualFold: String + deletedByContainsFold: String + """ + name field predicates + """ + name: String + nameNEQ: String + nameIn: [String!] + nameNotIn: [String!] + nameGT: String + nameGTE: String + nameLT: String + nameLTE: String + nameContains: String + nameHasPrefix: String + nameHasSuffix: String + nameEqualFold: String + nameContainsFold: String + """ + description field predicates + """ + description: String + descriptionNEQ: String + descriptionIn: [String!] + descriptionNotIn: [String!] + descriptionGT: String + descriptionGTE: String + descriptionLT: String + descriptionLTE: String + descriptionContains: String + descriptionHasPrefix: String + descriptionHasSuffix: String + descriptionIsNil: Boolean + descriptionNotNil: Boolean + descriptionEqualFold: String + descriptionContainsFold: String + """ + primary_location field predicates + """ + primaryLocation: String + primaryLocationNEQ: String + primaryLocationIn: [String!] + primaryLocationNotIn: [String!] + primaryLocationGT: String + primaryLocationGTE: String + primaryLocationLT: String + primaryLocationLTE: String + primaryLocationContains: String + primaryLocationHasPrefix: String + primaryLocationHasSuffix: String + primaryLocationEqualFold: String + primaryLocationContainsFold: String + """ + region field predicates + """ + region: GroupRegion + regionNEQ: GroupRegion + regionIn: [GroupRegion!] + regionNotIn: [GroupRegion!] 
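+  # Illustrative `where` variable combining these predicates (JSON):
+  #
+  #   { "and": [ { "nameContainsFold": "prod" },
+  #              { "regionIn": ["AMER", "EMEA"] } ],
+  #     "hasDatabases": true }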
+ """ + databases edge predicates + """ + hasDatabases: Boolean + hasDatabasesWith: [DatabaseWhereInput!] +} +""" +A valid JSON string. +""" +scalar JSON +""" +An object with an ID. +Follows the [Relay Global Object Identification Specification](https://relay.dev/graphql/objectidentification.htm) +""" +interface Node @goModel(model: "github.com/datumforge/geodetic/internal/ent/generated.Noder") { + """ + The id of the object. + """ + id: ID! +} +""" +Possible directions in which to order a list of items when provided an `orderBy` argument. +""" +enum OrderDirection { + """ + Specifies an ascending order for a given `orderBy` argument. + """ + ASC + """ + Specifies a descending order for a given `orderBy` argument. + """ + DESC +} +""" +Information about pagination in a connection. +https://relay.dev/graphql/connections.htm#sec-undefined.PageInfo +""" +type PageInfo { + """ + When paginating forwards, are there more items? + """ + hasNextPage: Boolean! + """ + When paginating backwards, are there more items? + """ + hasPreviousPage: Boolean! + """ + When paginating backwards, the cursor to continue. + """ + startCursor: Cursor + """ + When paginating forwards, the cursor to continue. + """ + endCursor: Cursor +} +type Query { + """ + Fetches an object given its ID. + """ + node( + """ + ID of the object. + """ + id: ID! + ): Node + """ + Lookup nodes by a list of IDs. + """ + nodes( + """ + The list of node IDs. + """ + ids: [ID!]! + ): [Node]! + databases( + """ + Returns the elements in the list that come after the specified cursor. + """ + after: Cursor + + """ + Returns the first _n_ elements from the list. + """ + first: Int + + """ + Returns the elements in the list that come before the specified cursor. + """ + before: Cursor + + """ + Returns the last _n_ elements from the list. + """ + last: Int + + """ + Filtering options for Databases returned from the connection. + """ + where: DatabaseWhereInput + ): DatabaseConnection! + groups( + """ + Returns the elements in the list that come after the specified cursor. + """ + after: Cursor + + """ + Returns the first _n_ elements from the list. + """ + first: Int + + """ + Returns the elements in the list that come before the specified cursor. + """ + before: Cursor + + """ + Returns the last _n_ elements from the list. + """ + last: Int + + """ + Filtering options for Groups returned from the connection. + """ + where: GroupWhereInput + ): GroupConnection! +} +""" +The builtin Time type +""" +scalar Time +""" +UpdateDatabaseInput is used for update Database object. +Input was generated by ent. +""" +input UpdateDatabaseInput { + updatedAt: Time + clearUpdatedAt: Boolean + updatedBy: String + clearUpdatedBy: Boolean + """ + the ID of the organization + """ + organizationID: String + """ + the name to the database + """ + name: String + """ + the geo location of the database + """ + geo: String + clearGeo: Boolean + """ + the DSN to the database + """ + dsn: String + """ + the auth token used to connect to the database + """ + token: String + clearToken: Boolean + """ + status of the database + """ + status: DatabaseDatabaseStatus + """ + provider of the database + """ + provider: DatabaseDatabaseProvider + groupID: ID +} +""" +UpdateGroupInput is used for update Group object. +Input was generated by ent. 
+""" +input UpdateGroupInput { + updatedAt: Time + clearUpdatedAt: Boolean + updatedBy: String + clearUpdatedBy: Boolean + """ + the name of the group in turso + """ + name: String + """ + the description of the group + """ + description: String + clearDescription: Boolean + """ + the primary of the group + """ + primaryLocation: String + """ + the replica locations of the group + """ + locations: [String!] + appendLocations: [String!] + clearLocations: Boolean + """ + the auth token used to connect to the group + """ + token: String + clearToken: Boolean + """ + region the group + """ + region: GroupRegion + addDatabaseIDs: [ID!] + removeDatabaseIDs: [ID!] + clearDatabases: Boolean +} diff --git a/schema/group.graphql b/schema/group.graphql new file mode 100644 index 0000000..cc5fb63 --- /dev/null +++ b/schema/group.graphql @@ -0,0 +1,75 @@ +extend type Query { + """ + Look up group by ID + """ + group( + """ + Name of the group + """ + name: String! + ): Group! +} + +extend type Mutation{ + """ + Create a new group + """ + createGroup( + """ + values of the group + """ + input: CreateGroupInput! + ): GroupCreatePayload! + """ + Update an existing group + """ + updateGroup( + """ + Name of the group + """ + name: String! + """ + New values for the group + """ + input: UpdateGroupInput! + ): GroupUpdatePayload! + """ + Delete an existing group + """ + deleteGroup( + """ + Name of the group + """ + name: String! + ): GroupDeletePayload! +} + +""" +Return response for createGroup mutation +""" +type GroupCreatePayload { + """ + Created group + """ + group: Group! +} + +""" +Return response for updateGroup mutation +""" +type GroupUpdatePayload { + """ + Updated group + """ + group: Group! +} + +""" +Return response for deleteGroup mutation +""" +type GroupDeletePayload { + """ + Deleted group ID + """ + deletedID: ID! +} \ No newline at end of file diff --git a/sonar-project.properties b/sonar-project.properties new file mode 100644 index 0000000..9ced7e4 --- /dev/null +++ b/sonar-project.properties @@ -0,0 +1,16 @@ +sonar.projectKey=datumforge_geodetic +sonar.organization=datumforge + +sonar.projectName=geodetic +sonar.projectVersion=1.0 + +sonar.sources=. + +sonar.exclusions=**/*_test.go,**/vendor/**,internal/ent/generated/**,internal/geodeticclient/**,query/**,schema/**,scripts/**,gen_schema.go,generate.go,tools.go,docker/**,db/**,db/backup/**,internal/graphapi/gen_server.go,internal/ent/entc.go +sonar.tests=. +sonar.test.inclusions=**/*_test.go +sonar.test.exclusions=**/vendor/** + +sonar.sourceEncoding=UTF-8 +sonar.go.coverage.reportPaths=coverage.out +sonar.externalIssuesReportPaths=results.txt \ No newline at end of file diff --git a/tools.go b/tools.go new file mode 100644 index 0000000..3da76e0 --- /dev/null +++ b/tools.go @@ -0,0 +1,10 @@ +//go:build tools +// +build tools + +package tools + +import ( + _ "github.com/99designs/gqlgen" + _ "github.com/99designs/gqlgen/graphql/introspection" + _ "github.com/Yamashou/gqlgenc" +)